Example #1
    def _thread(cls):
        with PiCamera() as camera:
            # camera setup
            camera.resolution = (640, 480)
            camera.framerate = 30
            camera.iso = 80
            camera.sensor_mode = 1
            camera.awb_gains = 2  # only takes effect when awb_mode is 'off'

            # let camera warm up
            camera.start_preview()
            time.sleep(2)

            stream = PiRGBArray(camera, size=(640, 480))
            for foo in camera.capture_continuous(stream, 'bgr',
                                                 use_video_port=True):
                # store the current frame as a NumPy array
                cls.frame = foo.array

                # reset stream for next frame
                stream.seek(0)
                stream.truncate()

                # if there hasn't been any clients asking for frames in
                # the last 10 seconds stop the thread
                if time.time() - cls.last_access > 10:
                    break
                elif CameraHandler.stop_camera:
                    break
        cls.thread = None
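The `_thread` classmethod above only makes sense inside an enclosing handler class. A minimal sketch of what that wrapper presumably looks like, following the familiar flask-video-streaming pattern (the `get_frame` name and the busy-wait are assumptions, not the author's code):

import threading
import time

class CameraHandler(object):
    thread = None        # background thread that runs _thread()
    frame = None         # latest frame, stored by _thread()
    last_access = 0      # time of the last client request
    stop_camera = False

    # ... _thread() as defined above ...

    @classmethod
    def get_frame(cls):
        cls.last_access = time.time()
        if cls.thread is None:
            cls.thread = threading.Thread(target=cls._thread)
            cls.thread.start()
            while cls.frame is None:  # wait for the first frame
                time.sleep(0.01)
        return cls.frame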
Example #2
def run_cam_example():

    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 60
    raw = PiRGBArray(camera, size=(640, 480))

    window = cv2.namedWindow("camera")

    time.sleep(1)

    for frame in camera.capture_continuous(raw,
                                           format="bgr",
                                           use_video_port=True):
        image = frame.array
        height, width = image.shape[:2]
        M = cv2.getRotationMatrix2D((width / 2, height / 2), 180, 1)
        image_flipped = cv2.warpAffine(image, M, (width, height))

        cv2.imshow("camera", image_flipped)
        raw.truncate(0)  # clear the stream for the next frame
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break

    camera.close()
    cv2.destroyAllWindows()
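As an aside, the getRotationMatrix2D/warpAffine pair above is a general-purpose rotation; for a fixed 180° turn, OpenCV's dedicated helpers are equivalent and cheaper. A drop-in sketch:

# either line replaces the warpAffine-based 180-degree rotation above
image_flipped = cv2.rotate(image, cv2.ROTATE_180)
# image_flipped = cv2.flip(image, -1)  # flip around both axes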
Example #3
    def run(self):
        try:
            camera = PiCamera()
            # Gotta be careful with this, too much data can cause a segfault
            # Reduce framerate, resolution or go to grayscale to prevent
            camera.resolution = (320, 240)
            camera.framerate = 16
            rawCapture = PiRGBArray(camera, size=(320, 240))

            time.sleep(0.1)
            for frame in camera.capture_continuous(rawCapture,
                                                   format="bgr",
                                                   use_video_port=True):
                image = frame.array
                rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                rgb_image = cv2.rotate(rgb_image, cv2.ROTATE_180)
                h, w, ch = rgb_image.shape
                bytes_per_line = ch * w
                qt_image = QImage(rgb_image.data, w, h, bytes_per_line,
                                  QImage.Format_RGB888)
                scaled_image = qt_image.scaled(640, 480, Qt.KeepAspectRatio)
                self.signals.change_pixmap.emit(scaled_image)
                rawCapture.truncate(0)
                rawCapture.seek(0)
        except Exception:
            print('exception')
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            self.signals.error.emit((exctype, value, traceback.format_exc()))
        finally:
            self.signals.finished.emit()
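Since `QImage.scaled()` returns another `QImage` rather than a `QPixmap`, the pixmap conversion still has to happen in the receiving slot, and Qt requires that to run on the GUI thread. A sketch of such a slot, assuming a PyQt5 `QLabel` named `video_label` (both names are assumptions):

from PyQt5.QtGui import QPixmap

def on_change_pixmap(self, image):
    # slot connected to signals.change_pixmap; runs on the GUI thread
    self.video_label.setPixmap(QPixmap.fromImage(image))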
Example #4
def run_on_video(Net, conf_thresh=0.5):
    resX = 320
    resY = 240

    camera = PiCamera()
    camera.resolution = (resX, resY)
    camera.framerate = 60

    # Use this as our output
    rawCapture = PiRGBArray(camera, size=(resX, resY))

    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        image = frame.array
        img_raw = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        img_raw = inference(Net,
                            img_raw,
                            target_shape=(260, 260),
                            conf_thresh=conf_thresh)
        cv2.imshow('image', img_raw[:, :, ::-1])

        # clear the stream for the next frame
        rawCapture.truncate(0)
        rawCapture.seek(0)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
Example #5
    def frames():
        with picamera.PiCamera() as camera:
            # camera setup
            camera.resolution = (width, height)
            camera.framerate = 25
            camera.hflip = True
            camera.vflip = True
            camera.brightness = 50

            # let camera warm up
            camera.start_preview()
            time.sleep(2)

            stream = PiRGBArray(camera, size=camera.resolution)
            for frame in camera.capture_continuous(stream,
                                                   format='bgr',
                                                   use_video_port=True):
                # yield the current frame as a NumPy array
                yield frame.array

                # reset stream for next frame
                stream.seek(0)
                stream.truncate()
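The generator above yields raw BGR arrays; a consumer that streams them over HTTP would normally JPEG-encode each one first. A minimal sketch, assuming `frames()` is exposed as a static method of a `Camera` class, as in the common base-camera pattern:

import cv2

def jpeg_frames():
    for bgr in Camera.frames():
        ok, jpg = cv2.imencode('.jpg', bgr)
        if ok:
            yield jpg.tobytes()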
Example #6
    def run(self):
        print "Launching Cam Thread"
        global camLookLeft
        global psiD

        warnings.filterwarnings('error')

        camera = picamera.PiCamera()
        photoHeight = 540
        image_size = (960 // 2, 544 // 2)  # (16*photoHeight/9, photoHeight)
        camera.resolution = image_size  # (960, 540)
        camera.framerate = 7
        camera.vflip = False
        camera.hflip = False
        # camera.exposure_mode='off'
        rawCapture = PiRGBArray(camera, size=image_size)
        # allow the camera to warmup
        time.sleep(0.1)
        edgeFinder = FindEdge(None, image_size[1] // 2)
        print('ready for loop')

        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):
            try:
                # grab the raw NumPy array representing the image, then initialize the timestamp
                # and occupied/unoccupied text
                image = frame.array
                rawCapture.truncate()
                rawCapture.seek(0)

                edgeFinder.set_look_left(camLookLeft)

                # show the frame
                # lines.project_on_road_debug(image)
                edgeFinder.set_new_image(image)
                edgeFinder.collect_des_edge()
                edge, pic = edgeFinder.draw_des_edge()
                psiD = edgeFinder.calc_phi_r()

                imgray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                retr, thresh = cv2.threshold(imgray, 127, 255, 0)
                contours, _ = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
                cv2.drawContours(image, contours, -1, (255, 0, 0))
                cv2.imshow("edge", edge)
                cv2.imshow("org", pic)

                key = cv2.waitKey(1) & 0xFF

                # clear the stream in preparation for the next frame

                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    break

            except Exception as e:
                print(e)
                releaseAllLocks()  # if there is an error, release all the locks
Example #7
def newRecognition(name=None):
    #Initializing camera and grab a reference to the raw camera capture
    #camera=PiCamera()
    global camera
    global interrupt
    interrupt = False
    #Adjusting some properties
    camera.resolution = (640, 480)
    camera.framerate = 30
    #This function creates a 3D RGB-array from a RGB capture
    rawCapture = PiRGBArray(camera)

    #Allow camera to initialize. Not really necessary but can't hurt.
    time.sleep(0.1)
    #Input an Id to be linked to.
    name, userID = nameMapping(name)
    #SampleNum to let program exit inf loop
    sampleNum = 0

    #Capture frames from camera
    for frame in camera.capture_continuous(rawCapture, format='bgr'):
        #Grab NumPy array from image
        img = frame.array

        #Show the unprocessed frame
        #cv2.imshow("Imag",img)
        key = cv2.waitKey(1) & 0xFF

        sampleNum = sampleNum + 1

        #Convert BGR to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        #Detect faces
        faces = detector.detectMultiScale(gray, 1.3, 5)

        #Draw a rectangle around every face
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            #Write image to folder containing images
            writePath = '/home/pi/Documents/Facial recog/DataSet/' + name + '/'
            #print(writePath)
            makeDir(writePath)
            cv2.imwrite(
                writePath + name + '.' + str(userID) + '.' + str(sampleNum) +
                '.jpg', gray[y:y + h, x:x + w])

        cv2.imshow('frame', img)
        #Clear the stream to get ready for next frame.
        rawCapture.truncate(0)
        rawCapture.seek(0)
        if sampleNum > 40:
            break

        if key == ord("q"):
            break
        #Give the imshow some time to refresh
        cv2.waitKey(1)
    cv2.destroyAllWindows()
Example #8
    def run(self):

        self.recognizer.read(self.train_path[0])

        time.sleep(1)
        start_time = int(time.time())

        with PiCamera(resolution=(1280, 720), framerate=90) as camera:
            print("[Initial] Camera is active...")
            print("[Initial] Please look at the camera")

            # camera.rotation = 180
            camera.brightness = 60
            camera.contrast = 5
            stream = PiRGBArray(camera)

            check_count = 0
            check_name = "unknown"

            for frame in camera.capture_continuous(stream,
                                                   format="bgr",
                                                   use_video_port=True):
                image = frame.array
                gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

                faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
                name = "unknown"

                for (x, y, w, h) in faces:
                    cv.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
                    id, confidence = self.recognizer.predict(gray[y:y + h,
                                                                  x:x + w])
                    if (confidence < 50):
                        name = self.user_all[id][1]
                    cv.putText(image, name, (x + 5, y + h + 5),
                               cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)

                # path_img = os.path.join(self.img_path, "img.jpg")
                cv.imwrite("img.jpg", image)

                delta_time = int(time.time()) - start_time
                if delta_time >= 5 * 60:
                    print("[Close] Timeout")
                    break

                if not name is "unknown":
                    if not check_name is name:
                        check_name = name
                        print("[Found] " + name)
                        check_count = 0
                    else:
                        check_count += 1
                        if check_count == 5:
                            print("[Verify] " + name)
                            break

                stream.truncate()
                stream.seek(0)
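The accept-after-N-consecutive-hits logic above is easy to factor out. A sketch of the same idea as a standalone helper (not the author's code):

def verified(predictions, required=5):
    """Return a name once it has been seen `required` times in a row."""
    last, streak = None, 0
    for name in predictions:
        if name == "unknown":
            continue
        streak = streak + 1 if name == last else 1
        last = name
        if streak >= required:
            return name
    return None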
Example #9
class Camera(Thread):
    """
        The camera is composed of:
            - 1 model world
            - 1 camera (PiCamera object)
            - 1 road follower object
    """
    __RESOLUTION = (640, 480)
    __FRAME_RATE = 32
    __VIDEO_CAPTURE_FORMAT = "bgr"

    def __init__(self, model):
        Thread.__init__(self)
        self.model = model
        self.camera = PiCamera()
        self.camera.resolution = Camera.__RESOLUTION
        self.camera.framerate = Camera.__FRAME_RATE
        self.rawCapture = PiRGBArray(self.camera, size=Camera.__RESOLUTION)
        self.roadFollower = RoadFollower()
        self.terminated = False

    def run(self):
        """
            Run function of the thread, executed when the thread is started.
        """

        while not self.terminated:
            self.camera.capture(self.rawCapture,
                                format=Camera.__VIDEO_CAPTURE_FORMAT)
            frame = self.rawCapture.array
            # cv2.imshow('frame', frame)
            self.roadFollower.update_frame(frame)
            self.roadFollower.filter()
            # self.roadFollower.display_masks()

            self.model.band_ycoord = self.roadFollower.compute_strip_position()[1]
            if self.model.band_ycoord > 0:
                self.model.sema_band_ycoord.release()

            #print(self.roadFollower.compute_strip_position())
            self.model.car.direction_motor.angle_camera = -self.roadFollower.compute_deviation()

            self.rawCapture.seek(0)
            self.rawCapture.truncate(0)

            # sleep briefly to hand control to another thread
            time.sleep(SLEEP_CAMERA_THREAD)

    def stop(self):
        """
            Stop the thread and let it exit.
        """
        self.terminated = True
        print("camera thread closed")
Example #10
class PiVideoStream:
	def __init__(self, resolution=(320, 240), framerate=32, **kwargs):
		# initialize the camera
		self.camera = PiCamera()

		# set camera parameters
		self.camera.resolution = resolution
		self.camera.framerate = framerate

		# set optional camera parameters (refer to PiCamera docs)
		for (arg, value) in kwargs.items():
			setattr(self.camera, arg, value)

		# initialize the stream
		self.rawCapture = PiRGBArray(self.camera, size=resolution)
		self.stream = self.camera.capture_continuous(self.rawCapture,
			format="bgr", use_video_port=True)

		# initialize the frame and the variable used to indicate
		# if the thread should be stopped
		self.frame = None
		self.stopped = False

	def start(self):
		# start the thread to read frames from the video stream
		t = Thread(target=self.update, args=())
		t.daemon = True
		t.start()
		return self

	def update(self):
		# keep looping infinitely until the thread is stopped
		for f in self.stream:
			# grab the frame from the stream and clear the stream in
			# preparation for the next frame
			self.rawCapture.truncate(0)
			self.rawCapture.seek(0)
			self.frame = f.array

			# if the thread indicator variable is set, stop the thread
			# and release camera resources
			if self.stopped:
				self.stream.close()
				self.rawCapture.close()
				self.camera.close()
				return

	def read(self):
		# return the frame most recently read
		return self.frame

	def stop(self):
		# indicate that the thread should be stopped
		self.stopped = True
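Typical usage of this threaded reader follows the start/read/stop pattern. A short sketch:

vs = PiVideoStream(resolution=(320, 240), framerate=32).start()
time.sleep(2.0)     # let the camera warm up and the first frame arrive

frame = vs.read()   # most recent frame, or None if none has arrived yet
# ... process frame ...

vs.stop()           # update() then closes the stream, buffer and camera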
Example #11
    def run(self):

        rawCapture = PiRGBArray(self.camera, size=(640, 480))
        faceCascade = cv2.CascadeClassifier(self.cascPath)

        # Get the raw capture from the camera frame-by-frame.
        for cap in self.camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):

            frame = cap.array

            # Convert the frame to grayscale.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # Detect the face using the cascade classifier.
            # The faces array contains coordinates for all
            # of the faces detected in the frame. In this
            # case, we will just use the first face detected.
            faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
            )

            if len(faces) > 0:
                (x, y, w, h) = faces[0]
                # print((x, y, w, h))

                # Display a bounding box around the
                # detected face.
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

                # If the face is going out of the left
                # side of the robot, turn the robot to
                # the left and vice versa.
                # Since the size of the frame is 640x480
                # the edges are specified as 40 pixels on
                # either sides.
                if (x < 40):
                    face_turn("left")
                elif (x+w > 600):
                    face_turn("right")

            # Display the resulting frame
            cv2.imshow('Video', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # Empty the raw capture after every frame is
            # processed.
            rawCapture.truncate()
            rawCapture.seek(0)

        cv2.destroyAllWindows()
Example #12
def stream():
    global name
    global verif

    with PiCamera(resolution=(1280, 720), framerate=90) as camera:
        print("[Initial] Camera is active...")
        print("[Initial] Please look at the camera and wait a minute...")

        # camera.rotation = 180
        camera.brightness = 60
        camera.contrast = 5
        stream = PiRGBArray(camera)

        check_count = 0
        check_name = "unknown"
        name = "unknown"
        verif = "False"

        for frame in camera.capture_continuous(stream,
                                               format="bgr",
                                               use_video_port=True):
            image = frame.array
            gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
                id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
                if (confidence < 50):
                    name = user_all[id][1]
                # print(confidence)
                cv.putText(image, name, (x + 5, y - 5),
                           cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)

            cv.imwrite("img.jpg", image)

            if not name is "unknown":
                if not check_name is name:
                    check_name = name
                    print("[Found] " + name)
                    check_count = 0
                else:
                    check_count += 1
                    if check_count == 10:
                        print("[Verify] " + name)
                        chk = util.reqRes(name)
                        # print(chk)
                        if chk != "Not found":
                            verif = "True"

            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' +
                   open('img.jpg', 'rb').read() + b'\r\n')

            stream.truncate()
            stream.seek(0)
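The `--frame` multipart chunks yielded above are exactly what an MJPEG HTTP response carries. A minimal sketch of a route that serves them, assuming Flask is the web framework in use:

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # stream() is the generator defined above
    return Response(stream(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')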
Example #13
    def create_datafile(self,file_path):
        """
        create the original captured frames,encoded in png
        :param file_path: the path of the data pkl file
        """
        f_path = Path(file_path)
        if not f_path.parent.exists():
            f_path.parent.mkdir(parents=True)
        sampler = TestSampler(sample_len=7)
        fp = open(file_path, 'wb')
        with picamera.PiCamera() as camera:
            rawFrame = PiRGBArray(camera, (640, 480))
            camera.resolution = (640, 480)
            camera.framerate = 15
            shape = (640, 480)
            time.sleep(2)
            self.start = time.time()
            cnt = 0
            start = time.time()
            prev_cnt = 0
            len_sum = 0
            for frame in camera.capture_continuous(rawFrame, 'rgb', use_video_port=True):
                frame = np.array(frame.array, dtype=np.uint8)
                #frame = cv2.resize(frame, dsize=(0, 0), fx=0.25, fy=0.25, interpolation=cv2.INTER_AREA)
                # frame=utils.imresize_np(frame,1/4,True)
                pickle.dump(utils.encode_img(frame), fp, -1)

                # sample_list, scaled = sampler.sample_and_filter(np.array(frame.array, dtype=np.uint8))
                # #self.logger.info('len of sample list is {}'.format(len(sample_list)))
                # len_sum=len_sum+len(sample_list)
                # obj={'img':utils.encode_img(scaled),'shape':shape,'sample':False}
                # pickle.dump(obj,fp,-1)
                #
                # for sample in sample_list:
                #     pickle.dump(sample,fp,-1)
                #     # self.logger.info('send sample')

                cnt = cnt + 1
                self.count = self.count + 1
                if cnt % 10 == 0:
                    self.logger.info('total sample num is: {}'.format(len_sum))
                    self.logger.info("sent {} frames in {} sec, fps: {}".format(
                        cnt, time.time() - start, cnt / (time.time() - start)))
                    start = time.time()
                    cnt = 0
                    len_sum = 0
                if self.count > 200:
                    break
                #self.logger.info(cnt)
                rawFrame.seek(0)
                rawFrame.truncate()
                if self.terminate:
                    self.logger.info('terminate')
                    break
        fp.close()
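Reading the file back is the mirror operation: repeated `pickle.load()` calls until `EOFError`. A sketch, assuming a hypothetical `utils.decode_img()` that inverts the `utils.encode_img()` used above:

import pickle

def read_datafile(file_path):
    frames = []
    with open(file_path, 'rb') as fp:
        while True:
            try:
                obj = pickle.load(fp)
            except EOFError:
                break
            frames.append(utils.decode_img(obj))  # hypothetical decoder
    return frames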
Example #14
 def __init__(self, sm):
     global listener
     listener = sm.listener
     global camera
     global rawCapture
     camera = PiCamera()
     camera.resolution = (320, 240)
     camera.framerate = 60
     camera.brightness = 70
     rawCapture = PiRGBArray(camera, size=(320, 240))
     rawCapture.seek(0)
     threading.Thread(target=start).start()
Example #15
def createDataset():
    global name
    global verif

    USER = name
    check_user = util.haveUser(USER)
    if check_user[0] is "0":
        USER_PATH = str(util.numUser()) + "." + USER
        PATH = os.path.join(util.IMGROOTPath(),USER_PATH)
        os.mkdir(PATH)
        print("[Initial] Create " + USER + " dataset")
    else:
        print("[Initial] Found " + check_user[1] + " dataset")
        print("[Initial] Recreate " + check_user[1] + " dataset")

    check_user = util.haveUser(USER)
    with PiCamera(resolution=(1280, 720), framerate=40) as camera:
        print("[Initial] Camera is active...")
        print("[Initial] Please look at the camera and wait a minute...")

        # camera.rotation = 180
        camera.brightness = 55
        camera.contrast = 5
        stream = PiRGBArray(camera)

        count = 0
        verif = "False"

        for frame in camera.capture_continuous(stream, format="bgr", use_video_port=True):
            if count >= 100:
                print("[Successful] create 100 images")
                verif = "True"
                break

            image = frame.array
            gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                count += 1
                cv.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
                filename = check_user[0] + '.' + check_user[1] + "." + str(count) + ".jpg"
                cv.imwrite(os.path.join(check_user[2], filename), image[y:y+h, x:x+w])
                print("[Save] " + filename)

            cv.imwrite("img.jpg", image)

            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + open('img.jpg', 'rb').read() + b'\r\n')

            stream.truncate()
            stream.seek(0)
Example #16
def main():
    args = get_arguments()

    range_filter = args['filter'].upper()

    if args['image']:
        image = cv2.imread(args['image'])

        if range_filter == 'RGB':
            frame_to_thresh = image.copy()
        else:
            frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    else:
        # initialize the camera and grab a reference to the raw camera capture
        # (the capture loop below assumes this camera branch was taken)
        camera = PiCamera()
        rawCapture = PiRGBArray(camera)

    setup_trackbars(range_filter)

    for piFrame in camera.capture_continuous(rawCapture,
                                             format="bgr",
                                             use_video_port=True):
        if args['webcam']:
            rawCapture.truncate(0)
            rawCapture.seek(0)
            image = piFrame.array
            image = imutils.resize(image, width=400)

            if range_filter == 'RGB':
                frame_to_thresh = image.copy()
            else:
                frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        v1_min, v2_min, v3_min, v1_max, v2_max, v3_max = get_trackbar_values(
            range_filter)

        thresh = cv2.inRange(frame_to_thresh, (v1_min, v2_min, v3_min),
                             (v1_max, v2_max, v3_max))

        if args['preview']:
            preview = cv2.bitwise_and(image, image, mask=thresh)
            cv2.imshow("Preview", preview)
        else:
            #cv2.imshow("Original", image)
            cv2.imshow("Thresh", thresh)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #17
class PiVideoStream:
    def __init__(self, resolution=(480, 320), framerate=5):
        # initialize the camera and stream
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.vflip = True
        self.camera.hflip = True
        self.camera.framerate = framerate
        #self.camera.video_stabilization = True
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
                                                     format="bgr",
                                                     use_video_port=True)

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # keep looping until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.seek(0)
            self.rawCapture.truncate(0)

            #time.sleep(1/16)
            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
Example #18
 def capture(self):
     # capture frames from camera
     #for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
     # grab the raw NumPy array representing the image, then initialize the timestamp
     # and occupied/unoccupied text
     rawCapture = PiRGBArray(self.camera, size=(320, 240))
     #self.camera.capture(rawCapture, 'bgr')
     for foo in self.camera.capture_continuous(
             rawCapture, format="bgr"):  # grab a single BGR frame
         break
     #cv2.imshow('test1', rawCapture.array)
     image = rawCapture.array
     rawCapture.seek(0)
     rawCapture.truncate()
     return image
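The loop-and-break above is a roundabout way to grab a single frame; picamera's plain `capture()` does the same thing in one call. A drop-in sketch of the equivalent method:

def capture(self):
    rawCapture = PiRGBArray(self.camera, size=(320, 240))
    self.camera.capture(rawCapture, format="bgr")  # one still, BGR order
    image = rawCapture.array
    rawCapture.truncate(0)
    return image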
Example #19
    def create_PSNR_testfile(self,file_path):
        """
        这部分或许可以直接在服务端验证
        :param file_path:
        :return:
        """
        f_path = Path(file_path)
        if not f_path.parent.exists():
            f_path.parent.mkdir(parents=True)
        sampler = TestSampler()
        fp = open(file_path, 'wb')
        with picamera.PiCamera() as camera:
            rawFrame = PiRGBArray(camera, (640, 480))
            camera.resolution = (640, 480)
            camera.framerate = 20
            shape=(640,480)
            time.sleep(2)
            self.start = time.time()
            cnt=0
            start=time.time()
            for frame in camera.capture_continuous(rawFrame, 'rgb', use_video_port=True):
                origin = np.array(frame.array, dtype=np.uint8)
                sample_list, scaled = sampler.sample_and_filter(origin)

                h_n = int(80 * np.ceil(origin.shape[0] / 80))
                w_n = int(80 * np.ceil(origin.shape[1] / 80))
                normed = np.zeros((h_n, w_n, 3), dtype=np.float32)
                normed[0:origin.shape[0], 0:origin.shape[1], :] = origin / 255.

                obj = {'origin': utils.encode_img(normed), 'scaled': utils.encode_img(scaled),'shape':shape}
                pickle.dump(obj, fp, -1)
                for sample in sample_list:
                    pickle.dump(sample,fp,-1)
                cnt = cnt + 1
                if cnt % 10 == 0:
                    self.logger.info("sent {} frames in {} sec, fps: {}".format(
                        cnt, time.time() - start, cnt / (time.time() - start)))
                if cnt>200:
                    break
                self.logger.info(cnt)
                self.count = self.count + 1
                rawFrame.seek(0)
                rawFrame.truncate()
                if self.terminate:
                    self.logger.info('terminate')
                    break
        fp.close()
Example #20
class Camera(Thread):
    last_image = None

    def __init__(self, width=480, height=360):
        global instance
        super(Camera, self).__init__()
        instance = self
        self.exit_flag = False
        self.width = width
        self.height = height
        self.camera = PiCamera()
        # self.camera.resolution = (1024, 768)
        # self.camera.framerate = 30
        # self.camera.iso = 400
        # self.camera.exposure_mode = 'auto'
        # self.camera.awb_mode = 'tungsten'
        # self.camera.meter_mode = 'average'
        # self.camera.exposure_compensation = 3
        time.sleep(1)
        self.stream = PiRGBArray(self.camera, size=(self.width, self.height))

    @staticmethod
    def get_instance():
        global instance
        return instance

    def run(self):
        logging.info('Starting %s thread' % self.__class__)
        while not self.exit_flag:
            self.capture()
        logging.info('Closing %s thread' % self.__class__)

    def capture(self):
        self.stream.seek(0)
        self.camera.capture(self.stream, format='bgr', use_video_port=True, resize=(self.width, self.height))
        self.last_image = self.stream.array

    def cleanup(self):
        logging.info('Cleaning up')
        self.exit_flag = True
        time.sleep(1)
        self.camera.close()
        self.stream.close()
Example #21
    def run(self):
        print "Launching Ribbon Tracking Thread"
        global psiD
        global speed

        camera = picamera.PiCamera()
        photoHeight = 540
        image_size = (960 // 2, 544 // 2)  # (16*photoHeight/9, photoHeight)
        camera.resolution = image_size  # (960, 540)
        camera.framerate = 7
        camera.vflip = False
        camera.hflip = False
        # camera.exposure_mode='off'
        rawCapture = PiRGBArray(camera, size=image_size)
        # allow the camera to warmup
        time.sleep(0.1)
        ribbonFinder = findRibbon()

        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):
            try:
                # grab the raw NumPy array representing the image, then initialize the timestamp
                # and occupied/unoccupied text
                image = frame.array
                ribbonFinder.setImage(image)
                cv2.imshow("RibbonFinder", ribbonFinder.findRib())
                key = cv2.waitKey(1) & 0xFF

                # clear the stream in preparation for the next frame
                rawCapture.truncate()
                rawCapture.seek(0)

                psiDLock.acquire()
                psiD = ribbonFinder.calcPsiOffset()
                psiDLock.release()

                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    break
            except Exception:
                releaseAllLocks()  # if there is an error, release all the locks
Example #22
    def _thread(cls):
        with picamera.PiCamera() as camera:
            # camera setup
            conf = json.load(open("./templates/conf.json"))
            camera.resolution = tuple(conf["resolution_raw"])
            camera.framerate = conf["stream_fps"]
            camera.hflip = False
            camera.vflip = False
            stream = PiRGBArray(camera)

            # let camera warm up
            #camera.start_preview()
            time.sleep(conf["camera_warmup_time"])

            for foo in camera.capture_continuous(stream, format="bgr",
                                                 use_video_port=True):
                cls.npframe = foo.array

                # stackoverflow.com/question/31077366/
                cimage = io.BytesIO()
                img = Image.fromarray(
                    np.uint8(cv2.cvtColor(cls.npframe, cv2.COLOR_BGR2RGB)))
                #img = img.resize((320,280), Image.ANTIALIAS)
                img = img.resize(tuple(conf["resolution_strm"]), Image.ANTIALIAS)
                img.save(cimage, format='JPEG')

                # store frame
                cimage.seek(0)
                cls.frame = cimage.read()

                # reset stream for next frame
                stream.seek(0)
                stream.truncate()

                cimage.seek(0)
                cimage.truncate()

                # if no client has asked for frames recently, stop the thread
                if time.time() - cls.last_access > 100000:
                    LED_LR().Cleanup()
                    break

        cls.thread = None
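The PIL round trip above (BGR array → RGB Image → JPEG) can be collapsed into OpenCV calls, since `cv2.imencode` takes BGR input directly. A sketch of the equivalent, assuming the same conf keys:

# resize and JPEG-encode without leaving OpenCV
small = cv2.resize(cls.npframe, tuple(conf["resolution_strm"]))
ok, jpg = cv2.imencode('.jpg', small)
if ok:
    cls.frame = jpg.tobytes()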
Example #23
class Camera(threading.Thread):
    ''' Raspberry camera module python implementation '''

    __slots__ = '_camera', '_stream', '_frames', '_lanes', '_lane_detector', '_lock'

    def __init__(self, resolution=(640, 480), framerate=30, nb_frames=5):
        super(Camera, self).__init__()
        self._lock = threading.Lock()
        self._camera = PiCamera(framerate=framerate, resolution=resolution)
        self._stream = PiRGBArray(self._camera)
        self._lane_detector = LaneDetector()
        self._frames = deque(maxlen=nb_frames)
        self._lanes = deque(maxlen=nb_frames)
        self.start()

    def run(self):
        for frame in self._camera.capture_continuous(self._stream,
                                                     format='bgr',
                                                     use_video_port=True):
            # New frame in numpy format
            img = frame.array
            # Detect lanes in each frames
            # lanes = self._lane_detector.process(img)
            # Truncate and reset read position for next frames
            self._stream.truncate()
            self._stream.seek(0)

            # Store frames with thread lock
            with self._lock:
                self._frames.append(img)
                # self._lanes.append(lanes)

    def lanes(self):
        with self._lock:
            return list(self._lanes)

    def last_frame(self):
        with self._lock:
            if not self._frames:
                return None
            return self._frames[-1]
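Because the constructor calls `start()` itself, the capture loop is live as soon as the object exists. A short usage sketch:

cam = Camera(resolution=(640, 480), framerate=30, nb_frames=5)
time.sleep(1)             # give the thread a moment to fill the deque
frame = cam.last_frame()  # newest frame, or None if nothing captured yet
if frame is not None:
    print(frame.shape)    # (480, 640, 3)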
Example #24
class Camera(PiCamera):
    def __init__(self,
                 sonar=None,
                 clientAddr=None,
                 clientPort=5555,
                 resolution=(640, 480)):
        PiCamera.__init__(self)
        self.resolution = resolution
        self.mode = None
        self.sonar = sonar

        self.rawCapture = PiRGBArray(self)
        self.rawCapture.truncate(0)
        self.rawCapture.seek(0)

        # Start stream
        #self.stream()

    def stream(self, addr='192.168.0.169'):
        '''Send live stream'''

        # Now connect the camera socket
        context = zmq.Context()
        footageSocket = context.socket(zmq.PUB)
        footageSocket.connect('tcp://{}:5555'.format(addr))

        self.rawCapture.truncate(0)
        self.rawCapture.seek(0)

        font = cv2.FONT_HERSHEY_SIMPLEX  # used by putText below
        for frame in self.capture_continuous(output=self.rawCapture,
                                             format="bgr",
                                             use_video_port=True):

            image = frame.array

            # draw cross on image
            cv2.line(image, (300, 240), (340, 240), (128, 255, 128), 1)
            cv2.line(image, (320, 220), (320, 260), (128, 255, 128), 1)

            if self.mode == 'opencv':
                pass  #TODO: implement OpenCV modes

            try:
                if self.sonar.distance < 8:
                    cv2.putText(image,
                                '%s m' % str(round(self.sonar.distance, 2)),
                                (40, 40), font, 0.5, (255, 255, 255), 1,
                                cv2.LINE_AA)
            except Exception:
                pass  # sonar may be None or out of range

            # send image to client
            encoded, buffer = cv2.imencode('.jpg', image)
            jpg_as_text = base64.b64encode(buffer)
            footageSocket.send(jpg_as_text)

            self.rawCapture.truncate(0)
            self.rawCapture.seek(0)
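For reference, the matching receiver for this PUB socket binds, subscribes to everything, and reverses the base64/JPEG encoding. A minimal sketch under those assumptions:

import base64

import cv2
import numpy as np
import zmq

context = zmq.Context()
sock = context.socket(zmq.SUB)
sock.bind('tcp://*:5555')
sock.setsockopt_string(zmq.SUBSCRIBE, '')

while True:
    jpg = base64.b64decode(sock.recv())
    frame = cv2.imdecode(np.frombuffer(jpg, np.uint8), cv2.IMREAD_COLOR)
    cv2.imshow('stream', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break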
Example #25
 def auto_frames():
     with picamera.PiCamera() as camera:
         rawCapture = PiRGBArray(camera, size=(640, 480))
         # let camera warm up
         camera.resolution = (640, 480)
         camera.hflip = True
         camera.vflip = True
         time.sleep(2)
         for frame in camera.capture_continuous(rawCapture, 'bgr',
                                                use_video_port=True):
             # return current frame
             #img = cv2.cvtColor(frame.array, cv2.COLOR_BGR2GRAY)
             img, cmd = process_image(frame.array)
               
             status, buf = cv2.imencode('.jpeg', img)
             yield (buf.tobytes(), cmd)
             
             
             rawCapture.truncate()
             rawCapture.seek(0)
Example #26
def use_picamera():
    from picamera.array import PiRGBArray
    from picamera import PiCamera
    import time
    # Init the camera and grab a ref to the raw camera capture
    print("Starting Pi Camera...........")
    camera = PiCamera()
    #camera = cv2.VideoCapture(0)
    camera.resolution = (640, 480)
    #camera.framerate = 32
    rawCapture = PiRGBArray(camera, (640, 480))

    # Allow Camera to Warm up
    time.sleep(0.1)

    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        image = frame.array
        display_face(image)
        rawCapture.truncate(0)
        rawCapture.seek(0)
Example #27
def main():
    lines = Lines()
    lines.look_ahead = 10
    lines.remove_pixels = 100
    lines.enlarge = 2.25

    image_size = (320, 192)
    camera = picamera.PiCamera()
    camera.resolution = image_size
    camera.framerate = 7
    camera.vflip = False
    camera.hflip = False
    #camera.exposure_mode='off'
    rawCapture = PiRGBArray(camera, size=image_size)

    # allow the camera to warmup
    time.sleep(0.1)

    # capture frames from the camera
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        # grab the raw NumPy array representing the image, then initialize the timestamp
        # and occupied/unoccupied text
        image = frame.array

        # show the frame
        #lines.project_on_road_debug(image)
        cv2.imshow("Rpi lane detection", lines.project_on_road_debug(image))
        key = cv2.waitKey(1) & 0xFF

        # clear the stream in preparation for the next frame
        rawCapture.truncate()
        rawCapture.seek(0)

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
Example #28
class PiCameraVideoStream:
    def __init__(self):
        self.camera = PiCamera()
        self.camera.vflip = True
        self.camera.framerate = 32
        self.camera.resolution = (640, 400)
        self.rawCapture = PiRGBArray(self.camera, size=(640, 400))
        time.sleep(0.1)

    def getFrame(self):
        for frame in self.camera.capture_continuous(self.rawCapture,
                                                    format="bgr",
                                                    use_video_port=True):
            return frame.array
        return None

    def reset(self):
        self.rawCapture.truncate()
        self.rawCapture.seek(0)
        return

    def close(self):
        return
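One quirk of this class: each `getFrame()` call builds a fresh `capture_continuous` generator and returns only its first frame, so `reset()` must be called between grabs to clear the buffer. A usage sketch:

stream = PiCameraVideoStream()
for _ in range(10):
    frame = stream.getFrame()  # grab one frame
    stream.reset()             # clear rawCapture before the next grab
    # ... process frame ...
stream.close()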
Example #29
    def get_frame(self):
        with PiCamera() as camera:

            camera.resolution = (640, 480)
            camera.framerate = 32
            rawCapture = PiRGBArray(camera, size=(640, 480))

            face_cascade = cv2.CascadeClassifier(
                "/home/pi/Ras/opencv_data/haarcascade_frontalface_alt.xml")
            time.sleep(0.1)

            t_start = time.time()
            fps = 0

            for frame in camera.capture_continuous(rawCapture,
                                                   format="bgr",
                                                   use_video_port=True):

                img = frame.array

                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(gray)
                for (x, y, w, h) in faces:
                    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255),
                                  1)

                fps = fps + 1
                sfps = fps / (time.time() - t_start)
                cv2.putText(img, "FPS:" + str(int(sfps)), (10, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)

                jpeg = cv2.imencode('.jpg', img)[1]

                # clear the stream before returning the frame
                rawCapture.seek(0)
                rawCapture.truncate(0)
                return jpeg.tobytes()
Example #30
 def start_capture_no_bolck(self,sock):
     self.srhandler=SRHandler(sock,self.sampler)
     self.srhandler.setDaemon(True)
     self.srhandler.start()  # start the receiving thread
     with picamera.PiCamera() as camera:
         rawFrame = PiRGBArray(camera, (640, 480))
         camera.resolution = (640, 480)
         camera.framerate = 15  # reaches roughly 17 fps capture; the earlier bottleneck was presumably network transmission
         shape=(640,480)
         time.sleep(2)
         self.start = time.time()
         cnt=0
         start=self.start
         for frame in camera.capture_continuous(rawFrame, 'rgb', use_video_port=True):
             frame=np.array(frame.array, dtype=np.uint8)
             sample_list, scaled = self.sampler.sample_and_filter(frame)
             obj={'img':utils.encode_img(scaled),'shape':shape,'sample':False}
             network.send_obj(sock,obj)
             for sample in sample_list:
                 network.send_obj(sock, sample)
             #TODO: consider whether further optimization strategies exist
             cnt = cnt + 1
             if cnt%10==0:
                 curr_time=time.time()
                 self.logger.info("sent {} frames in {} sec,fps:{}".format(cnt, curr_time - start,
                                                                              cnt / (curr_time - start)))
             if cnt>200:
                 self.logger.info('process end')
                 break
             #self.logger.info(cnt)
             self.count = self.count + 1
             rawFrame.seek(0)
             rawFrame.truncate()
             if self.terminate:
                 self.logger.info('terminate')
                 break
Example #31
	def getCorners(self,tol_arg):
		lower_IR = np.array([0,0,50]) # lower bound of CV filter
		upper_IR = np.array([180,100,255]) # upper bound of CV filter
		rawCapture = PiRGBArray(self.cam_handle, size=self.cam_res) # capture to array
		try:
			# init test point arrays (last one on each array is for the avg)
			top_left = [(0,0),(0,0),(0,0),(0,0),(0,0)]
			top_right = [(0,0),(0,0),(0,0),(0,0),(0,0)]
			bottom_left = [(0,0),(0,0),(0,0),(0,0),(0,0)]
			bottom_right = [(0,0),(0,0),(0,0),(0,0),(0,0)]
			app = wx.App() # start the app to get the screen resolution
			screenSize = wx.DisplaySize() # get the screen size
			names = [('Top Left',(0,0)),('Top Right',(screenSize[0]-25,0)),('Bottom Left',(0,screenSize[1]-25)),('Bottom Right',(screenSize[0]-25,screenSize[1]-25))] # list of names of the corners
			i = 0 # iterator to step through corner names
			wx.MessageBox('Please click and hold the pen button'
						'\nat each of the 4 red boxes until they disappear.'
						'\n(Top Left, Top Right, Bottom Left, Bottom Right)','Calibrate')
			for curr_corner in [top_left,top_right,bottom_left,bottom_right]:
				aCalibFrame = calibFrame(None,names[i][1],'Red') # make the starting frame
				print "Go to ", names[i][0], ':' # prompt user to move 
				time.sleep(2) # wait 2 seconds to avoid picking up on the next point too early
				failcount = 0 # init the failcount
				for frame in self.cam_handle.capture_continuous(rawCapture, format = 'bgr', use_video_port=True):
					maxLoc = (0,0) # init maxLoc
					image = frame.array # get the array values for that frame
					mask = cv2.inRange(image, lower_IR, upper_IR) # mask according to lower/upper filter values
					maxLoc = cv2.minMaxLoc(mask)[3] # find max location of image
					cv2.waitKey(1) & 0xFF # needed for preview
										
					if maxLoc != (0,0): # if not a default value
						print "Found: ", maxLoc #DEBUG INFO PRINTOUT
						if curr_corner[0] == (0,0): # if this corner's array has yet to be initialized
							curr_corner[0] = maxLoc # store the max location
						else: # if the first array value is already set
							match = True # set match flag to be true
							next = 0 # init the next index to be filled
							for x in range(0,4):
								if curr_corner[x] != (0,0): # if dealing with an initialized coordinate
									if (maxLoc[0] not in range(curr_corner[x][0] - tol_arg, curr_corner[x][0] + tol_arg)) \
											or (maxLoc[1] not in range(curr_corner[x][1] - tol_arg, curr_corner[x][1] + tol_arg)):
										match = False # set flag to false if a point outside of the tolerance is found
										failcount += 1 # increment the fail count
								else:
									next = x # store the index of the next open slot
									break # move to the next item in the list
							if next == 0: # if we're at the end of the list
								aCalibFrame.Destroy() # remove existing frame
								# get average of the points
								avg_x = 0 # init the average x
								avg_y = 0 # init the average y
								for y in range(0,4):
									avg_x += curr_corner[y][0] # sum the x values
									avg_y += curr_corner[y][1] # sum the y values
								curr_corner[4] = ((avg_x//4),(avg_y//4)) # integer-divide by 4 to get the average
								if i == 0:
									self.top_left_corner = ((avg_x//4),(avg_y//4)) # store the top left corner values
								elif i == 1:
									self.top_right_corner = ((avg_x//4),(avg_y//4)) # store the top right corner values
								elif i == 2:
									self.bottom_left_corner = ((avg_x//4),(avg_y//4)) # store the bottom left corner values
								elif i == 3:
									self.bottom_right_corner = ((avg_x//4),(avg_y//4)) # store the bottom right corner values
								i += 1 # increment the iterator
								rawCapture.seek(0) # return to the 0th byte
								rawCapture.truncate() # clear array for next frame
								break
							elif failcount >=20: # if at least 20 failed attempts
								wx.MessageBox('The calibration process was unsuccessful.\nAn accurate reading could not be determined.'
												'\nPlease try again.','Calibration FAILED', wx.ICON_ERROR)
								rawCapture.seek(0) # return to the 0th byte
								rawCapture.truncate() # clear array for next frame
								return(False) # return with fail case
							else:
								if match: # if a value was read in within the tolerance allowed
									curr_corner[next] = maxLoc # set the max location as the next entry to be averaged
					rawCapture.seek(0) # return to the 0th byte
					rawCapture.truncate() # clear array for next frame
			print "Corners:\nTop Left = ", self.top_left_corner, "\nTop Right = ",self.top_right_corner \
				, "\nBottom Left = ",self.bottom_left_corner, "\nBottom Right = ",self.bottom_right_corner # DEBUG INFO PRINTOUT
			self.calibrate() # calibrate the screen to account for skew
		except KeyboardInterrupt: # used to quit the function
			pass
		return(True) # return with success case
Example #32
class Vision(object):

    def __init__(self, resolution):
        print "Vision object started..."
        self._seqno = 0
        self._log = Logger.Logger("Vision")
        #self._contourFinder = ContourFinder.ContourFinder()
        #self._faceDetector = FaceDetector.FaceDetector()
        self._cam = PiCamera()
        self._cam.resolution = resolution
        self._cam.framerate = 10
        self._rawCapture = PiRGBArray(self._cam, size=resolution)
        self._center = (resolution[0] // 2, resolution[1] // 2)
        #self._laserfinder = LaserFinder.LaserFinder()

        #TODO: check that streamer is running


    def initialize(self):
        self._lastframetime = time.time()
        self._imagegenerator = self._cam.capture_continuous(self._rawCapture, format="bgr", use_video_port=True)



    def update(self):
        self._log.info("Update started")
         #TODO: make threaded in exception catcher
        # https://picamera.readthedocs.org/en/release-1.10/recipes2.html#rapid-capture-and-processing

        frame = next(self._imagegenerator)
        self._rawCapture.truncate()
        self._rawCapture.seek(0)
        self._frame = frame.array

        if roverconfig["Vision"]["WriteRawImageToFile"]:
            cv2.imwrite("/home/pi/LegoRover/Imgs/camseq" + str(self._seqno) + ".jpg", self._frame)
        #TODO: deliver found obstacles back to main-loop or sensor-module
        #self._contourFinder.update(self._frame)
        #self._faceDetector.update(frame)
        #self._laserfinder.update(frame)
        self._log.info("Update finnished")
        return self._frame

    def draw(self, frame):
        self._log.info("Draw started")
        framerate = 1 / (time.time() - self._lastframetime)
        print("Vision framerate: " + str(framerate))
        self._lastframetime = time.time()
        #frame = self._contourFinder.draw(frame)
       # frame = self._faceDetector.draw(frame)
        #frame = self._laserfinder.draw(frame)

        #draw cross for center of image
        cv2.line(frame,(self._center[0]-20,self._center[1]),(self._center[0]+20, self._center[1]),(255,255,255),2)
        cv2.line(frame,(self._center[0],self._center[1]-20),(self._center[0],self._center[1]+20),(255,255,255),2)
        #cv2.line(frame, self._laserfinder._point, self._center,(0,255,0),2)
        cv2.putText(frame,"Streamer: " + roverconfig["Streamer"]["StreamerImage"] + " Current framerate: " + str(round(framerate, 2)), (5,20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 2)
        #Draw to streamer lib to 'publish'
        cv2.imwrite(roverconfig["Streamer"]["StreamerImage"],frame)
        if roverconfig["Vision"]["WriteCvImageToFile"]:
            cv2.imwrite("/home/pi/LegoRover/Imgs/cvseq"+str(self._seqno)+".jpg",frame)
        self._seqno = self._seqno + 1  # Used globally but set here
        #TODO: set up a defined (max) framerate from config
        self._log.info("Draw finished")

    def __del__(self):
        print "Vision object deleted..."
        self._cam.close()
Example #33
            xDeg, yDeg = objectAngle(camcar, bx, by, camW, camH)
            camcar.setXServoRotation(xDeg)
            camcar.setYServoRotation(yDeg)
            # Move the motors to the x angle
            ls, rs = getMotorSpeed(xDeg, -40)
            camcar.runMotor(CamCar.MOTOR_LEFT, ls)
            camcar.runMotor(CamCar.MOTOR_RIGHT, rs)
            # Set the mood to normal
            camcar.moodNormal()

        # AGGRESSIVE
        elif mode == MODE_AGGRSV:
            # Get the angle of the cup and rotate the servos to these angles
            camW, camH = camSize
            xDeg, yDeg = objectAngle(camcar, cx, cy, camW, camH)
            camcar.setXServoRotation(xDeg)
            camcar.setYServoRotation(yDeg)
            # Drive away (add 180 to the angle)
            ls, rs = getMotorSpeed(xDeg + 180, -60)
            camcar.runMotor(CamCar.MOTOR_LEFT, ls)
            camcar.runMotor(CamCar.MOTOR_RIGHT, rs)
            # Set the mood to angry
            camcar.moodAngry()



        # Clear the buffer and increase the counter
        rawCapture.truncate()
        rawCapture.seek(0)
        counter += 1
Example #34
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > max_area:
            max_area = area
            best_contour = contour

    M = cv2.moments(best_contour)
    cx, cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])

    cv2.circle(blur, (cx, cy), 10, (0, 0, 255), -1)

    
    ###########

    #cv2.imshow("Frame", blur) #displays the image
    cv2.imshow("Frame - Threshold", thresh2)
    #cv2.imshow("frame", image) #shows contours
    cv2.startWindowThread() #has to be used in order to actually close the window
    key = cv2.waitKey(1) & 0xFF #has to be used in conjunction with imshow, to close window
    
    rawCapture.seek(0) #goes to the first image in the array
    rawCapture.truncate(0) #deletes previously seen image

    #closes the program when you hit q
    if key == ord("q"):
        cv2.destroyAllWindows()
        break

Example #35
 def do_GET(self):
     global cameraQuality
     try:
         self.path = re.sub('[^.a-zA-Z0-9]', "", str(self.path))
         if self.path == "" or self.path is None or self.path[:1] == ".":
             return
         if self.path.endswith(".html"):
             f = open(curdir + sep + self.path)
             self.send_response(200)
             self.send_header('Content-type',	'text/html')
             self.end_headers()
             self.wfile.write(f.read())
             f.close()
             return
         if self.path.endswith(".mjpeg"):                
             
             self.send_response(200)
             self.wfile.write("Content-Type: multipart/x-mixed-replace; boundary=--aaboundary")
             self.wfile.write("\r\n\r\n")
             self.end_headers()
             #stream=io.BytesIO()
             global camera
             rawCapture = PiRGBArray(camera, size=(640, 480))
             canvas = np.zeros((608, 1600, 3), dtype = "uint8")
             # capture frames from the camera
             try:
                 global cascade
                 nface = 0
                 for frame in camera.capture_continuous(rawCapture, format="bgr"): #, use_video_port=True):
                     # Reset the stream for the next capture
                     rawCapture.seek(0)
                     ##start=time.time()
                     # grab the raw NumPy array representing the image, then initialize the timestamp
                     # and occupied/unoccupied text
                     image = frame.array
                     ##print("Time to capture bgr+numpy = %.4f" % (time.time()-start))
                     #Convert to grayscale
                     gray = cv.cvtColor(image,cv.COLOR_BGR2GRAY)
                     gray = cv.equalizeHist(gray)
                     #Look for faces in the image using the loaded cascade file
                     rects = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=3, minSize=(20, 20), flags = cv.CASCADE_SCALE_IMAGE)
                     if len(rects) == 0:
                         # clear the stream in preparation for the next frame
                         #rawCapture.truncate()
                         continue
                     #else:
                     #      rects[:,2:] += rects[:,:2]
                     ##print("Time to Cascade = %.4f" % (time.time()-start))
                     print "Found "+str(len(rects))+" face(s)"
                     #Draw a rectangle around every found face
                     for (x,y,w,h) in rects:
                         #print "Found rect "+str((x,y,w,h))
                         (nx,ny,nw,nh) = scale_rect(x, y, w, h, 1.5)
                         #cv.rectangle(canvas,(nx,ny),(nx+nw,ny+nh),(255,0,0),2)
                         roi  = np.copy(image[ny:ny+nh,nx:nx+nw])
                         print "ROI size "+str(roi.shape)
                         # preserve the aspect ratio so the face does not look
                         # skewed or distorted: scale by the ratio of the new
                         # width to the old width (float division, not integer)
                         larg = 320
                         r = float(larg) / roi.shape[1]
                         nr = int(roi.shape[0] * r)
                         dim = (larg, nr)
                         # perform the actual resizing of the ROI
                         resized = cv.resize(roi, dim, interpolation=cv.INTER_AREA)
                         print "Resize "+str(resized.shape)
                         facepos = (nface * larg)
                         canvas[0:resized.shape[0], facepos:facepos + resized.shape[1]] = resized
                         nface += 1
                         if nface == 5:
                             nface = 0
                          
                     ret, cv2mat = cv.imencode(".jpeg", canvas)  #, (cv.IMWRITE_JPEG_QUALITY, cameraQuality))
                     JpegData = bytearray(cv2mat)
                     # write one multipart part: boundary, part headers, JPEG body
                     self.wfile.write("--aaboundary\r\n")
                     self.wfile.write("Content-Type: image/jpeg\r\n")
                     self.wfile.write("Content-Length: " + str(len(JpegData)) + "\r\n\r\n")
                     self.wfile.write(JpegData)
                     self.wfile.write("\r\n")
                     # Reset the stream for the next capture
                     rawCapture.seek(0)
                     # clear the stream in preparation for the next frame
                     rawCapture.truncate()
                     #time.sleep(.5)
             except KeyboardInterrupt:
                 pass 
             return
         if self.path.endswith(".jpeg"):
             f = open(curdir + sep + self.path, 'rb')  # binary mode for image data
             self.send_response(200)
             self.send_header('Content-type','image/jpeg')
             self.end_headers()
             self.wfile.write(f.read())
             f.close()
             return
         return
     except IOError:
         self.send_error(404,'File Not Found: %s' % self.path)
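For reference, each frame in the .mjpeg branch above is one part of a multipart/x-mixed-replace stream: a boundary line, part headers, then the JPEG bytes. A minimal Python 3 sketch of just that framing (handler is assumed to be a BaseHTTPRequestHandler and jpeg_bytes an already-encoded frame):

def write_mjpeg_part(handler, jpeg_bytes, boundary=b"--aaboundary"):
    # one part of the stream: boundary, headers, body, trailing CRLF
    handler.wfile.write(boundary + b"\r\n")
    handler.wfile.write(b"Content-Type: image/jpeg\r\n")
    handler.wfile.write(b"Content-Length: %d\r\n\r\n" % len(jpeg_bytes))
    handler.wfile.write(jpeg_bytes)
    handler.wfile.write(b"\r\n")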
Exemplo n.º 36
0
class Vision(object):

    def __init__(self):
        print "Vision object started..."
        self._seqno = 0
        #TODO: check that streamer is running


    def initialize(self):
        print "Vision initialised"
        print "Starting streamer..."

        print os.system('sudo mkdir /tmp/stream')
        print os.system('sudo LD_LIBRARY_PATH=/home/pi/mjpg-streamer/mjpg-streamer  /home/pi/mjpg-streamer/mjpg-streamer/mjpg_streamer -i "input_file.so -f /tmp/stream -n pic.jpg" -o "output_http.so -w /home/pi/mjpg-streamer/mjpg-streamer/www" &')

        print "CAM init..."

        resolution = deviceconfig["cam"]["res"]
        self._cam = picamera.PiCamera()
        self._cam.resolution = resolution
        self._center = (resolution[0]/2, resolution[1]/2)
        # TODO: Read accelerometer and adjust flipping depending on camera rotation
        self._cam.hflip = False
        self._cam.vflip = False
        #self._cam.framerate = deviceconfig["cam"]["framerate"]
        #print "Wait for the automatic gain control to settle"
        #time.sleep(2)
        #print "Setting cam fix values"
        # Now fix the values
        #self._cam.shutter_speed = self._cam.exposure_speed
        #self._cam.exposure_mode = 'off'
        #g = self._cam.awb_gains
        #self._cam.awb_mode = 'off'
        #self._cam.awb_gains = g
        print "Starting image-generator..."
        self._lastframetime = time.time()
        self._rawCapture = PiRGBArray(self._cam, size=resolution)
        self._imagegenerator = self._cam.capture_continuous(self._rawCapture, format="bgr", use_video_port=True)
        #self._contourFinder.initialize()
        frame = self.update()
        #self._videow = cv2.VideoWriter(deviceconfig["Streamer"]["VideoFile"], cv2.cv.CV_FOURCC('P','I','M','1'), 20, resolution )
        return frame

    def update(self):
        #TODO: make threaded in exception catcher
        rawframe = self._imagegenerator.next()
        # reset the capture buffer: rewind first, then truncate at position 0
        self._rawCapture.seek(0)
        self._rawCapture.truncate()
        frame = rawframe.array
        #self._contourFinder.update(frame)
        if deviceconfig["Vision"]["WriteFramesToSeparateFiles"]:
            cv2.imwrite("camseq"+str(self._seqno)+".jpg",frame)
        return frame

    def draw(self, frame, framerate=0, text = ""):
        #self._contourFinder.draw(frame)
        if deviceconfig["Vision"]["PrintFrameRate"] and framerate!=0:
            cv2.putText(frame, "Framerate: " + str(framerate), (10, 80),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 2)
        if text != "":
            cv2.putText(frame, text , (5, self._cam.resolution[1]/2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255),2)
        #Write to actual frame for MJPG streamer
        cv2.imwrite(deviceconfig["Streamer"]["StreamerImage"], frame)

        if deviceconfig["Vision"]["WriteFramesToSeparateFiles"]:
            #pickle.dump(self._contourFinder._cnts,open('cv2cnts' +str(self._seqno),'wb'))
            cv2.imwrite("cv2seq"+str(self._seqno)+".jpg",frame)
            self._seqno=self._seqno+1

    def setRotation(self, rot):
        if rot in [0,90,180,270]:
            self._cam.rotation = rot


    def __del__(self):
        print "Vision object deleted..."
        self._cam.close()
Exemplo n.º 37
0
	def showVideo(self,click_allow_arg):
		lower_IR = np.array([0,0,50]) # lower bound of CV filter
		upper_IR = np.array([255,255,255]) # upper bound of CV filter
		rawCapture = PiRGBArray(self.cam_handle, size=self.cam_res) # capture to array
		m = PyMouse() # make mouse object
		prev_maxLoc = (0,0) # init previous maxLoc
		try:
			for frame in self.cam_handle.capture_continuous(rawCapture, format = 'bgr', use_video_port=True):
				image = frame.array # get the array values for that frame
				mask = cv2.inRange(image, lower_IR, upper_IR) # mask according to lower/upper filter values
				(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(mask) # find min/max values of image
				true_pnt = self.get_true_point(maxLoc)
				print true_pnt #DEBUG INFO PRINTOUT
				adjusted_maxx = int(((true_pnt[0]) * self.output_res[0])/(self.screen_res[0])) # rescale x by dimensional analysis: screen space -> output space
				adjusted_maxy = int(((true_pnt[1]) * self.output_res[1])/(self.screen_res[1])) # rescale y by dimensional analysis: screen space -> output space
				if click_allow_arg: # if user wants to have clicks when IR found
					if maxLoc != (0,0): # if not on the default location			
						m.press(adjusted_maxx,adjusted_maxy, 1) # press the "mouse" button at location found
					elif prev_maxLoc != (0,0) and maxLoc == (0,0): # if the current value is the default and the last value wasn't the default, release
						m.release(prev_maxLoc[0],prev_maxLoc[1], 1) # release the "mouse" button
				else: # only move the mouse, no clicking
					if maxLoc != (0,0): # if not on the default location
						m.move(adjusted_maxx,adjusted_maxy) # move the mouse
				prev_maxLoc = (adjusted_maxx,adjusted_maxy) # set previous to be current max loc
				cv2.circle(image, maxLoc, 21, (255,0,0), 2) # place circle on brightest spot
				cv2.imshow("TestWindow", cv2.resize(image,(160,120))) # show the image in a tiny window
				cv2.waitKey(1) & 0xFF # needed for preview
				rawCapture.seek(0) # return to the 0th byte
				rawCapture.truncate() # clear array for next frame
				print "Mouse at: ", m.position() #DEBUG INFO PRINTOUT
		except KeyboardInterrupt: # used to quit the function
			rawCapture.seek(0) # return to the 0th byte
			rawCapture.truncate() # clear array for next frame
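The adjusted_maxx/adjusted_maxy lines above rescale a point from one resolution into another by plain dimensional analysis. A standalone sketch of that mapping (the function name and the resolutions are illustrative assumptions):

def scale_point(point, src_res=(640, 480), dst_res=(1920, 1080)):
    # map (x, y) measured at src_res onto dst_res, keeping the relative position
    x, y = point
    return (int(x * dst_res[0] / float(src_res[0])),
            int(y * dst_res[1] / float(src_res[1])))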