Example No. 1
def Phase_2_QR_Match():
    global MATCH_QR_CODE
    print("PHASE 2: QR MATCH")
    vs = PiVideoStream().start()
    time.sleep(2.0)  # allow the camera sensor to warm up
    while True:
        GPIO.output(4, True)

        frame = vs.read()
        decoded = decode(frame, qr_data_match_len)
        if decoded:
            qr_display(frame, decoded[0])
        if len(qr_data_match_len) > 0:
            qr_data_match = str(qr_data_match_len[-1])
            qr_data_match_split = qr_data_match.replace("'", '').replace('b', '').split(",")
            qr_data_replace = str(qr_data).replace("'", '').replace('b', '')
            for i in range(len(qr_data_match_split)):
                if qr_data_replace in qr_data_match_split[i]:
                    print("{0} is in {1}".format(qr_data_match_split[i], qr_data_replace))  # data matched
                    MATCH_QR_CODE = True
                    break
                else:
                    print("{0} does not match {1}, line follower".format(qr_data_match_split[i], qr_data_replace))  # no match
                    print("forward")
        if MATCH_QR_CODE:
            break
            
        cv2.imshow('frame', frame)
        cv2.waitKey(10)
    vs.stop()
    GPIO.output(4, False)
Example No. 2
class QRDetector(object):
    def __init__(self, flip=False):
        self.vs = PiVideoStream(resolution=(400, 304), framerate=3).start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        decoded_objs = self.decode(frame)
        # draw the positions of the recognized QR codes
        frame = self.draw_positions(frame, decoded_objs)

        num = len(decoded_objs)
        detected = num > 0

        cv2.putText(frame, 'Num: {}  Detected: {}'.format(num, detected), (15, 30),
                    cv2.FONT_HERSHEY_DUPLEX, 0.8, (0, 255, 255), 1)

        return frame

    def decode(self, frame):
        decoded_objs = pyzbar.decode(frame, scan_locations=True)
        for obj in decoded_objs:
            print(datetime.now().strftime('%H:%M:%S.%f'))
            print('Type: ', obj.type)
            print('Data: ', obj.data)

        return decoded_objs

    def draw_positions(self, frame, decoded_objs):
        for obj in decoded_objs:
            left, top, width, height = obj.rect
            frame = cv2.rectangle(frame, (left, top),
                                  (left + width, top + height), (0, 255, 0), 2)
            data = obj.data.decode('utf-8')  # decoded payload (currently unused)

        return frame
Example No. 3
def videoStream():
	## PiVideoStream runs the camera capture continuously on a separate thread
	# Keeping the I/O call on a separate thread keeps our main loop uninterrupted and achieves higher FPS
	encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
	vs = PiVideoStream().start()
	time.sleep(1.0)
	print("Capturing video...")
	#keep reading frames
	try:
		while True:
			frame = vs.read()
			#compress image - allows for faster live stream
			ret, frame = cv2.imencode('.jpg',frame, encode_param)

			#View stream on RPI HDMI
			#cv2.imshow("frame", frame)
			#cv2.waitKey(1) & 0xFF

			#Now serialize the data with pickle
			data = pickle.dumps(frame)
			#add msgLength to a struct- send message length first, then data
			message_size = struct.pack("L", len(data))
			#SEND msg length + DATA
			videoSocket.sendall(message_size + data)
	except Exception:
		vs.stop()
		videoSocket.close()
		commSocket.close()
		exit()
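For reference, a minimal receiving end for this length-prefixed pickle stream might look like the sketch below. It is an assumption, not part of the original: the host and port are placeholders, and the struct format string must match the sender's "L" (whose size is platform-dependent, so either run matched platforms or switch both ends to a fixed-size format such as "=L").
import pickle
import socket
import struct

import cv2

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('raspberrypi.local', 8000))  # placeholder host/port

payload_size = struct.calcsize("L")  # must match the sender's format string
data = b""
while True:
    # read the fixed-size length header
    while len(data) < payload_size:
        data += sock.recv(4096)
    msg_size = struct.unpack("L", data[:payload_size])[0]
    data = data[payload_size:]
    # read the pickled JPEG buffer
    while len(data) < msg_size:
        data += sock.recv(4096)
    frame_data, data = data[:msg_size], data[msg_size:]
    jpeg = pickle.loads(frame_data)  # the numpy JPEG buffer sent by videoStream()
    frame = cv2.imdecode(jpeg, cv2.IMREAD_COLOR)
    cv2.imshow("stream", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Example No. 4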
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        self.hog = cv2.HOGDescriptor()
        self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
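Example No. 5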
class VideoCamera(object):
    def __init__(self):
        self.vs = PiVideoStream().start()
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def get_frame(self):
        frame = self.vs.read()
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):
        found_objects = False
        frame = self.vs.read().copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

        if len(objects) > 0:
            found_objects = True

        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        ret, jpeg = cv2.imencode('.jpg', frame)
        return (jpeg.tobytes(), found_objects)
Example No. 6
    def __init__(self, config=None):
        # avoid a mutable default argument
        self.config = config or {}
        self.flip = self.config['flip_cam']
        self.frame_width = self.config['res_x']
        self.frame_height = self.config['res_y']
        if not isLoadPiCam:
            self.cap = cv2.VideoCapture(0)
            # getting width and height from the capture device
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.frame_width)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.frame_height)
        else:
            self.cap = PiVideoStream(resolution=(
                self.frame_width, self.frame_height)).start()
        self.canRecord = True
        self.timeWithoutBody = 0
        self.startRecordingTime = 0
        self.pauseRecording = 0
        # self.fourcc = 0x00000021
        self.fourcc = cv2.VideoWriter_fourcc(*'avc1')

        self.out = cv2.VideoWriter()
        self.maxRecordingTime = self.config['max_record_time']
        self.classifier = cv2.CascadeClassifier(self.config['classifier'])
        self.isRecording = False
        self.currentFrame = None
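For reference, a hypothetical config dict carrying the keys this initializer reads (all values are placeholders):
config = {
    'flip_cam': False,        # flip the camera image
    'res_x': 640,             # capture width
    'res_y': 480,             # capture height
    'max_record_time': 60,    # seconds
    'classifier': 'haarcascade_fullbody.xml',  # cascade file path (placeholder)
}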
Example No. 7
class Camera(object):
    def __init__(self, calib_img, resolution, framerate):
        self.stream = PiVideoStream(resolution=resolution, framerate=framerate)
        calib = undistort(cv2.imread(calib_img))
        self.transform = birdseye_transform(calib)

    def start(self):
        self.stream.start()

    def stop(self):
        self.stream.stop()

    def get_line(self):
        frame = self.stream.read()
        test = undistort(frame)
        bird = birdseye(test, transform=self.transform)

        u = u_channel(bird)

        filtered = sobel(u)

        thresh = threshold(filtered)

        lines, line_coeff = detect_lines(thresh)
        # print("line:", p)
        # print("curvature: {}, slope: {}, offset: {}".format(*line_coeff))
        return line_coeff
Example No. 8
    def __init__(self, flip=False):
        self.vs = PiVideoStream().start()
        self.vs.camera.rotation = 270
        self.flip = flip
        self.conf = json.load(open("conf.json"))
        self.client = boto3.resource('s3')
        time.sleep(2.0)
Example No. 9
class QRDetector(object):
    def __init__(self, flip=False):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        pass

    def decode(self, frame):
        pass

    def draw(self, frame, decoded_objs):
        pass
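The stubbed methods can be filled in as in Example No. 2, which implements the same QRDetector interface with pyzbar.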
Example No. 10
def detect_face():
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    #cap = cv2.VideoCapture(0)
    cap = PiVideoStream().start()
    time.sleep(2.0)
    face_detected = False
    while not face_detected:
        # Read the frame
        #_, img = cap.read()
        frame = cap.read()
        # Convert to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        # Draw the rectangle around each face
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 0), 2)
            face_detected = True

    # When everything is done, stop the stream (PiVideoStream has no release())
    cap.stop()
    cv2.destroyAllWindows()
    return face_detected
Example No. 11
    def run(self):
        HOST = ''
        PORT = 7878
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((HOST, PORT))
        sock.listen(1)
        vs = PiVideoStream().start()
        #time.sleep(2.0)

        while True:
            print('WAIT FOR PORT 7878')
            conn, addr = sock.accept()

            while True:
                try:
                    frame = vs.read()
                    ret, buff = cv2.imencode('.jpg', frame,
                                             [cv2.IMWRITE_JPEG_QUALITY, 100])
                    data = np.array(buff)
                    stringData = data.tobytes()
                    b = binascii.hexlify(stringData)
                    conn.send(b)
                    conn.send(b'\n')
                except socket.error:
                    conn.close()
                    break
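A minimal client for this server might read one hex-encoded JPEG per newline and decode it. This is a hedged sketch, not part of the original; the host is a placeholder.
import binascii
import socket

import cv2
import numpy as np

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('raspberrypi.local', 7878))  # placeholder host
buf = b""
while True:
    chunk = sock.recv(65536)
    if not chunk:
        break
    buf += chunk
    # each frame arrives as hex text terminated by b'\n'
    while b'\n' in buf:
        line, buf = buf.split(b'\n', 1)
        jpeg = np.frombuffer(binascii.unhexlify(line), dtype=np.uint8)
        frame = cv2.imdecode(jpeg, cv2.IMREAD_COLOR)
        cv2.imshow('stream', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            raise SystemExit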
Example No. 12
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        # instantiate OpenCV's face classifier (CascadeClassifier)
        self.face_cascade = cv2.CascadeClassifier('camera/processor/model/haarcascades/haarcascade_frontalface_default.xml')
Example No. 13
    def createVideoStream(system, source, fps, resx, resy, begin):

        try:  # Camera as source
            source = int(source)

            if system == "Windows":
                vs = cv2.VideoCapture(source)
                vs.set(3, resx)  # Set resolution x (3 = CV_CAP_PROP_FRAME_WIDTH)
                vs.set(4, resy)  # Set resolution y (4 = CV_CAP_PROP_FRAME_HEIGHT)
                vs.set(5, fps)   # Set frame rate (5 = CV_CAP_PROP_FPS)

                # Necessary to avoid background subtraction errors
                cv2.ocl.setUseOpenCL(False)

            elif system == "Raspi":
                vs = PiVideoStream((resx, resy), fps).start()

        except ValueError:  # Video as source
            vs = cv2.VideoCapture(source)
            # Set start of video in ms (0 = CV_CAP_PROP_POS_MSEC)
            vs.set(0, begin)

            if system == "Windows":
                # Necessary to avoid background subtraction errors
                cv2.ocl.setUseOpenCL(False)

        return vs
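A hedged usage sketch, assuming the helper is exposed as a module-level function or @staticmethod (the argument values are placeholders): a numeric string selects the camera branch, while a non-numeric string raises ValueError on int() and falls through to the video-file branch.
vs = createVideoStream("Raspi", "0", 30, 640, 480, 0)               # Pi camera stream
vs = createVideoStream("Windows", "clip.mp4", 30, 640, 480, 5000)   # video file, start at 5 s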
Example No. 14
class Camera(object):
    def __init__(self):
        if cv2.__version__.startswith('2'):
            PROP_FRAME_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
            PROP_FRAME_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
        elif cv2.__version__.startswith('3'):
            PROP_FRAME_WIDTH = cv2.CAP_PROP_FRAME_WIDTH
            PROP_FRAME_HEIGHT = cv2.CAP_PROP_FRAME_HEIGHT

#        self.video = cv2.VideoCapture(0)
        self.video = PiVideoStream().start()

        #self.video = cv2.VideoCapture(1)
        #self.video.set(PROP_FRAME_WIDTH, 640)
        #self.video.set(PROP_FRAME_HEIGHT, 480)
#        self.video.set(PROP_FRAME_WIDTH, 320)
#        self.video.set(PROP_FRAME_HEIGHT, 240)
    
    def __del__(self):
        self.video.stop()
    
    def get_frame(self):
        while True:
            try:
                image = self.video.read()
                ret, jpeg = cv2.imencode('.jpg', image)
                return jpeg.tobytes()
            except Exception:
                continue
Example No. 15
class PersonDetector(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(640, 480)).start()
        self.flip = flip
        self.last_uploaded = datetime.now()
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        # PersonDetector does not flip the frame
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        persons = 0
        frame = imutils.resize(frame, width=300)
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (300, 300), 127.5)
        net.setInput(blob)
        detections = net.forward()

        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            # confidence of the detected object
            if confidence < 0.5:
                continue

            idx = int(detections[0, 0, i, 1])
            # class index 15 is 'person'
            if idx != 15:
                continue

            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype('int')
            label = '{}: {:.2f}%'.format('Person', confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0),
                          2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 1)
            persons += 1

        if persons > 0:
            timestamp = datetime.now()
            if (timestamp - self.last_uploaded).seconds >= 30:
                cv2.imwrite("image.jpg", frame)
                print('Uploading...')
                upload(persons)
                self.last_uploaded = timestamp
                print('Finished.')

        return frame
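The person detectors in this listing reference a module-level `net` that is not shown; a typical (assumed) way to create it is OpenCV's DNN module with the MobileNet-SSD Caffe files. The file names below are placeholders.
net = cv2.dnn.readNetFromCaffe('MobileNetSSD_deploy.prototxt',
                               'MobileNetSSD_deploy.caffemodel')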
Example No. 16
    def __init__(self, flip=False):
        try:
            self.vs = PiVideoStream(resolution=(640, 480)).start()
        except Exception as e:
            # if the constructor failed, self.vs may not exist, so report the error itself
            print(e)
        self.flip = flip
        time.sleep(2.0)
Example No. 17
    def __init__(self,
                 model_path,
                 threshold=0.8,
                 resolution=(512, 352),
                 framerate=5,
                 head_line=80):
        """初期化処理"""
        # True: 立っている状態, False: かがんでいる状態
        self.is_standing = True
        # is_standing の判定に使うしきい値
        self.head_line = head_line

        # カメラへの接続
        self.camera = PiVideoStream(resolution=resolution, framerate=framerate)

        # どのくらいの尤度で人として検知するか
        self.threshold = threshold
        # TFLiteモデルの読み込み
        self.detector = tf.lite.Interpreter(model_path=model_path)
        # TFLiteの初期化
        self.detector.allocate_tensors()
        self.detector.set_num_threads(4)
        # モデルの入出力情報の取得
        self.input_details = self.detector.get_input_details()
        self.output_details = self.detector.get_output_details()
        self.input_height = self.input_details[0]['shape'][1]
        self.input_width = self.input_details[0]['shape'][2]
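A plausible companion inference method for this detector, assuming the common TFLite SSD detection output order (boxes, classes, scores), a uint8 input model, and that 'person' is class 0; the method name is an assumption, not part of the original.
    def detect_people(self, frame):
        """Hedged sketch: run one frame through the TFLite interpreter."""
        resized = cv2.resize(frame, (self.input_width, self.input_height))
        input_data = np.expand_dims(resized, axis=0)  # uint8 input assumed
        self.detector.set_tensor(self.input_details[0]['index'], input_data)
        self.detector.invoke()
        boxes = self.detector.get_tensor(self.output_details[0]['index'])[0]
        classes = self.detector.get_tensor(self.output_details[1]['index'])[0]
        scores = self.detector.get_tensor(self.output_details[2]['index'])[0]
        # keep confident 'person' detections (class 0 in many COCO TFLite SSDs)
        return [box for box, cls, score in zip(boxes, classes, scores)
                if score >= self.threshold and int(cls) == 0]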
Example No. 18
class VideoCamera(object):
    def __init__(self, flip=False):
        self.mydb = mysql.connector.connect(host="#################",
                                            user="******",
                                            passwd="#################",
                                            database="################")
        self.carsarray = []
        self.vs = PiVideoStream().start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):
        found_objects = False
        frame = self.flip_if_needed(self.vs.read()).copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(15, 15),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

        if len(objects) > 0:
            found_objects = True

        # Draw a rectangle around the objects
        numofcars = 0
        for (x, y, w, h) in objects:
            numofcars = numofcars + 1
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            #print (numofcars)
        print("Analyzing...")
        self.carsarray.append(numofcars)
        cv2.imshow('video', frame)
        cv2.waitKey(50)
        if len(self.carsarray) > 9:
            avginimage = math.ceil(sum(self.carsarray) / 10)
            print(avginimage)
            mycursor = self.mydb.cursor()
            # use a parameterized query rather than string formatting
            mycursor.execute("UPDATE cars SET Cars = %s", (avginimage,))
            self.mydb.commit()
            self.carsarray = []
        ret, jpeg = cv2.imencode('.jpg', frame)
        return (jpeg.tobytes(), found_objects)
Example No. 19
class VideoCamera(object):
    def __init__(self, flip=False):
        self.vs = PiVideoStream().start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame
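Example No. 20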
class PersonDetector(object):
    def __init__(self, flip=True):
        self.last_upload = time.time()
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        frame = imutils.resize(frame, width=300)
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (300, 300), 0)
        net.setInput(blob)
        detections = net.forward()

        count = 0
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            if confidence < 0.2:
                continue

            idx = int(detections[0, 0, i, 1])
            if idx != 15:
                continue

            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype('int')
            label = '{}: {:.2f}%'.format('Person', confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0),
                          2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 1)
            count += 1

        if count > 0:
            print('Count: {}'.format(count))
            elapsed = time.time() - self.last_upload
            if elapsed > 60:
                cv2.imwrite('hello.jpg', frame)
                upload()
                self.last_upload = time.time()

        return frame
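Example No. 21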
class FaceDetector(object):
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        # instantiate OpenCV's face classifier (CascadeClassifier)
        self.face_cascade = cv2.CascadeClassifier(
            'camera/processor/model/haarcascades/haarcascade_frontalface_default.xml'
        )
        self.eye_cascade = cv2.CascadeClassifier(
            'camera/processor/model/haarcascades/haarcascade_eye.xml')

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def process_image(self, frame):
        # convert the color frame to grayscale with OpenCV
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # run the grayscale image through the instantiated face classifier's
        # detectMultiScale method to get the coordinates of the detected faces
        faces = self.face_cascade.detectMultiScale(gray, 1.3, 3)

        # build the overlay text
        iSize = len(faces)
        # iRow = frame.shape[0]
        # iCol = frame.shape[1]
        font = cv2.FONT_HERSHEY_SIMPLEX

        # using the coordinates obtained above, draw each detected face
        # on the frame with cv2.rectangle
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (128, 128, 0), 2)
            face = frame[y:y + h, x:x + w]
            face_gray = gray[y:y + h, x:x + w]
            eyes = self.eye_cascade.detectMultiScale(face_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(face, (ex, ey), (ex + ew, ey + eh), (0, 255, 0),
                              2)

        # draw the text
        cv2.putText(frame, str(iSize), (10, 100), font, 4, (128, 0, 255), 3,
                    cv2.LINE_AA)
        # return the annotated frame
        return frame
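Example No. 22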
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)

        self.argvs = sys.argv
        self.face_cascade = cv2.CascadeClassifier('camera/processor/model/haarcascades/haarcascade_frontalface_default.xml')
        if len(self.argvs) >= 2 and "eyes" in self.argvs:
            self.eye_cascade = cv2.CascadeClassifier('camera/processor/model/haarcascades/haarcascade_eye.xml')
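Example No. 23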
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        time.sleep(2.0)
        self.firstFrame = None
        self.move_time = time.time()
        self.movelist = []
        self.enter = 0
        self.leave = 0
Example No. 24
    def __init__(self, flip=False):
        self.mydb = mysql.connector.connect(host="#################",
                                            user="******",
                                            passwd="#################",
                                            database="################")
        self.carsarray = []
        self.vs = PiVideoStream().start()
        self.flip = flip
        time.sleep(2.0)
Example No. 25
    def __init__(self):
        if cv2.__version__.startswith('2'):
            PROP_FRAME_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
            PROP_FRAME_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
        elif cv2.__version__.startswith('3'):
            PROP_FRAME_WIDTH = cv2.CAP_PROP_FRAME_WIDTH
            PROP_FRAME_HEIGHT = cv2.CAP_PROP_FRAME_HEIGHT

        self.video = PiVideoStream().start()
Example No. 26
    def __init__(self, flip=False):
        #self.camera = PiCamera()
        #self.camera.resolution = (640, 480)
        #self.camera.framerate = 32
        #self.rawCapture = PiRGBArray(self.camera, size=(640, 480))

        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip

        time.sleep(2.0)
Example No. 27
    def __init__(self):
        PiVideoStream.__init__(self, (640, 480))
        # PiVideoStream stores its PiCamera instance as self.camera,
        # so the attributes must be set through it (a bare `camera` is a NameError)
        # self.camera.vflip = True
        self.camera.hflip = True
        self.camera.awb_mode = 'off'
        # self.camera.image_effect = 'solarize'
        # self.camera.image_effect_params = 0
        self.camera.brightness = 75
        self.camera.awb_gains = (1.45, 1.45)
        self.camera.exposure_mode = 'off'
Example No. 28
    def __init__(self, flip=True):
        self.vs = PiVideoStream((640, 480)).start()
        self.vs.camera.vflip = True
        #        self.vs.camera.framerate = 30
        #self.vs.camera.exposure_mode = 'night'
        #self.vs.camera.vflip = True
        self.flip = flip
        self.vs.camera.start_preview()

        time.sleep(2.0)
Example No. 29
class VideoCamera(object):
    def __init__(self, flip=False):
        self.vs = PiVideoStream().start()
        self.vs.camera.rotation = 270
        self.flip = flip
        self.conf = json.load(open("conf.json"))
        self.vs.framerate = self.conf["fps"]
        self.vs.resolution = self.conf["resolution"]
        self.client = boto3.resource('s3')
        time.sleep(2.0)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self):
        frame = self.flip_if_needed(self.vs.read())
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):
        timestamp = datetime.datetime.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S.%f%p")
        found_objects = False
        frame = self.flip_if_needed(self.vs.read()).copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(gray,
                                              scaleFactor=1.05,
                                              minNeighbors=5,
                                              minSize=(60, 60),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

        if len(objects) > 0:
            found_objects = True

        # Draw a rectangle around the objects
        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            #t = TempImage()
            #cv2.imwrite(t.path, frame[y:y+h,x:x+w])
            #self.client.meta.client.upload_file(t.path, self.conf["s3bucket_faces"], t.key )
            #t.cleanup()

        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 0), 1)
        #cv2.imshow("feed",frame)
        #cv2.waitKey(1000)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return (jpeg.tobytes(), found_objects, frame)
Example No. 30
class VideoCamera(object):
    def __init__(self, flip=False):
        self.vs = PiVideoStream(resolution=(480, 320), framerate=32).start()  # resolution and framerate
        self.flip = flip
        time.sleep(0.1)

    def __del__(self):
        self.vs.stop()

    def flip_if_needed(self, frame):
        if self.flip:
            return np.flip(frame, 0)
        return frame

    def get_frame(self, classifier):
        frame = self.flip_if_needed(self.vs.read()).copy() 
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(
            gray,
            scaleFactor=1.5,
            minNeighbors=4,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def get_object(self, classifier):        
        found_objects = False
        frame = self.flip_if_needed(self.vs.read()).copy() 
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        objects = classifier.detectMultiScale(
            gray,
            scaleFactor=1.5,
            minNeighbors=4,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )

        if len(objects) > 0:
            found_objects = True

        # Draw a rectangle around the objects
        for (x, y, w, h) in objects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        ret, jpeg = cv2.imencode('.jpg', frame)
        #print('called = ', found_objects )
        return (jpeg.tobytes(), found_objects,frame)
Example No. 31
def run_main():
	# create a "threaded" video stream and allow the camera sensor to warm up
	vs = PiVideoStream((640,480)).start()
	time.sleep(2.0)
	
	# read the first frame of the video
	frame = vs.read()
	
	# Set the ROI (region of interest)
	c, r, w, h = 200, 100, 70, 70
	track_window = (c, r, w, h)
	
	# Create a mask and a normalized histogram
	roi = frame[r:r+h, c:c+w]
	hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
	mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180.,255.,255.)))
	roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
	cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
	
	# Set up the termination criteria: either 80 iterations or movement by at least 1 pt
	term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
	
	while True:
		frame = vs.read()
	
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
		dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)
		
		# apply meanshift to get the new location
		ret, track_window = cv2.meanShift(dst, track_window, term_crit)
		
		# apply camshift to get the new location
		#ret, track_window = cv2.CamShift(dst, track_window, term_crit)

		# draw it on image
		x, y, w, h = track_window
		cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
		cv2.putText(frame, 'Tracked M', (x-25, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)
		
		cv2.imshow('Tracking', frame)
		
		# if the 'q' key is pressed, break from the loop
		key = cv2.waitKey(1) & 0xFF
		if key == ord("q"):
			break
			
	vs.stop()
	cv2.destroyAllWindows()
Example No. 32
    def __init__(self):
        threading.Thread.__init__(self)
        self.running = True
        self.fps_counter = FPS().start()
        self.camera = PiVideoStream(resolution=(config.RAW_FRAME_WIDTH, config.RAW_FRAME_HEIGHT))
        self.camera.start()
        time.sleep(config.CAMERA_WARMUP_TIME)  # wait for camera to initialise
        self.frame = None
        self.frame_chroma = None
        self.centre = config.CENTRE
        self.can_see_lines = False
        self.last_yellow_mean = config.WIDTH * 0.8
        self.last_blue_mean = config.WIDTH * 0.2
        self.desired_steering = config.NEUTRAL_STEERING  # 0 = left, 0.5 = center, 1 = right
        self.desired_throttle = config.NEUTRAL_THROTTLE  # 0 = stop, 0.5 = medium speed, 1 = fastest

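Example No. 33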
if __name__ == '__main__':
    import sys

    print(__doc__)

    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0
    '''camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCap= PiRGBArray(camera, size = (640, 480))'''
    cap = PiVideoStream().start()


    def process_frame(frame, t0):
        # some intensive computation...
        frame = cv2.medianBlur(frame, 19)
        # frame = cv2.medianBlur(frame, 19)
        return frame, t0

    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
    pending = deque()

    threaded_mode = True

    latency = StatValue()
 
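Example No. 34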
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
 
# do a bit of cleanup
cv2.destroyAllWindows()
stream.close()
rawCapture.close()
camera.close()

# create a *threaded* video stream, allow the camera sensor to warm up,
# and start the FPS counter
print("[INFO] sampling THREADED frames from `picamera` module...")
vs = PiVideoStream().start()
time.sleep(2.0)
fps = FPS().start()
 
# loop over some frames...this time using the threaded stream
while fps._numFrames < args["num_frames"]:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
 
    # check to see if the frame should be displayed to our screen
    if args["display"] > 0:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
 
Example No. 35
from imutils.video.pivideostream import PiVideoStream
from imutils.video import FPS  # to measure FPS
import cv2   # OpenCV library
import time  # time functions

# HSV thresholds for green
greenLower = (85, 126, 60)
greenUpper = (95, 255, 255)
# Create the display window
cv2.namedWindow('Tracking', cv2.WINDOW_NORMAL)
# Start the video stream on a separate thread
vs = PiVideoStream()
vs.camera.video_stabilization = True
vs.start()
# Give the camera time to warm up
time.sleep(1.0)
# Start the FPS counter
fps = FPS().start()
# Main loop
while True:
    frame = vs.read()
    #frame = imutils.resize(frame, width = 400)
    # Convert the frame to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Build a mask for the chosen color
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    # Two erosion passes to remove small artifacts
    mask = cv2.erode(mask, None, iterations=2)
    # Two dilation passes to restore the size
    mask = cv2.dilate(mask, None, iterations=2)
    # Detect contours in the mask
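    # --- hedged continuation: the original example is truncated at this point.
    # A plausible next step, following the classic color-ball-tracking recipe,
    # is sketched below; the variable names and thresholds are assumptions.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # OpenCV 3/4 return-shape compatibility
    if cnts:
        c = max(cnts, key=cv2.contourArea)          # largest green blob
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        if radius > 10:
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
    cv2.imshow('Tracking', frame)
    fps.update()
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break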
Example No. 36
class DroidVisionThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.running = True
        self.fps_counter = FPS().start()
        self.camera = PiVideoStream(resolution=(config.RAW_FRAME_WIDTH, config.RAW_FRAME_HEIGHT))
        self.camera.start()
        time.sleep(config.CAMERA_WARMUP_TIME) # wait for camera to initialise
        self.frame = None
        self.frame_chroma = None
        self.centre = config.CENTRE
        self.can_see_lines = False
        self.last_yellow_mean = config.WIDTH * 0.8
        self.last_blue_mean = config.WIDTH * 0.2
        self.desired_steering = config.NEUTRAL_STEERING # 0 = left, 0.5 = center, 1 = right
        self.desired_throttle = config.NEUTRAL_THROTTLE # 0 = stop, 0.5 = medium speed, 1 = fastest

    def run(self):
        debug("DroidVisionThread: Thread started")
        self.vision_processing()
        self.camera.stop()
        cv2.destroyAllWindows()
        debug("DroidVisionThread: Thread stopped")

    def stop(self):
        self.running = False
        debug("DroidVisionThread: Stopping thread")

    def vision_processing(self):
        while self.running:
            self.grab_frame()

            # colour mask
            blue_mask = cv2.inRange(self.frame_chroma, config.BLUE_CHROMA_LOW, config.BLUE_CHROMA_HIGH)
            yellow_mask = cv2.inRange(self.frame_chroma, config.YELLOW_CHROMA_LOW, config.YELLOW_CHROMA_HIGH)
            colour_mask = cv2.bitwise_or(blue_mask, yellow_mask)
            colour_mask = cv2.erode(colour_mask, config.ERODE_KERNEL)
            colour_mask = cv2.dilate(colour_mask, config.DILATE_KERNEL)
            
            # lines
            lines = cv2.HoughLinesP(colour_mask, config.HOUGH_LIN_RES, config.HOUGH_ROT_RES, config.HOUGH_VOTES, config.HOUGH_MIN_LEN, config.HOUGH_MAX_GAP)
            blue_lines = np.array([])
            yellow_lines = np.array([])
            if lines is not None:
                for line in lines:
                    x1,y1,x2,y2 = line[0]
                    angle = np.rad2deg(np.arctan2(y2-y1, x2-x1))
                    if config.MIN_LINE_ANGLE < abs(angle) < config.MAX_LINE_ANGLE:
                        if config.IMSHOW:
                            cv2.line(self.frame, (x1,y1), (x2,y2), (0,0,255), 1)
                        if angle > 0:
                            yellow_lines = np.append(yellow_lines, [x1, x2])
                        elif angle < 0:
                            blue_lines = np.append(blue_lines, [x1, x2])

            # find centre
            blue_mean = self.last_blue_mean
            yellow_mean = self.last_yellow_mean
            if len(blue_lines):
                blue_mean = int(np.mean(blue_lines))
            if len(yellow_lines):
                yellow_mean = int(np.mean(yellow_lines))
            self.centre = (blue_mean + yellow_mean) / 2.0
            self.last_blue_mean = blue_mean
            self.last_yellow_mean = yellow_mean

            self.can_see_lines = (len(blue_lines) or len(yellow_lines))

            # set steering and throttle
            self.desired_steering = (1.0 - (self.centre / config.WIDTH))
            if len(blue_lines) or len(yellow_lines):
                self.desired_throttle = 0.22
            else:
                self.desired_throttle = 0

            if config.IMSHOW:
                cv2.circle(self.frame, (int(self.centre), config.HEIGHT - 20), 10, (0,0,255), -1)
                cv2.imshow("colour_mask without noise", colour_mask)
                cv2.imshow("raw frame", self.frame)
                cv2.waitKey(1)

    def grab_frame(self):
        self.fps_counter.update()
        # grab latest frame and index the ROI
        self.frame = self.camera.read()
        self.frame = self.frame[config.ROI_YMIN:config.ROI_YMAX, config.ROI_XMIN:config.ROI_XMAX]
        # convert to chromaticity colourspace
        B = self.frame[:, :, 0].astype(np.uint16)
        G = self.frame[:, :, 1].astype(np.uint16)
        R = self.frame[:, :, 2].astype(np.uint16)    
        Y = 255.0 / np.maximum(B + G + R, 1)  # guard against division by zero on black pixels
        b = (B * Y).astype(np.uint8)
        g = (G * Y).astype(np.uint8)
        r = (R * Y).astype(np.uint8)
        self.frame_chroma = cv2.merge((b,g,r))

    def get_fps(self):
        self.fps_counter.stop()
        return self.fps_counter.fps()

    def get_error(self):
        return (config.CENTRE - self.centre)

    def get_steering_throttle(self):
        return self.desired_steering, self.desired_throttle
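Example No. 37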
	flow = DropboxOAuth2FlowNoRedirect(conf["dropbox_key"], conf["dropbox_secret"])
	print("[INFO] Authorize this application: {}".format(flow.start()))
	authCode = input("Enter auth code here: ").strip()

	# finish the authorization and grab the Dropbox client
	(accessToken, userID) = flow.finish(authCode)
	client = DropboxClient(accessToken)
	print("[SUCCESS] dropbox account linked")

# initialize the camera and grab a reference to the raw camera capture
#camera = PiCamera()
#camera.rotation = conf["rotation"]
#camera.resolution = tuple(conf["resolution"])
#camera.framerate = conf["fps"]
#rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"]))
vs = PiVideoStream((320, 240), 30, conf["rotation"]).start()

# allow the camera to warmup, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print( "[INFO] warming up...")
time.sleep(conf["camera_warmup_time"])
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0

cv2.namedWindow("Security Feed")
cv2.namedWindow("ctrl", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("Security Feed", moveMask)
cv2.createTrackbar('1:Exit app', "ctrl", 0, 1, quit)
cv2.createTrackbar('Mask size', "ctrl", maskw, 255, setMaskSize)
cv2.createTrackbar('0:Off\n1:On', "ctrl", 0, 1, startDetect)