Example #1
def main():
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument(
        '--top_k',
        type=int,
        default=3,
        help='number of categories with highest score to display')
    parser.add_argument('--camera_idx',
                        type=int,
                        help='Index of the video source to use.',
                        default=0)
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='classifier score threshold')
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    videostream = VideoStream().start()
    while True:
        frame = videostream.read()
        # VideoStream.read() returns only the frame (no ret flag as with
        # cv2.VideoCapture); it is None until the first grab succeeds.
        if frame is None:
            break
        cv2_im = frame

        cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
        pil_im = Image.fromarray(cv2_im_rgb)

        common.set_input(interpreter, pil_im)
        interpreter.invoke()
        objs = get_output(interpreter,
                          score_threshold=args.threshold,
                          top_k=args.top_k)
        cv2_im = append_objs_to_img(cv2_im, objs, labels)

        cv2.imshow('frame', cv2_im)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    videostream.stop()
    cv2.destroyAllWindows()
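
Note: each example here drives the camera through a threaded VideoStream helper with start()/read()/stop() methods (imutils.video.VideoStream or a hand-rolled equivalent). A minimal sketch of such a class, assuming a plain cv2.VideoCapture backend:

import threading

import cv2

class VideoStream:
    """Minimal threaded camera reader (a sketch of the helper the examples assume)."""

    def __init__(self, src=0):
        self.cap = cv2.VideoCapture(src)
        self.frame = None
        self.stopped = False

    def start(self):
        # Grab frames on a daemon thread so read() never blocks the caller.
        threading.Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        while not self.stopped:
            ret, frame = self.cap.read()
            if ret:
                self.frame = frame
        self.cap.release()

    def read(self):
        # Most recent frame, or None before the first successful grab.
        return self.frame

    def stop(self):
        self.stopped = True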
Example #2
    from videostream import VideoStream

    vision = VideoProcess(debug=True, goal='yellow', ball=True, obstacles=True)
    stream = VideoStream().start()
    time.sleep(2)

    try:
        while True:

            frame = stream.read()

            if frame is None:
                break

            # obstacles, ball, goal, wall
            obs, ball, goal, wall = vision.update(frame)

            cv2.imshow("scene", vision.get())
            key = cv2.waitKey(1) & 0xFF

            if key == ord('q'):
                break

    except KeyboardInterrupt:
        pass

    finally:
        # finally always runs, so clean up exactly once here.
        cv2.destroyAllWindows()
        stream.stop()
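
Example #2 depends on a VideoProcess class that is not part of the listing. A hypothetical stub that matches the call sites (the constructor keywords, an update() returning four detection results, and a get() returning the annotated frame); the real project's detection logic is not reproduced here:

class VideoProcess:
    """Hypothetical stand-in for the vision helper Example #2 assumes."""

    def __init__(self, debug=False, goal=None, ball=False, obstacles=False):
        self.debug = debug
        self.goal = goal
        self.ball = ball
        self.obstacles = obstacles
        self.annotated = None

    def update(self, frame):
        # A real implementation would run color/shape detection here;
        # this stub just stores the frame and returns empty results.
        self.annotated = frame
        obs, ball, goal, wall = [], None, None, None
        return obs, ball, goal, wall

    def get(self):
        # Most recent annotated frame, for cv2.imshow().
        return self.annotated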
Example #3
    for symbol in image:
        # do something useful with results
        print('decoded', symbol.type, 'symbol', '"%s"' % symbol.data)
        # print(symbol.location)

        try:
            frame = cv2.line(frame, symbol.location[0], symbol.location[1],
                             (255, 0, 0), 3)
            frame = cv2.line(frame, symbol.location[1], symbol.location[2],
                             (255, 0, 0), 3)
            frame = cv2.line(frame, symbol.location[2], symbol.location[3],
                             (255, 0, 0), 3)
            frame = cv2.line(frame, symbol.location[0], symbol.location[3],
                             (255, 0, 0), 3)
        except (IndexError, TypeError):
            # location has fewer than four corner points
            print('Not QR')

    # show the frame

    cv2.imshow("#Code Reader", frame)

    # Wait for the magic key
    keypress = cv2.waitKey(1) & 0xFF
    if keypress == ord('q'):
        break
    # Elapsed time since the loop-top timestamp; report frames per second.
    d_time = time.time() - ticks
    print(1 / d_time)

cv2.destroyAllWindows()
vs.stop()
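
The `for symbol in image:` loop above uses the legacy zbar binding, where a scanned zbar Image is iterable and each symbol carries a location attribute. With the more commonly packaged pyzbar, the equivalent loop would look roughly like this (a sketch; pyzbar's results expose data, type, and polygon instead):

import cv2
from pyzbar.pyzbar import decode

frame = cv2.imread('qr_sample.png')  # or a frame read from the video stream
for symbol in decode(frame):
    print('decoded', symbol.type, 'symbol', '"%s"' % symbol.data.decode('utf-8'))
    # Outline the detected code by connecting its polygon points.
    pts = [(p.x, p.y) for p in symbol.polygon]
    for i in range(len(pts)):
        cv2.line(frame, pts[i], pts[(i + 1) % len(pts)], (255, 0, 0), 3)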
Example #4
class VideoCamera(object):
    INIT_TIME = 100
    FRAME_WIDTH = 640
    FRAME_HEIGHT = 480
    HISTORY = 25
    THRESHOLD = 18

    def __init__(self, usePiCamera=True):
        # initialize the camera and grab a reference to the raw camera capture
        self.vs = VideoStream(usePiCamera=usePiCamera,
                              src=0,
                              resolution=(self.FRAME_WIDTH, self.FRAME_HEIGHT),
                              framerate=12)

        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.fgbg = cv2.createBackgroundSubtractorMOG2(history=self.INIT_TIME,
                                                       varThreshold=12,
                                                       detectShadows=False)
        self.face_cascade = cv2.CascadeClassifier(
            'models/haarcascade_frontalface_default.xml')

        self.background1 = cv2.imread('backgrounds/background1.jpeg')
        self.background1 = cv2.resize(self.background1,
                                      (self.FRAME_WIDTH, self.FRAME_HEIGHT))

        self.hats = [
            cv2.imread('./hats/' + file, cv2.IMREAD_UNCHANGED)
            for file in os.listdir('./hats')
        ]

        self.stopped = False
        self.frame = None
        self.state = 0
        self.time = 0
        self.lastReady = 0
        self.lastCapture = 0
        self.history = np.zeros(self.HISTORY)

    def start(self):
        self.vs.start()
        # start the thread to process frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def stop(self):
        self.vs.stop()
        # indicate that the thread should be stopped
        self.stopped = True

    def update(self):
        # Keep looping infinitely until the thread is stopped
        while True:
            # If the thread indicator variable is set, stop the thread
            if self.stopped:
                return

            # Grab an image from the video stream
            frame = self.vs.read()

            # Wait until frames start to be available
            if frame is None:
                time.sleep(1)
                continue

            # Resize frame to fit working dimensions
            frame = cv2.resize(frame, (self.FRAME_WIDTH, self.FRAME_HEIGHT),
                               interpolation=cv2.INTER_CUBIC)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)

            fgmask = self.fgbg.apply(frame,
                                     learningRate=0 if self.state > 0 else -1)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, self.kernel)
            bgmask = 255 - fgmask

            # State machine
            # S0: Training
            if self.state == 0:
                cv2.putText(img=fgmask,
                            text='Training Segmentation Algorithm (' +
                            str(int(100.0 * self.time / self.INIT_TIME)) +
                            ' %)',
                            org=(0, self.FRAME_HEIGHT - 5),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=.7,
                            color=(255, 255, 255),
                            thickness=1)

                # Transition to S1
                if self.time >= self.INIT_TIME:
                    self.state = 1

                # Show mask when training.
                self.frame = fgmask

            # S1: Ready to capture
            elif self.state == 1:
                # Detect and draw faces
                faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
                for (x, y, w, h) in faces:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                                  2)

                # Update history
                self.history[self.time % self.HISTORY] = min(len(faces), 1)

                self.frame = frame
                #self.frame = cv2.bitwise_and(self.background1, self.background1, mask=bgmask) + cv2.bitwise_and(frame, frame, mask=fgmask)

                cv2.putText(self.frame,
                            text='Frame ' + str(self.time),
                            org=(0, self.FRAME_HEIGHT - 5),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=.7,
                            color=(255, 255, 255),
                            thickness=1)

                # Transition to S2 if faces present.
                if (np.sum(self.history) >= self.THRESHOLD
                        and self.time > self.lastCapture + self.HISTORY):
                    self.lastReady = self.time
                    self.state = 2
                # Transition to S0 if no faces seen for a long stretch; reset all.
                elif (np.sum(self.history) < self.THRESHOLD
                        and self.time > 4 * self.INIT_TIME):
                    self.time = 0
                    self.lastReady = 0
                    self.lastCapture = 0
                    self.state = 0

            # S2: Count down
            elif self.state == 2:
                self.frame = frame

                cv2.putText(self.frame,
                            text=str(3 -
                                     int((self.time - self.lastReady) / 10)),
                            org=(int(self.FRAME_WIDTH / 2),
                                 int(self.FRAME_HEIGHT / 2)),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=3,
                            color=(255, 255, 255),
                            thickness=2)

                if self.time - self.lastReady >= 29:
                    self.state = 3

            # S3: Capture
            elif self.state == 3:
                #frame = cv2.bitwise_and(self.background1, self.background1, mask=bgmask) + cv2.bitwise_and(frame, frame, mask=fgmask)

                faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)

                for (x, y, w, h) in faces:
                    # Select a hat and fit it to the face
                    hat = random.choice(self.hats)
                    x_factor = 1.45
                    y_offset = 0.15
                    hat = cv2.resize(
                        hat,
                        (int(w * x_factor),
                         int(w * x_factor / hat.shape[1] * hat.shape[0])),
                        interpolation=cv2.INTER_CUBIC)
                    # Get fitted hat width/height, and crop to bounding box.
                    h_h, h_w = hat.shape[:2]
                    h_x1 = max(int(x - (h_w - w) / 2), 0)
                    h_x2 = min(int(x + w + (h_w - w) / 2), self.FRAME_WIDTH)
                    h_y1 = max(int(y + h * y_offset - h_h), 0)
                    h_y2 = min(int(y + h * y_offset), self.FRAME_HEIGHT)
                    # Recalculate hat width/height if cropped by the bounding box.
                    h_w = h_x2 - h_x1
                    h_h = h_y2 - h_y1

                    # Blend hat to frame with alpha-channel.
                    for c in range(0, 3):
                        alpha = hat[-h_h:, :h_w, 3] / 255.0
                        color = hat[-h_h:, :h_w, c] * alpha
                        beta = frame[h_y1:h_y2, h_x1:h_x2, c] * (1.0 - alpha)
                        frame[h_y1:h_y2, h_x1:h_x2, c] = color + beta

                # Save the image to disk.
                filename = 'images/' + datetime.datetime.now().strftime(
                    "%Y%m%d-%H%M%S") + '.png'
                print('Saving image: ', filename)
                cv2.imwrite(filename, frame)

                self.frame = frame
                time.sleep(10)

                # Transition to S1
                self.lastCapture = self.time
                self.state = 1

            self.time = self.time + 1
            time.sleep(1 / 16)

    def get_frame(self):
        # Wait until frames start to be available
        while self.frame is None:
            time.sleep(0.01)

        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        ret, jpeg = cv2.imencode('.jpg', self.frame)
        return jpeg.tobytes()
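
get_frame() returning JPEG bytes suggests this VideoCamera feeds an MJPEG HTTP stream. A minimal sketch of the serving side, assuming a Flask app (the web framework is not shown in the example):

from flask import Flask, Response

app = Flask(__name__)
camera = VideoCamera(usePiCamera=False).start()

def gen(cam):
    # Yield an endless multipart stream, one JPEG image per part.
    while True:
        frame = cam.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    # Browsers render multipart/x-mixed-replace JPEG parts as live video.
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')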
Example #5
pt1y = 30

cv2.namedWindow("River Angle")

cv2.createTrackbar("Degrees", "River Angle", 90, 180, nothing)

while True:
    
    image = cap.read()
    
    ang = cv2.getTrackbarPos("Degrees", "River Angle")
    ang = ang - 90
    
    riv_ang = ang * (pi / 180)
    strrivang = "%d degrees" % round(ang)
    
    pt2x = round(pt1x + 100 * sin(riv_ang))
    pt2y = round(pt1y + 100 * cos(riv_ang))
    
    cv2.putText(image, strrivang, (pt1x - 15, pt1y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.arrowedLine(image, (pt1x, pt1y), (pt2x, pt2y), (0, 0, 255), 3)
    
    cv2.imshow("Frame", image)
    
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.stop()
cv2.destroyAllWindows()

np.savetxt("/home/pi/PartD_Project/Variables/river_angle.csv", np.array([ang]))
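
This fragment assumes cap, pt1x, and the trackbar callback nothing are defined earlier in the script. A plausible preamble (the names come from the fragment; the pt1x value mirrors pt1y and is an assumption):

from math import sin, cos, pi

import cv2
import numpy as np
from imutils.video import VideoStream

def nothing(x):
    # cv2.createTrackbar requires a callback, even if it does nothing.
    pass

pt1x = 30  # arrow origin x; assumed to match pt1y above
cap = VideoStream(src=0).start()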
Example #6
class PeekabooController(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)

        self.running = False

        self.soundCtrlr = SoundController()

        self.initVideo()

        self.whereIsEveryoneFlag = False
        self.iSeeSomeoneFlag = False
        self.failure = False
        self.record = False

    def initVideo(self):
        # Initialize the attribute first so the None check below cannot
        # raise AttributeError if the constructor fails.
        self.video = None
        try:
            self.video = VideoStream(src=0)
        except Exception:
            print("video stream not found")
        if self.video is None:
            print("video stream was not initialized")
            return

        try:
            self.video.start()
        except Exception:
            print("video failed to start")

        # construct the face detector and allow the camera to warm up
        try:
            face = "cascades/haarcascade_frontalface_default.xml"
            self.faceDetector = FaceDetector(face)
            sleep(0.1)
        except Exception:
            print("face detector init failed")

        # choose xvid codec
        try:
            self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        except Exception:
            print("video writer not found")

        sleep(0.1)

    #called by the thread
    def run(self):
        self._start()

    def toggleRecord(self):
        self.record = not self.record
        if self.record:
            print("video recording started")
        else:
            print("video recording stopped")

    # start looking
    def _start(self):
        self.running = True

        zeros = None

        previousX = 0
        direction = "NONE"

        self.writer = None
        (h, w) = (None, None)

        # run until the controller is stopped
        while True:
            # capture frames from the camera
            if self.running:
                frame = self.video.read()
                if frame is None:
                    print("ERROR: cannot read frame from video, stopping Peekaboo. "
                          "If you want Peekaboo to work, connect a camera and restart R2.py")
                    self.failure = True
                    self.stop()
                    break

                # resize the frame and convert it to grayscale
                frame = self.resizeImage(frame, width=500)

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # detect faces in the image and then clone the frame
                # so that we can draw on it
                faceRects = self.faceDetector.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
                frameClone = frame.copy()

                # where is everyone?
                if len(faceRects) <= 0:
                    cv2.putText(frameClone, "WHERE IS EVERYONE?", (20, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
                    self.whereIsEveryone()
                else:
                    # peekaboo!
                    # R2 is happy to see someone
                    self.iSeeSomeone()

                # loop over the face bounding boxes and draw them
                for (fX, fY, fW, fH) in faceRects:
                    cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (255, 0, 0), 2)

                    # only turn the head if the face drifts far out of center
                    if previousX - 10 < fX < previousX + 10:
                        direction = "NONE"
                    elif fX <= previousX - 10:
                        direction = "LEFT"
                    else:
                        direction = "RIGHT"

                    # turn R2's head to keep face centered
                    # if direction == "LEFT":
                        # self.mainCtrlr.rightThumbX(self.mainCntlr, self.mainCtrlr.xValueRight - 10)
                    # elif direction == "RIGHT":
                        # self.mainCtrlr.rightThumbX(self.mainCntlr, self.mainCtrlr.xValueRight + 10)

                    cv2.putText(frameClone, "PEEKABOO!".format(direction), (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.55, (0, 255, 0), 2)

                    if direction != "NONE":
                        cv2.putText(frameClone, "<Turn {}>".format(direction), (fX, fY - 0), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.55, (0, 255, 0), 2)

                    previousX = fX

                # show our detected faces, then clear the frame in preparation for the next frame
                # NOTE: comment this out if you don't want the video stream window to show in terminal
                cv2.imshow("Face", frameClone)

                # write video to file
                if self.record:
                    if self.writer is None:
                        # store the image dimensions, initialize the video writer,
                        # and construct the zeros array
                        (h, w) = frameClone.shape[:2]
                        self.writer = cv2.VideoWriter("r2_recording.avi", self.fourcc, 4,
                                                      (w, h), True)
                    output = np.zeros((h, w, 3), dtype="uint8")
                    output[0:h, 0:w] = frameClone
                    self.writer.write(output)

                # NOTE: comment this out if you don't want the video stream window to show in terminal
                # if the 'q' key is pressed, stop the loop
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    print("keypress 'q', stopping Peekaboo")
                    self.stop()
                    break

    def resume(self):
        print("starting PeekabooController")
        if self.failure:
            print("ERROR: the video failed to load. If you want Peekaboo to work, "
                  "you will need to connect the camera and restart R2.py")
            return
        self.running = True

    def stop(self):
        print("stopping PeekabooController")
        self.running = False
        if self.writer is not None:
            self.writer.release()

    def stopVideo(self):
        if self.video is not None:
            self.video.stop()

        try:
            cv2.destroyAllWindows()
        except Exception:
            pass

    def whereIsEveryone(self):
        if not self.whereIsEveryoneFlag:
            SoundController.worried(self.soundCtrlr)
            self.whereIsEveryoneFlag = True
            self.iSeeSomeoneFlag = False

    def iSeeSomeone(self):
        if not self.iSeeSomeoneFlag:
            SoundController.whistle(self.soundCtrlr)
            self.whereIsEveryoneFlag = False
            self.iSeeSomeoneFlag = True

    def resizeImage(self, image, width=None, height=None, inter=cv2.INTER_AREA):
        # initialize the dimensions of the image to be resized and
        # grab the image size
        dim = None
        (h, w) = image.shape[:2]

        # if both the width and height are None, then return the
        # original image
        if width is None and height is None:
            return image

        # check to see if the width is None
        if width is None:
            # calculate the ratio of the height and construct the
            # dimensions
            r = height / float(h)
            dim = (int(w * r), height)

        # otherwise, the height is None
        else:
            # calculate the ratio of the width and construct the
            # dimensions
            r = width / float(w)
            dim = (width, int(h * r))

        # resize the image
        resized = cv2.resize(image, dim, interpolation=inter)

        # return the resized image
        return resized
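
Example #6 constructs a FaceDetector from a Haar cascade path. The class itself is not in the listing; given the detect() call in _start(), a minimal sketch is a thin wrapper over cv2.CascadeClassifier (the class shape is an assumption):

import cv2

class FaceDetector:
    """Thin cascade wrapper matching the detect() call used above (assumed)."""

    def __init__(self, cascade_path):
        self.classifier = cv2.CascadeClassifier(cascade_path)

    def detect(self, image, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)):
        # Returns (x, y, w, h) boxes; empty when no faces are found.
        return self.classifier.detectMultiScale(
            image, scaleFactor=scaleFactor, minNeighbors=minNeighbors,
            minSize=minSize)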
Example #7
        time_appear_drone = 0

    date = str(y) + str(mo) + str(d) + str(h) + str(mi)
    reimage = cv2.resize(capframe, (800, 600))

    if distance < 0:
        distance = 0

    text = "Number of drones: {}".format(len(num))
    # Draw framerate in corner of frame
    cv2.putText(frame, 'FPS: {0:.2f}'.format(frame_rate_calc), (30, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)
    cv2.putText(frame, text, (30, 100),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)
    cv2.putText(frame, 'Distance: {0:.1f}cm'.format(distance), (30, 150),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)
    cv2.putText(frame, 'DAY: ' + str(y) + 'Y' + str(mo) + 'M' + str(d) + 'D' +
                str(h) + 'H' + str(mi) + 'm', (30, 650),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)
     
    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Object detector', frame)

    # Calculate framerate
    t2 = cv2.getTickCount()
    time1 = (t2 - t1) / freq
    frame_rate_calc = 1 / time1

    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break

# Clean up
cv2.destroyAllWindows()
videostream.stop()
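
The framerate math in this fragment relies on freq and t1 being initialized with OpenCV's tick counter before and inside the loop. The usual setup, using the fragment's own variable names:

import cv2

# One-time setup before the capture loop:
freq = cv2.getTickFrequency()  # ticks per second
frame_rate_calc = 1.0          # placeholder FPS drawn on the first frame

# At the top of each loop iteration:
t1 = cv2.getTickCount()
# ... grab a frame, run detection, draw overlays (as above) ...
# then t2 = cv2.getTickCount(); time1 = (t2 - t1) / freq; FPS = 1 / time1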