Example 1
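Object detection on a live camera feed with a quantized SSD MobileNet v2 TFLite model on a Coral Edge TPU; detections are drawn onto each frame with OpenCV.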
import argparse
import os

import cv2
from PIL import Image

# Project-local helpers: common, load_labels, get_output, append_objs_to_img
# and VideoStream are defined elsewhere in the surrounding repo.


def main():
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument(
        '--top_k',
        type=int,
        default=3,
        help='number of categories with highest score to display')
    parser.add_argument('--camera_idx',
                        type=int,
                        help='Index of the video source to use.',
                        default=0)
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='classifier score threshold')
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    videostream = VideoStream().start()
    while True:
        frame = videostream.read()
        if frame is None:
            break
        cv2_im = frame

        cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
        pil_im = Image.fromarray(cv2_im_rgb)

        common.set_input(interpreter, pil_im)
        interpreter.invoke()
        objs = get_output(interpreter,
                          score_threshold=args.threshold,
                          top_k=args.top_k)
        cv2_im = append_objs_to_img(cv2_im, objs, labels)

        cv2.imshow('frame', cv2_im)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    videostream.stop()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
Example 2
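Driver loop for a robot-soccer vision pipeline: the excerpt opens with the tail of a VideoProcess method, then the main block pulls frames from a VideoStream and extracts obstacle, ball, goal and wall detections.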
            return [None, None]
        return [None, None]


if __name__ == '__main__':

    import time

    import cv2
    from videostream import VideoStream

    vision = VideoProcess(debug=True, goal='yellow', ball=True, obstacles=True)
    stream = VideoStream().start()
    time.sleep(2)

    try:
        while True:

            frame = stream.read()

            if frame is None:
                break

            # vision.update returns obstacle, ball, goal and wall detections
            obs, ball, goal, wall = vision.update(frame)

            cv2.imshow("scene", vision.get())
            key = cv2.waitKey(1) & 0xFF

            if key == ord('q'):
                break

    except KeyboardInterrupt:
        pass
    finally:
        stream.stop()
        cv2.destroyAllWindows()
Example 3
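Raspberry Pi client that streams PiCamera frames to a central server over ZMQ using imagezmq, tagging each frame with the Pi's hostname.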
from imutils.video import VideoStream
import imagezmq
import argparse
import socket
import time

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument(
    "-s",
    "--server-ip",
    required=True,
    help="ip address of the server to which the client will connect")
args = vars(ap.parse_args())
# initialize the ImageSender object with the socket address of the
# server
sender = imagezmq.ImageSender(
    connect_to="tcp://{}:5555".format(args["server_ip"]))

# get the host name, initialize the video stream, and allow the
# camera sensor to warmup
rpiName = socket.gethostname()
vs = VideoStream(usePiCamera=True).start()
#vs = VideoStream(usePiCamera=True, resolution=(320, 240)).start()
#vs = VideoStream(src=0).start() # Used for a usb camera
time.sleep(2.0)

while True:
    # read the frame from the camera and send it to the server
    frame = vs.read()
    #frame = imutils.resize(frame, width=320)  # optionally resize the frame before sending
    sender.send_image(rpiName, frame)
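
For context, the receiving end of this sender could look like the following minimal sketch; this is an assumption based on imagezmq's default REQ/REP mode on port 5555, not part of the original example.

import cv2
import imagezmq

image_hub = imagezmq.ImageHub()  # binds to tcp://*:5555 by default

while True:
    # recv_image() blocks until a frame arrives; the message is the sender's hostname
    rpi_name, frame = image_hub.recv_image()
    cv2.imshow(rpi_name, frame)  # one window per sending host
    image_hub.send_reply(b'OK')  # REQ/REP mode requires a reply for every frame
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()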
Example 4
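A threaded photo-booth camera: a four-state machine trains a background subtractor, waits until faces are reliably present, counts down, then composites a random hat onto each detected face and saves the result.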
import datetime
import os
import random
import time
from threading import Thread

import cv2
import numpy as np
# VideoStream is assumed here to be imutils.video's threaded stream,
# which matches the usePiCamera/src/resolution/framerate signature below.
from imutils.video import VideoStream


class VideoCamera(object):
    INIT_TIME = 100
    FRAME_WIDTH = 640
    FRAME_HEIGHT = 480
    HISTORY = 25
    THRESHOLD = 18

    def __init__(self, usePiCamera=True):
        # initialize the camera and grab a reference to the raw camera capture
        self.vs = VideoStream(usePiCamera=usePiCamera,
                              src=0,
                              resolution=(self.FRAME_WIDTH, self.FRAME_HEIGHT),
                              framerate=12)

        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.fgbg = cv2.createBackgroundSubtractorMOG2(history=self.INIT_TIME,
                                                       varThreshold=12,
                                                       detectShadows=False)
        self.face_cascade = cv2.CascadeClassifier(
            'models/haarcascade_frontalface_default.xml')

        self.background1 = cv2.imread('backgrounds/background1.jpeg')
        self.background1 = cv2.resize(self.background1,
                                      (self.FRAME_WIDTH, self.FRAME_HEIGHT))

        self.hats = [
            cv2.imread('./hats/' + file, cv2.IMREAD_UNCHANGED)
            for file in os.listdir('./hats')
        ]

        self.stopped = False
        self.frame = None
        self.state = 0
        self.time = 0
        self.lastReady = 0
        self.lastCapture = 0
        self.history = np.zeros(self.HISTORY)

    def start(self):
        self.vs.start()
        # start the thread to process frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def stop(self):
        self.vs.stop()
        # indicate that the thread should be stopped
        self.stopped = True

    def update(self):
        # Keep looping infinitely until the thread is stopped
        while True:
            # If the thread indicator variable is set, stop the thread
            if self.stopped:
                return

            # Grab an image from the video stream
            frame = self.vs.read()

            # Wait until frames start to be available
            if frame is None:
                time.sleep(1)
                continue

            # Resize frame to fit working dimensions
            frame = cv2.resize(frame, (self.FRAME_WIDTH, self.FRAME_HEIGHT), 0,
                               0, cv2.INTER_CUBIC)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)

            fgmask = self.fgbg.apply(frame,
                                     learningRate=0 if self.state > 0 else -1)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, self.kernel)
            bgmask = 255 - fgmask

            # State machine
            # S0: Training
            if self.state == 0:
                cv2.putText(img=fgmask,
                            text='Training Segmentation Algorithm (' +
                            str(int(100.0 * self.time / self.INIT_TIME)) +
                            ' %)',
                            org=(0, self.FRAME_HEIGHT - 5),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=.7,
                            color=(255, 255, 255),
                            thickness=1)

                # Transition to S1
                if self.time >= self.INIT_TIME:
                    self.state = 1

                # Show mask when training.
                self.frame = fgmask

            # S1: Ready to capture
            elif self.state == 1:
                # Detect and draw faces
                faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
                for (x, y, w, h) in faces:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                                  2)

                # Update history
                self.history[self.time % self.HISTORY] = min(len(faces), 1)

                self.frame = frame
                #self.frame = cv2.bitwise_and(self.background1, self.background1, mask=bgmask) + cv2.bitwise_and(frame, frame, mask=fgmask)

                cv2.putText(self.frame,
                            text='Frame ' + str(self.time),
                            org=(0, self.FRAME_HEIGHT - 5),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=.7,
                            color=(255, 255, 255),
                            thickness=1)

                # Transition to S2 if faces present.
                if (np.sum(self.history) >= self.THRESHOLD
                        and self.time > self.lastCapture + self.HISTORY):
                    self.lastReady = self.time
                    self.state = 2
                # Transition to S0 if no faces and the time is up; reset all
                elif (np.sum(self.history) < self.THRESHOLD
                      and self.time > 4 * self.INIT_TIME):
                    self.time = 0
                    self.lastReady = 0
                    self.lastCapture = 0
                    self.state = 0

            # S2: Countdown
            elif self.state == 2:
                self.frame = frame

                cv2.putText(self.frame,
                            text=str(3 -
                                     int((self.time - self.lastReady) / 10)),
                            org=(int(self.FRAME_WIDTH / 2),
                                 int(self.FRAME_HEIGHT / 2)),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=3,
                            color=(255, 255, 255),
                            thickness=2)

                if self.time - self.lastReady >= 29:
                    self.state = 3

            # S3: Capture
            elif self.state == 3:
                #frame = cv2.bitwise_and(self.background1, self.background1, mask=bgmask) + cv2.bitwise_and(frame, frame, mask=fgmask)

                faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)

                for (x, y, w, h) in faces:
                    # Select a hat and fit it to the face
                    hat = random.choice(self.hats)
                    x_factor = 1.45
                    y_offset = 0.15
                    hat = cv2.resize(
                        hat, (int(w * x_factor),
                              int(w * x_factor / hat.shape[1] * hat.shape[0])),
                        0, 0, cv2.INTER_CUBIC)
                    # Get the fitted hat width/height, and crop to the bounding box.
                    h_h, h_w = hat.shape[:2]
                    h_x1 = max(int(x - (h_w - w) / 2), 0)
                    h_x2 = min(int(x + w + (h_w - w) / 2), self.FRAME_WIDTH)
                    h_y1 = max(int(y + h * y_offset - h_h), 0)
                    h_y2 = min(int(y + h * y_offset), self.FRAME_HEIGHT)
                    # Recalculate hat width/height if cropped by the bounding box.
                    h_w = h_x2 - h_x1
                    h_h = h_y2 - h_y1

                    # Blend hat to frame with alpha-channel.
                    for c in range(0, 3):
                        alpha = hat[-h_h:, :h_w, 3] / 255.0
                        color = hat[-h_h:, :h_w, c] * alpha
                        beta = frame[h_y1:h_y2, h_x1:h_x2, c] * (1.0 - alpha)
                        frame[h_y1:h_y2, h_x1:h_x2, c] = color + beta

                # Save the image to disk.
                filename = 'images/' + datetime.datetime.now().strftime(
                    "%Y%m%d-%H%M%S") + '.png'
                print('Saving image: ', filename)
                cv2.imwrite(filename, frame)

                self.frame = frame
                time.sleep(10)

                # Transition to S1
                self.lastCapture = self.time
                self.state = 1

            self.time = self.time + 1
            time.sleep(1 / 16)

    def get_frame(self):
        # Wait until frames start to be available
        while self.frame is None:
            time.sleep(0.01)  # avoid a busy-wait while the first frame arrives

        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        ret, jpeg = cv2.imencode('.jpg', self.frame)
        return jpeg.tobytes()
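
Since get_frame() returns encoded JPEG bytes, the natural consumer is a multipart MJPEG HTTP response. The following is a hypothetical Flask wrapper, a minimal sketch whose route and names are illustrative rather than taken from the source:

from flask import Flask, Response

app = Flask(__name__)
camera = VideoCamera().start()


def gen(cam):
    # Wrap each JPEG frame in a multipart boundary for MJPEG streaming.
    while True:
        frame = cam.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.route('/video_feed')
def video_feed():
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000)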
Example 5
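Feeds frames from a video file (or webcam) into a CNT background-subtraction wrapper (CreateBGCNT) and displays the result while measuring FPS.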
if __name__ == '__main__':

    import cv2

    from videostream import VideoStream
    from videostream import FPS

    fuente = ['../installationFiles/mySquare.mp4', 0]

    # Create the BG object and get the source input
    bg = CreateBGCNT()
    vs = VideoStream(src=fuente[0], resolution=(640, 480)).start()  # 0.5 pmx

    fps = FPS().start()

    while True:
        frame, frame_resized = vs.read()
        # Feed frames
        bg.alimentar(frame_resized)

        # Uncomment to print the matches:
        #print(bg.matches)

        # To visualize the result, keep the next two lines:
        bg.draw()
        cv2.imshow('frame', frame_resized)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        fps.update()

    # stop the timer and display FPS information
    # (assuming the custom FPS class mirrors imutils.video.FPS)
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    vs.stop()
    cv2.destroyAllWindows()
Example 6
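Raspberry Pi drone-detection sender: reads frames, prepares them as [1xHxWx3] input for a TFLite detector, and ships images to a server with imagezmq.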
dnum = 0
time_appear_drone = 0
boxthickness = 3
linethickness = 2
# Initialize video stream
#videostream = VideoStream(resolution=(800,600),framerate=30).start()

rectangule_color = (10, 255, 0)

sender = imagezmq.ImageSender(connect_to='tcp://192.168.0.44:5555')
print("send")
rpi_name = socket.gethostname()  # send RPi hostname with each image
print("name")
#picam = VideoStream(usePiCamera=False).start()
picam = VideoStream(resolution=(800, 600), framerate=30).start()
print(picam.read().shape)
print("picam")
time.sleep(2.0)  # allow camera sensor to warm up
print("sleep")

while True:  # send images as stream until Ctrl-C
    # Start timer (for calculating frame rate)
    t1 = cv2.getTickCount()
    frame1 = picam.read()
    # Acquire frame and resize to expected shape [1xHxWx3]
    frame = frame1.copy()
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (width, height))
    input_data = np.expand_dims(frame_resized, axis=0)

    # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
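
The excerpt stops at this comment; in the stock TFLite detection examples it is followed by a step like the one below, where floating_model, input_mean and input_std come from the interpreter's input details set up earlier in the full script (Example 11 shows the usual 127.5 constants):

    if floating_model:
        # Scale uint8 pixels to the [-1, 1] range expected by float models.
        input_data = (np.float32(input_data) - input_mean) / input_std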
Example 7
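Overlays an adjustable "river angle" arrow on a video stream: a trackbar sets the angle in degrees, which is converted to radians to place the arrow endpoint (the cap stream is created above the excerpt's cut).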
time.sleep(0.1)

pi = math.pi
cos = math.cos
sin = math.sin

pt1x = 860
pt1y = 30

def nothing(x):
    # no-op callback required by cv2.createTrackbar
    pass

cv2.namedWindow("River Angle")

cv2.createTrackbar("Degrees", "River Angle", 90, 180, nothing)

while True:
    
    image = cap.read()
    
    ang = cv2.getTrackbarPos("Degrees", "River Angle")
    ang = ang - 90
    
    riv_ang = ang * (pi / 180)
    strrivang = "%d degrees" % round(ang)
    
    pt2x = round(pt1x + 100 * sin(riv_ang))
    pt2y = round(pt1y + 100 * cos(riv_ang))

    cv2.putText(image, strrivang, (pt1x - 15, pt1y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.arrowedLine(image, (pt1x, pt1y), (pt2x, pt2y), (0, 0, 255), 3)
    
    cv2.imshow("Frame", image)
    
Example 8
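A threaded "peekaboo" controller for an R2 droid: it detects faces, plays sounds when people appear or disappear, suggests head-turn directions, and can record the annotated stream to disk.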
import threading
from time import sleep

import cv2
import numpy as np
# VideoStream is assumed here to be imutils.video's threaded stream.
from imutils.video import VideoStream

# Project-local helpers: FaceDetector and SoundController are defined
# elsewhere in this repo.


class PeekabooController(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)

        self.running = False

        self.soundCtrlr = SoundController()

        self.initVideo()

        self.whereIsEveryoneFlag = False
        self.iSeeSomeoneFlag = False
        self.failure = False
        self.record = False

    def initVideo(self):
        self.video = None
        try:
            self.video = VideoStream(src=0)
        except Exception:
            print("video stream not found")
        if self.video is None:
            print("video stream was not initialized")
            return

        try:
            self.video.start()
        except Exception:
            print("video failed to start")

        # construct the face detector and allow the camera to warm up
        try:
            face = "cascades/haarcascade_frontalface_default.xml"
            self.faceDetector = FaceDetector(face)
            sleep(0.1)
        except Exception:
            print("face detector init failed")

        # choose xvid codec
        try:
            self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        except Exception:
            print("video writer not found")

        sleep(0.1)

    #called by the thread
    def run(self):
        self._start()

    def toggleRecord(self):
        if(self.record == True):
            self.record = False
            print("video recording stopped")
        else:
            self.record = True
            print("video recording started")

    # start looking
    def _start(self):
        self.running = True

        zeros = None

        previousX = 0
        direction = "NONE"

        self.writer = None
        (h, w) = (None, None)

        # run until the controller is stopped
        while (True):
            # capture frames from the camera
            if(self.running == True):
                frame = self.video.read()
                if(frame is None):
                    print("ERROR: cannot read frame from video, stopping Peekaboo. If you want Peekaboo to work, connect camera and restart R2.py")
                    self.failure = True
                    self.stop()
                    break

                # resize the frame and convert it to grayscale
                frame = self.resizeImage(frame, width=500)

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # detect faces in the image and then clone the frame
                # so that we can draw on it
                faceRects = self.faceDetector.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
                frameClone = frame.copy()

                # where is everyone?
                if len(faceRects) <= 0:
                    cv2.putText(frameClone, "WHERE IS EVERYONE?", (20, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
                    self.whereIsEveryone()
                else:
                    # peekaboo!
                    # R2 is happy to see someone
                    self.iSeeSomeone()

                # loop over the face bounding boxes and draw them
                for (fX, fY, fW, fH) in faceRects:
                    cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (255, 0, 0), 2)

                    # only turn head if face gets far out of center
                    if (previousX - 10) < fX < (previousX + 10):
                        direction = "NONE"
                    elif fX <= (previousX - 10):
                        direction = "LEFT"
                    else:
                        direction = "RIGHT"

                    # turn R2's head to keep face centered
                    # if direction == "LEFT":
                        # self.mainCtrlr.rightThumbX(self.mainCntlr, self.mainCtrlr.xValueRight - 10)
                    # elif direction == "RIGHT":
                        # self.mainCtrlr.rightThumbX(self.mainCntlr, self.mainCtrlr.xValueRight + 10)

                    cv2.putText(frameClone, "PEEKABOO!".format(direction), (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.55, (0, 255, 0), 2)

                    if direction != "NONE":
                        cv2.putText(frameClone, "<Turn {}>".format(direction), (fX, fY - 0), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.55, (0, 255, 0), 2)

                    previousX = fX

                # show our detected faces, then clear the frame in preparation for the next frame
                # NOTE: comment this out if you don't want the video stream window to show in terminal
                cv2.imshow("Face", frameClone)

                # write video to file
                if self.record == True:
                    if self.writer is None:
                        # store the image dimensions, initialize the video writer,
                        # and construct the zeros array
                        (h, w) = frameClone.shape[:2]
                        self.writer = cv2.VideoWriter("r2_recording.avi", self.fourcc, 4,
                                                      (w, h), True)
                    output = np.zeros((h, w, 3), dtype="uint8")
                    output[0:h, 0:w] = frameClone
                    self.writer.write(output)

                # NOTE: comment this out if you don't want the video stream window to show in terminal
                # if the 'q' key is pressed, stop the loop
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    print("keypress 'q', stopping Peekaboo")
                    self.stop()
                    break

    def resume(self):
        print("starting PeekabooController")
        if(self.failure == True):
            print("ERROR: the video had failed to load.  If you want Peekaboo to work, you will need to connect the camera and restart R2.py")
            return
        self.running = True

    def stop(self):
        print("stopping PeekabooController")
        self.running = False
        if self.writer is not None:
            self.writer.release()

    def stopVideo(self):
        if(self.video is not None):
            self.video.stop()

        try:
            cv2.destroyAllWindows()
        except Exception:
            pass

    def whereIsEveryone(self):
        if(self.whereIsEveryoneFlag == False):
            SoundController.worried(self.soundCtrlr)
            self.whereIsEveryoneFlag = True
            self.iSeeSomeoneFlag = False

    def iSeeSomeone(self):
        if(self.iSeeSomeoneFlag == False):
            SoundController.whistle(self.soundCtrlr)
            self.whereIsEveryoneFlag = False
            self.iSeeSomeoneFlag = True

    def resizeImage(self, image, width=None, height=None, inter=cv2.INTER_AREA):
        # initialize the dimensions of the image to be resized and
        # grab the image size
        dim = None
        (h, w) = image.shape[:2]

        # if both the width and height are None, then return the
        # original image
        if width is None and height is None:
            return image

        # check to see if the width is None
        if width is None:
            # calculate the ratio of the height and construct the
            # dimensions
            r = height / float(h)
            dim = (int(w * r), height)

        # otherwise, the height is None
        else:
            # calculate the ratio of the width and construct the
            # dimensions
            r = width / float(w)
            dim = (width, int(h * r))

        # resize the image
        resized = cv2.resize(image, dim, interpolation=inter)

        # return the resized image
        return resized
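
A minimal usage sketch for the controller; the main-thread loop here is illustrative, not from the source, and assumes the project-local cascades and sound assets are in place:

if __name__ == '__main__':
    controller = PeekabooController()
    controller.start()  # Thread.start() invokes run(), which calls _start()
    try:
        while not controller.failure:
            sleep(0.5)  # the main thread idles while the controller works
    except KeyboardInterrupt:
        pass
    finally:
        controller.stop()
        controller.stopVideo()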
Example 9
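Camera-calibration capture tool: command-line arguments select the camera type and resolution (the excerpt opens mid-way through that handling), the space bar saves numbered frames to ./calib_pics, and 'q' quits.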
    isPiCamera = False
    cam_type = "usbcamera"

if int(sys.argv[2]) == 640:
    resolution = (640, 480)
elif int(sys.argv[2]) == 320:
    resolution = (320, 240)
else:
    print "Not valid resolution"
    quit()

vs = VideoStream(isPiCamera=isPiCamera, resolution=resolution).start()
numb_of_pics = 0

while True:
    img = vs.read()

    cv2.imshow("Press Space to Save Frame", img)

    keypress = cv2.waitKey(1) & 0xFF
    if keypress == 32:
        numb_of_pics += 1
        file_name = cam_type + str(resolution[0]) + '_' + str(numb_of_pics)
        cv2.imwrite('./calib_pics/' + file_name + '.jpg', img)

    if keypress == ord('q'):
        print(str(numb_of_pics) + ' pictures saved')
        break

vs.stop()
cv2.destroyAllWindows()
Example 10
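Opens a USB camera through a custom VideoStream wrapper, shows frames until 'q' is pressed, and reports the measured FPS on exit.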
'''
### Example 1: open a USB camera
'''
import cv2
from videostream import VideoStream
from imutils.video import FPS

camera_id = 0

vs = VideoStream(camera_id)

vs.start()
fps = FPS().start()

while True:
    res, frame = vs.read()
    fps.update()
    if not res:
        print("Camera Read Failed")
        break
    
    cv2.imshow("test", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
vs.release()
Example 11
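Setup and main loop of a drone-detection script (a fuller variant of Example 6): it initializes the frame-rate counter and an 800x600 stream, then copies each frame for TFLite preprocessing (the excerpt ends mid-loop).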
input_mean = 127.5
input_std = 127.5

# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()

dnum = 0
time_appear_drone = 0
zero_count = 0
boxthickness = 3
linethickness = 2
# Initialize video stream
videostream = VideoStream(resolution=(800, 600), framerate=30).start()
time.sleep(1)
print(videostream.read().shape)
rectangule_color = (10, 255, 0)

#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:

    # Start timer (for calculating frame rate)
    t1 = cv2.getTickCount()
    d = 0
    distance = 0
    # Grab frame from video stream
    frame1 = videostream.read()

    # Acquire frame and resize to expected shape [1xHxWx3]
    frame = frame1.copy()
    capframe = frame1.copy()