Example #1
 def newplay(self):
     ''' get replay '''
     # put image play_b in btn_toogle_pause
     self.btn_toogle_pause['image'] = self.photos._play
     # load video stream
     self.vid = VideoStream(self.video_source)
     # set the refresh delay from the stream frame rate
     self.delay = self.vid.f_rate
     # set scale value.
     self.var_t.set(0.1)
     # get the duration of the video stream
     self.duracion = self.vid.duration
     # print time duration
     print('>> newplay: self.duracion ->', self.duracion)
     # set the scale's maximum value to the duration
     self.scale.configure(to=self.duracion)
     # print configure values scale
     # print('>> self.scale.configure ->', self.scale)
     # adjust the volume to the selected level.
     self.vid.player.set_volume(float(self.soundvar.get()))
     # vertical window dimension.
     h = self.conten_controls.winfo_height()
     h_f = self.vid.h + h + 2
     w_f = self.vid.w + 2
     cad = f'{w_f}x{h_f}'
     self._width = w_f
     self._height = h_f
     self.master.geometry(cad)
     self.canvas.config(width=self.vid.w, height=self.vid.h)
Example #2
def main_debug(displaying):
    video_file = os.path.join(os.path.dirname(__file__), "video/staircase.mp4")

    vid_stream = VideoStream(video_file, interval=0.03)
    vid_stream.start()

    if debug:  # module-level flag, defined outside this snippet
        import ptvsd
        ptvsd.enable_attach(('0.0.0.0', 56781))
        ptvsd.wait_for_attach()
        ptvsd.break_into_debugger()

    while True:
        _, frame = vid_stream.get_frame_with_id()
        detections = detector.detect(frame)
        #logging.info(detections)

        if not displaying:
            logging.info(detections)
            continue

        frame = display(frame, detections)
        # check to see if the output frame should be displayed to our
        # screen
        cv2.imshow("Frame", frame)

        key = cv2.waitKey(1) & 0xFF

        if key == ord('q') or key == 27:
            break

    cv2.destroyAllWindows()
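
The VideoStream here is a project-local class rather than the imutils one. A minimal sketch of a threaded reader matching the calls above (VideoStream(path, interval=...), .start(), .get_frame_with_id()) — everything beyond those names is assumption:

import threading
import time

import cv2

class VideoStream:
    # Hypothetical sketch inferred from the call sites above.
    def __init__(self, source, interval=0.03):
        self.cap = cv2.VideoCapture(source)
        self.interval = interval      # seconds between grabs
        self.frame_id = 0
        self.frame = None
        self.lock = threading.Lock()
        self.stopped = False

    def start(self):
        threading.Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        while not self.stopped:
            ok, frame = self.cap.read()
            if not ok:
                break
            with self.lock:
                self.frame_id += 1
                self.frame = frame
            time.sleep(self.interval)

    def get_frame_with_id(self):
        with self.lock:
            return self.frame_id, self.frame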
Example #3
    def __init__(self, usePiCamera=True):
        # initialize the camera and grab a reference to the raw camera capture
        self.vs = VideoStream(usePiCamera=usePiCamera,
                              src=0,
                              resolution=(self.FRAME_WIDTH, self.FRAME_HEIGHT),
                              framerate=12)

        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.fgbg = cv2.createBackgroundSubtractorMOG2(history=self.INIT_TIME,
                                                       varThreshold=12,
                                                       detectShadows=False)
        self.face_cascade = cv2.CascadeClassifier(
            'models/haarcascade_frontalface_default.xml')

        self.background1 = cv2.imread('backgrounds/background1.jpeg')
        self.background1 = cv2.resize(self.background1,
                                      (self.FRAME_WIDTH, self.FRAME_HEIGHT))

        self.hats = [
            cv2.imread('./hats/' + file, cv2.IMREAD_UNCHANGED)
            for file in os.listdir('./hats')
        ]

        self.stopped = False
        self.frame = None
        self.state = 0
        self.time = 0
        self.lastReady = 0
        self.lastCapture = 0
        self.history = np.zeros(self.HISTORY)
Example #4
    def initVideo(self):
        self.video = None
        try:
            self.video = VideoStream(src=0)
        except Exception:
            print("video stream not found")
        if self.video is None:
            print("video stream was not initialized")
            return

        try:
            self.video.start()
        except Exception:
            print("video failed to start")

        # construct the face detector and allow the camera to warm up
        try:
            face = "cascades/haarcascade_frontalface_default.xml"
            self.faceDetector = FaceDetector(face)
            sleep(0.1)
        except Exception:
            print("face detector init failed")

        # choose the XVID codec
        try:
            self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        except Exception:
            print("video writer not found")

        sleep(0.1)
Example #5
def main():
    default_model_dir = '../all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument(
        '--top_k',
        type=int,
        default=3,
        help='number of categories with highest score to display')
    parser.add_argument('--camera_idx',
                        type=int,
                        help='Index of which video source to use. ',
                        default=0)
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='classifier score threshold')
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    interpreter = common.make_interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)

    videostream = VideoStream().start()
    while True:
        frame = videostream.read()
        #if not ret:
        #    break
        cv2_im = frame

        cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
        pil_im = Image.fromarray(cv2_im_rgb)

        common.set_input(interpreter, pil_im)
        interpreter.invoke()
        objs = get_output(interpreter,
                          score_threshold=args.threshold,
                          top_k=args.top_k)
        cv2_im = append_objs_to_img(cv2_im, objs, labels)

        cv2.imshow('frame', cv2_im)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    #cap.release()
    videostream.stop()
    cv2.destroyAllWindows()
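
append_objs_to_img is defined elsewhere in this Edge TPU example; a sketch of the usual implementation, assuming each obj carries a normalized bbox plus id and score:

import cv2

def append_objs_to_img(cv2_im, objs, labels):
    # Sketch: assumes obj.bbox holds normalized (x0, y0, x1, y1) in 0..1.
    height, width, _ = cv2_im.shape
    for obj in objs:
        x0, y0, x1, y1 = obj.bbox
        x0, y0 = int(x0 * width), int(y0 * height)
        x1, y1 = int(x1 * width), int(y1 * height)
        label = '{}% {}'.format(int(100 * obj.score), labels.get(obj.id, obj.id))
        cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1), (0, 255, 0), 2)
        cv2_im = cv2.putText(cv2_im, label, (x0, y0 + 30),
                             cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
    return cv2_im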
Example #6
 def __init__(self, usePiCamera, width, height, camPort):
     self.ct = CentroidTracker()
     self.targetID = -1000000
     self.targetCentroid = []
     self.radius = 20
     self.cap = VideoStream(usePiCamera, width, height, camPort).start()
     self.haar_cascade = cv2.CascadeClassifier("data/HS.xml")
     self.frameCount = 0
     self.frameCaptureNumber = 7
     self.thresholdX = int(width / 2)
     self.thresholdY = int(height / 2)
     self.width = width
     self.height = height
Example #7
    def describe(self, request):
        self.print_log('Process method DESCRIBE')

        header = {
            'CSeq': request.seqnum,
            'Session': self.sessionid
        }

        try:
            self.videostream = VideoStream(request.filename)
            header['Frame-Rate'] = self.videostream.fps
            header['Frame-Count'] = self.videostream.frm_cnt
            print(self.videostream.fps)
            return Response(200, header)
        except FileNotFoundError:
            return Response(404)
Example #8
 def __init__(self, **kwargs):
     super(Main, self).__init__(**kwargs)
     parser = argparse.ArgumentParser()
     parser.add_argument('left',
                         metavar='LEFT',
                         help='left stream  e.g. rtsp://192.168.0.16:8086')
     parser.add_argument('right',
                         metavar='RIGHT',
                         help='right stream e.g. rtsp://192.168.0.23:8086')
     args = parser.parse_args()
     self.streams = [
         VideoStream(args.left, 640, 480),
         VideoStream(args.right, 640, 480)
     ]
     self.separation = 0.125
     self.vert_offset = 0
Example #9
async def stream(request: Request, url: str = Form(...), action: str = Form(...)):
    if action == 'yolov4':
        track = False
    elif action == 'yolov4 + deepsort':
        track = True
    else:
        track = False  # default to plain detection for unknown actions
    
    return StreamingResponse(VideoStream(url).process_stream(track=track),
                             media_type="multipart/x-mixed-replace; boundary=frame")
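
process_stream is not shown; for the StreamingResponse above to work it must be a generator that yields multipart JPEG chunks. A minimal sketch under that assumption, with the detector/tracker elided:

import cv2

class VideoStream:
    def __init__(self, url):
        self.cap = cv2.VideoCapture(url)

    def process_stream(self, track=False):
        # Sketch: run yolov4 (plus deepsort when track=True) per frame,
        # then yield the frame as one multipart JPEG chunk.
        while True:
            ok, frame = self.cap.read()
            if not ok:
                break
            # ... detection / tracking would go here ...
            ok, jpeg = cv2.imencode('.jpg', frame)
            if ok:
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')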
Example #10
    ap.add_argument("-r", "--resolution", type=str, default='640x480',
                    help='resolution of cameras (\'max\' for max resolution)')
    ap.add_argument("-s", "--source", type=str, default='-1',
                    help='indicies of cameras to use')

    # parse arguments
    args = vars(ap.parse_args())

    # create a list of video streams to reference in generate()
    vslist = []

    # find up to 10 attached cameras and try to start streams on them
    if args['source'] == '-1':
        print('shotgun approach')
        for i in range(0, 10):
            vs = VideoStream(i, 'vs{}'.format(i+1), args['resolution'])
            if not vs.stream.isOpened():
                vs.release()
            else:
                if maxframesize is None:
                    maxframesize = vs.width
                vslist.append(vs)
    # open cameras based on arguments passed
    else:
        sources = parseindicies(args['source'])
        print('opening ' + args['source'])
        for i in sources:
            vs = VideoStream(i, 'vs{}'.format(i+1), args['resolution'])
            if not vs.stream.isOpened():
                vs.release()
            else:
Example #11
ALARM_MODE = False
ALARM_MODE_MOUTH = False

print("Facial Landmark Prediction System Loading...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

FACIAL_LANDMARKS = FL()
# retrieve the indexes of the facial landmarks for the eyes and mouth
(lStart, lEnd) = FACIAL_LANDMARKS["left_eye"]
(rStart, rEnd) = FACIAL_LANDMARKS["right_eye"]
(mStart, mEnd) = FACIAL_LANDMARKS["mouth"]

print("Video Stream Loading...")
vs = VideoStream(src=args["webcam"]).start()
time.sleep(1.0)

# loop over frames from the video stream
while True:

	frame = vs.read()
	(h, w) = frame.shape[:2]
	frame = cv2.resize(frame, (w,h), interpolation=cv2.INTER_AREA)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	rects = detector(gray, 0)

	# loop over the face detections
	for rect in rects:
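
The loop body is cut off here. In the classic drowsiness-detection pattern this snippet follows, each rect is turned into landmark coordinates and an eye aspect ratio (EAR) is computed per eye; a hedged sketch of that step:

from scipy.spatial import distance as dist
from imutils import face_utils

def eye_aspect_ratio(eye):
    # EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|)
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    C = dist.euclidean(eye[0], eye[3])
    return (A + B) / (2.0 * C)

# Inside the loop over rects (sketch):
#   shape = face_utils.shape_to_np(predictor(gray, rect))
#   ear = (eye_aspect_ratio(shape[lStart:lEnd]) +
#          eye_aspect_ratio(shape[rStart:rEnd])) / 2.0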
		
Example #12
import sys
import time
import cv2
import zbar
from videostream import VideoStream
from PIL import Image

# Initialise OpenCV window
print("OpenCV version: %s" % cv2.__version__)
print("Press q to exit ...")

if int(sys.argv[1]) == 1:
    isPiCamera = True
else:
    isPiCamera = False

vs = VideoStream(isPiCamera=isPiCamera, resolution=(640, 480)).start()
time.sleep(2.0)

scanner = zbar.ImageScanner()
scanner.parse_config('enable')

# Capture frames from the camera
while True:

    ticks = time.time()
    frame = vs.read()

    # raw detection code
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY, dstCn=0)
    pil = Image.fromarray(gray)
    width, height = pil.size
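
The loop is cut off after the PIL conversion; with the legacy zbar bindings, the decode step that typically follows looks like this (the Y800 raw-grayscale format is the usual choice — treat the details as assumptions):

    # sketch of the decode step (legacy zbar bindings)
    zimage = zbar.Image(width, height, 'Y800', pil.tobytes())
    scanner.scan(zimage)
    for symbol in zimage:
        print("Decoded %s: %s" % (symbol.type, symbol.data))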
Example #13
class PersonDetector():
    def __init__(self, usePiCamera, width, height, camPort):
        self.ct = CentroidTracker()
        self.targetID = -1000000
        self.targetCentroid = []
        self.radius = 20
        self.cap = VideoStream(usePiCamera, width, height, camPort).start()
        self.haar_cascade = cv2.CascadeClassifier("data/HS.xml")
        self.frameCount = 0
        self.frameCaptureNumber = 7
        self.thresholdX = int(width / 2)
        self.thresholdY = int(height / 2)
        self.width = width
        self.height = height

    def frameAdd(self):
        self.frameCount += 1

    def checkFrameCount(self):
        if self.frameCount % self.frameCaptureNumber == 0:
            return True
        return False

    def robotMoveDirection(self, centroid, center):
        print(centroid, " ", center)
        cX = center[0]
        cY = center[1]
        targetX = centroid[0]
        targetY = centroid[1]
        diffX = cX - targetX
        diffY = cY - targetY
        hors = "Move: "
        verts = " and "
        if targetX < cX - self.radius:
            hors += "Left " + str(np.abs(diffX)) + " pixels"
        elif targetX > cX + self.radius:
            hors += "Right " + str(np.abs(diffX)) + " pixels"
        else:
            hors = ""
            verts = "Move: "
        if targetY < cY - self.radius:
            verts += "Up " + str(np.abs(diffY)) + " pixels"
        elif targetY > cY + self.radius:
            verts += "Down " + str(np.abs(diffY)) + " pixels"
        else:
            verts = ""
        return hors + verts

    def updateObjectIDs(self, upper_body):
        return self.ct.update(upper_body)

    def detectPerson(self):
        capturedframe = self.cap.getFrame()
        frame = capturedframe[0:self.height, 0:int(self.width / 2)]
        centerX = -10000
        centerY = -100000
        cColor = (0, 0, 255)
        if self.checkFrameCount():
            try:
                # using a greyscale picture, also for faster detection
                gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
                upper_body = self.haar_cascade.detectMultiScale(
                    gray,
                    scaleFactor=1.1,
                    minNeighbors=12,
                    # Min size for valid detection, changes according to video size or body size in the video.
                    minSize=(50, 100),
                    flags=cv2.CASCADE_SCALE_IMAGE)
                if len(upper_body) > 0:
                    (x, y, w, h) = upper_body[0]
                    # creates green color rectangle with a thickness size of 1
                    centerX = int(x + w / 2)
                    centerY = int(y + h / 2)
                    cv2.rectangle(capturedframe, (x, y), (x + w, y + h),
                                  (0, 255, 0), 1)
                    cv2.line(capturedframe, (centerX, y), (centerX, y + h),
                             (255, 0, 0), 1)
                    cv2.line(capturedframe, (x, centerY), (x + w, centerY),
                             (255, 0, 0), 1)
                    cv2.circle(capturedframe, (centerX, centerY), 1,
                               (0, 0, 255), 1)
                    # creates green color text with text size of 0.5 & thickness size of 2
                    cv2.putText(capturedframe, "Head and Shoulders Detected",
                                (x + 5, y + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (0, 255, 0), 2)
                    if np.abs(centerX - self.thresholdX) <= 20 and np.abs(
                            centerY - self.thresholdY) <= 20:
                        cColor = (0, 255, 0)
                    else:
                        cColor = (0, 0, 255)
                objects = self.updateObjectIDs(upper_body)
                # checking if the id is the same as the one detected
                if len(list(objects.items())) > 0:
                    targetID = list(objects.items())[0][0]
                    targetCentroid = list(objects.items())[0][1]
                    if targetCentroid[0] == centerX and targetCentroid[
                            1] == centerY:
                        cv2.putText(capturedframe, str(targetID),
                                    (targetCentroid[0], targetCentroid[1]),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                                    2)
                        cv2.circle(capturedframe,
                                   (targetCentroid[0], targetCentroid[1]), 4,
                                   (0, 255, 0), -1)
                else:
                    targetID = -100000
                    targetCentroid = []
                print(
                    self.robotMoveDirection(targetCentroid,
                                            (self.width / 2, self.height / 2)))
            except Exception as e:
                print(str(e))
                return capturedframe
            #threshold center lines
        cv2.circle(capturedframe, (self.thresholdX, self.thresholdY),
                   self.radius, cColor, 3)
        return capturedframe

    def release(self):
        self.cap.release()
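
A hedged usage sketch for PersonDetector (the camera parameters are placeholders):

if __name__ == '__main__':
    detector = PersonDetector(usePiCamera=False, width=640, height=480, camPort=0)
    while True:
        detector.frameAdd()
        frame = detector.detectPerson()
        cv2.imshow('person', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    detector.release()
    cv2.destroyAllWindows()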
Example #14
class VideoCamera(object):
    INIT_TIME = 100
    FRAME_WIDTH = 640
    FRAME_HEIGHT = 480
    HISTORY = 25
    THRESHOLD = 18

    def __init__(self, usePiCamera=True):
        # initialize the camera and grab a reference to the raw camera capture
        self.vs = VideoStream(usePiCamera=usePiCamera,
                              src=0,
                              resolution=(self.FRAME_WIDTH, self.FRAME_HEIGHT),
                              framerate=12)

        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.fgbg = cv2.createBackgroundSubtractorMOG2(history=self.INIT_TIME,
                                                       varThreshold=12,
                                                       detectShadows=False)
        self.face_cascade = cv2.CascadeClassifier(
            'models/haarcascade_frontalface_default.xml')

        self.background1 = cv2.imread('backgrounds/background1.jpeg')
        self.background1 = cv2.resize(self.background1,
                                      (self.FRAME_WIDTH, self.FRAME_HEIGHT))

        self.hats = [
            cv2.imread('./hats/' + file, cv2.IMREAD_UNCHANGED)
            for file in os.listdir('./hats')
        ]

        self.stopped = False
        self.frame = None
        self.state = 0
        self.time = 0
        self.lastReady = 0
        self.lastCapture = 0
        self.history = np.zeros(self.HISTORY)

    def start(self):
        self.vs.start()
        # start the thread to process frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def stop(self):
        self.vs.stop()
        # indicate that the thread should be stopped
        self.stopped = True

    def update(self):
        # Keep looping infinitely until the thread is stopped
        while True:
            # If the thread indicator variable is set, stop the thread
            if self.stopped:
                return

            # Grab an image from the video stream
            frame = self.vs.read()

            # Wait until frames start to be available
            if frame is None:
                time.sleep(1)
                continue

            # Resize frame to fit working dimensions
            frame = cv2.resize(frame, (self.FRAME_WIDTH, self.FRAME_HEIGHT), 0,
                               0, cv2.INTER_CUBIC)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)

            fgmask = self.fgbg.apply(frame,
                                     learningRate=0 if self.state > 0 else -1)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, self.kernel)
            bgmask = 255 - fgmask

            # State machine
            # S0: Training
            if self.state == 0:
                cv2.putText(img=fgmask,
                            text='Training Segmentation Algorithm (' +
                            str(int(100.0 * self.time / self.INIT_TIME)) +
                            ' %)',
                            org=(0, self.FRAME_HEIGHT - 5),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=.7,
                            color=(255, 255, 255),
                            thickness=1)

                # Transition to S1
                if self.time >= self.INIT_TIME:
                    self.state = 1

                # Show mask when training.
                self.frame = fgmask

            # S1: Ready to capture
            elif self.state == 1:
                # Detect and draw faces
                faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
                for (x, y, w, h) in faces:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                                  2)

                # Update history
                self.history[self.time % self.HISTORY] = min(len(faces), 1)

                self.frame = frame
                #self.frame = cv2.bitwise_and(self.background1, self.background1, mask=bgmask) + cv2.bitwise_and(frame, frame, mask=fgmask)

                cv2.putText(self.frame,
                            text='Frame ' + str(self.time),
                            org=(0, self.FRAME_HEIGHT - 5),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=.7,
                            color=(255, 255, 255),
                            thickness=1)

                # Transition to S2 if faces present.
                if np.sum(
                        self.history
                ) >= self.THRESHOLD and self.time > self.lastCapture + self.HISTORY:
                    self.lastReady = self.time
                    self.state = 2
                # Transition to S0 if no faces and time has gone, reset all
                elif np.sum(
                        self.history
                ) < self.THRESHOLD and self.time > 4 * self.INIT_TIME:
                    self.time = 0
                    self.lastReady = 0
                    self.lastCapture = 0
                    self.state = 0

            # S2: Count down
            elif self.state == 2:
                self.frame = frame

                cv2.putText(self.frame,
                            text=str(3 -
                                     int((self.time - self.lastReady) / 10)),
                            org=(int(self.FRAME_WIDTH / 2),
                                 int(self.FRAME_HEIGHT / 2)),
                            fontFace=cv2.FONT_HERSHEY_DUPLEX,
                            fontScale=3,
                            color=(255, 255, 255),
                            thickness=2)

                if self.time - self.lastReady >= 29:
                    self.state = 3

            # S3: Capture
            elif self.state == 3:
                #frame = cv2.bitwise_and(self.background1, self.background1, mask=bgmask) + cv2.bitwise_and(frame, frame, mask=fgmask)

                faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)

                for (x, y, w, h) in faces:
                    # Select a hat and fit it to the face
                    hat = random.choice(self.hats)
                    x_factor = 1.45
                    y_offset = 0.15
                    hat = cv2.resize(
                        hat, (int(w * x_factor),
                              int(w * x_factor / hat.shape[1] * hat.shape[0])),
                        0, 0, cv2.INTER_CUBIC)
                    # Get fitted hat width/height, and crop to bounding box.
                    h_h, h_w = hat.shape[:2]
                    h_x1 = max(int(x - (h_w - w) / 2), 0)
                    h_x2 = min(int(x + w + (h_w - w) / 2), self.FRAME_WIDTH)
                    h_y1 = max(int(y + h * y_offset - h_h), 0)
                    h_y2 = min(int(y + h * y_offset), self.FRAME_HEIGHT)
                    # Recalculate hat width/height if cropped by bounding box.
                    h_w = h_x2 - h_x1
                    h_h = h_y2 - h_y1

                    # Blend hat to frame with alpha-channel.
                    for c in range(0, 3):
                        alpha = hat[-h_h:, :h_w, 3] / 255.0
                        color = hat[-h_h:, :h_w, c] * alpha
                        beta = frame[h_y1:h_y2, h_x1:h_x2, c] * (1.0 - alpha)
                        frame[h_y1:h_y2, h_x1:h_x2, c] = color + beta

                # Save the image to disk.
                filename = 'images/' + datetime.datetime.now().strftime(
                    "%Y%m%d-%H%M%S") + '.png'
                print('Saving image: ', filename)
                cv2.imwrite(filename, frame)

                self.frame = frame
                time.sleep(10)

                # Transition to S1
                self.lastCapture = self.time
                self.state = 1

            self.time = self.time + 1
            time.sleep(1 / 16)

    def get_frame(self):
        # Wait until frames start to be available
        while self.frame is None:
            time.sleep(0)

        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        ret, jpeg = cv2.imencode('.jpg', self.frame)
        return jpeg.tobytes()
Example #15
frame_rate_calc = 1
freq = cv2.getTickFrequency()

dnum=0
time_appear_drone = 0
boxthickness = 3
linethickness = 2
# Initialize video stream
#videostream = VideoStream(resolution=(800,600),framerate=30).start()
Auto_flag = True
rectangule_color = (10, 255, 0)


sender = imagezmq.ImageSender(connect_to="tcp://{}:5555".format(args.server))
rpi_name = socket.gethostname() # send RPi hostname with each image
picam = VideoStream(resolution=(imW,imH),framerate=30).start()
print(picam.read().shape)
time.sleep(1.0)  # allow camera sensor to warm up
frame1 = picam.read()
rows, cols, _ = frame1.shape
x_medium = int(cols / 2)
x_center = int(cols / 2)
y_medium = int(rows / 2)
y_center = int(rows / 2)

while True:  # send images as stream until Ctrl-C
  # Start timer (for calculating frame rate)
  t1 = cv2.getTickCount()
  frame1 = picam.read()
  # Acquire frame and resize to expected shape [1xHxWx3]
  frame = frame1.copy()
Example #16
            cv2.rectangle(self.frame_resized, (x, y), (x + w - 1, y + h - 1),
                          (0, 0, 255), 1)
            cv2.circle(self.frame_resized, centroid, 2, (0, 255, 0), -1)
        #cv2.imshow('boxes', self.frame_resized)


if __name__ == '__main__':

    from videostream import VideoStream
    from videostream import FPS

    fuente = ['../installationFiles/mySquare.mp4', 0]

    # Create  BG object and get source input
    bg = CreateBGCNT()
    vs = VideoStream(src=fuente[0], resolution=(640, 480)).start()  # 0.5 pmx

    fps = FPS().start()

    while True:
        frame, frame_resized = vs.read()
        # Feed frames
        bg.alimentar(frame_resized)

        # Want the matches?
        #print(bg.matches)

        # To visualize, keep the next two lines
        bg.draw()
        cv2.imshow('frame', frame_resized)
Example #17
dnum = 0
time_appear_drone = 0
boxthickness = 3
linethickness = 2
# Initialize video stream
#videostream = VideoStream(resolution=(800,600),framerate=30).start()

rectangule_color = (10, 255, 0)

sender = imagezmq.ImageSender(connect_to='tcp://192.168.0.44:5555')
print("send")
rpi_name = socket.gethostname()  # send RPi hostname with each image
print("name")
#picam = VideoStream(usePiCamera=False).start()
picam = VideoStream(resolution=(800, 600), framerate=30).start()
print(picam.read().shape)
print("picam")
time.sleep(2.0)  # allow camera sensor to warm up
print("sleep")

while True:  # send images as stream until Ctrl-C
    # Start timer (for calculating frame rate)
    t1 = cv2.getTickCount()
    frame1 = picam.read()
    # Acquire frame and resize to expected shape [1xHxWx3]
    frame = frame1.copy()
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (width, height))
    input_data = np.expand_dims(frame_resized, axis=0)
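
The loop is cut off after input_data is built; the TFLite detection step that normally follows looks roughly like this (the output tensor order assumes the standard SSD postprocess layout):

def run_detection(interpreter, input_data):
    # Sketch: invoke a TFLite interpreter whose tensors were allocated earlier.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    boxes = interpreter.get_tensor(output_details[0]['index'])[0]    # box coords
    classes = interpreter.get_tensor(output_details[1]['index'])[0]  # class ids
    scores = interpreter.get_tensor(output_details[2]['index'])[0]   # confidences
    return boxes, classes, scores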
Example #18
def video_feed():
    return Response(gen(VideoStream()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
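
gen is not shown; the conventional generator behind this Flask response yields multipart JPEG chunks. A minimal sketch, assuming the VideoStream exposes read():

import cv2

def gen(stream):
    # Sketch: encode each frame as JPEG and wrap it in a multipart chunk.
    while True:
        frame = stream.read()
        if frame is None:
            continue
        ok, jpeg = cv2.imencode('.jpg', frame)
        if ok:
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')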
Example #19
if int(sys.argv[1]) == 1:
    isPiCamera = True
    cam_type = "picamera"
else:
    isPiCamera = False
    cam_type = "usbcamera"

if int(sys.argv[2]) == 640:
    resolution = (640, 480)
elif int(sys.argv[2]) == 320:
    resolution = (320, 240)
else:
    print "Not valid resolution"
    quit()

vs = VideoStream(isPiCamera=isPiCamera, resolution=resolution).start()
numb_of_pics = 0

while True:
    img = vs.read()

    cv2.imshow("Press Space to Save Frame", img)

    keypress = cv2.waitKey(1) & 0xFF
    if keypress == 32:
        numb_of_pics += 1
        file_name = cam_type + str(resolution[0]) + '_' + str(numb_of_pics)
        cv2.imwrite('./calib_pics/' + file_name + '.jpg', img)

    if keypress == ord('q'):
        print(str(numb_of_pics) + ' pictures saved')
Example #20
from imutils.video import VideoStream  # assumed source of the VideoStream used below
import imagezmq
import argparse
import socket
import time

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument(
    "-s",
    "--server-ip",
    required=True,
    help="ip address of the server to which the client will connect")
args = vars(ap.parse_args())

# initialize the ImageSender object with the socket address of the
# server
sender = imagezmq.ImageSender(
    connect_to="tcp://{}:5555".format(args["server_ip"]))

# get the host name, initialize the video stream, and allow the
# camera sensor to warmup
rpiName = socket.gethostname()
vs = VideoStream().start()
#vs = VideoStream(src=0).start()
time.sleep(2.0)

while True:
    # read the frame from the camera and send it to the server
    frame = vs.read()
    sender.send_image(rpiName, frame)
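
Only the sender side is shown; the matching receiver uses imagezmq's ImageHub. A minimal sketch of the server loop:

import cv2
import imagezmq

image_hub = imagezmq.ImageHub()  # binds tcp://*:5555 by default
while True:
    sender_name, frame = image_hub.recv_image()
    image_hub.send_reply(b'OK')  # REQ/REP pattern: unblock the sender
    cv2.imshow(sender_name, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break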
Example #21
class ScreenPlayer(tk.Frame):
    
    def __init__(self, master=None, title=None, video=None):
        ''' Initialize the window app '''
        super().__init__(master)        
        self.master = master
        # self.master.geometry('422x624+629+231') -> development value; computed below.
        self.pack(fill=tk.BOTH, expand=1)
        self.master.title(title)
        self.master['bg'] = 'Black'
        self.master.protocol('WM_DELETE_WINDOW', self.confirmExit)
        self.master.protocol('WM_TAKE_FOCUS', self.confirmOpen)
        self.master.protocol('WM_SAVE_YOURSELF', self.confirmSave)
        self.init = True
        self.setingfile = 'flash_seting.ini'
        # self.window.resizable(width=False, height=False)
        self.n_size = None
        self.photo = None
        self.photos = Photos()
        # self.master.call('wm', 'iconphoto', self.master, self.photos._apply)
        self.soundvar = tk.DoubleVar(value=0.9)
        self.master.wm_iconphoto(True, self.photos._apply)
        self.dirImages = None
        self.dirpathmovies = tk.StringVar()  # video directory path
        self.video_source = video
        w = 350; h = 230; self.duracion = 100.0
        self.active_scale = False
        self.vid = None
        self.val = 'paused'
        if self.video_source is not None:
            # open video source (by default this will try to open the computer webcam)
            try:
                self.dirpathmovies.set(os.path.dirname(self.video_source))
                self.vid = VideoStream(self.video_source)
                # w_f, h_f = self.vid.w, self.vid.h
                self.duracion = self.vid.duration
                self.soundvar.set(self.vid.player.get_volume())
                frame = None
                while frame is None:
                    self.val, self.pts, frame = self.vid.get_frame()
                size = frame.get_size()
                arr = frame.to_memoryview()[0] # array image
                self.imagen = Image.frombytes("RGB", size, arr.memview)
                w_f, h_f = self.imagen.size
                self.delay = self.vid.f_rate
                self.vid.toggle_pause()
                self._wxh = (w_f, (h_f + 40))
                self._twh = (300, 300)
            except Exception as e:
                print(e)
                self.video_source = None
        else:
            self.imagen = self.photos._logo
            w_f, h_f = self.imagen.size
            self.delay = 16
            self._wxh = (422, 624)
            self._twh = (629, 231)
            pass
        # window sizing.
        str_window = str(self._wxh[0])+ 'x'+str(self._wxh[1])+ '+' + str(self._twh[0])+ '+' + str(self._twh[1])
        self.master.geometry(str_window)
        print('str ventana de inicio:', str_window)
        # volume change event
        self.soundvar.trace('w', self.soundvar_adjust)
        # Create a canvas that can fit the above video source size
        self.canvas = tk.Canvas(self, bg='red')
        # controls container
        self.conten_controls = tk.LabelFrame(self, height=4)
        # Button that lets the user take a snapshot
        self.btn_snapshot=tk.Button(self.conten_controls, text="Snapshot", command=self.snapshot)
        self.btn_snapshot['image'] = self.photos._snapshot 
        self.btn_snapshot.pack(side='left')
        # Button open
        self.btn_open = tk.Button(self.conten_controls, text='...', command=self.open_file)
        self.btn_open['image'] = self.photos._open
        self.btn_open.pack(side='right')
        # volume button
        self.btn_volume = tk.Button(self.conten_controls, text='volume', command=self.open_adjust_volumen)
        self.btn_volume['image'] = self.photos._volume
        self.btn_volume.pack(side='right')
        # Button replay
        self.btn_replay = tk.Button(self.conten_controls, text=">>", command=self.replay)
        self.btn_replay['image'] = self.photos._repeat
        self.btn_replay.pack(side='right')
        # Button play-pausa
        self.btn_toogle_pause = tk.Button(self.conten_controls, text="[]", command=self.toogle_pause)
        self.btn_toogle_pause['image'] = self.photos._play
        self.btn_toogle_pause.pack(side='right')
        # Slider
        self.var_t = tk.DoubleVar()
        self.scale = tk.Scale(self.conten_controls, from_=0.0, to= self.duracion, showvalue=0, orient='horizontal', variable=self.var_t, 
                        resolution=0.3, sliderrelief='flat', command=self.onScale )
        self.scale.pack(side='left', fill='x', expand=1)
        self.scale.bind('<ButtonPress-1>', self.scale_button_press)
        self.scale.bind('<ButtonRelease-1>', self.scale_button_release)
        self.master.bind('<Configure>', self.master_on_resize)
        # self.master.bind_all('<ButtonPress-1>', self.master_button_press)
        # self.master.bind('<ButtonRelease-1>', self.master_button_release)
        # self.master.wm_withdraw()
        self.canvas.pack(side =tk.TOP, fill=tk.BOTH, expand=1)
        self.conten_controls.pack(side='bottom', fill='x')
        
        
        if self.video_source is not None:
            self.scale.configure(to=self.duracion)
            self.btn_toogle_pause['image'] = self.photos._pause
            self.vid.player.set_volume(float(self.soundvar.get()))
            self.canvas_tags = None
            
        self.photo = ImageTk.PhotoImage(self.imagen)
        self.canvas.configure(width = w_f, height=h_f)
        self.canvas.create_image(w_f/2, h_f/2, image = self.photo, 
                                        anchor='center', tags='img')
        self.val = None
        self.pts = None
        #
        
      
        if self.video_source is None:
            self.get_init_status()
        self.update()
        self.master.mainloop()

    
    def get_init_status(self):
        '''
        extract init status of app
        Return:
        '''
        if not os.path.exists(self.setingfile):
            return
        config = configparser.RawConfigParser()
        config.read(self.setingfile)
        dirpathmovies = config.get('Setings', 'path_movies')
        if os.path.exists(dirpathmovies):
            self.dirpathmovies.set(dirpathmovies)
            # initialize the list with the saved directory
        dirpathimages =config.get('Setings', 'path_images')
        if os.path.exists(dirpathimages):
            self.dirImages = dirpathimages
            # initialize the list with the saved directory
    
    def set_init_status(self):
        '''
        write init status of app
        Return:
        '''
        config = configparser.RawConfigParser()
        config.add_section('Setings')
        config.set('Setings', 'path_movies', self.dirpathmovies.get())
        config.set('Setings', 'path_images', self.dirImages)
        with open(self.setingfile, 'w') as configfile:
            config.write(configfile)
        print('Write config file')

    def confirmExit(self):
        if messagebox.askokcancel('Quit', 'Are you sure you want to exit?'):
            self.master.quit()
        self.set_init_status()
        print('end process')

    def confirmOpen(self):
        '''play the video, if present, when the window is activated at app startup'''
        print(f">> Confirm Open -- init:{self.init}")
        if self.init:
            if self.vid is not None:
                self.vid.toggle_pause()
                self.btn_toogle_pause['image'] = self.photos._play
            self.init=False


    def confirmSave(self):
        print('>> push confirm Save.')
        pass


    def master_button_press(self, event):
        print('>> master_button_press')

    def master_button_release(self, event):
        print('>> master_button_release')

    def master_on_resize(self, event):
        _ventana = event.widget
        print(_ventana)
        widget_size =(_ventana.winfo_width(), _ventana.winfo_height() )
        if str(_ventana) == '.!screenplayer.!canvas':
            print('>> canvas:')
        m_wxh = (event.x, event.y)
        print('>> rezise:', m_wxh, self._wxh, 'widget_size:', widget_size)
        if self._wxh != m_wxh and str(_ventana) == '.!screenplayer.!canvas' and m_wxh == (0, 0):
            print('>> on_resize_master ')
            self._wxh = widget_size
            # self.master.configure(width=m_wxh[0], height=m_wxh[1])
            h_c = self.conten_controls.winfo_height()
            w = m_wxh[0] -2
            h = m_wxh[1] - h_c -2
            self.canvas.configure(width=w, height=h)
            # if self.vid is None, or self.imagen is not None, or self.vid.player.get_pause()
            # print('wm_withdraw: ', self.master.wm_withdraw())
            if self.vid is None or self.val =='paused':               
                w = self.canvas.winfo_width()
                h = self.canvas.winfo_height()
                if w <= 0 or h <= 0:
                    return
                self.canvas.delete(tk.ALL) # delete every object on the canvas
                #  w_i, h_i = self.imagen.size
                marco = w, h  # window frame size
                self.imagen_copy = self.imagen.copy()
                self.imagen_copy = image_adjustment(self.imagen_copy, marco)
                self.photo = ImageTk.PhotoImage(self.imagen_copy)
                self.canvas.create_image(w/2, h/2, anchor='center', image = self.photo, tags='img')
            
    def scale_button_press(self, event):
        print('>> scale_button_press')
        self.active_scale = True
    
    def scale_button_release(self, event):
        print('>> scale_button_release')
        self.active_scale = False
    
    def open_adjust_volumen(self):
        from overpanel import Over
        param = {'textvariable': self.soundvar}
        dialog = Over(master=self.master, cnf=param)
        dialog.mainloop()
    
    def soundvar_adjust(self, *args):
        print('sound adjust ->', self.soundvar.get())
        if self.vid:
            print('if self.vid: soundvar_adjust')
            self.vid.player.set_volume(self.soundvar.get())

    def isurl(self, tip=None):
        '''
        Return boolean True if the path exists and ends with
        ('.mp4', '.MP4', '.flv', '.FLV', '.mpg', '.avi')
        '''
        if not tip:
            return False
        exten = ('.mp4', '.MP4', '.flv', '.FLV', '.mpg', '.avi')
        if os.path.exists(tip):
            if tip.endswith(exten):
                return True
        return False

    def open_file(self):
        ''' Open file with menu ... '''
        if self.dirpathmovies.get()=='':
            dirpath='.'
        else:
            dirpath = self.dirpathmovies.get()
        file = filedialog.askopenfile(initialdir=dirpath, title='select file', 
                                        filetypes={('flv','*.flv'), ('mp4','*.mp4'),
                                        ('avi','*.avi'), ('mpg','*.mpg')}, 
                                        defaultextension='*.mp4')
        print(file)
        if file is not None:
            if os.path.exists(file.name):
                self.video_source = file.name
                self.dirpathmovies.set(os.path.dirname(self.video_source))
                self.newplay()
        
    def toogle_pause(self):
        '''
        toggle play/pause
        '''
        if not self.vid:
            return
        if self.val =='paused':
            self.btn_toogle_pause['image'] = self.photos._play
            self.vid.toggle_pause()
        else:
            self.btn_toogle_pause['image'] = self.photos._pause
            self.vid.toggle_pause()

    def onScale(self, val):
        # print('>> scale onScale type val ->', type(val), val)
        if not self.vid:
            return
        try:
            # self.active_scale = True
            self.vid.seek(pts=float(val))
            # self.active_scale = False
        except Exception as e:
            print(e)
            # self.active_scale = False
        # self.var_t.set(v)

    def replay(self):
        if not self.vid:
            return
        if self.val == 'paused':
            self.vid.seek(pts=0.01)
            self.vid.toggle_pause()
        else:
            self.vid.seek(pts=0.01)
        pass

    def newplay(self):
        ''' get replay '''
        # put image play_b in btn_toogle_pause
        self.btn_toogle_pause['image'] = self.photos._play
        # load video stream
        self.vid = VideoStream(self.video_source)
        # set the refresh delay from the stream frame rate
        self.delay = self.vid.f_rate
        # set scale value.
        self.var_t.set(0.1)
        # get the duration of the video stream
        self.duracion = self.vid.duration
        # print time duration
        print('>> newplay: self.duracion ->', self.duracion)
        # set the scale's maximum value to the duration
        self.scale.configure(to=self.duracion)
        # print configure values scale
        # print('>> self.scale.configure ->', self.scale)
        # adjust the volume to the selected level.
        self.vid.player.set_volume(float(self.soundvar.get()))
        # vertical window dimension.
        h = self.conten_controls.winfo_height()
        h_f = self.vid.h + h + 2
        w_f = self.vid.w + 2
        cad = f'{w_f}x{h_f}'
        self._width = w_f
        self._height = h_f
        self.master.geometry(cad)
        self.canvas.config(width=self.vid.w, height=self.vid.h)

    def snapshot(self):
        '''
        Get a frame from the video source
        '''
        # Get a frame from the video source
        #frame = self.vid.get_frame()[2]
        if not self.vid:
            return
        frame = self.imagen.copy()
        if frame is None:
            print('the video stream is closed')
            return
        # define options for opening
        time_str = time.strftime("%d-%m-%Y-%H-%M-%S")
        filename  = f"frame-{time_str}.jpg"
        title='Save file as'
        filetypes={('png','*.png'), ('jpg','*.jpg')}
        fileExt = '*.jpg'
        options = {}
        options['defaultextension'] = fileExt
        options['filetypes'] = filetypes
        options['initialfile'] = filename
        options['title'] = title
        if self.dirImages is not None:
            options['initialdir'] = self.dirImages
        else:
            options['initialdir'] = os.getenv('HOME')
        
        filesave = filedialog.asksaveasfilename(**options)
        
        if filesave !='':
            frame.save(filesave)
            # update the directory if it changed
            self.dirImages = os.path.dirname(filesave)
        #self.vid.snapshot()
        print(filesave)
        pass
 
    def update(self):
        # Get a frame from the video source
        if self.vid:
            self.val, self.pts, frame = self.vid.get_frame()

            if frame is not None and not self.active_scale:
                size = frame.get_size()
                arr = frame.to_memoryview()[0] # array image
                self.imagen = Image.frombytes("RGB", size, arr.memview)
                self.canvas.delete(tk.ALL) # delete every object on the canvas
                w = self.canvas.winfo_width()
                h = self.canvas.winfo_height()
                marco = w, h  # window frame size
                self.imagen_copy = self.imagen.copy()
                self.imagen_copy = image_adjustment(self.imagen_copy, marco)
                self.photo = ImageTk.PhotoImage(self.imagen_copy)
                self.canvas.create_image(w/2, h/2, anchor='center', image = self.photo, tags='img')
                # print('>>> self.active_scale:', self.active_scale)
                # if not self.active_scale:
                self.var_t.set(self.pts)

        self.master.after(self.delay, self.update)
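
image_adjustment is referenced throughout but not shown; a plausible PIL-based sketch that scales the frame to fit the canvas while keeping the aspect ratio (the original may differ):

from PIL import Image

def image_adjustment(imagen, marco):
    # Sketch: fit 'imagen' inside the (width, height) box 'marco'.
    w, h = imagen.size
    scale = min(marco[0] / w, marco[1] / h)
    new_size = (max(1, int(w * scale)), max(1, int(h * scale)))
    return imagen.resize(new_size, Image.LANCZOS)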
Example #22
from __future__ import print_function
from videostream import VideoStream
import RPi.GPIO as GPIO
import numpy as np
import warnings
import ultrasonic
import imutils
import time
import cv2
import csv

GPIO.setwarnings(False)

# initialise the video stream and allow the camera sensor to warmup
print("[INFO] warming up camera...")
vs = VideoStream(usePiCamera=False, resolution=(1024,576), framerate=20).start()
TRIG,ECHO = ultrasonic.ultrasonic_setup()

p = 0
dv,t,ts = [],[],[]
 
# initialise the FourCC, video writer, dimensions of the frame, and zeros array
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = None
(h, w) = (None, None)

cv2.namedWindow("Window")
print("[INFO] video recoding...")

# loop over frames from the video stream
while True:
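    # sketch of one iteration (the original body is not shown); the output
    # filename and FPS below are placeholders, and the ultrasonic read is
    # omitted because that module's API isn't shown
    frame = vs.read()
    if frame is None:
        break
    frame = imutils.resize(frame, width=1024)
    if writer is None:
        # initialise the writer lazily once the frame size is known
        (h, w) = frame.shape[:2]
        writer = cv2.VideoWriter('recording.avi', fourcc, 20, (w, h), True)
    writer.write(frame)
    cv2.imshow("Window", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break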
Example #23
class Handler:
    """
    Request handler
    """
    BUF_SIZE = 2048

    # transmission state
    INIT = 0
    READY = 1
    PLAYING = 2

    def __init__(self, addr):
        self.addr = addr
        self.state = self.INIT
        self.sessionid = self.random_session_id()
        self.rtpport = 0
        self.rtpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.is_teardown = False  # renamed: an attribute named 'teardown' would shadow the method below
        self.videostream = None
        self.signal = threading.Event()

        self.call = {
            'PLAY': self.play,
            'PAUSE': self.pause,
            'SETUP': self.setup,
            'DESCRIBE': self.describe,
            'TEARDOWN': self.teardown
        }

    def random_session_id(self):
        return random.randint(100000, 999999)

    def print_log(self, msg):
        curTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        print('\033[31m{} \033[0m: \033[32m{}\033[0m'.format(curTime, msg))

    def process(self, request):
        req_type = request.reqtype
        try:
            return self.call[req_type](request)
        except KeyError:
            # unknown method
            print(req_type)
            return self.unknown(request)

    def play(self, request):
        self.print_log('Process method PLAY')

        if self.state != self.INIT:
            header = {
                'CSeq': request.seqnum,
                'Session': self.sessionid
            }
            self.signal.set()
            time.sleep(1)

            npt = int(request.header['NPT'])
            if npt:
                self.videostream.vidcap.set(cv2.CAP_PROP_POS_MSEC, npt)

            self.state = self.PLAYING
            self.signal.clear()
            threading.Thread(target=self.send_rtp_pkt).start()
            return Response(200, header)
        else:
            return Response(455)

    def pause(self, request):
        self.print_log('Process method PAUSE')

        if self.state == self.PLAYING:
            header = {
                'CSeq': request.seqnum,
                'Session': self.sessionid
            }

            self.state = self.READY
            self.signal.set()
            return Response(200, header)
        else:
            return Response(455)

    def setup(self, request):
        self.print_log('Process method SETUP')

        if self.state == self.INIT:
            header = {
                'CSeq': request.seqnum,
                'Session': self.sessionid
            }

            quality = int(request.header['Quality'])
            self.videostream.quality = quality
            self.state = self.READY
            self.rtpport = int(request.lines[2].split()[3])
            return Response(200, header)
        else:
            return Response(455)

    def describe(self, request):
        self.print_log('Process method DESCRIBE')

        header = {
            'CSeq': request.seqnum,
            'Session': self.sessionid
        }

        try:
            self.videostream = VideoStream(request.filename)
            header['Frame-Rate'] = self.videostream.fps
            header['Frame-Count'] = self.videostream.frm_cnt
            print(self.videostream.fps)
            return Response(200, header)
        except FileNotFoundError:
            return Response(404)

    def teardown(self, request):
        header = {
            'CSeq': request.seqnum,
            'Session': self.sessionid
        }

        self.print_log('Process method TEARDOWN')
        self.signal.set()
        self.rtpsock.close()
        self.is_teardown = True
        return Response(200, header)

    def unknown(self, request):
        self.print_log(str(request))
        self.print_log('Receive unknown method: {}'.format(request.reqtype))
        return Response(501)

    # Send RTP packets (pictures) to the client
    def send_rtp_pkt(self):
        while not self.signal.is_set():
            data = self.videostream.next_pkt()
            # transmission completed
            if not data:
                return
            seqnum = self.videostream.get_pktnum()
            rtppkt = self.pack_rtp_pkt(data, seqnum)

            self.rtpsock.sendto(rtppkt, (self.addr[0], self.rtpport))

    def pack_rtp_pkt(self, data, seqnum):
        """Packetize picture data into RTP packet"""
        rtppkt = RtpPacket()
        rtppkt.encode(2, 0, 0, 0, seqnum, 0, 26, 0, data)

        return rtppkt.getPacket()
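
The VideoStream consumed by this handler (fps, frm_cnt, quality, vidcap, next_pkt, get_pktnum) is not shown; a sketch of a matching OpenCV-backed class inferred from the call sites — treat the details as assumptions:

import cv2

class VideoStream:
    # Hypothetical sketch; attribute names match the handler's call sites.
    def __init__(self, filename):
        self.vidcap = cv2.VideoCapture(filename)
        if not self.vidcap.isOpened():
            raise FileNotFoundError(filename)
        self.fps = self.vidcap.get(cv2.CAP_PROP_FPS)
        self.frm_cnt = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.quality = 50   # JPEG quality, set by SETUP
        self.pktnum = 0

    def next_pkt(self):
        ok, frame = self.vidcap.read()
        if not ok:
            return None
        self.pktnum += 1
        ok, jpeg = cv2.imencode('.jpg', frame,
                                [cv2.IMWRITE_JPEG_QUALITY, self.quality])
        return jpeg.tobytes() if ok else None

    def get_pktnum(self):
        return self.pktnum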
Example #24
class PeekabooController(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)

        self.running = False

        self.soundCtrlr = SoundController()

        self.initVideo()

        self.whereIsEveryoneFlag = False
        self.iSeeSomeoneFlag = False
        self.failure = False
        self.record = False

    def initVideo(self):
        self.video = None
        try:
            self.video = VideoStream(src=0)
        except Exception:
            print("video stream not found")
        if self.video is None:
            print("video stream was not initialized")
            return

        try:
            self.video.start()
        except Exception:
            print("video failed to start")

        # construct the face detector and allow the camera to warm up
        try:
            face = "cascades/haarcascade_frontalface_default.xml"
            self.faceDetector = FaceDetector(face)
            sleep(0.1)
        except Exception:
            print("face detector init failed")

        # choose the XVID codec
        try:
            self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        except Exception:
            print("video writer not found")

        sleep(0.1)

    #called by the thread
    def run(self):
        self._start()

    def toggleRecord(self):
        if(self.record == True):
            self.record = False
            print("video recording stopped")
        else:
            self.record = True
            print("video recording started")

    # start looking
    def _start(self):
        self.running = True

        zeros = None

        previousX = 0
        direction = "NONE"

        self.writer = None
        (h, w) = (None, None)

        # run until the controller is stopped
        while (True):
            # capture frames from the camera
            if(self.running == True):
                frame = self.video.read()
                if(frame is None):
                    print("ERROR: cannot read frame from video, stopping Peekaboo. If you want Peekaboo to work, connect camera and restart R2.py")
                    self.failure = True
                    self.stop()
                    break

                # resize the frame and convert it to grayscale
                frame = self.resizeImage(frame, width=500)

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # detect faces in the image and then clone the frame
                # so that we can draw on it
                faceRects = self.faceDetector.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
                frameClone = frame.copy()

                # where is everyone?
                if len(faceRects) <= 0:
                    cv2.putText(frameClone, "WHERE IS EVERYONE?", (20, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
                    self.whereIsEveryone()
                else:
                    # peekaboo!
                    # R2 is happy to see someone
                    self.iSeeSomeone()

                # loop over the face bounding boxes and draw them
                for (fX, fY, fW, fH) in faceRects:
                    cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (255, 0, 0), 2)

                    # only turn head if face gets far out of center
                    if ((previousX - 10) < fX < (previousX + 10)):
                        direction = "NONE"
                    elif fX < (previousX + 10):
                        direction = "LEFT"
                    elif fX > (previousX - 10):
                        direction = "RIGHT"

                    # turn R2's head to keep face centered
                    # if direction == "LEFT":
                        # self.mainCtrlr.rightThumbX(self.mainCntlr, self.mainCtrlr.xValueRight - 10)
                    # elif direction == "RIGHT":
                        # self.mainCtrlr.rightThumbX(self.mainCntlr, self.mainCtrlr.xValueRight + 10)

                    cv2.putText(frameClone, "PEEKABOO!".format(direction), (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.55, (0, 255, 0), 2)

                    if direction != "NONE":
                        cv2.putText(frameClone, "<Turn {}>".format(direction), (fX, fY - 0), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.55, (0, 255, 0), 2)

                    previousX = fX

                # show our detected faces, then clear the frame in preparation for the next frame
                # NOTE: comment this out if you don't want the video stream window to show in terminal
                cv2.imshow("Face", frameClone)

                # write video to file
                if self.record == True:
                    if self.writer is None:
                        # store the image dimensions, initialize the video writer,
                        # and construct the zeros array
                        (h, w) = frameClone.shape[:2]
                        self.writer = cv2.VideoWriter("r2_recording.avi", self.fourcc, 4,
                                                      (w, h), True)
                    output = np.zeros((h, w, 3), dtype="uint8")
                    output[0:h, 0:w] = frameClone
                    self.writer.write(output)

                # NOTE: comment this out if you don't want the video stream window to show in terminal
                # if the 'q' key is pressed, stop the loop
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    print("keypress 'q', stopping Peekaboo")
                    self.stop()
                    break

    def resume(self):
        print("starting PeekabooController")
        if(self.failure == True):
            print("ERROR: the video had failed to load.  If you want Peekaboo to work, you will need to connect the camera and restart R2.py")
            return
        self.running = True

    def stop(self):
        print("stopping PeekabooController")
        self.running = False
        if self.writer is not None:
            self.writer.release()

    def stopVideo(self):
        if(self.video is not None):
            self.video.stop()

        try:
            cv2.destroyAllWindows()
        except:
            print("")

    def whereIsEveryone(self):
        if(self.whereIsEveryoneFlag == False):
            SoundController.worried(self.soundCtrlr)
            self.whereIsEveryoneFlag = True
            self.iSeeSomeoneFlag = False

    def iSeeSomeone(self):
        if(self.iSeeSomeoneFlag == False):
            SoundController.whistle(self.soundCtrlr)
            self.whereIsEveryoneFlag = False
            self.iSeeSomeoneFlag = True

    def resizeImage(self, image, width=None, height=None, inter=cv2.INTER_AREA):
        # initialize the dimensions of the image to be resized and
        # grab the image size
        dim = None
        (h, w) = image.shape[:2]

        # if both the width and height are None, then return the
        # original image
        if width is None and height is None:
            return image

        # check to see if the width is None
        if width is None:
            # calculate the ratio of the height and construct the
            # dimensions
            r = height / float(h)
            dim = (int(w * r), height)

        # otherwise, the height is None
        else:
            # calculate the ratio of the width and construct the
            # dimensions
            r = width / float(w)
            dim = (width, int(h * r))

        # resize the image
        resized = cv2.resize(image, dim, interpolation=inter)

        # return the resized image
        return resized
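
A minimal standalone sketch of the aspect-preserving resize that resizeImage implements above, using a hypothetical blank 640x480 frame; supplying only the target width derives the height from the original aspect ratio:

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype="uint8")  # hypothetical 640x480 frame
r = 300 / float(img.shape[1])                 # ratio for a 300px target width
resized = cv2.resize(img, (300, int(img.shape[0] * r)),
                     interpolation=cv2.INTER_AREA)
print(resized.shape)  # -> (225, 300, 3): height follows the same ratio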
Ejemplo n.º 25
0
 def __init__(self, master=None, title=None, video=None):
     ''' Initialize window app '''
     super().__init__(master)
     self.master = master
     # self.master.geometry('422x624+629+231') -> development layout.
     self.pack(fill=tk.BOTH, expand=1)
     self.master.title(title)
     self.master['bg'] = 'Black'
     self.master.protocol('WM_DELETE_WINDOW', self.confirmExit)
     self.master.protocol('WM_TAKE_FOCUS', self.confirmOpen)
     self.master.protocol('WM_SAVE_YOURSELF', self.confirmSave)
     self.init = True
     self.setingfile = 'flash_seting.ini'
     # self.window.resizable(width=False, height=False)
     self.n_size = None
     self.photo = None
     self.photos = Photos()
     # self.master.call('wm', 'iconphoto', self.master, self.photos._apply)
     self.soundvar = tk.DoubleVar(value=0.9)
     self.master.wm_iconphoto(True, self.photos._apply)
     self.dirImages = None
     self.dirpathmovies = tk.StringVar()  # video directory path
     self.video_source = video
     w = 350
     h = 230
     self.duracion = 100.0
     self.active_scale = False
     self.vid = None
     self.val = 'paused'
     if self.video_source is not None:
         # open video source (by default this will try to open the computer webcam)
         try:
             self.dirpathmovies.set(os.path.dirname(self.video_source))
             self.vid = VideoStream(self.video_source)
             # w_f, h_f = self.vid.w, self.vid.h
             self.duracion = self.vid.duration
             self.soundvar.set(self.vid.player.get_volume())
             frame = None
             while frame is None:
                 self.val, self.pts, frame = self.vid.get_frame()
             size = frame.get_size()
             arr = frame.to_memoryview()[0]  # raw image byte buffer
             self.imagen = Image.frombytes("RGB", size, arr.memview)
             w_f, h_f = self.imagen.size
             self.delay = self.vid.f_rate
             self.vid.toggle_pause()
             self._wxh = (w_f, (h_f + 40))
             self._twh = (300, 300)
         except Exception as e:
             print(e)
             self.video_source = None
     else:
         self.imagen = self.photos._logo
         w_f, h_f = self.imagen.size
         self.delay = 16
         self._wxh = (422, 624)
         self._twh = (629, 231)
     # window geometry adjustment.
     str_window = f'{self._wxh[0]}x{self._wxh[1]}+{self._twh[0]}+{self._twh[1]}'
     self.master.geometry(str_window)
     print('initial window geometry:', str_window)
     # volume change event
     self.soundvar.trace('w', self.soundvar_adjust)
     # Create a canvas that can fit the above video source size
     self.canvas = tk.Canvas(self, bg='red')
     # controls container
     self.conten_controls = tk.LabelFrame(self, height=4)
     # Button that lets the user take a snapshot
     self.btn_snapshot=tk.Button(self.conten_controls, text="Snapshot", command=self.snapshot)
     self.btn_snapshot['image'] = self.photos._snapshot 
     self.btn_snapshot.pack(side='left')
     # Button open
     self.btn_open = tk.Button(self.conten_controls, text='...', command=self.open_file)
     self.btn_open['image'] = self.photos._open
     self.btn_open.pack(side='right')
     # volume button
     self.btn_volume = tk.Button(self.conten_controls, text='volume', command=self.open_adjust_volumen)
     self.btn_volume['image'] = self.photos._volume
     self.btn_volume.pack(side='right')
     # Button replay
     self.btn_replay = tk.Button(self.conten_controls, text=">>", command=self.replay)
     self.btn_replay['image'] = self.photos._repeat
     self.btn_replay.pack(side='right')
     # Button play/pause
     self.btn_toogle_pause = tk.Button(self.conten_controls, text="[]", command=self.toogle_pause)
     self.btn_toogle_pause['image'] = self.photos._play
     self.btn_toogle_pause.pack(side='right')
     # Scale slider (seek bar)
     self.var_t = tk.DoubleVar()
     self.scale = tk.Scale(self.conten_controls, from_=0.0, to=self.duracion, showvalue=0, orient='horizontal', variable=self.var_t,
                     resolution=0.3, sliderrelief='flat', command=self.onScale)
     self.scale.pack(side='left', fill='x', expand=1)
     self.scale.bind('<ButtonPress-1>', self.scale_button_press)
     self.scale.bind('<ButtonRelease-1>', self.scale_button_release)
     self.master.bind('<Configure>', self.master_on_resize)
     # self.master.bind_all('<ButtonPress-1>', self.master_button_press)
     # self.master.bind('<ButtonRelease-1>', self.master_button_release)
     # self.master.wm_withdraw()
     self.canvas.pack(side =tk.TOP, fill=tk.BOTH, expand=1)
     self.conten_controls.pack(side='bottom', fill='x')
     
     
     if self.video_source is not None:
         self.scale.configure(to=self.duracion)
         self.btn_toogle_pause['image'] = self.photos._pause
         self.vid.player.set_volume(float(self.soundvar.get()))
         self.canvas_tags = None
         
     self.photo = ImageTk.PhotoImage(self.imagen)
     self.canvas.configure(width = w_f, height=h_f)
     self.canvas.create_image(w_f/2, h_f/2, image = self.photo, 
                                     anchor='center', tags='img')
     self.val = None
     self.pts = None
     if self.video_source is None:
         self.get_init_status()
     self.update()
     self.master.mainloop()
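
A minimal sketch of the frame-to-PIL conversion performed in the constructor above, assuming the same ffpyplayer-style frame API: get_frame() returns (val, pts, frame), and the frame object exposes get_size() and to_memoryview() as shown in the example.

from PIL import Image

def frame_to_pil(vid):
    # poll until the stream yields a decoded frame, as the constructor does
    frame = None
    while frame is None:
        val, pts, frame = vid.get_frame()
    size = frame.get_size()
    arr = frame.to_memoryview()[0]  # raw RGB pixel buffer
    return Image.frombytes("RGB", size, arr.memview)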
Ejemplo n.º 26
0
# import the necessary packages
from videostream import VideoStream
import imutils
import numpy as np
import cv2
import time
import math

def nothing(x):
    pass

cap = VideoStream(usePiCamera=False, resolution=(960, 540)).start()
 
# allow the camera to warmup
time.sleep(0.1)

pi = math.pi
cos = math.cos
sin = math.sin

pt1x = 860
pt1y = 30

cv2.namedWindow("River Angle")

cv2.createTrackbar("Degrees", "River Angle", 90, 180, nothing)

while True:
    
    image = cap.read()
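    # the original example is truncated here; the following is a completion
    # sketch, assuming the trackbar angle is meant to be drawn as a line
    # anchored at the fixed point (pt1x, pt1y) defined above
    degrees = cv2.getTrackbarPos("Degrees", "River Angle")
    theta = degrees * pi / 180.0
    pt2 = (int(pt1x - 200 * cos(theta)), int(pt1y + 200 * sin(theta)))
    cv2.line(image, (pt1x, pt1y), pt2, (0, 255, 0), 2)

    cv2.imshow("River Angle", image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
cap.stop()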
    
Ejemplo n.º 27
0
import imagezmq
import argparse
import socket
import time
# the VideoStream import is missing from this truncated snippet; assuming the
# same videostream module used by the other examples on this page
from videostream import VideoStream

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument(
    "-s",
    "--server-ip",
    required=True,
    help="ip address of the server to which the client will connect")
args = vars(ap.parse_args())
# initialize the ImageSender object with the socket address of the
# server
sender = imagezmq.ImageSender(
    connect_to="tcp://{}:5555".format(args["server_ip"]))

# get the host name, initialize the video stream, and allow the
# camera sensor to warmup
rpiName = socket.gethostname()
vs = VideoStream(usePiCamera=True).start()
#vs = VideoStream(usePiCamera=True, resolution=(320, 240)).start()
#vs = VideoStream(src=0).start() # Used for a usb camera
time.sleep(2.0)

while True:
    # read the frame from the camera and send it to the server
    frame = vs.read()
    #frame = imutils.resize(frame, width=320)  # optional: resize frames before sending (requires imutils)
    sender.send_image(rpiName, frame)
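
The sender above pairs with an ImageHub on the server side; a minimal receiving sketch, assuming the standard imagezmq API listening on the same port 5555:

import cv2
import imagezmq

image_hub = imagezmq.ImageHub()  # binds tcp://*:5555 by default
while True:
    rpi_name, frame = image_hub.recv_image()
    image_hub.send_reply(b'OK')  # REQ/REP pattern: unblock the sender
    cv2.imshow(rpi_name, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()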
Ejemplo n.º 28
0
# assumed imports for this truncated snippet; Kairos and my_imutils are
# project-local modules expected to be in scope
import os
import time
import cv2

gallery = None
kairos = Kairos(gallery)

source = 0  # camera

#Load a cascade file for detecting faces
#xmlfile = '../XML/haarcascades/haarcascade_frontalface_alt.xml'
xmlfile = "C:\\Prasad-IoT\\Code\\Python\\CV1-master\\XML\\haarcascades\\haarcascade_frontalface_alt.xml"
if not os.path.isfile(xmlfile):
    print "Could not find cascade training set"
    raise SystemExit(1)
face_cascade = cv2.CascadeClassifier(xmlfile)

fileName = "detected_face.jpg"
stream = VideoStream(src=source)
stream.start()
time.sleep(2.0)
frame = stream.read()
if frame is None:
    print "No camera !"
    raise SystemExit(1)

print "press any key to save file; ESC to quit.."
while (True):
    frame = stream.read()
    frame = my_imutils.resize(frame, 640)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)
    if len(faces) == 0:
        cv2.imshow("Face", frame)
Ejemplo n.º 29
0
                    angle = objectAngle(cx)
                    if self.debug:
                        self.drawStats(cx, cy, box, "wall", "black", None, dist, angle)
    
                    return [dist, angle]
                return [None, None]
            return [None, None]
        return [None, None]


if __name__ == '__main__':

    from videostream import VideoStream

    vision = VideoProcess(debug=True, goal='yellow', ball=True, obstacles=True)
    stream = VideoStream().start()
    time.sleep(2)

    try:
        while True:

            frame = stream.read()

            if frame is None:
                break

            # (obs1, obs2, obs3), goal, ball
            obs, ball, goal, wall = vision.update(frame)

            cv2.imshow("scene", vision.get())
            key = cv2.waitKey(1) & 0xFF
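            # completion sketch: the original example is truncated here;
            # press 'q' to quit
            if key == ord('q'):
                break

    finally:
        # release the camera stream and close the display window
        stream.stop()
        cv2.destroyAllWindows()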
Ejemplo n.º 30
0
# -*- coding: utf-8 -*-

'''
### Example 1: open a USB camera
'''
import cv2
from videostream import VideoStream
from imutils.video import FPS

camera_id = 0

vs = VideoStream(camera_id)

vs.start()
fps = FPS().start()

while True:
    res, frame = vs.read()
    fps.update()
    if not res:
        print("Camera Read Failed")
        break
    
    cv2.imshow("test", frame)
    waitkey = cv2.waitKey(1) & 0xFF
    if waitkey == ord('q'):
        break

fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))