Code example #1

import tkinter as tk
import numpy as np
from imutils.video import VideoStream

# Allow the user to enter an experiment name; StartDialog is a dialog class
# defined elsewhere in this project.
askWindow = tk.Tk()
d = StartDialog(askWindow)
[exptName, condit, syncFile, shutoffTime, trackingToggle, twoCam] = d.result
# An empty shutoff time means run indefinitely
if len(shutoffTime) == 0:
    shutoffTime = np.inf
else:
    shutoffTime = float(shutoffTime)
askWindow.destroy()

h = 240
w = 320
if twoCam:
    cap = VideoStream(src=2, resolution=(w, h)).start()
    # cap2 = VideoStream(usePiCamera=True).start()
    cap2 = VideoStream(src=0, resolution=(w, h)).start()
    camList = [cap, cap2]
    # Per-camera logs: frame timestamps, ROIs, ROI names and stats, centroid
    # coordinates, velocities, and rotations
    camTsList = [list(), list()]
    ROIList = [list(), list()]
    ROINameList = [list(), list()]
    ROIStatList = [list(), list()]
    ROIShowCheck = [0, 0]
    camCtrXList = [list(), list()]
    camCtrYList = [list(), list()]
    camVelList = [list(), list()]
    camRotList = [list(), list()]
    anglePrev = [list(), list()]
else:
    cap = VideoStream(src=2, resolution=(w * 2, h * 2)).start()
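
The setup above only initializes state; as a rough sketch, and assuming the twoCam branch, frames might then be pulled and timestamped like this (the loop below is an illustrative assumption, not part of the original project):

import time

# Illustrative capture loop (assumption): read one frame per camera and log a
# relative timestamp until the configured shutoff time elapses.
startTime = time.time()
while time.time() - startTime < shutoffTime:
    for i, cam in enumerate(camList):
        frame = cam.read()
        if frame is None:
            continue
        camTsList[i].append(time.time() - startTime)
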
Code example #2
from tkinter.ttk import *
import tkinter as tk
from tkinter import *
import cv2
from PIL import Image, ImageTk
import os
import numpy as np
import picamera
from picamera.array import PiRGBArray
import imutils
from imutils.video import VideoStream
import time
import RPi.GPIO as GPIO

cap = VideoStream(src=0).start()
# cap2 = VideoStream(usePiCamera=True).start()
cap2 = VideoStream(src=3).start()
time.sleep(2.0)
startTime = time.time()
lineHolder = list()
lineHolder2 = list()
graphCheck = -1
graphCheck2 = -1
frac = 0
minTime = 0

# Sync/trigger input on BCM pin 24, held high by the internal pull-up
inputPin1 = 24
GPIO.setmode(GPIO.BCM)
GPIO.setup(inputPin1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
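
The excerpt ends before its main loop; as a rough sketch, the pin and streams configured above might be polled like this (the loop and the name syncActive are illustrative assumptions, not from the original project):

# Illustrative polling loop (assumption): grab a frame from each stream and
# sample the sync pin, which reads LOW when the external line pulls it down.
while True:
    frame = cap.read()
    frame2 = cap2.read()
    if frame is None or frame2 is None:
        break
    syncActive = GPIO.input(inputPin1) == GPIO.LOW
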
Code example #3

import argparse
import time

import cv2
import numpy as np
from imutils.video import VideoStream, FPS

class MyVideoCapture:
    def __init__(
        self,
        hell,
        ip,
        usern,
        passw,
        video_source=0,
    ):
        # Open the video source

        self.vid = cv2.VideoCapture(
            f"rtsp://{usern}:{passw}@{ip}:554/cam/realmonitor?channel=3&subtype=0")
        # self.vid = cv2.VideoCapture("videos/airport.mp4")
        if not self.vid.isOpened():
            raise ValueError("Unable to open RTSP video source at " + ip)
        ap = argparse.ArgumentParser()
        ap.add_argument("-p",
                        "--prototxt",
                        required=True,
                        help="path to Caffe 'deploy' prototxt file")
        ap.add_argument("-m",
                        "--model",
                        required=True,
                        help="path to Caffe pre-trained model")
        ap.add_argument("-c",
                        "--confidence",
                        type=float,
                        default=0.2,
                        help="minimum probability to filter weak detections")
        self.args = vars(ap.parse_args())

        # initialize the list of class labels MobileNet SSD was trained to
        # detect, then generate a set of bounding box colors for each class
        self.CLASSES = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]
        self.COLORS = np.random.uniform(0, 255, size=(len(self.CLASSES), 3))

        # load our serialized model from disk
        print("[INFO] loading model...")
        self.net = cv2.dnn.readNetFromCaffe(self.args["prototxt"],
                                            self.args["model"])

        # initialize the video stream, allow the camera sensor to warm up,
        # and initialize the FPS counter
        print("[INFO] starting video stream...")
        self.vs = VideoStream(src=0).start()
        time.sleep(2.0)
        self.fps = FPS().start()

        # Get the video source width and height
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)

    def get_frame(self):
        frame = self.vs.read()

        frame = cv2.resize(frame, (1280, 720))

        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        # pass the blob through the network and obtain the detections and
        # predictions
        self.net.setInput(blob)
        detections = self.net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence > self.args["confidence"]:
                # extract the index of the class label from the
                # `detections`, then compute the (x, y)-coordinates of
                # the bounding box for the object
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # draw the prediction on the frame
                label = "{}: {:.2f}%".format(self.CLASSES[idx],
                                             confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              self.COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.COLORS[idx], 2)

        self.fps.update()
        # Return the current frame, converted from BGR to RGB for display
        return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Release the video sources when the object is destroyed
    def __del__(self):
        if hasattr(self, "vs"):
            self.vs.stop()
        if hasattr(self, "vid") and self.vid.isOpened():
            self.vid.release()
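
Because the constructor parses --prototxt and --model from the command line, a driver script must pass them when launching; the snippet below is a hypothetical usage example (the script name, IP address, credentials, and window name are placeholders, and None fills the unused hell parameter):

# Hypothetical driver, run as e.g.:
#   python app.py --prototxt MobileNetSSD_deploy.prototxt \
#                 --model MobileNetSSD_deploy.caffemodel
vid = MyVideoCapture(None, "192.0.2.10", "admin", "password")
while True:
    rgb = vid.get_frame()
    # get_frame() returns RGB (handy for tkinter/PIL), so convert back to BGR
    # before handing the frame to cv2.imshow
    cv2.imshow("detections", cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
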
Code example #4
File: selfieCam.py  Project: jsschor/selfieCam
import cv2
import imutils
from imutils.video import VideoStream
import time
import pigpio
import datetime
import tkinter as tk
from tkinter import simpledialog
import numpy as np
from subprocess import call
import os

h = 480
w = 640
dispW, dispH = (1024, 768)
frameRate = 90
cap = VideoStream(usePiCamera=True, resolution=(w, h),
                  framerate=frameRate).start()
# Reach the underlying PiCamera through the VideoStream's stream attribute
cap.stream.camera.vflip = True
windName = "selfie cam"
preVid = 1
vid = 0
cv2.namedWindow(windName)
cv2.moveWindow(windName, dispW // 2 - w // 2, dispH // 2 - h // 2)
startTime = 0
saveName = ''
savePath = ''
time.sleep(0.5)
# Lock auto white balance at its current gains so colors stay consistent
whiteRedHold, whiteBlueHold = cap.stream.camera.awb_gains
cap.stream.camera.awb_mode = 'off'
cap.stream.camera.awb_gains = (whiteRedHold, whiteBlueHold)
stop = False
charStart = 5
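
The excerpt stops during setup; a preview loop for the window created above might look like this sketch (the loop and the key handling are assumptions, not part of the original file):

# Illustrative preview loop (assumption): show frames until 'q' is pressed
while not stop:
    frame = cap.read()
    if frame is None:
        break
    cv2.imshow(windName, frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        stop = True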