Code example #1
0
File: Camera.py  Project: RdeLange/home_surveillance
 def __init__(self, camName, camURL, cameraFunction, dlibDetection,
              fpsTweak):
     """Open an IP-camera stream and start the background capture thread.

     Args:
         camName: Human-readable name of this camera.
         camURL: Stream URL (or device identifier) handed to cv2.VideoCapture.
         cameraFunction: Processing-mode selector stored for later use.
         dlibDetection: True to detect faces with dlib, False to use OpenCV.
         fpsTweak: True to apply the FPS workaround used with many cameras.
     """
     logger.info("Loading Stream From IP Camera: " + camURL)
     self.motionDetector = MotionDetector.MotionDetector()
     self.faceDetector = FaceDetector.FaceDetector()
     self.processing_frame = None
     self.tempFrame = None
     self.captureFrame = None
     self.streamingFPS = 0  # Streaming frame rate per second
     self.processingFPS = 0
     self.FPSstart = time.time()
     self.FPScount = 0
     self.motion = False  # Used for alerts and transition between system states, i.e. from motion detection to face detection
     self.people = {}  # Holds person ID and corresponding person object
     self.trackers = []  # Holds all alive trackers
     self.cameraFunction = cameraFunction
     self.dlibDetection = dlibDetection  # Chooses detection method (dlib - True vs opencv - False)
     self.fpsTweak = fpsTweak  # Whether to apply the FPS workaround for many-camera setups
     self.rgbFrame = None
     self.faceBoxes = None
     self.captureEvent = threading.Event()
     self.captureEvent.set()
     # Guards concurrent access to the people dictionary.
     self.peopleDictLock = threading.Lock()
     # VideoCapture object used to capture frames from the IP camera.
     self.video = cv2.VideoCapture(camURL)
     logger.info("We are opening the video feed.")
     self.url = camURL
     self.camName = camName
     if not self.video.isOpened():
         # BUG FIX: VideoCapture.open() requires a source argument; calling
         # it with none raises TypeError. Retry with the original URL.
         self.video.open(camURL)
     logger.info("Video feed open.")
     self.dump_video_info()  # logging every spec of the video feed
     # Start a thread to continuously capture frames.
     # The capture thread ensures the frames being processed are up to date and are not old.
     self.captureLock = threading.Lock()  # Sometimes used to prevent concurrent access
     self.captureThread = threading.Thread(name='video_captureThread ' +
                                           camURL,
                                           target=self.get_frame)
     self.captureThread.daemon = True
     # Set the stop flag before start() so get_frame never observes a
     # missing attribute.
     self.captureThread.stop = False
     self.captureThread.start()
     # RdL: load parameters from HSConfig.cfg.
     # NOTE(review): SafeConfigParser is deprecated and removed in
     # Python 3.12 — migrate to configparser.ConfigParser.
     hsconfigparser = SafeConfigParser()
     hsconfigparser.read('HSConfig.cfg')
     self.param_cameramode = hsconfigparser.get('MACHINERY', 'cameramode')
     logger.info('Video Feed opened in mode: ' + self.param_cameramode)
     print('Video Feed opened in mode: ' + self.param_cameramode)
Code example #2
0
 def __init__(self, camURL, cameraFunction, dlibDetection, fpsTweak):
     """Open an RTSP stream through a GStreamer pipeline and start capture.

     Args:
         camURL: RTSP URL of the camera, spliced into the GStreamer pipeline.
         cameraFunction: Processing-mode selector stored for later use.
         dlibDetection: True to detect faces with dlib, False to use OpenCV.
         fpsTweak: True to apply the FPS workaround used with many cameras.
     """
     logger.info("Loading Stream From IP Camera: " + camURL)
     self.motionDetector = MotionDetector.MotionDetector()
     self.faceDetector = FaceDetector.FaceDetector()
     self.processing_frame = None
     self.tempFrame = None
     self.captureFrame = None
     self.streamingFPS = 0  # Streaming frame rate per second
     self.processingFPS = 0
     self.FPSstart = time.time()
     self.FPScount = 0
     self.motion = False  # Used for alerts and transition between system states, i.e. from motion detection to face detection
     self.people = {}  # Holds person ID and corresponding person object
     self.trackers = []  # Holds all alive trackers
     self.cameraFunction = cameraFunction
     self.dlibDetection = dlibDetection  # Chooses detection method (dlib - True vs opencv - False)
     self.fpsTweak = fpsTweak  # Whether to apply the FPS workaround for many-camera setups
     self.rgbFrame = None
     self.faceBoxes = None
     self.captureEvent = threading.Event()
     self.captureEvent.set()
     # Guards concurrent access to the people dictionary.
     self.peopleDictLock = threading.Lock()
     uri = camURL
     # NOTE(review): latency/width/height/framerate are no longer used by
     # the active pipeline below — kept for parity with earlier variants.
     latency = 100
     width = 1280
     height = 720  # 738
     framerate = 25
     # Hardware-decoded RTSP pipeline (nvv4l2decoder) ending in a BGR appsink.
     gst_str = "rtspsrc location={} ! application/x-rtp, media=video ! rtph264depay ! h264parse ! nvv4l2decoder ! nvvidconv ! video/x-raw, format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink".format(
         uri)
     self.video = cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
     logger.info("We are opening the video feed.")
     self.url = camURL
     logger.info("Video feed open.")
     self.dump_video_info()  # logging every spec of the video feed
     # Start a thread to continuously capture frames.
     # The capture thread ensures the frames being processed are up to date and are not old.
     self.captureLock = threading.Lock()  # Sometimes used to prevent concurrent access
     self.captureThread = threading.Thread(name='video_captureThread',
                                           target=self.get_frame)
     self.captureThread.daemon = True
     # BUG FIX: the stop flag must exist BEFORE start(); otherwise get_frame
     # can race and raise AttributeError reading captureThread.stop.
     self.captureThread.stop = False
     self.captureThread.start()
Code example #3
0
	def __init__(self, camURL, cameraFunction="detect_recognise_track", dlibDetection=True, fpsTweak=False):
		"""Open an IP camera (or local webcam for 'w') and start capture.

		Args:
			camURL: Stream URL, or the literal 'w' to use local webcam 0.
			cameraFunction: Processing-mode selector (default "detect_recognise_track").
			dlibDetection: True to detect faces with dlib, False to use OpenCV.
			fpsTweak: True to apply the FPS workaround used with many cameras.
		"""
		logger.info("Loading Stream From IP Camera: " + camURL)
		self.motionDetector = MotionDetector.MotionDetector()
		self.faceDetector = FaceDetector.FaceDetector()
		self.processing_frame = None
		self.tempFrame = None
		self.captureFrame = None
		self.streamingFPS = 0  # Streaming frame rate per second
		self.processingFPS = 0
		self.FPSstart = time.time()
		self.FPScount = 0
		self.motion = False  # Used for alerts and transition between system states, i.e. from motion detection to face detection
		self.people = {}  # Holds person ID and corresponding person object
		self.trackers = []  # Holds all alive trackers
		self.cameraFunction = cameraFunction
		self.dlibDetection = dlibDetection  # Chooses detection method (dlib - True vs opencv - False)
		self.fpsTweak = fpsTweak  # Whether to apply the FPS workaround for many-camera setups
		self.rgbFrame = None
		self.faceBoxes = None
		self.captureEvent = threading.Event()
		self.captureEvent.set()
		self.peopleDictLock = threading.Lock()  # Guards concurrent access to the people dictionary

		# 'w' selects the default local webcam instead of an IP stream.
		if camURL == 'w':
			self.video = cv2.VideoCapture(0)
			camURL = 0
		else:
			self.video = cv2.VideoCapture(camURL)  # VideoCapture object used to capture frames from IP camera

		# Property ids 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
		self.video.set(3, 640)
		self.video.set(4, 480)
		self.url = camURL
		logger.info("We are opening the video feed.")
		logger.info("Video feed open.")
		# Start a thread to continuously capture frames.
		# The capture thread ensures the frames being processed are up to date and are not old.
		self.captureLock = threading.Lock()  # Sometimes used to prevent concurrent access
		self.captureThread = threading.Thread(name='video_captureThread', target=self.get_frame)
		self.captureThread.daemon = True
		# BUG FIX: the stop flag must exist BEFORE start(); otherwise get_frame
		# can race and raise AttributeError reading captureThread.stop.
		self.captureThread.stop = False
		self.captureThread.start()
Code example #4
0
 def __init__(self, camURL, cameraFunction, dlibDetection):
     """Open an IP-camera stream and start the background capture thread.

     Args:
         camURL: Stream URL (or device identifier) handed to cv2.VideoCapture.
         cameraFunction: Processing-mode selector stored for later use.
         dlibDetection: True to detect faces with dlib, False to use OpenCV.
     """
     print("Loading Stream From IP Camera ", camURL)
     self.motionDetector = MotionDetector.MotionDetector()
     self.faceDetector = FaceDetector.FaceDetector()
     self.processing_frame = None
     self.tempFrame = None
     self.captureFrame = None
     self.streamingFPS = 0  # Streaming frame rate per second
     self.processingFPS = 0
     self.FPSstart = time.time()
     self.FPScount = 0
     self.motion = False  # Used for alerts and transition between system states, i.e. from motion detection to face detection
     self.people = {}  # Holds person ID and corresponding person object
     self.trackers = []  # Holds all alive trackers
     self.cameraFunction = cameraFunction
     self.dlibDetection = dlibDetection  # Chooses detection method (dlib - True vs opencv - False)
     self.rgbFrame = None
     self.faceBoxes = None
     self.captureEvent = threading.Event()
     self.captureEvent.set()
     # Guards concurrent access to the people dictionary.
     self.peopleDictLock = threading.Lock()
     # VideoCapture object used to capture frames from the IP camera.
     self.video = cv2.VideoCapture(camURL)
     self.url = camURL
     if not self.video.isOpened():
         # BUG FIX: VideoCapture.open() requires a source argument; calling
         # it with none raises TypeError. Retry with the original URL.
         self.video.open(camURL)
     # Start a thread to continuously capture frames.
     # The capture thread ensures the frames being processed are up to date and are not old.
     self.captureLock = threading.Lock()  # Sometimes used to prevent concurrent access
     self.captureThread = threading.Thread(name='video_captureThread',
                                           target=self.get_frame)
     self.captureThread.daemon = True
     self.captureThread.start()
Code example #5
0
def detectMotion():
    """Continuously poll the motion detector and publish the latest result.

    Runs forever: each iteration stores MotionDetector.detectMotion()'s
    return value under the "Motion Detected" key of the module-level
    sharedDictionary — presumably a dict shared with other workers that
    read the flag; confirm against the caller that spawns this loop.
    NOTE(review): there is no sleep, so this is a busy loop unless
    detectMotion() itself blocks — verify in MotionDetector.
    """
    print("Ready to detect motion...")
    while True:
        sharedDictionary["Motion Detected"] = MotionDetector.detectMotion()
Code example #6
0
 def motiondetector(self):
     """Create and start a MotionDetector bound to this object.

     Returns:
         The started MotionDetector instance. The original discarded the
         reference, making it impossible for callers to ever stop or join
         the detector; returning it is backward-compatible (callers that
         ignored the previous implicit None still work).
     """
     detector = MotionDetector.MotionDetector(self)
     detector.start()
     return detector
Code example #7
0
File: TestLabelVideo.py  Project: CountChu/LabelItems
def main():
    """Label items in a video stream.

    Parses command-line options, opens a video file or the default camera,
    runs motion detection, and labels frames once the scene has been static
    for ten consecutive frames. Optionally perspective-transforms frames
    (-t) and records them to an output file (-o). Press ESC to exit.
    """

    #
    # Parse arguments.
    #

    cfg = {'h': False, 'd': False, 'f': '', 'a': 'a1', 'o': '', 't': False}

    try:
        # BUG FIX: long options that take a value need a trailing '=' so
        # getopt actually consumes the argument; previously --file/
        # --algorithm/--output silently lost their values.
        (opts, args) = getopt.getopt(
            sys.argv[1:], 'hdf:a:o:t',
            ['help', 'debug', 'file=', 'algorithm=', 'output=', 'transform'])
    except getopt.GetoptError as err:
        print(str(err))
        help()
        sys.exit(0)

    print(opts)

    for o, a in opts:
        if o in ('-h', '--help'):
            cfg['h'] = True
        elif o in ('-d', '--debug'):
            cfg['d'] = True
        elif o in ('-f', '--file'):
            cfg['f'] = a
        elif o in ('-a', '--algorithm'):
            cfg['a'] = a
        elif o in ('-o', '--output'):
            cfg['o'] = a
        elif o in ('-t', '--transform'):
            cfg['t'] = True
        else:
            help()
            sys.exit(0)

    if cfg['h']:
        help()
        sys.exit(0)

    #
    # Build a LabelImage object ('a1' = OpenCV backend, 'a2' = scikit-image).
    #

    if cfg['a'] == 'a1':
        labelImage = LabelImageCv2.LabelImage(True)
    elif cfg['a'] == 'a2':
        labelImage = LabelImageSki.LabelImage(True)
    else:
        help()
        sys.exit(0)

    #
    # Open file or camera.
    #

    if cfg['f'] != '':
        cam = cv2.VideoCapture(cfg['f'])
    else:
        cam = cv2.VideoCapture(0)

    if cfg['o'] != '':
        # Probe one frame for its dimensions before creating the writer.
        outputFn = cfg['o']
        (grabbed, frame) = cam.read()
        (fheight, fwidth, _) = frame.shape
        print(fwidth, fheight)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(outputFn, fourcc, 20.0, (fwidth, fheight))

    winName = "Movement Indicator"
    cv2.namedWindow(winName)

    md = MotionDetector.MotionDetector(cam)
    md.readFirstFrame()

    staticCount = 0      # consecutive non-motion frames seen so far
    lastStatic = False
    isStatic = False
    labelTrigger = False

    if cfg['t']:
        maxApprox = None  # perspective quad, computed lazily from first frame

    seq = 0
    writeSeq = 0
    while True:

        labeledImage = None
        transformedImage = None
        displayedFrame = None
        whiteFrame = None

        if cfg['t']:

            # Compute the perspective quad once, then warp every frame.
            if maxApprox is None:
                labelImage.processImages = []
                maxApprox = labelImage.getMaxApprox(md.frame)

            if maxApprox is not None:
                md.frame = labelImage.transform(md.frame, maxApprox, 640, 480)

        if cfg['o']:
            out.write(md.frame)
            print('Write frame ', writeSeq)
            writeSeq += 1

        lastStatic = isStatic

        # A scene counts as static after 10 consecutive motionless frames.
        if not md.isMotion():

            staticCount += 1

            if staticCount >= 10:
                staticCount = 0
                isStatic = True

        else:
            isStatic = False

        # Trigger labelling exactly on the moving -> static transition.
        labelTrigger = False
        if not lastStatic and isStatic:
            labelTrigger = True

        title = "lastStatic = %d, isStatic = %d, labelTrigger = %d" % (
            lastStatic, isStatic, labelTrigger)

        labelImage.processImages = []
        if labelTrigger:

            labelImage.handle(md.frame, False)
            displayedFrame = labelImage.finalImage
            whiteFrame = labelImage.whiteImage

        if not isStatic:
            displayedFrame = md.frame
            whiteFrame = md.frame.copy()
            whiteFrame.fill(255)

        if cfg['t']:
            if transformedImage is not None:
                cv2.imshow("Transform", transformedImage)

        if labeledImage is not None:
            cv2.imshow("2 Before Original", labeledImage)

        if displayedFrame is not None:

            cv2.putText(displayedFrame, title,
                        (10, displayedFrame.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            cv2.imshow("Final", displayedFrame)

        if whiteFrame is not None:
            cv2.imshow("White", whiteFrame)

        #
        # Read next frame
        #

        md.readNextFrame()
        seq += 1

        # Wait 1 ms for a key press so the windows repaint.
        key = cv2.waitKey(1)
        if key == 27:  # ESC
            cv2.destroyWindow(winName)
            break

    if cfg['o']:
        out.release()

    print('seq = ', seq)
    print('writeSeq = ', writeSeq)
    print("Goodbye")