Example #1
    def __init__(self, points, previousTime, currentTime, carPark):

        self.points = points
        self.previousTime = previousTime
        self.currentTime = currentTime
        self.carPark = carPark
        self.detector = MotionDetector(self.points, 1, self.carPark)
Example #2
    def __init__(self):
        # initialize the node
        rospy.init_node('commander_2')

        # state consumed by the loop below; set before the subscribers can fire
        self.state_counter = 0
        self.last_pose = None
        self.last_image = None
        self.new_image = None

        rospy.Subscriber('amcl_pose', PoseWithCovarianceStamped, self.new_pose_callback)
        rospy.Subscriber('camera/rgb/image_raw', Image, self.new_image_callback)

        rate = rospy.Rate(1)
        motion_detector = MotionDetector()

        while not rospy.is_shutdown():
            self.state_counter += 1
            self.should_detect_motion = self.state_counter > 3

            if self.should_detect_motion:
                # motion detection
                if self.last_image is not None and self.new_image is not None:
                    rospy.loginfo("Detecting motion")
                    motion_detector.show_diff(self.last_image, self.new_image)
            else:
                # navigate
                if self.last_pose is not None:
                    rospy.loginfo("Navigating")
                    self.navigate_commander()
                    self.last_image = None
                    self.new_image = None
                else:
                    rospy.logwarn("Current pose is None")
                    rospy.loginfo("Recovering")
                    self.recover_commander()
                    self.last_image = None
                    self.new_image = None

            self.state_counter %= 10
            rate.sleep()
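
The loop relies on pose and image callbacks that this snippet does not show. A minimal sketch of what they presumably do (an assumption inferred from the attributes read above; the newest frame is shifted into last_image so show_diff() can compare consecutive frames):

    def new_pose_callback(self, msg):
        self.last_pose = msg.pose.pose

    def new_image_callback(self, msg):
        # keep the previous frame around for differencing
        self.last_image = self.new_image
        self.new_image = msg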
Example #3
def main():
    logging.basicConfig(level=logging.INFO)

    cap = cv2.VideoCapture("./videos/sample1.mp4")

    ret, frame = cap.read()
    if not ret:
        raise IOError("could not read a frame from the video")
    #cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    cv2.imwrite('test111.jpg', frame)


    start_frame = "4000"
    image_file = "./images/parking_lot_1.png"
    test_image = "test111.jpg"
    data_file = "./data/coordinates_1.yml"
    video_file = "./videos/sample1.mp4"
    #video_url = "parking_lot_1.mp4"
    video_url = "C:/Users/YHJ/PycharmProjects/parkingLot/DetectParking-develop/ParkingLot-master/parking_lot/videos/sample1.mp4"



    if image_file is not None:
        with open(data_file, "w+") as points:

            generator = CoordinatesGenerator(test_image, points, COLOR_RED)
            generator.generate()

    with open(data_file, "r") as data:
        points = yaml.load(data, Loader=yaml.FullLoader)
        print(points)
        detector = MotionDetector(video_file, points, int(start_frame))
        detector.detect_motion()
Example #4
    def __init__(self, points, previousTime, currentTime, waterLevelSlots):

        self.points = points
        self.previousTime = previousTime
        self.currentTime = currentTime
        self.waterLevelSlots = waterLevelSlots
        self.detector = MotionDetector(self.points, 1, self.waterLevelSlots)
Example #5
def main():
    logging.basicConfig(level=logging.INFO)

    # args = parse_args()

    image_file = "images/parking_lot_1.png"  #args.image_file
    data_file = "data/coordinates_1.yml"  #args.data_file
    start_frame = 0  #args.start_frame

    if image_file is not None:
        with open(data_file, "w+") as points:
            generator = CoordinatesGenerator(image_file, points, COLOR_RED)
            generator.generate()
        # persist the generated coordinates (source, destination)
        with open("data\\pastCordinate.pickle", 'wb') as storeCor:
            pickle.dump(generator.saveCordinate, storeCor)
        print(generator.saveCordinate)
        logging.info(generator.saveCordinate)

    with open(data_file, "r") as data:
        points = yaml.safe_load(data)  # bare yaml.load(data) is rejected by PyYAML >= 6
        # points = load(data, Loader=yaml.Loader)
        # detector = MotionDetector(args.video_file, points, int(start_frame))
        detector = MotionDetector("videos/parking_lot_1.mp4", points,
                                  int(start_frame))
        # pass the number of the spot you want to watch
        # (in the image, spot numbers are 1-indexed)
        spotNo = 0
        detector.detect_motion(spotNo)
Example #6
def gen(name):
    data_file = 'data/' + name + '.yml'
    start_frame = '1'
    video_file = 'videos/' + name + '.mp4'

    with open(data_file, "r") as data:
        points = yaml.load(data, Loader=yaml.FullLoader)
        detector = MotionDetector(video_file, points, int(start_frame))
        detector.detect_motion()
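
A hypothetical invocation, assuming a matching data/<name>.yml and videos/<name>.mp4 pair exists (the name is a placeholder):

gen('parking_lot_1')  # reads data/parking_lot_1.yml and videos/parking_lot_1.mp4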
Example #7
def main():

    print('main executed')
    logging.basicConfig(level=logging.INFO)

    image_file = "images/parking_lot_1.png"
    data_file = "data/coordinates_1.yml"
    start_frame = 400
    video_file = "videos/parking_lot_1.mp4"

    f = open("config.txt", "r")
    arr = {}
    for _ in range(0, 2):
        result = f.readline()
        arr[result.split(' : ')[0]] = result.split(' : ')[-1].split('\n')[0]
    print(arr)
    f.close()
    url = arr['img_url']

    img_resp_pic = requests.get(url)
    img_arr_pic = np.array(bytearray(img_resp_pic.content), dtype=np.uint8)
    img1_pic = cv2.imdecode(img_arr_pic, -1)
    filename = 'savedImage.jpg'
    cv2.imwrite(filename, img1_pic)

    #cv2.imshow("AndroidCamPic",img1_pic)
    #cv2.waitKey(0)==27

    #cv2.destroyWindow("AndroidCamPic")
    '''while True:
        img_resp= requests.get(url)
        img_arr= np.array(bytearray(img_resp.content),dtype=np.uint8)
        img = cv2.imdecode(img_arr, -1)
        
        cv2.imshow("AndroidCam",img)
        
        if cv2.waitKey(1)==27:
             break
    cv2.destroyWindow("AndroidCamPic")'''
    if filename is not None:
        with open(data_file, "w+") as points:
            generator = CoordinatesGenerator(filename, points, COLOR_RED)
            generator.generate()

    with open(data_file, "r") as data:
        '''try:
            thread.start_new_thread( MotionDetector, ( ) )
            thread.start_new_thread( alpr, ( ) )
        except:
            print("Error: unable to start thread")'''
        points = yaml.safe_load(data)
        detector = MotionDetector(video_file, points, int(start_frame))
        detector.detect_motion()
Example #8
def main():
    
    logging.basicConfig(level=logging.INFO)

    args = parse_args()

    image_file = args.image_file
    data_file = args.data_file
    start_frame = args.start_frame
    
    if image_file is not None:
        with open(data_file, "w+") as points:
            generator = CoordinatesGenerator(image_file, points, COLOR_RED)
            generator.generate()
    
    while True:
        
        capture_duration = 10
        
        cap = cv2.VideoCapture(1)
        
        start_time = time.time()

        if not cap.isOpened():
            print("error: could not open the capture device")
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        # pass the fourcc created above (the original passed -1, which left it unused)
        out = cv2.VideoWriter('videos/output.mp4', fourcc, 20.0, (640, 480))
        
        while(int(time.time() - start_time) < capture_duration):
            ret, frame = cap.read()
            if ret:
                out.write(frame)
                cv2.imshow('Frame',frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                break
        cap.release()
        out.release()
        cv2.destroyAllWindows()

        with open(data_file, "r") as data:
            points = yaml.load(data,Loader=yaml.FullLoader)
            
            # run MotionDetector from motion_detector.py
            # (note: this analyzes args.video_file, not the clip just recorded to videos/output.mp4)
            detector = MotionDetector(args.video_file, points, int(start_frame))
            detector.detect_motion()
Example #9
def detect_motion(frameCount):
    global vs, outputFrame, lock, motion_detected

    # initialize the motion detector and the total number of frames read
    md = MotionDetector(accumWeight=0.1)
    total = 0

    while True:
        # read the next frame from the stream, resize it, convert it to grayscale, and blur it
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # timestamp on image
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime(
            "%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        # once enough frames have been read to build the background model,
        # start detecting
        if total > frameCount:
            # detect motion in the current frame
            motion = md.detect(gray)

            # check if motion was found
            if motion is not None:
                # draw bounding box
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (0, 0, 255), 2)

                if not motion_detected:
                    res = telegram_bot.telegram_bot_sendtext(
                        "motion detected: " +
                        timestamp.strftime("%A %d %B %Y %I:%M:%S%p"))
                    print(res)
                    motion_detected = True


        # update background model and increment total frames
        md.update(gray)
        total += 1

        # acquire the lock, set output frame, release
        with lock:
            outputFrame = frame.copy()
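
For reference, the update()/detect() interface used above matches the common background-averaging detector built around cv2.accumulateWeighted; a minimal sketch of such a class (an illustration of the pattern, not necessarily this project's implementation):

import cv2
import imutils
import numpy as np

class MotionDetector:
    def __init__(self, accumWeight=0.5):
        self.accumWeight = accumWeight   # weight of new frames in the running average
        self.bg = None                   # background model

    def update(self, image):
        # seed the model with the first frame, then average subsequent frames in
        if self.bg is None:
            self.bg = image.copy().astype("float")
            return
        cv2.accumulateWeighted(image, self.bg, self.accumWeight)

    def detect(self, image, tVal=25):
        # threshold the difference between the frame and the background model
        delta = cv2.absdiff(self.bg.astype("uint8"), image)
        thresh = cv2.threshold(delta, tVal, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=2)

        # one bounding box around all moving regions combined
        cnts = imutils.grab_contours(
            cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
        if len(cnts) == 0:
            return None
        (minX, minY) = (np.inf, np.inf)
        (maxX, maxY) = (-np.inf, -np.inf)
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            (minX, minY) = (min(minX, x), min(minY, y))
            (maxX, maxY) = (max(maxX, x + w), max(maxY, y + h))
        return (thresh, (minX, minY, maxX, maxY))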
Example #10
def main():
    logging.basicConfig(level=logging.INFO)

    image_file = "images/parking_lot_1.png"
    data_file = "data/coordinates_1.yml"
    start_frame = 400
    video_file = "videos/parking_lot_1.mp4"

    if image_file is not None:
        with open(data_file, "w+") as points:
            generator = CoordinatesGenerator(image_file, points, COLOR_RED)
            generator.generate()

    with open(data_file, "r") as data:
        points = yaml.safe_load(data)
        detector = MotionDetector(video_file, points, int(start_frame))
        detector.detect_motion()
Example #11
def main():
    logging.basicConfig(level=logging.INFO)

    args = parse_args()

    image_file = args.image_file
    data_file = args.data_file
    start_frame = args.start_frame

    if image_file is not None:
        with open(data_file, "w+") as points:
            generator = CoordinatesGenerator(image_file, points, COLOR_RED)
            generator.generate()

    with open(data_file, "r") as data:
        points = yaml.safe_load(data)
        detector = MotionDetector(args.video_file, points, int(start_frame))
        detector.detect_motion()
Example #12
def main():
    logging.basicConfig(level=logging.INFO)

    args = parse_args()

    image_file = args.image_file
    data_file = args.data_file
    start_frame = args.start_frame

    # If image file is provided, prompt user to generate rectangles
    if image_file is not None:
        with open(data_file, "w+") as points:
            generator = CoordinatesGenerator(image_file, points, COLOR_RED)
            generator.generate()

    # Read the coordinates file back in and run detection
    with open(data_file, "r") as data:
        points = yaml.safe_load(data)
        detector = MotionDetector(args.video_file, points, int(start_frame))
        detector.detect_motion()
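
For orientation, the file these mains read back is the plain YAML that CoordinatesGenerator wrote; a small self-contained sketch of the shape it appears to have (the id/coordinates keys are an assumption based on how points is consumed, not taken from this page):

import yaml

sample = """
- id: 0
  coordinates: [[292, 368], [330, 316], [400, 336], [360, 392]]
- id: 1
  coordinates: [[406, 332], [438, 284], [502, 300], [466, 352]]
"""
points = yaml.safe_load(sample)
print(points[0]['id'], points[0]['coordinates'])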
Example #13
def create(videoSrc, indexFrame=30):
    capture = cv2.VideoCapture(videoSrc)
    # don't take the very first frame of the capture: the video peripheral
    # may need some initialization before it produces acceptable frames
    for _ in range(indexFrame):
        isOpen, firstImg = capture.read()
        if not isOpen:
            raise IOError("video stream '" + str(videoSrc) +
                          "' is not readable")
    motionDetector = MotionDetector.create(firstImg)
    return VideoFeed(capture, motionDetector)
Example #14
def main():
    global LM, RM, led
    m = MotionDetector()
    t = time.time()
    while True:
        # Read the tilt and orientation from the other program
        m.update_parameters()
        x, y, z, orientation, acceleration = m.get_parameters()

        #Change the orientation and acceleration to motor speeds
        LM, RM = accelerationToMotors(acceleration, LM, RM)
        LM, RM = orientationToMotors(orientation, LM, RM)

        for event in sense.stick.get_events():
            if event.direction == 'middle' and event.action == 'pressed':
                LM, RM = STOP_L, STOP_R

        led.set_left_motor(LM)
        led.set_right_motor(RM)
        led.show()

        if time.time() - t > 1:
            sendSignal(LM, RM)
            t = time.time()
Example #15
def main():
    logging.basicConfig(level=logging.INFO)

    args = parse_args()

    image_file = args.image_file
    data_generate_file = args.data_generate_file
    data_file = args.data_file
    video_file = args.video_file
    start_frame = args.start_frame

    # generate coordinate based data map.
    if image_file is not None and data_generate_file is not None:
        with open(data_generate_file, "w+") as points:
            generator = CoordinatesGenerator(image_file, points, COLOR_RED)
            generator.generate()

    # read from coordinate data map
    if data_file is not None and video_file is not None:
        with open(data_file, "r") as data:
            #points = yaml.load(data)
            points = yaml.safe_load(data)
            detector = MotionDetector(video_file, points, int(start_frame))
            detector.detect_motion()
Example #16
    def __init__(self):
        #observer = Observable()
        #motion_detector = MotionObserver('Motion Detector')
        #observer.register(motion_detector)
        self.camera = cv2.VideoCapture(0)
        time.sleep(0.5)
        self.motion_detector = MotionDetector()
        self.face_detector = FaceDetector()
        self.face_recognizer = FaceRecognizer()
        self.detectingstate = DetectingMotion(self)
        self.scanningstate = Scanning(self)

        self.facestate = FacialRecognition(self)
        self.greetingstate = GreetRoommate(self)
        self.waitingstate = WaitingForTask(self)
        self.servingstate = Serving(self)
        self.state = self.detectingstate
Example #17
class Controller:
    def __init__(self, points, previousTime, currentTime, waterLevelSlots):

        self.points = points
        self.previousTime = previousTime
        self.currentTime = currentTime
        self.waterLevelSlots = waterLevelSlots
        self.detector = MotionDetector(self.points, 1, self.waterLevelSlots)

    def runController(self):

        workingDirectory = os.path.join(os.getcwd(), 'captured_videos')
        hasIncomingVideoCaptureChanged = False
        try:

            videoCaptureDirectory = max(
                [os.path.join(workingDirectory, d)
                 for d in os.listdir(workingDirectory)],
                key=os.path.getmtime)

            self.currentTime = time.ctime(
                os.path.getmtime(videoCaptureDirectory))

        except FileNotFoundError:
            pass

        if self.currentTime == self.previousTime:
            hasIncomingVideoCaptureChanged = False
            return None, None

        hasIncomingVideoCaptureChanged = True
        self.previousTime = self.currentTime

        videoFilePath = os.path.join(videoCaptureDirectory, 'output.mp4')

        return self.detector.detect_motion(
            videoFilePath,
            hasIncomingVideoCaptureChanged), self.waterLevelSlots
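
A minimal sketch of how this Controller might be polled (the one-second cadence is an assumption, and points and waterLevelSlots would come from the project's other modules):

import time

controller = Controller(points, None, None, waterLevelSlots)
while True:
    result, slots = controller.runController()
    if result is not None:  # a new capture directory appeared
        print(result, slots)
    time.sleep(1)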
Example #18
File: main.py Project: timmygee/ispypi
#!/usr/bin/env python3

from motion_detector import MotionDetector
from still_camera import StillCamera
from gifbox_uploader import GifBoxUploader

IMAGE_FILE_PATH = 'capture.jpg'

detector = MotionDetector()
camera = StillCamera(default_file_path=IMAGE_FILE_PATH,
                     resolution=(640, 480),
                     rotation=180)
uploader = GifBoxUploader()

print('Checking for motion...')

while True:
    if detector.motion_detected():
        # Take a high res photo and save it
        camera.shoot()
        # Upload the file. You could do anything you want here with the
        # image file. I upload it to my custom online image repository
        uploader.upload(IMAGE_FILE_PATH)
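
The motion_detected() poll suggests a PIR-style sensor behind this MotionDetector; a minimal sketch of such a class (the pin number and wiring are assumptions, not the project's code):

import RPi.GPIO as GPIO

class MotionDetector:
    def __init__(self, pir_pin=4):
        self.pir_pin = pir_pin
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.pir_pin, GPIO.IN)

    def motion_detected(self):
        # a high level on the PIR pin means motion
        return GPIO.input(self.pir_pin) == GPIO.HIGH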
Example #19
	ap = argparse.ArgumentParser(
		formatter_class=lambda prog: argparse.ArgumentDefaultsHelpFormatter(
			prog, max_help_position=80, width=150))

	ap.add_argument("-v", "--video", default=None, help="path to the video file. leave empty for live feed")
	ap.add_argument("-x", "--capture-center-x", type=int, default=None, help="x coordinate - center of capture square")
	ap.add_argument("-y", "--capture-center-y", type=int, default=None, help="y coordinate - center of capture square")
	ap.add_argument("--triggered-area-percent", type=float, default=DEFAULT_TRIGGERED_AREA_PERCENT, help="minimum percentage of captured square to trigger motion detection")
	ap.add_argument("--capture-square-side", type=int, default=DEFAULT_CAPTURE_RECT_SIDE, help="side length of the capture square (area will be side*side)")
	ap.add_argument("--frames-to-trigger", type=int, default=DEFAULT_FRAMES_TO_TRIGGER, help="Number of frames motion is detected in before camera capture is triggered")
	ap.add_argument("--retrigger-interval", type=int, default=DEFFAULT_RETRIGGER_INTERVAL_SEC, help="Seconds to trigger another capture if detection is continous")
	ap.add_argument("--capture-target", type=int, default=DEFAULT_CAPTURE_TARGET, help="Location of photos saved on camera. 0=internal memory (faster), 1=SD Card")
	ap.add_argument("--frame-resize", type=int, default=DEFAULT_FRAME_RESIZE, help="resize live feed camera. None is not to resize")
	ap.add_argument("--download-photo-folder", type=str, default=DEFAULT_DOWNLOAD_PHOTO_FOLDER, help="Location of downloaded photos from camera")
	ap.add_argument("--autofocus-before-trigger",default=DEFAULT_AUTOFOCUS_BEFORE_TRIGGER, action="store_false", help="trigger camera's autofocus before capturign an image")
	ap.add_argument("--ui-port",type=int, default=DEFAULT_UI_PORT, help="UI web server listening port")
	

	args = vars(ap.parse_args())
	print (args)
	port = args.pop('ui_port')
	# loop over the frames of the video
	with MotionDetector(**args) as md:
		flask_app.md = md
		thread = threading.Thread(target = md.stream, args = (), daemon=True)
		thread.start()
		print ("running flask")
		ip = get_outbound_ip()
		flask_app.run(host=ip, port=port, debug=False, threaded=True, use_reloader=False)
		
Example #20
#!/usr/bin/python3

import time
import RPi.GPIO as GPIO
import threading
from config import Configuration
from motion_detector import MotionDetector
from event_thread import EventThread

CONFIG_DATA = Configuration().get_config_data()
LOCK = threading.Lock()
MOTION_DETECTOR = MotionDetector(CONFIG_DATA, LOCK)


def execute_event_thread(channel):
    del channel
    _event_thread = EventThread(MOTION_DETECTOR.motion_detection_event)


GPIO.setmode(GPIO.BOARD)
GPIO_PIR = CONFIG_DATA['motion_input_pin']
GPIO.setup(GPIO_PIR, GPIO.IN)
GPIO.add_event_detect(
    GPIO_PIR,
    GPIO.BOTH,
    callback=execute_event_thread,
    bouncetime=250)

try:
    while True:
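
The snippet is cut off inside the final try block; a typical completion for this callback-driven GPIO pattern would idle the main thread and clean up on exit (an assumed reconstruction, not recovered from the source):

try:
    while True:
        time.sleep(1)           # the callback registered above does the real work
except KeyboardInterrupt:
    GPIO.cleanup()              # release the PIR pin on Ctrl-C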
Example #21
from motion_detector import MotionDetector
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
import threading
import time
import traceback

# SimpleWebSocketServer: https://github.com/dpallot/simple-websocket-server
# based on https://www.smashingmagazine.com/2016/02/simple-augmented-reality-with-opencv-a-three-js/#3-websockets-in-both-front-end-and-back-end

client = None
server = None
motion_detector = MotionDetector(None)


class MotionReporterSocket(WebSocket):
    def handleConnected(self):
        motion_detector.set_socket(self)
        global client
        client = self
        print('client connected')

    def handleClose(self):
        motion_detector.clear_socket()
        global client
        client = None
        print('client closed')


def run_server():
    global server
    # empirically determined by measuring the webcam's output
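
run_server() is likewise truncated; given the SimpleWebSocketServer API linked above, its body presumably resembles the following (the host and port are placeholders, not the project's values):

def run_server():
    global server
    server = SimpleWebSocketServer('', 8000, MotionReporterSocket)
    server.serveforever()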
Example #22
def main():
    import sys

    if path.exists('data/coordinates_1.yml'):
        os.remove('data/coordinates_1.yml')

    points = None
    carPark = None

    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 1
    cap = video.create_capture(fn)

    def fetchShortIntervalVideos(ctrl, motion_detector, coordinates_data,
                                 times, statuses, lock):
        with lock:
            videoFilePath, hasIncomingVideoCaptureChanged = ctrl.getVideoFilePath()
            return videoFilePath, hasIncomingVideoCaptureChanged, motion_detector, coordinates_data, times, statuses

    threadn = cv.getNumberOfCPUs()
    pending = deque()
    lock = Lock()
    pool = ThreadPool(processes=threadn, initializer=init_child, initargs=(lock,))

    threaded_mode = True
    ctrl = None
    motionDetector = None

    lastsave = 0
    coordinates_data = None
    times = None
    statuses = None

    pointsCaptured = False
    while True:
        with lock:
            while len(pending) > 1 and pending[0].ready() and pending[1].ready():
                payload = pending.popleft().get()
                if len(payload) == 6:
                    videoFilePath, hasIncomingVideoCaptureChanged, motion_detector, coordinates_data, times, statuses = payload
                    if videoFilePath is None and hasIncomingVideoCaptureChanged is None:
                        break
                    else:

                        if hasIncomingVideoCaptureChanged:
                            capture = cv.VideoCapture(videoFilePath)
                            while capture.isOpened():
                                result, frame = capture.read()
                                if not result:
                                    capture.release()
                                    continue
                                else:
                                    res, evaluated_carPark = motion_detector.process_algo_per_frame(
                                        frame, capture, coordinates_data, times, statuses)
                                    draw_str(res, (5, 20),
                                             CarParkData.TOTAL_NUMBER_OF_SLOTS +
                                             str(evaluated_carPark.get_total_car_park_slots()))
                                    draw_str(res, (5, 40),
                                             CarParkData.NUMBER_OF_CARPARK_SLOTS_AVAILABLE +
                                             str(evaluated_carPark.get_available_carpark_slots()))

                                    if carPark.is_carpark_full():
                                        draw_str_red(res, (5, 60),
                                                     CarParkData.CARPARK_FULL_MESSAGE)
                                    else:
                                        draw_str_green(res, (5, 60),
                                                       CarParkData.CARPARK_AVAILABLE_MESSAGE)

                                    #latest_modified_date_time = datetime.now()
                                    #draw_str(res, (440,20), latest_modified_date_time.strftime('%d-%m-%Y %H:%M:%S '))

                                    json = '['
                                    for carpark_slot in evaluated_carPark.get_carpark_slots():
                                        json = json + carpark_slot.toJSON() + ','
                                    json = json[:-1] + ']'
                                    json = json.replace('\n', '')
                                    json = json.replace('\t', '')
                                    json = json.replace('\r', '')
                                    json = json.replace("\'", '')
                                    json = json.replace('    ', '')
                                    #retval, buffer = cv.imencode('.jpg', res)
                                    #jpg_as_text = base64.b64encode(buffer)

                                    if time.time() - lastsave > 1:
                                        lastsave = time.time()
                                        persistSmartParkRealTime(
                                            None, None,
                                            evaluated_carPark.get_total_car_park_slots(),
                                            evaluated_carPark.get_available_carpark_slots(),
                                            json, None, None)

                                    cv.namedWindow('smart-parking', cv.WINDOW_NORMAL)
                                    cv.setWindowProperty('smart-parking', 0, 1)
                                    cv.imshow('smart-parking', res)

        if len(pending) < threadn:

            if not pointsCaptured:
                _ret, frame = cap.read()
                points = getPoints(frame, points)
                carPark = CarParkData('SmartCarPark', len(points))
                ctrl = Controller(points, None, None)
                motionDetector = MotionDetector(points, 1, carPark)
                coordinates_data, times, statuses = motionDetector.detect_motion_activity()
                pointsCaptured = True

            if threaded_mode:
                task_put_videos = pool.apply_async(captureShortIntervalVideos, (cap, lock))
                task_get_videos = pool.apply_async(
                    fetchShortIntervalVideos,
                    (ctrl, motionDetector, coordinates_data, times, statuses, lock))

            pending.append(task_put_videos)
            pending.append(task_get_videos)

        ch = cv.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break

    print('Done')
    cap.release()
Example #23
from motion_detector import MotionDetector

md = MotionDetector()
md.main()
Example #24
def main():
    import sys
    
    if path.exists('data/coordinates_1.yml'):
        os.remove('data/coordinates_1.yml')
        
    points = None
    waterLevelSlots = None
    
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0
    cap = video.create_capture(fn)

    def fetchShortIntervalVideos(ctrl, motion_detector, lock):
        with lock:
            videoFilePath, hasIncomingVideoCaptureChanged = ctrl.getVideoFilePath()
            return videoFilePath, hasIncomingVideoCaptureChanged, motion_detector
    
    threadn = cv.getNumberOfCPUs()
    pending = deque()
    lock = Lock()
    pool = ThreadPool(processes=threadn, initializer=init_child, initargs=(lock,))
    
    threaded_mode = True
    ctrl = None
    motionDetector = None
    
    screenWidth = int(round(GetSystemMetrics(0) / 2))
    screenHeight = int(round(GetSystemMetrics(1) / 2))
    
    
    coordinates_data = None
    times = None 
    statuses = None
    
    pointsCaptured = False
    while True:
        with lock:
            while len(pending) > 1 and pending[0].ready() and pending[1].ready():
                payload = pending.popleft().get()
                if len(payload) == 3:
                    videoFilePath, hasIncomingVideoCaptureChanged, motion_detector = payload
                    if videoFilePath is None and hasIncomingVideoCaptureChanged is None:
                        break
                    else:
                        capture, coordinates_data, times, statuses = motion_detector.detect_motion_activity(videoFilePath, hasIncomingVideoCaptureChanged)
                        while capture.isOpened():
                            result, frame = capture.read()
                            if not result:
                                capture.release()
                                continue
                            else:
                                res, evaluated_areas = motion_detector.process_algo_per_frame(frame, capture, coordinates_data, times, statuses)
                                
                                #draw_str(res, (5, 20), WaterLevelSlots.LEVEL_REACHED_MSG
                                        # + str(evaluated_waterLevelSlots.get_current_water_level()))
                                
                                #cv.namedWindow('flood-detection', cv.WINDOW_NORMAL)
                                #cv.setWindowProperty('flood-detection', 0, 1)
                                #cv.imshow('flood-detection', res)
                                
                                cv.namedWindow('OOW 2020 SMART CITY USE CASE - "SMART PEDESTRIAN CROSSING"', cv.WINDOW_NORMAL)
                                #cv.setWindowProperty('smart-parking', 0, 1)
                                #print(screenWidth)
                                #print(screenHeight)
                                resize = ResizeWithAspectRatio(res, width=screenWidth, height=screenHeight) 
                                cv.imshow('OOW 2020 SMART CITY USE CASE - "SMART PEDESTRIAN CROSSING"', resize)
                         
        if len(pending) < threadn:
            
            if not pointsCaptured:
                _ret, frame = cap.read()
                points = getPoints(frame, points)
                area = Area()
                ctrl = Controller(points, None, None)
                motionDetector = MotionDetector(points, 1, area)
                pointsCaptured = True
           
            if threaded_mode:
                task_put_videos = pool.apply_async(captureShortIntervalVideos, (cap, lock))
                task_get_videos = pool.apply_async(fetchShortIntervalVideos, (ctrl, motionDetector, lock))
                
            
            pending.append(task_put_videos)
            pending.append(task_get_videos)
        
        ch = cv.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break

    print('Done')
    cap.release()
Example #25
            web_server.frame_available(mjpg_frame)
            #new_frame_queue.put(frame.copy())
        except Exception:
            logging.exception("Unknown failure processing frame.")

    # Clean up
    video.release()
    #pool.close()
    #pool.join()


if __name__ == '__main__':
    object_detector = ObjectDetector(PATH_TO_CKPT, PATH_TO_LABELS, USE_CORAL,
                                     USE_TPU, 1296, 730, MIN_CONF_THRESHOLD)
    web_server = WebServer(WEB_PORT)

    #manager = Manager()
    #new_frame_queue = manager.Queue()
    motion_detector = MotionDetector(0, 1296, 0, 730, 1296, 730)
    web_server.start()

    while True:
        try:
            start_video()
        except Exception:
            logging.exception("Failure processing video.")
        time.sleep(120)

    #pool = Pool(1)
    #pool.apply_async(motion_detector.start_processing, args=(new_frame_queue, ), callback=motion_detector_started)
Example #26
from gesture_detector import GestureDetector
from motion_detector import MotionDetector
import RPi.GPIO as GPIO
import numpy as np
import imutils
import time
import cv2

# declare the camera
camera = cv2.VideoCapture(0)
# declare the ROI (Region of Interest)
(top, right, bot, left) = np.int32(("100, 300, 375, 580").split(","))
# instantiate the classes defined earlier
gd = GestureDetector()
md = MotionDetector()
# declare the pins in use
fan_pin = 12
led_pin = [16, 18, 22]
buzzer_pin = 32
# set up GPIO for each pin
GPIO.setmode(GPIO.BOARD)
GPIO.setup(fan_pin, GPIO.OUT)
GPIO.setup(led_pin[0], GPIO.OUT)
GPIO.setup(led_pin[1], GPIO.OUT)
GPIO.setup(led_pin[2], GPIO.OUT)
GPIO.setup(buzzer_pin, GPIO.OUT)
# configure the PWM pins (fan and buzzer)
pwm = GPIO.PWM(fan_pin, 100)
buzzer_pwm = GPIO.PWM(buzzer_pin, 100)
# start the fan and buzzer at zero output
pwm.start(0)
Example #27
import datetime
import cv2

SHOW_GUI = True

left_offsetX = 900
right_offsetX = 1600
up_offsetY = 550
down_offsetY = 1350

# Start videostream, 0 for webcam, 1 for rtsp
frame_grabber = FrameGrabber(1)
frame_grabber.start()

# Initialize motion detector
motion_detector = MotionDetector()
num_frame_read = 0  # no. of frames read

# Initialize face detector
face_detector = FaceDetector()

# FPS calculation
fps = FPS().start()

print("[INFO] Start collecting face images.")

while True:
    # grab frame
    frame = frame_grabber.read()
    frame_show = frame.copy()
    frame_roi = frame[up_offsetY:down_offsetY, left_offsetX:right_offsetX]
Example #28
def main():
    import sys
    
    if path.exists('data/coordinates_1.yml'):
        os.remove('data/coordinates_1.yml')
        
    points = None
    waterLevelSlots = None
    
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0
    cap = video.create_capture(fn)

    def fetchShortIntervalVideos(ctrl, motion_detector, coordinates_data, times, statuses, lock):
        with lock:
            videoFilePath, hasIncomingVideoCaptureChanged = ctrl.getVideoFilePath()
            return videoFilePath, hasIncomingVideoCaptureChanged, motion_detector, coordinates_data, times, statuses
    
    threadn = cv.getNumberOfCPUs()
    pending = deque()
    lock = Lock()
    pool = ThreadPool(processes=threadn, initializer=init_child, initargs=(lock,))
    
    threaded_mode = True
    ctrl = None
    motionDetector = None
    
    lastsave = 0
    coordinates_data = None
    times = None 
    statuses = None
    
    pointsCaptured = False
    while True:
        with lock:
            while len(pending) > 1 and pending[0].ready() and pending[1].ready():
                payload = pending.popleft().get()
                if len(payload) == 6:
                    videoFilePath, hasIncomingVideoCaptureChanged, motion_detector, coordinates_data, times, statuses = payload
                    if videoFilePath is None and hasIncomingVideoCaptureChanged is None:
                        break
                    else:
                    
                        if hasIncomingVideoCaptureChanged:
                            capture = cv.VideoCapture(videoFilePath)
                            while capture.isOpened():
                                result, frame = capture.read()
                                if not result:
                                    capture.release()
                                    continue
                                else:
                                    res, evaluated_waterLevelSlots = motion_detector.process_algo_per_frame(
                                        frame, capture, coordinates_data, times, statuses)

                                    draw_str(res, (5, 20), WaterLevelSlots.LEVEL_REACHED_MSG
                                             + str(evaluated_waterLevelSlots.get_current_water_level()))

                                    if time.time() - lastsave > 1:
                                        lastsave = time.time()
                                        persistWaterLevelData(evaluated_waterLevelSlots.get_current_water_level())

                                    # shown inside the frame loop so that `res` is always bound
                                    cv.namedWindow('flood-detection', cv.WINDOW_NORMAL)
                                    cv.setWindowProperty('flood-detection', 0, 1)
                                    cv.imshow('flood-detection', res)
                         
        if len(pending) < threadn:
            
            if not pointsCaptured:
                _ret, frame = cap.read()
                points = getPoints(frame, points)
                waterLevelSlots = WaterLevelSlots(len(points))
                ctrl = Controller(points, None, None)
                motionDetector = MotionDetector(points, 1, waterLevelSlots)
                coordinates_data, times, statuses = motionDetector.detect_motion_activity()
                pointsCaptured = True
           
            if threaded_mode:
                task_put_videos = pool.apply_async(captureShortIntervalVideos, (cap, lock))
                task_get_videos = pool.apply_async(fetchShortIntervalVideos,
                                                   (ctrl, motionDetector, coordinates_data, times, statuses, lock))
                
            
            pending.append(task_put_videos)
            pending.append(task_get_videos)
        
        ch = cv.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break

    print('Done')
    cap.release()
Example #29
def test_motion():
    m = MotionDetector()
    while not m.motion_detected:
        pass
    assert True
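
As written, this test can only pass or hang. A timeout-based variant fails cleanly when no motion arrives (a sketch assuming the same motion_detected attribute; the interval values are arbitrary):

import time

def test_motion(timeout=10.0):
    m = MotionDetector()
    deadline = time.time() + timeout
    while not m.motion_detected:
        assert time.time() < deadline, "no motion detected within timeout"
        time.sleep(0.05)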
Example #30
import argparse
from multiprocessing import Queue

from motion_detector import MotionDetector
from object_detector import ObjectDetectorRunner
from video_reader import VideoStream

ap = argparse.ArgumentParser()
ap.add_argument('-v', '--video', help='path to the video file')
args = vars(ap.parse_args())

vs = VideoStream(args['video'] or 0).start()

need_to_detect = Queue(maxsize=100)

t = ObjectDetectorRunner(q=need_to_detect)
t.start()

MotionDetector(vs=vs, frame_queue_size=6, detect_queue=need_to_detect).start()
Example #31
    print ("subtracting from pause")
    if pause > 15:
        pause -= 4
    else:
        pause = pause - 1
    if (pause < 1):
        pause = 1
    print ("new pause: " + str(pause))
    global detection_timestamp
    detection_timestamp = time.time()
    print (" time stamp: " + str(detection_timestamp))


pause = 1
detection_timestamp = 0
detector = MotionDetector(detector_callback)
queue = Queue()
blinker = Blinker()


class EventManager:
    file_folders = ["a", "b", "c", "d"]
    file_manager = FileManager()

    def start(self):

        global detection_timestamp
        detection_timestamp = time.time()
        global queue
        player_threads = []