# Example 1
# 0
def filter_fun(b):
    """Return True when box ``b`` = (x1, y1, x2, y2) covers more than 1000 px."""
    width = b[2] - b[0]
    height = b[3] - b[1]
    return width * height > 1000


# Script-level state for the capture/recording loop (the loop itself is not
# visible in this chunk, so the exact roles below are unconfirmed).
t = 0            # NOTE(review): presumably a frame timestamp — confirm against the loop body
saved_time = 0   # NOTE(review): presumably the last time a frame was persisted — confirm
lock1 = 0        # NOTE(review): looks like a manual flag, not a real threading lock — confirm

if __name__ == "__main__":

    # Capture from the default webcam (device index 0). cv2, MotionDetector
    # and datetime are imported elsewhere in this file.
    cap = cv2.VideoCapture(0)
    #cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    # Motion detector: background subtraction runs on frames scaled to 10%.
    detector = MotionDetector(bg_history=20,
                              brightness_discard_level=25,
                              bg_subs_scale_percent=0.1,
                              group_boxes=True,
                              expansion_step=5)
    now = datetime.now()
    # XVID-encoded backup recording named after the current timestamp.
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # The adjacent string literals concatenate to the format "%d.%m.%Y_%X".
    # NOTE(review): "%X" expands to HH:MM:SS — the colons make this filename
    # invalid on Windows; confirm this script only targets POSIX systems.
    output = "backup/backup_" + now.strftime("%d"
                                             "."
                                             "%m"
                                             "."
                                             "%Y"
                                             "_"
                                             "%X") + ".avi"
    print(output)
    # Writer at 20 FPS, 640x480; frames written later must match this size.
    out = cv2.VideoWriter(output, fourcc, 20.0, (640, 480))
    # group_boxes=True can be used if one wants to get less boxes, which include all overlapping boxes

@jit(nopython=True)
def filter_fun(b):
    """Area filter: keep boxes (x1, y1, x2, y2) larger than 1000 px."""
    w = b[2] - b[0]
    h = b[3] - b[1]
    return w * h > 1000


if __name__ == "__main__":

    # Process a local test clip instead of a live camera.
    cap = cv2.VideoCapture('tmp/helmets-v1-55.mp4')

    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    # Background subtraction at 25% scale; boxes reported individually
    # (group_boxes=False).
    detector = MotionDetector(bg_history=15,
                              bg_subs_scale_percent=0.25,
                              group_boxes=False,
                              expansion_step=3)

    # group_boxes=True can be used if one wants to get less boxes, which include all overlapping boxes

    # NOTE(review): b_height/b_width are not used in the visible lines —
    # presumably crop/tile dimensions used in the truncated loop body; confirm.
    b_height = 512
    b_width = 512

    res = []
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # A None frame signals end-of-stream.
        if frame is None:
            break
        begin = time()  # per-frame timing start (time() imported elsewhere)
        boxes = detector.detect(frame)
        # NOTE(review): the loop body continues beyond this chunk — truncated.
# Example 3
# 0
# Earlier Mask R-CNN configuration, kept for reference:
#configPath = "rcnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt"
#labels1Path = "rcnn/object_detection_classes_coco.txt"
#labels2Path = "rcnn/object_detection_classes_coco_ru.txt"

# MobileNet-SSD Caffe model files plus class-label lists.
weightsPath = "mobilenet/MobileNetSSD_deploy.caffemodel"
configPath = "mobilenet/MobileNetSSD_deploy.prototxt.txt"
labels1Path = "mobilenet/object_detection_classes.txt"
labels2Path = "mobilenet/object_detection_classes.txt"

# One label per line; labels_ru mirrors labels because both paths point at
# the same file in this configuration.
labels = open(labels1Path).read().strip().split("\n")
labels_ru = open(labels2Path).read().strip().split("\n")

# Grab one frame up front so the motion detector can initialize from it.
vs = cv2.VideoCapture(0)
(grabbed, frame) = vs.read()

motion_detector = md.MotionDetector(frame)
#object_detector = tod.TensorObjectDetector(weightsPath, configPath)
object_detector = cod.CaffeObjectDetector(weightsPath, configPath)
processor = fp.FrameProcessor(motion_detector, object_detector, labels)

cv2.namedWindow("out_img", cv2.WINDOW_NORMAL)

# Background worker pushing results to an MQTT broker.
# NOTE(review): data_to_publish and publish are not defined anywhere in this
# chunk — confirm they exist at module level. The broker address/credentials
# look like placeholders and should come from configuration, not source.
publisher_worker = publisher.PublisherThread(p.MqttPublisher("1.2.3.4", 1883, "user", "password"),
                                             data_to_publish, publish, 10)
publisher_worker.start()

# Main capture/processing loop (body truncated in this chunk).
while True:
    (grabbed, img) = vs.read()
    #img = cv2.resize(frame, (0, 0), fx=0.8, fy=0.8)
    (out_img, out_labels_idxs) = processor.process(img)
# Example 4
# 0
@jit(nopython=True)
def filter_fun(b):
    """Area filter: keep boxes (x1, y1, x2, y2) larger than 300 px."""
    box_w = b[2] - b[0]
    box_h = b[3] - b[1]
    return box_w * box_h > 300


if __name__ == "__main__":

    # Local test clip; cv2 and MotionDetector are imported elsewhere.
    cap = cv2.VideoCapture('tmp/helmets-v1-55.mp4')
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    # Detector configuration for this run; note the very small
    # pixel_compression_ratio (0.1) and bg_subs_scale_percent (0.2).
    detector = MotionDetector(bg_history=10,
                              bg_skip_frames=1,
                              movement_frames_history=2,
                              brightness_discard_level=5,
                              bg_subs_scale_percent=0.2,
                              pixel_compression_ratio=0.1,
                              group_boxes=True,
                              expansion_step=5)

    # group_boxes=True can be used if one wants to get less boxes, which include all overlapping boxes

    # NOTE(review): b_height/b_width/fc/ctr are unused in the visible lines —
    # presumably populated by the truncated loop body below; confirm.
    b_height = 320
    b_width = 320

    res = []
    fc = dict()
    ctr = 0
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # NOTE(review): loop body truncated in this chunk — continues elsewhere.
import cv2
from imutils.video import VideoStream
from detector import MotionDetector

if __name__ == "__main__":

    # cap = cv2.VideoCapture("PATH_TO_VIDEO")
    # Threaded webcam capture via imutils' VideoStream (device 0).
    cap = VideoStream(src=0).start()

    # pixel_compression_ratio=0.4: detection works in a downscaled coordinate
    # space, so boxes are scaled back up for drawing (see loop below).
    detector = MotionDetector(bg_history=20,
                              group_boxes=True,
                              expansion_step=5,
                              pixel_compression_ratio=0.4,
                              min_area=500)

    while True:
        # Capture frame-by-frame
        frame = cap.read()

        # VideoStream.read() yields None when no frame is available.
        if frame is None:
            break
        boxes = detector.detect(frame)

        # Map each box from the compressed coordinate space back to the
        # full-resolution frame before drawing.
        # NOTE(review): boxes appear to be (x, y, w, h) here — the rectangle
        # is drawn from (x, y) to (x + w, y + h); confirm against
        # MotionDetector.detect, since other examples treat them as corners.
        scale = detector.pixel_compression_ratio
        for b in boxes:
            cv2.rectangle(frame, (int(b[0] / scale), int(b[1] / scale)),
                          (int(b[0] / scale) + int(b[2] / scale),
                           int(b[1] / scale) + int(b[3] / scale)), (0, 255, 0),
                          1)

        cv2.imshow('last_frame', frame)
        # NOTE(review): no cv2.waitKey() visible — the window will not refresh
        # without it; this loop is likely truncated in this chunk.
# Example 6
# 0
def get_frame_processor(net_config, frame, labels):
    """Wire up a FrameProcessor from a net config, a seed frame and labels.

    ``net_config`` must provide "model" and "config" file paths for the
    Caffe object detector; ``frame`` initializes the motion detector.
    """
    detector_for_motion = md.MotionDetector(frame)
    detector_for_objects = cod.CaffeObjectDetector(net_config["model"],
                                                   net_config["config"])
    return fp.FrameProcessor(detector_for_motion, detector_for_objects, labels)
# Example 7
# 0

@jit(nopython=True)
def filter_fun(b):
    """Keep only boxes (x1, y1, x2, y2) whose area exceeds 1000 px."""
    dx = b[2] - b[0]
    dy = b[3] - b[1]
    return dx * dy > 1000


if __name__ == "__main__":

    # Process a local 4K sample clip; cv2/MotionDetector imported elsewhere.
    cap = cv2.VideoCapture('tmp/sample-Elysium.2013.2160p.mkv')

    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    detector = MotionDetector(bg_history=20,
                              group_boxes=True,
                              expansion_step=5)

    # group_boxes=True can be used if one wants to get less boxes, which include all overlapping boxes

    # NOTE(review): b_height/b_width are unused in the visible lines —
    # presumably tile dimensions used in the truncated loop body; confirm.
    b_height = 512
    b_width = 512

    res = []
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # A None frame signals end-of-stream.
        if frame is None:
            break
        begin = time()  # per-frame timing start (time() imported elsewhere)
        boxes = detector.detect(frame)
        # NOTE(review): loop body continues beyond the end of this chunk.