Example #1
class VideoSource (object):
    def __init__ (self, video_file, log, use_pi_camera = True, resolution=(320, 200), framerate = 30, night = False):
        self.filename = video_file
        self.log = log
        self.use_pi_camera  = use_pi_camera
        self.resolution = resolution
        self.framerate = framerate
        self.night = night
        self.fvs = None
        self.stream = None
        self._done = False

    def start (self):
        if self.filename is not None:
            self.log.debug('Video file: %s', self.filename)
            self.fvs = FileVideoStream(self.filename).start()
            self.stream = self.fvs.stream
            time.sleep(1.0)
        else: 
            if self.use_pi_camera:
                self.log.debug('Pi Camera (%d %d)', self.resolution[0], self.resolution[1])
                self.stream = VideoStream(src=0,
                    usePiCamera=True,                    
                    resolution=self.resolution,
                    framerate=self.framerate,
                    sensor_mode=5
                    ).start()

            else:
                self.log.debug('Web Camera')
                self.stream = cv2.VideoCapture(0)
                self.stream.set(cv2.CAP_PROP_BUFFERSIZE, 2)
                self.resolution = (
                    int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
                )
                self.framerate = self.stream.get(cv2.CAP_PROP_FPS)

        return self.resolution, self.framerate

    def read(self):
        if self.filename:
            frame = self.fvs.read()
        else:
            frame = self.stream.read()
        return frame

    def stop (self):
        if self.filename:
            self.fvs.stop()
        else:
            self.stream.stop()
        self._done = True

    def done(self):
        # check to see if the video is still running
        if self.filename:
            running = self.fvs.running()
        else:
            running = not self._done
        self._done = not running
        return self._done
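A minimal driver for VideoSource might look like this (a sketch, assuming FileVideoStream/VideoStream come from imutils.video and that a standard logging logger is used; the file name is a placeholder):

import logging

log = logging.getLogger("video")
source = VideoSource("clip.mp4", log, use_pi_camera=False)
resolution, framerate = source.start()
while not source.done():
    frame = source.read()
    if frame is None:
        break
    # ... process the frame ...
source.stop()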
class CameraComs:

    logger = Logger()

    def __init__(self):
        print(self.get_connected_camera_list())
        self.Settings = settings()
        source = int(self.Settings.find_setting_by_name('camera_src'))

        # SRC in settings
        self.cap = VideoStream(src=source).start()

    def get_connected_camera_list(self):
        # probe the first few device indexes, releasing each capture afterwards
        cams_test = 10
        camera_list = []
        for i in range(cams_test):
            cap = cv2.VideoCapture(i)
            test, frame = cap.read()
            camera_list.append(test)
            cap.release()
        return camera_list

    def change_camera_source(self, source_idx):
        # stop the old stream first so the new source can grab the device
        self.cap.stop()
        self.cap = VideoStream(src=source_idx).start()

    def get_frame(self):
        frame = self.cap.read()
        return frame

    def get_frame_grey(self):
        gray = cv2.cvtColor(self.get_frame(), cv2.COLOR_BGR2GRAY)
        return gray

    def destruct(self):
        cv2.destroyAllWindows()
        self.logger.debug('Camera Communication module destructed')

    def get_width(self):
        # VideoStream has no get(); query the wrapped cv2.VideoCapture
        return self.cap.stream.stream.get(cv2.CAP_PROP_FRAME_WIDTH)

    def get_height(self):
        return self.cap.stream.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)

    def __del__(self):
        self.cap.stop()
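A minimal usage sketch (hedged: settings() and Logger() must be importable from the surrounding project):

coms = CameraComs()                  # opens the camera index stored in settings
print(coms.get_width(), coms.get_height())
frame = coms.get_frame()             # BGR frame from the VideoStream
gray = coms.get_frame_grey()         # grayscale copy
coms.destruct()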
def getVideoCaptureObject(arguments=None,
                          video_stream_source=0,
                          video_stream_fps=30):
    '''Create an object to capture a video clip or stream'''
    # guard against arguments being None (its default) before calling .get()
    if arguments is None or arguments.get('video') is None:
        video_object = VideoStream(src=video_stream_source).start()
        fps = video_stream_fps
    else:
        video_object = cv2.VideoCapture(arguments['video'])
        fps = video_object.get(cv2.CAP_PROP_FPS)

    return video_object, fps
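Hypothetical calls (the dictionary key matches the function's own lookup; the file name is a placeholder):

vs, fps = getVideoCaptureObject({'video': None})           # webcam branch
cap, fps = getVideoCaptureObject({'video': 'clip.mp4'})    # file branch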
Example #4
    def initialize_stream(video_path):
        # if a video path was not supplied, grab a reference to the webcam
        if not video_path:
            print("[INFO] starting video stream...")
            vs = VideoStream(src=0).start()
            # VideoStream has no get(); query the wrapped cv2.VideoCapture
            fps = vs.stream.stream.get(cv2.CAP_PROP_FPS)
            time.sleep(2.0)
        # otherwise, grab a reference to the video file
        else:
            print("[INFO] opening video file...")
            vs = cv2.VideoCapture(video_path)
            fps = vs.get(cv2.CAP_PROP_FPS)

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        W = None
        H = None

        # initialize the video writer (we'll instantiate later if need be)
        writer = None

        return vs, fps, W, H, writer
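A sketch of how this helper would be called (the file name is a placeholder):

vs, fps, W, H, writer = initialize_stream(None)          # webcam
vs, fps, W, H, writer = initialize_stream("clip.mp4")    # video file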
def loadStream(res: tuple=(640,480), fps: int=30, loadVideo: Optional[str]=None, startFrame: Optional[int]=None) -> GameViewSource:
    # Load from video or from webcam
    if not loadVideo:
        stream = VideoStream(src=0, resolution=res, framerate=fps).start()
        time.sleep(2)
        # VideoStream wraps WebcamVideoStream, whose .stream is the cv2.VideoCapture
        cap = stream.stream.stream
        fps = cap.get(cv2.CAP_PROP_FPS)
        res = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        gameViewSource = GameViewSource(stream, False, res)
    else:
        stream = cv2.VideoCapture(loadVideo)
        fps = stream.get(cv2.CAP_PROP_FPS)
        if startFrame:
            stream.set(cv2.CAP_PROP_POS_FRAMES, startFrame)
        res = (int(stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        gameViewSource = GameViewSource(stream, True, res)
    print("Stream:\n\tFPS - %d\n\tResolution: (%d, %d)" % (fps, res[0], res[1]))

    # Setup net
    netX = userSetupScene(gameViewSource)
    gameViewSource.setNetPos(netX)
    print("Got net x: %d" % netX)

    # View has been setup
    return gameViewSource
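A hypothetical invocation (file name and frame offset are placeholders):

gameView = loadStream(res=(1280, 720), fps=60, loadVideo="match.mp4", startFrame=120)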
Example #6
try:
    if not os.path.exists(folder_name):
        # os.path.exists('save'):
        # strftime("%Y%m%d", gmtime()) add data to name folder
        os.makedirs(folder_name)
        # create folder up, down, left, right
        os.makedirs("./" + folder_name + "/" + "up")
        os.makedirs("./" + folder_name + "/" + "down")
        os.makedirs("./" + folder_name + "/" + "left")
        os.makedirs("./" + folder_name + "/" + "right")
        #os.makedirs("./" + folder_name + "/" + "screenshots")
        # strftime("%Y%m%d", gmtime()) create save + date folder
except OSError:
    print('Error: Creating directory of save')
# get total number of frames
nr_of_frames = int(vs.get(cv2.CAP_PROP_FRAME_COUNT))
# function called by trackbar, sets the next frame to be read
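A sketch of the getFrame trackbar callback referenced below but not shown in this snippet; consistent with the comment above, it seeks the capture to the selected frame:

def getFrame(frame_nr):
    # trackbar callback: jump the capture to the requested frame
    vs.set(cv2.CAP_PROP_POS_FRAMES, frame_nr)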

# loop over frames from the video stream
image_num = 0

# set wait for each frame, determines playbackspeed
playSpeed = 26

# get write FPS
FPS = 10

# add trackbar
cv2.namedWindow("Frame")
# create Trackbar rewinding
cv2.createTrackbar("Frames", "Frame", 0, nr_of_frames, getFrame)
Example #7
ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size")
args = vars(ap.parse_args())

# define the HSV range for skin color
faceLower = (0, 50, 145)
faceUpper = (180, 115, 230)

# if no video path was supplied, capture live frames from the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()
    fps = 10
    size = (compressWidth, int(compressWidth / 16 * 9) - 1)
# otherwise, read the local video file
else:
    vs = cv.VideoCapture(args["video"])
    fps = vs.get(cv.CAP_PROP_FPS)
    size = (compressWidth,
            int(
                vs.get(cv.CAP_PROP_FRAME_HEIGHT) /
                vs.get(cv.CAP_PROP_FRAME_WIDTH) * compressWidth))

# set up the video writer / save parameters
fourcc = cv.VideoWriter_fourcc('M', 'P', '4', '2')
out = cv.VideoWriter('oops.avi', fourcc, fps, size)

# allow the camera or video file time to warm up
time.sleep(2.0)

# loop over frames and process each one
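The frame loop itself is cut off at this point; a minimal sketch consistent with the setup above (read from either source, resize to the writer's frame size, write it out) might be:

while True:
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    if frame is None:
        break
    frame = cv.resize(frame, size)       # match the writer's frame size
    out.write(frame)
    if cv.waitKey(1) & 0xFF == ord("q"):
        break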
Example #8
    def detect(self):
        # define the two output layer names for the EAST detector model that
        # we are interested in -- the first is the output probabilities and the
        # second can be used to derive the bounding box coordinates of text
        layerNames = [
            "feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"
        ]

        # load the pre-trained EAST text detector
        net = cv2.dnn.readNet(self.east)

        # if a video path was not supplied, grab the reference to the web cam
        if not self.videoFile:
            print("[INFO] starting video stream...")
            vs = VideoStream(src=0).start()
            time.sleep(1.0)
        # otherwise, grab a reference to the video file
        else:
            vs = cv2.VideoCapture(self.videoFile)

        # frame metadata (note: get() exists only on the VideoCapture/file branch)
        w = vs.get(cv2.CAP_PROP_FRAME_WIDTH)
        h = vs.get(cv2.CAP_PROP_FRAME_HEIGHT)

        reshape_ratio = np.array(
            (w / float(self.reshapeW), h / float(self.reshapeH)))

        self.fps = vs.get(cv2.CAP_PROP_FPS)
        self.frames = int(vs.get(cv2.CAP_PROP_FRAME_COUNT))
        self.spf = 1.0 / self.fps

        print(
            f"[INFO] fps={self.fps}, frames={self.frames}, spf={self.spf}(seconds per frame)"
        )

        fps = FPS().start()

        while True:
            starttime = time.time()
            # VideoStream.read() returns a frame; VideoCapture.read() returns (ret, frame)
            frame = vs.read()
            frame = frame[1] if self.videoFile else frame

            # check for the end of the stream before resizing (resize fails on None)
            if frame is None:
                break

            reshaped_frame = cv2.resize(frame, (self.reshapeW, self.reshapeH))

            # construct a blob from the image and then perform a forward pass of
            # the model to obtain the two output layer sets
            blob = cv2.dnn.blobFromImage(reshaped_frame,
                                         1.0, (self.reshapeW, self.reshapeH),
                                         (123.68, 116.78, 103.94),
                                         swapRB=True,
                                         crop=False)
            net.setInput(blob)
            (scores, geometry) = net.forward(layerNames)

            # decode the predictions, then  apply non-maxima suppression to
            # suppress weak, overlapping bounding boxes
            (polys, confidences) = self.decode_predictions(scores, geometry)

            indexes = nms.nms.polygons(polys,
                                       confidences,
                                       nms_threshold=self.nms_threshold)

            if indexes:
                self.nmspolys = np.array(
                    np.array(polys)[indexes] * reshape_ratio, np.int32)
                self.nmsscores = np.array(confidences)[indexes]

                cv2.polylines(frame, self.nmspolys, True, (0, 0, 255), 1)
                for i in range(len(self.nmsscores)):
                    poly = self.nmspolys[i]
                    cv2.putText(frame, f"{self.nmsscores[i]*100:.2f}%",
                                (poly[0, 0], poly[0, 1] + 18),
                                cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255), 1)

            cv2.imshow(self.winName, frame)
            key = cv2.waitKey(1) & 0xFF
            fps.update()
            elapsed = time.time() - starttime
            if self.frameskip:
                vs.set(cv2.CAP_PROP_POS_FRAMES,
                       vs.get(cv2.CAP_PROP_POS_FRAMES) + elapsed // self.spf)
            # if 'q' key pressed, break from the loop
            if key == ord("q"):
                break

        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        # if we are using a webcam, release the pointer
        if not self.videoFile:
            vs.stop()

        # otherwise, release the file pointer
        else:
            vs.release()

        # close all windows
        cv2.destroyAllWindows()
    "age_gender_models/deploy_gender.prototxt",
    "age_gender_models/gender_net.caffemodel")

# initialize the video stream and allow the camera sensor to warmup
print("[INFO] starting video stream...")
# if the video argument is None, then the code will read from webcam (work in progress)
if args.get("video", None) is None:
    cap = VideoStream(src=0).start()
    time.sleep(2.0)
# otherwise, we are reading from a video file
else:
    cap = cv2.VideoCapture(args["video"])
writer = None
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
if int(major_ver) < 3:
    fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
else:
    fps = cap.get(cv2.CAP_PROP_FPS)

# initialize our centroid tracker and frame dimensions
ct = CentroidTracker()
ct1 = CentroidTracker()
(H, W) = (None, None)
font = cv2.FONT_HERSHEY_SIMPLEX
# loop over the frames from the video stream
person = 0
person1 = 0

while True:
    # read the next frame from the video stream and resize it
    frame = cap.read()
    vs = cv2.VideoCapture(args["input"])

# initialize the video writer (we'll instantiate later if need be)
writer = None

# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
(W, H) = (None, None)

######

## try to determine the total number of frames in the video file
try:
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
        else cv2.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("[INFO] {} total frames in video".format(total))

## an error occurred while trying to determine the total
## number of frames in the video file
except:
    print("[INFO] could not determine # of frames in video")
    print("[INFO] no approx. completion time can be provided")
    total = -1


## INITIALIZE TRACKERS using OpenCV

###OPENCV_OBJECT_TRACKERS = {
### "csrt": cv2.TrackerCSRT_create,
### "kcf": cv2.TrackerKCF_create,
measurement = np.array((2, 1), np.float32)
prediction = np.zeros((2, 1), np.float32)

print("\nObservation in image: BLUE")
print("Prediction from Kalman: GREEN")
print("Update from Kalman: RED\n")

# if the video argument is empty, then we are reading from camera
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    width_plt = 480
    height_plt = 360
    time.sleep(2.0)
else:
    vs = cv2.VideoCapture(args["video"])
    print("\t Width: ", vs.get(cv2.CAP_PROP_FRAME_WIDTH))
    print("\t Height: ", vs.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print("\t FourCC: ", vs.get(cv2.CAP_PROP_FOURCC))
    print("\t Framerate: ", vs.get(cv2.CAP_PROP_FPS))

    width_plt = vs.get(cv2.CAP_PROP_FRAME_WIDTH)
    height_plt = vs.get(cv2.CAP_PROP_FRAME_HEIGHT)
# initialize the first frame in the video stream
firstFrame = None

plt.figure()
#plt.hold(True)

plt.axis([0, width_plt, height_plt, 0])
# set up the termination criteria: either 10 iterations or movement by at least 1 pt
# loop over frames from the video stream
while True:
	# grab the current frame, then handle if we are using a
	# VideoStream or VideoCapture object
	frame = vs.read()
	frame = frame[1] if args.get("video", False) else frame

	# check to see if we have reached the end of the stream
	if frame is None:
		break

	# resize the frame, maintaining the aspect ratio
	frame = imutils.resize(frame, width=1000)
	orig = frame.copy()
	origW = int(vs.get(cv2.CAP_PROP_FRAME_WIDTH))
	origH = int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT))

	# if our frame dimensions are None, we still need to compute the
	# ratio of old frame dimensions to new frame dimensions
	if W is None or H is None:
		(H, W) = frame.shape[:2]
		rW = W / float(newW)
		rH = H / float(newH)

	# resize the frame, this time ignoring aspect ratio
	frame = cv2.resize(frame, (newW, newH))
  
	#############################
	# DETECTION
	#############################
    # resize the frame (the less data we have, the faster we can
    # process it), then convert the frame from BGR to RGB for dlib
    frame = imutils.resize(frame, width=500)
    fgMask = backSub.apply(frame)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # fgMask = imutils.resize(fgMask, width=500)
    # print(fgMask.shape)

    if totalFramesOfcutwhite % args["skip_frames"] == 0:

        # convert to black-and-white binary
        rgbOfcutwhite = cv2.cvtColor(fgMask, cv2.COLOR_GRAY2RGB)

        cv2.rectangle(frame, (10, 2), (100, 20), (255, 255, 255), -1)
        cv2.putText(frame, str(vs.get(cv2.CAP_PROP_POS_FRAMES)), (15, 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

# _____________________________________Start CutCheckPeoplePass/Long Flash_____________________________________________

        # 120:150 170:190 , 450:420 ,240:270
        fgMaskCutCheckPeoplePass1 = fgMask[50:80, 420:450]

        fgMaskCutCheckPeoplePass1Mean = fgMaskCutCheckPeoplePass1.flatten().mean()

        # fgMask[0:30,0:30] = fgMaskCutCheckPeoplePass1

        # 120:150 170:190 , 450:420
        fgMaskCutCheckPeoplePass2 = fgMask[120:150, 420:450]

        fgMaskCutCheckPeoplePass2Mean = fgMaskCutCheckPeoplePass2.flatten().mean()
def trackObject(object, args):
    result = {}

    # extract the OpenCV version info
    (major, minor) = cv2.__version__.split(".")[:2]

    # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
    # function to create our object tracker
    if int(major) == 3 and int(minor) < 3:
        tracker = cv2.Tracker_create(args["tracker"].upper())

    # otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
    # appropriate object tracker constructor:
    else:
        # initialize a dictionary that maps strings to their corresponding
        # OpenCV object tracker implementations
        OPENCV_OBJECT_TRACKERS = {
            "csrt": cv2.TrackerCSRT_create,
            "kcf": cv2.TrackerKCF_create,
            "boosting": cv2.TrackerBoosting_create,
            "mil": cv2.TrackerMIL_create,
            "tld": cv2.TrackerTLD_create,
            "medianflow": cv2.TrackerMedianFlow_create,
            "mosse": cv2.TrackerMOSSE_create
        }

        # grab the appropriate object tracker using our dictionary of
        # OpenCV object tracker objects
        tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
    # tracker = cv2.Tracker_create(args["tracker"].upper())
    # initialize the bounding box coordinates of the object we are going
    # to track

    initBB = None

    # if a video path was not supplied, grab the reference to the web cam
    if not args.get("video", False):
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(1.0)

    # otherwise, grab a reference to the video file
    else:
        vs = cv2.VideoCapture(args["video"])

    fps = vs.get(cv2.CAP_PROP_FPS)
    # print("FPS: " + str(fps))

    vs.set(cv2.CAP_PROP_POS_FRAMES, object['time'] * fps)
    # print("Pos: ", object['time'] * fps)

    result['id'] = object['id']
    result['fps'] = fps
    result['start'] = int(object['time'] * fps)
    result['track'] = []
    # exit(0)

    # width of processing video
    pWidth = 200

    # initialize the FPS throughput estimator
    # fps = None

    # loop over frames from the video stream

    initBB = (object['location']['left'], object['location']['top'],
              object['location']['width'], object['location']['height'])

    width = vs.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = vs.get(cv2.CAP_PROP_FRAME_HEIGHT)

    initBB = scaleRect(initBB, pWidth / width)

    frame = vs.read()
    oFrame = frame[1] if args.get("video", False) else frame
    frame = imutils.resize(oFrame, width=pWidth)
    tracker.init(frame, initBB)
    count = int(fps)
    # fps = FPS().start()
    while True:
        count = count + 1
        # grab the current frame, then handle if we are using a
        # VideoStream or VideoCapture object
        frame = vs.read()

        oFrame = frame[1] if args.get("video", False) else frame
        # resize the frame (so we can process it faster) and grab the
        # frame dimensions
        # check to see if we have reached the end of the stream
        if oFrame is None:
            break
        frame = imutils.resize(oFrame, width=pWidth)
        (H, W) = frame.shape[:2]

        # check to see if we are currently tracking an object

        if initBB is not None:  # grab the new bounding box coordinates of the object
            (success, box) = tracker.update(frame)

            # check to see if the tracking was a success
            if success:
                (x, y, w, h) = [int(v) for v in box]
                # print(str(count) + ": [" + str(x) + ", " + str(y) + ", " + str(w) + ", " + str(h) + "]")
                rect = scaleRect((x, y, w, h), width / pWidth)
                # print(str(count) + ": [" + str(rect[0]) + ", " + str(rect[1]) + ", " + str(rect[2]) + ", " + str(rect[3]) + "]")
                # cv2.rectangle(oFrame, (rect[0], rect[1]), (rect[0]+ rect[2], rect[1] + rect[3]), (0, 255, 0), 2)
                cv2.rectangle(
                    oFrame, (int(rect[0]), int(rect[1])),
                    (int(rect[0]) + int(rect[2]), int(rect[1]) + int(rect[3])),
                    (0, 255, 0), 2)
                result['track'].append([rect[1], rect[0], rect[2], rect[3]])
            else:
                break
            # update the FPS counters
            # fps.update()
            # fps.stop()

            # initialize the set of information we'll be displaying on
            # the frame
            info = [
                ("Tracker", args["tracker"]),
                ("Success", "Yes" if success else "No"),
                # ("FPS", "{:.2f}".format(fps.fps())),
            ]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(oFrame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        #
        # show the output frame
        cv2.imshow("Frame", oFrame)
        key = cv2.waitKey(1) & 0xFF  # do not remove this line

        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        # if key == ord("s"):
        # select the bounding box of the object we want to track (make
        # sure you press ENTER or SPACE after selecting the ROI)
        # initBB = cv2.selectROI("Frame", frame, fromCenter=False,
        #                        showCrosshair=True)

        # start OpenCV object tracker using the supplied bounding box
        # coordinates, then start the FPS throughput estimator as well
        # tracker.init(frame, initBB)
        # fps = FPS().start()

        # if the `q` key was pressed, break from the loop
        # elif key == ord("q"):
        #     break
    # print(count)
    # if we are using a webcam, release the pointer
    if not args.get("video", False):
        vs.stop()

    # otherwise, release the file pointer
    else:
        vs.release()

    # close all windows

    # print(result)
    totalResult.append(result)
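A hypothetical call to trackObject, with field names taken from the function body (all values are placeholders):

obj = {
    'id': 1,
    'time': 12.5,  # seconds into the video where tracking starts
    'location': {'left': 100, 'top': 50, 'width': 80, 'height': 120},
}
trackObject(obj, {"video": "clip.mp4", "tracker": "kcf"})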
# define the lower and upper boundaries of the "orange"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (-10, 100, 100) #-2
greenUpper = (10, 255, 255) #18
pts = deque(maxlen=args["buffer"])
 
# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
	vs = VideoStream(src=1).start()
 
# otherwise, grab a reference to the video file
else:
	vs = cv2.VideoCapture(args["video"])
	w = vs.get(cv2.CAP_PROP_FRAME_WIDTH)
	h = vs.get(cv2.CAP_PROP_FRAME_HEIGHT)
	print("WxH", w, h)
# allow the camera or video file to warm up
time.sleep(2.0)
# keep looping
while True:
	# grab the current frame
	frame = vs.read()
 
	# handle the frame from VideoCapture or VideoStream
	frame = frame[1] if args.get("video", False) else frame
 
	# if we are viewing a video and we did not grab a frame,
	# then we have reached the end of the video
	if frame is None:
Example #16
if not args.get("video", False):
    vs = VideoStream(src=0).start()

# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)

# derive the tracking-output name from the video path (fallback name for the webcam branch is an assumption)
f_name = args['video'].replace('.mp4', '_tracking.txt') if args.get('video') else 'camera_tracking.txt'
f_out = open(f_name, 'w')

# keep looping
while True:
    current_time = vs.get(cv2.CAP_PROP_POS_MSEC) / 1000

    # grab the current frame
    frame = vs.read()

    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break

    # rotate the current frame
    #frame = imutils.rotate(frame, -90)
Example #17
# to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()

# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)
# fourcc = int(vs.get(cv2.CAP_PROP_FOURCC))
# print("fourcc: ", fourcc)

fourcc = cv2.VideoWriter_fourcc(*'XVID')
print("fourcc: ", fourcc)
fps = vs.get(cv2.CAP_PROP_FPS)

test = vs.get(cv2.CAP_PROP_FOURCC)
size = (int(vs.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT)))
out = cv2.VideoWriter('output_test_ball.avi', fourcc, fps, size)
# keep looping
while True:
    # grab the current frame
    frame = vs.read()
    if DEBUG and False:
        print("original frame: ", frame)
    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame
    if DEBUG and False:
        print("changed frame: ", frame)
    # resize the frame to have a maximum width of 500 pixels (the less data we have, the faster we can process it), then
    # convert the frame from BGR to RGB for dlib
    #frame = imutils.resize(frame, width=416, height=416)								-->> IF UNCOMMENTED, THE OUTPUT VIDEO IS NOT GENERATED
    rgb = cv.cvtColor(frame, cv.COLOR_BGR2RGB)

    # if the frame dimensions are empty, set them
    if W is None or H is None:
        W = 640  #frame.shape[1] // 2
        H = 350  #frame.shape[0] // 2

    # if we are supposed to be writing a video to disk, initialize
    # the writer
    if args["output"] is not None and writer is None:
        fourcc = cv.VideoWriter_fourcc('M', 'J', 'P', 'G')
        writer = cv.VideoWriter(args["output"], fourcc, 30,
                                (round(vs.get(cv.CAP_PROP_FRAME_WIDTH)),
                                 round(vs.get(cv.CAP_PROP_FRAME_HEIGHT))))

    # initialize the current status along with our list of bounding
    # box rectangles returned by either (1) our object detector or
    # (2) the correlation trackers
    status = "Waiting"
    rects = []

    # check to see if we should run a more computationally expensive
    # object detection method to aid our tracker
    if totalFrames % args["skip_frames"] == 0:
        # set the status and initialize our new set of object trackers
        status = "Detecting"
        trackers = []
Example #19
    # "medianflow": cv2.TrackerMedianFlow_create,
    # "mosse": cv2.TrackerMOSSE_create
}
tracker = OPENCV_OBJECT_TRACKERS[tracker_choice]()

initBB = None

if not video:
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(video)
    height, width = (
        int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        int(vs.get(cv2.CAP_PROP_FRAME_WIDTH)),
    )
    fps = vs.get(cv2.CAP_PROP_FPS)
    print(height, width)
# initialize the FPS throughput estimator (note: this overwrites the video fps read above)
fps = None

while True:
    # grab the current frame, then handle if we are using a
    # VideoStream or VideoCapture object

    frame = vs.read()
    frame = frame[1] if video else frame
    # check to see if we have reached the end of the stream
    if frame is None:
        break
    frame = cv2.line(frame, (320, 0), (320, 500), (255, 255, 0), 3)
i = 0
while True:
    i += 1
    # grab the current frame, then handle if we are using a VideoStream or VideoCapture object
    frame = vs.read()
    # if(i % 10 != 0):
    # 	continue
    frame = frame[1] if args.get("video", False) else frame

    # check to see if we have reached the end of the stream
    if frame is None:
        break

    # resize the frame (so we can process it faster)
    frame = imutils.resize(frame, width=600)
    timestamp = vs.get(cv2.CAP_PROP_POS_MSEC)

    # grab the updated bounding box coordinates (if any) for each object that is being tracked
    (success, boxes) = trackers.update(frame)
    # if(round(timestamp, -2) % 1000 != 0):
    # 	continue

    # loop over the bounding boxes and draw then on the frame
    for index, box in enumerate(boxes):
        (x, y, w, h) = [int(v) for v in box]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        label = IDs[index]
        cv2.putText(frame, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (255, 255, 255), 2)
Example #21
    # linear motion capture
    if len(linear_cnts) > 0:
        # find the largest contour in the linear_mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        linear_c = max(linear_cnts, key=cv2.contourArea)
        ((linear_x, linear_y),
         linear_radius) = cv2.minEnclosingCircle(linear_c)
        linear_M = cv2.moments(linear_c)
        linear_center = (int(linear_M["m10"] / linear_M["m00"]),
                         int(linear_M["m01"] / linear_M["m00"]))

        if linear_StartCounter == 0:
            linear_StartCounter += 1
            linear_startPosition = int(linear_x)
            linear_startTime = vs.get(cv2.CAP_PROP_POS_MSEC)
            print(linear_startPosition)

        if int(linear_startPosition) != int(linear_x):
            f = open('test.txt', 'a')
            #print(vs.get(cv2.CAP_PROP_POS_MSEC))
            distance = linear_startPosition - int(linear_x)
            current_x = linear_x - linear_startPosition
            timeUsed = vs.get(cv2.CAP_PROP_POS_MSEC) - linear_startTime
            speed = int(
                (distance / timeUsed) * (0.219 * 1000) * 100000) / 100000
            print(distance)
            f.write(str(speed) + " " + str(timeUsed) + "\n")
            f.close()
            # draw the measured speed on the frame (the original call was
            # truncated at this point; the anchor position below is an assumption)
            cv2.putText(frame,
                        str(speed) + " cm/s",
                        linear_center,
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

# disabled label-placement fragment from the source; the opening triple-quote
# was lost in extraction and is restored here so the block stays inert
"""
                cv2.putText(frame, text, (xmin, ymin),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        elif time_flag.shape[0] > 1:
            if xmin == int(time_flag.iloc[i-1, 2]):
                cv2.putText(frame, text, (xmin, ymin - 25),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
            else:
                cv2.putText(frame, text, (xmin, ymin),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        else:
            cv2.putText(frame, text, (xmin, ymin),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)"""


while True:
    # current playback position in seconds (renamed so it does not shadow the time module)
    timestamp = np.array([vs.get(cv2.CAP_PROP_POS_MSEC)]) / 1000.0
    # grab the current frame, then handle if we are using a
    # VideoStream or VideoCapture object
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    # check to see if we have reached the end of the stream
    if frame is None:
        break
    # resize the frame (so we can process it faster) and grab the frame
    # dimensions (imutils.resize keeps the aspect ratio, so only width applies)
    frame = imutils.resize(frame, width=3840)
    (H, W) = frame.shape[:2]
    fps.update()
    fps.stop()
    # compare function
    compare(timestamp, fps_count, excel_time)
tracker = cv2.TrackerKCF_create()

# initialize the bounding box coordinates of the object we are tracking
initBB = None

# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
	print("[INFO] starting video stream...")
	vs = VideoStream(src=0).start()
	time.sleep(1.0)

# otherwise, grab a reference to the video file
else:
	vs = cv2.VideoCapture(args.get("video"))

WIDTH = int(vs.get(cv2.CAP_PROP_FRAME_WIDTH))
HEIGHT = int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT))

count = 0

# loop over frames from the video stream
while True:
	# grab the current frame
	frame = vs.read()
	frame = frame[1] if args.get("video", False) else frame

	# check to see if we have reached the end of the stream
	if frame is None:
		break

	# resize the frame so we can process it faster
initBB = None
# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])
# initialize the FPS throughput estimator
fps = None
show = True
sendtime = 0
frame = vs.read()
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
# get() needs a property id (and exists only on the VideoCapture branch)
fps = vs.get(cv2.CAP_PROP_FPS)
#size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
#frame=frameflow.frame
size=frame.shape[:2]
out = cv2.VideoWriter('./camera_test12.mp4', fourcc, fps, (size[1],size[0]))
# loop over frames from the video stream
while True:
    # grab the current frame, then handle if we are using a
    # VideoStream or VideoCapture object
    frame = vs.read()
    out.write(frame)
    # frame = frame[1] if args.get("video", False) else frame
    # check to see if we have reached the end of the stream
    if frame is None:
        break
    # resize the frame (so we can process it faster) and grab the
net = cv2.dnn.readNet(args["east"])

# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
	print("[INFO] starting video stream...")
	vs = VideoStream(src=0).start()
	time.sleep(1.0)

# otherwise, grab a reference to the video file
else:
	vs = cv2.VideoCapture(args["video"])


frame_width = 342
frame_height = 720
fps = int(vs.get(cv2.CAP_PROP_FPS))


#out = cv2.VideoWriter('outpyOCR.mp4',fourcc, fps, (frame_width,frame_height))
# Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.
out = cv2.VideoWriter('outpyOCR.mp4', cv2.VideoWriter_fourcc(
    'm', 'p', '4', 'v'), fps, (frame_width, frame_height))

# start the FPS throughput estimator
fps = FPS().start()

# loop over frames from the video stream
i = 0
while i != 200:  # True:
	i += 1
	# grab the current frame, then handle if we are using a
Example #26
                               nms_threshold)

    for i in indices:
        i = i[0]
        box = boxes[i]
        x = box[0]
        y = box[1]
        w = box[2]
        h = box[3]

        draw_prediction(image, class_ids[i], confidences[i], round(x),
                        round(y), round(x + w), round(y + h))

    cv2.imshow("object detection", image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if 0 in class_ids:
        objtime = int(vs.get(cv2.CAP_PROP_POS_MSEC) / 1000)
        mmin = str(int(objtime / 60))
        mmin = mmin.zfill(2)
        ssec = str(int(objtime % 60))
        ssec = ssec.zfill(2)
        cv2.imwrite(videodir + "\\%s_%s.jpg" % (mmin, ssec), copyimage)

    fps.update()

fps.stop()
cv2.destroyAllWindows()
vs.stop()
frame_textsave(videodir)
Example #27
pts = deque(maxlen=args["buffer"])
fgbg = cv2.createBackgroundSubtractorKNN(detectShadows=False)

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()

# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

OUTFLAG = False
if args.get("output", False):
    # Define the codec and create VideoWriter object
    (width, height, fps) = (int(vs.get(cv2.CAP_PROP_FRAME_WIDTH)),
                            int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                            vs.get(cv2.CAP_PROP_FPS))
    OUTFLAG = True
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    out = cv2.VideoWriter(args["output"], fourcc, fps, (width, height), True)

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()

    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame
args = vars(ap.parse_args())
imgcx = 0
imgcy = 0

# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    # VideoStream has no get(); query the wrapped cv2.VideoCapture
    imgcx = vs.stream.stream.get(cv2.CAP_PROP_FRAME_WIDTH) / 2
    imgcy = vs.stream.stream.get(cv2.CAP_PROP_FRAME_HEIGHT) / 2
    print("streaming video frame center .. ", imgcx, imgcy)
    time.sleep(2.0)

# otherwise, we are reading from a video file
else:
    vs = cv2.VideoCapture(args["video"])
    imgcx = vs.get(cv2.CAP_PROP_FRAME_WIDTH) / 2
    imgcy = vs.get(cv2.CAP_PROP_FRAME_HEIGHT) / 2
    print("video file frame center .. ", imgcx, imgcy)

# initialize the first frame in the video stream
firstFrame = None

disparr = [0.0, 0.0, 0.0, 0.0, 0.0]
index = 0  # only 5
# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    frame1 = vs.read()

    frame = frame1 if args.get("video", None) is None else frame1[1]
    def run(self):
        # Input webcam or video file
        if not self.args.get("input", False):
            rec = VideoStream(src=0).start()
            time.sleep(1.0)
            fps = self.args["fps"]

        else:
            rec = cv2.VideoCapture(self.args["input"])
            fps = rec.get(cv2.CAP_PROP_FPS)

        writer = None
        W = None
        H = None

        # Associate tracked object
        ct = CentroidAssociation(maxDisappeared=10, maxDistance=50)
        trackers = []

        # Run detection module
        ObjectDetection = DetectionModule('person', self.args["confidence"])
        ObjectDetection.load_model()

        # initialize the csv file (the with-block closes it automatically)
        with open(self.args["csvfile"], 'w', newline='') as f:
            csv_writer = csv.writer(f)
            fields = ['Entry_count', 'Exit_count']
            csv_writer.writerow(fields)

        # loop over frames from the video
        while True:
            frame = rec.read()
            frame = frame[1] if self.args.get("input", False) else frame

            if self.args["input"] is not None and frame is None:
                break

            # resize the frame to process it faster
            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # initialize video writer to disk
            if self.args["output"] is not None and writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(self.args["output"], fourcc, 30,
                                         (W, H), True)

            rects = []

            # check the object detections for tracker
            if self.totalFrames % self.args["skip_frames"] == 0:
                trackers = []
                detections = ObjectDetection.inference(frame)
                #pdb.set_trace()
                print(self.totalFrames)

                for i in np.arange(0, len(detections)):

                    box = detections[i]
                    (startX, startY, endX, endY) = box.astype("int")
                    # Specify tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(int(startX), int(startY), int(endX),
                                          int(endY))
                    tracker.start_track(rgb, rect)
                    trackers.append(tracker)

            else:
                self.get_tracked_boxes(trackers, rects, rgb)

            self.assosciate_tracked_objects(rects, ct, frame, H, W, writer)

            cv2.imshow("frame", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break

            # update the count in the csv file every second (fps can be a float for file input)
            if self.totalFrames % int(fps) == 0:

                #write total count values each second to csv file
                with open(self.args["csvfile"], 'a', newline='') as f:
                    csv_writer = csv.writer(f)
                    csv_writer.writerow([self.countEnter, self.countExit])

                # write the unique number of exits/entries happened in each second.

                #people_entered_per_sec = countExit - people_entered_per_sec
                #people_left_per_sec = countEnter - people_left_per_sec
                #with open(self.args["csvfile"], 'a') as f:
                #csv_writer = csv.writer(f)
                #csv_writer.writerow([people_entered_per_sec, people_left_per_sec])
                #csv_writer.writerow([countExit, countEnter])

        if writer is not None:
            writer.release()

        if not self.args.get("input", False):
            rec.stop()
        else:
            rec.release()

        cv2.destroyAllWindows()
Example #30
#whiteUp = np.array([255, sens, 255])

whiteLow = np.array([0, 0, 30])
whiteUp = np.array([360, 100, 100])  # note: OpenCV's H channel only spans 0-179

#grabs the video to begin processing
if not args.get("video", False):
    vs = VideoStream(src=0).start()
    outputfile = open("camera_video.csv", 'w')
else:
    vs = cv2.VideoCapture(args["video"])
    # derive the output name by swapping the extension for .csv
    outputfile = open('analyzed_' + args["video"][:-4] + '.csv', 'w')

# property queries work on the VideoCapture (file) branch only
fps = vs.get(cv2.CAP_PROP_FPS)
totalframes = vs.get(cv2.CAP_PROP_FRAME_COUNT)
print(str(totalframes) + " total frames to be processed.")

framestate = 0
while True:
    # current frame
    frame = vs.read()

    # handle the frame from VideoCapture
    frame = frame[1] if args.get("video", False) else frame

    if frame is None:
        break

    # resize the frame, blur it, and convert it to HSV, this