Example #1
    def compute_attributes(self):
        input_stream = cv.VideoCapture(self.file_name)
        self.sample_rate = input_stream.get(cv.CAP_PROP_FPS)
        input_stream.release()
        # Use multi-threading by Adrian of https://pyimagesearch.com
        input_stream = FileVideoStream(self.file_name).start()
        directograms = []
        directogram_times = []
        frame = input_stream.read()
        last_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        self.shape = frame.shape
        while input_stream.more():
            if len(directograms) % 250 == 0:
                print('Video Processing: {:.2f}s'.format(
                    len(directograms) / self.sample_rate))
            frame = input_stream.read()
            if not input_stream.more():
                break
            next_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            # These parameters were taken from a demo in the documentation
            flow = cv.calcOpticalFlowFarneback(last_frame, next_frame, None,
                                               0.5, 3, 15, 3, 5, 1.2, 0)
            directograms.append(get_directogram(flow))
            directogram_times.append(len(directograms) / self.sample_rate)
            last_frame = next_frame
        self.envelope_times = np.array(directogram_times)
        self.envelopes = get_impact_envelopes(directograms,
                                              self.envelope_times)
        self.envelope_sample_rate = self.sample_rate
        input_stream.stop()
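Every example on this page follows roughly the same threaded read pattern from imutils. As a reference, here is a minimal, self-contained sketch of that pattern; the file name is only a placeholder:

import time

import cv2
from imutils.video import FileVideoStream

# placeholder path; substitute the video file you want to process
fvs = FileVideoStream("input.mp4").start()
time.sleep(1.0)  # give the decoding thread a moment to fill the queue

while fvs.more():
    frame = fvs.read()
    if frame is None:  # read() may hand back None once the file is exhausted
        break
    # per-frame processing goes here
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
fvs.stop()

Checking the frame for None before using it avoids the end-of-file crash that several of the examples below guard against with a try/except instead.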
Example #2
File: fryer.py  Project: paramkpr/DankBot
def fry_gif(update, url, number_of_cycles, args):
    number_of_emojis = 1.5 if args['high-fat'] else 1 if args['low-fat'] else 0
    bulge_probability = 0.3 if args['heavy'] else 0.15 if args['light'] else 0
    magnitude = 4 if args['deep'] else 1 if args['shallow'] else 2

    name = update.message.from_user.first_name
    filename = '%s_%s_%s' % (update.message.chat_id, name,
                             update.message.message_id)
    filepath = bin_path + '/temp/' + filename
    caption = __get_caption(name, number_of_cycles, args)
    output = bin_path + '/temp/out_' + filename + '.mp4'

    gifbio = BytesIO()
    gifbio.name = filename + '.gif'
    fs = [__posterize, __sharpen, __increase_contrast, __colorize]
    shuffle(fs)

    if not __download_gif(url, filepath):
        return

    fvs = FileVideoStream(filepath + '.mp4').start()
    frame = fvs.read()
    height, width, _ = frame.shape

    try:
        # FileVideoStream exposes the underlying cv2.VideoCapture as .stream
        fps = fvs.stream.get(CAP_PROP_FPS)
    except Exception:
        fps = 30
    out = VideoWriter(output, VideoWriter_fourcc(*'mp4v'), fps,
                      (width, height))
    out.write(
        fry_frame(frame, number_of_cycles, fs, number_of_emojis,
                  bulge_probability, magnitude, args))

    while fvs.more():
        try:
            temp = fry_frame(fvs.read(), number_of_cycles, fs,
                             number_of_emojis, bulge_probability, magnitude,
                             args)
        except Exception as e:
            break
        out.write(temp)

    fvs.stop()
    fvs.stream.release()
    out.release()
    update.message.reply_animation(open(output, 'rb'),
                                   caption=caption,
                                   quote=True)
    try:
        __upload_to_imgur(output, caption)
    except Exception as e:
        print(e)
    remove(filepath + '.mp4')
Example #3
def run_video():
    PATH_VIDEO = 'D:/Binus/data/videos/VID_20190915_123023.mp4'
    PATH_OUT_VIDEO = 'D:/Binus/data/videos/VID_20190915_123023_2.avi'
    fo = FaceOrchestrator()
    fs = FileVideoStream(PATH_VIDEO).start()
    time.sleep(1.0)
    frame = fs.read()
    h, w, __ = frame.shape
    out = cv2.VideoWriter(PATH_OUT_VIDEO,
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 24,
                          (w, h))
    i = 0
    while fs.more():
        i = i + 1
        print('Processing frame ' + str(i))
        try:
            frame = fs.read()
            fo.set_image_to_be_processed(frame)
            # detect faces
            fo.fd_method = 'AI'
            fo.fd_ai_min_score = 0.6
            fo.detect_faces()
            fo.align_faces(target_face_percent=0.28)
            # recognize faces
            # fo.fr_max_delta = 0.6
            # fo.fr_method = 'DELTA'
            fo.fr_min_probability = 0.5
            fo.fr_method = 'ML'
            frame_with_label, names = fo.recognize_faces()
            out.write(frame_with_label)
        except Exception:
            pass
    fs.stop()
    out.release()
    print('Finish.')
Example #4
File: main.py  Project: VerstraeteP/stitch
def process_video(file_name, n=1):
    """
	iterate over frames of videofile
	:param file_name: filename
	:param n:process one out of every n frames-> default 1: take all frames
	:return:list of finish frames
	"""

    fvs = FileVideoStream(file_name).start()  #load video

    cam = cv2.VideoCapture(file_name)
    fps = cam.get(cv2.CAP_PROP_FPS)  # get original fps of the video
    cam.release()

    counter = 1
    frame_list = []
    teller = 0
    while fvs.running():
        if fvs.more():

            teller += 1
            frame = fvs.read()

            if frame is not None and teller % n == 0:
                # keep every n-th frame, resized to half its width and height
                frame_list.append(
                    cv2.resize(
                        frame,
                        (int(frame.shape[1] / 2), int(frame.shape[0] / 2))))

            counter += 1
        else:
            time.sleep(2)

    fvs.stop()
    return (frame_list, fps)
Example #5
def livevideocorrection(filepath, modelname):
    fvs = FileVideoStream(filepath, queue_size=128).start()
    desired_frames = 250
    frame_number = 0
    last_time = time.monotonic()
    while fvs.more() and frame_number <= desired_frames:
        frame = fvs.read()
        if frame_number % 10 == 0:
            current_time = time.monotonic()
            timedif = current_time - last_time
            FPS = 10 / timedif
            print('FPS:' + str(FPS))
            last_time = time.monotonic()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        prediction = (predict_one(frame,
                                  modelname).squeeze().detach().float().cpu())
        prediction = prediction.permute(1, 2, 0).numpy()
        prediction = cv2.cvtColor(prediction, cv2.COLOR_BGR2RGB)

        cv2.putText(prediction, "Queue Size: {}".format(fvs.Q.qsize()),
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.putText(prediction, ('FPS: ' + str(FPS)), (750, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.imshow('preview', prediction)
        frame_number += 1
        cv2.waitKey(1)

    cv2.destroyAllWindows()
    fvs.stop()
Example #6
def main():
    # Construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-m", "--model", required=True,
                    help="path to model file")
    ap.add_argument("-c", "--confidence", type=float, default=0.95,
                    help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # Initialize our centroid tracker and frame dimensions
    ct = CentroidTracker()

    # Load our serialized model from disk
    print("[INFO] Loading model...")
    model = get_model(args["model"])

    vs = FileVideoStream("/media/ave/ae722e82-1476-4374-acd8-a8a18ef8ae7a/Rana_vids/Nova_1/121/03-2018.05.25_06.24.23-17.mpg").start()
    time.sleep(2.0)

    # Loop over the frames from the video stream
    while vs.more():
        # Read the next frame from the video stream
        frame = vs.read()

        analyze_frame(frame, ct, model, args["confidence"])

    # Cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #7
    def UAMS(self):
        video_file = FileVideoStream(self.args["video"]).start()
        time.sleep(2.0)
        fps = FPS().start()

        while video_file.more():
            # Converting each frame to matrix of numbers
            try:
                self.frame = video_file.read()
                gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                gray = np.dstack([gray, gray, gray])

            except Exception as e:
                print(e)

            self.frame, self.faces, conf, detection, startX, y = self.cnn_op.CNNOperation(
                self.frame, self.net, self.args)
            try:
                msg = self.calculation(startX, y)
            except Exception as e:
                print(e)
            try:
                cv2.imshow("frame-this is main frame", self.frame)
            except Exception as e:
                print(e)
            fps.update()
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
        fps.stop()
        print("elapsed time : {:.2f}".format(fps.elapsed()))
        print("FPS  : {:.2f}".format(fps.fps()))
Example #8
def main(input_video_path, output_video_path, batch_size, async_workers_count):
    frame_id = 0
    video_path = 'sample_video/car_stopping_shorter_version.mp4'
    fvs = FileVideoStream(input_video_path).start()
    time.sleep(1.0)
    fps = FPS().start()
    in_holder_path, op_holder_path = return_io_frames_holder()
    print(in_holder_path, op_holder_path)
    # loop over frames from the video file stream
    input_frames_list = []
    while fvs.more():
        frame_id += 1
        frame = fvs.read()
        if (frame is None):
            continue

        cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.imwrite(f"{in_holder_path}/{frame_id}.jpeg", frame)

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    # do a bit of cleanup
    cv2.destroyAllWindows()
    fvs.stop()
    colonel_batch(in_holder_path, op_holder_path, batch_size,
                  async_workers_count)
    #op_holder_path = "/home/gr8-adakron/Adakron_bay/work/FastAPI/fastapi_exp/video_processing/outputs/bCe7mEE/"
    write_detected_video(output_video_path, op_holder_path)
Example #9
class ImageRouter(QtCore.QObject):
    source_is_file = False
    on_frame = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, filename=''):
        super().__init__()
        if filename == '':
            self.vs = VideoStream(src=0).start()
            self.source_is_file = False
        else:
            self.vs = FileVideoStream(filename).start()
            self.source_is_file = True

    def __del__(self):
        self.vs.stop()

    def tick(self):
        if self.source_is_file and not self.vs.more():
            return False
        frame = self.vs.read()
        if frame is None:
            return False
        self.on_frame.emit(frame)
        return True

    def run(self):
        print("[INFO] Started ImageRouter")
        while self.tick():
            pass
        print("[INFO] Stopped ImageRouter")
Example #10
    def read_frames(self):
        # start the file video stream thread and allow the buffer to
        # start to fill
        print("[INFO] starting video file thread...")
        args = self.argument_parser()
        output_path = self.cwd / Path(args.output_dir)
        output_path.mkdir(parents=True, exist_ok=True)
        fvs = FileVideoStream(args.video).start()
        time.sleep(1.0)

        # start the FPS timer
        fps = FPS().start()
        detector = mtcnn.MTCNN()
        fno = 0
        frames = VideoMeta(args.video).fps()
        #print(meta)

        # loop over frames from the video file stream
        while fvs.more():
            fno += 1
            print(fno)
            # grab the frame from the threaded video file stream, resize
            # it, and convert it to grayscale (while still retaining 3
            # channels)
            frame = fvs.read()
            if (fno % frames != 0):
                continue
            try:
                frame = imutils.resize(frame, width=450)
            except Exception:
                break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # frame = np.dstack([frame, frame, frame])

            if args.detect == "haarcascade":
                results = self.face_cascade.detectMultiScale(frame, 1.3, 5)
                if len(results) != 0:

                    self.crop(frame, results[0],
                              str(output_path / Path(str(fno) + ".jpg")))
            else:
                results = detector.detect_faces(frame)

            # display the size of the queue on the frame
            # cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
            #            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

            # show the frame and update the FPS counter
            cv2.imshow("Frame", frame)
            cv2.waitKey(1)
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # do a bit of cleanup
        cv2.destroyAllWindows()
        fvs.stop()
Example #11
def livevideocorrection(filepath, modelname):
    fvs = FileVideoStream(filepath, queue_size=128).start()
    # ivs = InferenceDataStream(fvs, queue_size=25).start() # --> inference video stream reads from file video stream and forms queue of prepped images for inference
    desired_frames = 250
    frame_number = 0
    last_time = time.monotonic()
    while fvs.more() and frame_number <= desired_frames:
        frame = fvs.read()  # --> frame = ivs.read()
        if frame_number % 10 == 0:
            current_time = time.monotonic()
            timedif = current_time - last_time
            FPS = 10 / timedif
            print('FPS:' + str(FPS))
            last_time = time.monotonic()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        prediction = (
            predict_one(frame, frame_number).squeeze().detach().cpu()
        )  # --> prediction = predict_one(frame, frame_number)  # why? because the logic for preparing images is now done asynchronously in ivs.
        prediction = prediction.permute(1, 2, 0).numpy()
        prediction = cv2.cvtColor(prediction, cv2.COLOR_BGR2RGB)

        cv2.putText(prediction, "Queue Size: {}".format(fvs.Q.qsize()),
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.putText(prediction, ('FPS: ' + str(FPS)), (750, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.imshow('preview', prediction)
        frame_number += 1
        cv2.waitKey(1)

    cv2.destroyAllWindows()
    fvs.stop()
Example #12
def main(video_path):
    # video_path = 'sample_video/car_stopping_shorter_version.mp4'
    fvs = FileVideoStream(video_path).start()
    time.sleep(1.0)
    fps = FPS().start()

    # loop over frames from the video file stream
    input_frames_list = []
    number = 0
    while fvs.more():
        number += 1
        frame = fvs.read()
        if (frame is None):
            continue

        cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        ann_frame = stream_frames_to_model(frame, number)
        cv2.imshow('frame', ann_frame)
        cv2.waitKey(1)
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    # do a bit of cleanup
    cv2.destroyAllWindows()
    fvs.stop()
Example #13
def main():
    vid = (Path(__file__).parent / 'test.mp4').absolute()
    stream = FileVideoStream(str(vid), queue_size=100).start()
    time.sleep(1)

    while stream.more():
        img_data = stream.read()
        if img_data is None:
            break
        img = Image(img_data)
        transformed = Transformer(img)
        image = transformed.cannify()
        cropped_img = region(image)
        lines = cv2.HoughLinesP(cropped_img.raw,
                                2,
                                np.pi / 180,
                                100,
                                np.array([]),
                                minLineLength=40,
                                maxLineGap=5)
        avg_lines = utils.average_slope_intercept(img, lines)
        lines_image = img.display_lines(avg_lines)
        combined = img.combine(lines_image)
        cv2.imshow('', combined.raw)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
    stream.stop()
Example #14
    def standard_run(
        self,
        x1: int,
        x2: int,
        y1: int,
        y2: int,
        sub_ch: list,
        channel_len,
    ):

        fps = FPS().start()
        cap = FileVideoStream(self.filename).start()
        count = 0

        start = time.time()
        cycle_start = time.time()
        frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        while cap.more():
            #            print(count)
            frame = cap.read()
            if frame is None:
                break

            if count < 200:
                self.frames_buffer.append(frame)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            frame = frame[y1:y2, x1:x2]
            crop = self.mask.apply(frame)
            crop = cv2.GaussianBlur(crop, (7, 7), 3.0)

            _, crop = cv2.threshold(crop, 150, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            contours, hierarchy = cv2.findContours(crop, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            for i in range(len(contours)):
                avg = np.mean(contours[i], axis=0)
                coord = (int(avg[0][0]), int(avg[0][1]))  # Coord is (x,y)
                ch_pos = int(math.floor((coord[0]) / channel_len))

                try:
                    self.sum_ch1[ch_pos] += 1.0
                except Exception:
                    # contour centroid fell outside the configured channels
                    pass

            count += 1
            fps.update()
        fps.stop()
        cycle_end = time.time()
        self.cycle_count += 1
        end = time.time()
        detect_benchmark = end - start
        print("Number of frames processed: ", count)
        print("Time taken for WBC counting:", detect_benchmark)
        print("[INFO] Each cycle time taken = %0.5fs" %
              ((cycle_end - cycle_start) / count))

        return fps
Example #15
def showVideo(app):
    fvs = FileVideoStream(app.vidDir).start()
    time.sleep(1.0)
    while fvs.more():
        frame = fvs.read()
        frame = cv2.resize(frame, (720, 480))
        cv2.imshow("Frame", frame)
        cv2.waitKey(1)
    fvs.stop()
    cv2.destroyAllWindows()
Example #16
    def frames(source):
        # FileVideoStream needs the path of the video file to stream from
        camera = FileVideoStream(source).start()
        time.sleep(1.0)

        # loop over frames from the video file stream
        while camera.more():
            frame = camera.read()
            yield frame
Example #17
    def read_frames(self):
        # start the file video stream thread and allow the buffer to
        # start to fill
        print("[INFO] starting video file thread...")
        args = self.argument_parser()
        output_path = self.cwd / Path(args.output_dir)
        output_path.mkdir(parents=True, exist_ok=True)
        fvs = FileVideoStream(args.video).start()
        time.sleep(1.0)

        # start the FPS timer
        fps = FPS().start()
        detector = mtcnn.MTCNN()
        fno = 0
        #frames = VideoMeta(args.video).fps()
        #print(meta)
        e = TfPoseEstimator(get_graph_path("mobilenet_thin"),
                            target_size=(432, 368))
        # loop over frames from the video file stream
        while fvs.more():
            fno += 1
            print(fno)
            # grab the frame from the threaded video file stream, resize
            # it, and convert it to grayscale (while still retaining 3
            # channels)
            frame = fvs.read()
            #if (fno % frames != 0):
            #   continue
            try:
                # imutils.resize keeps the aspect ratio; height is ignored when width is given
                frame = imutils.resize(frame, width=432, height=368)
            except Exception:
                break
            #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            #frame = np.dstack([frame, frame, frame])

            humans = e.inference(frame)
            image = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)
            print("humans:", humans)

            # display the size of the queue on the frame
            # cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
            #            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

            # show the frame and update the FPS counter
            cv2.imshow("Frame", image)
            cv2.waitKey(1)
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # do a bit of cleanup
        cv2.destroyAllWindows()
        fvs.stop()
Example #18
def piscadas():
    vs = FileVideoStream(0).start()
    fileStream = True

    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 3

    COUNTER = 0
    TOTAL = 0

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    while True:

        if fileStream and not vs.more():
            break

        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 0)
        for rect in rects:

            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = get_ear(leftEye)
            rightEAR = get_ear(rightEye)

            ear = (leftEAR + rightEAR) / 2.0

            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            if ear < EYE_AR_THRESH:
                COUNTER += 1

            else:
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1

                COUNTER = 0

            cv2.putText(frame, "Piscadas: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
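The blink-detection examples (#18, #20 and #30) rely on an eye-aspect-ratio helper (get_ear, calculateEAR, eye_aspect_ratio) that is not part of these excerpts. A common implementation, assuming the six per-eye landmarks from dlib's 68-point model in their usual order, is sketched below:

import numpy as np

def eye_aspect_ratio(eye):
    # eye: 6x2 array of (x, y) landmark coordinates for one eye
    a = np.linalg.norm(eye[1] - eye[5])  # first vertical distance
    b = np.linalg.norm(eye[2] - eye[4])  # second vertical distance
    c = np.linalg.norm(eye[0] - eye[3])  # horizontal distance
    return (a + b) / (2.0 * c)

The ratio stays roughly constant while the eye is open and drops toward zero during a blink, which is what the EYE_AR_THRESH / EAR_THRESHOLD comparisons exploit.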
Example #19
def write_svg_haar(stream_url):
    '''
    Reads an alternative face detection model, and connects to the in-memory 
    Redis database. Detects faces (no identification) in the specified stream 
    and calculates the corresponding bounding boxes. Writes the bounding boxes 
    for all detected faces to an svg overlay which is then saved to
    Redis to be accessed by other processes. 
    '''
    print("[INFO] opening redis connection")
    redis_db = redis.StrictRedis(host="localhost", port=6379, db=0)
    print("[INFO] starting stream")
    #    capture = cv2.VideoCapture(stream_url)
    #    capture.set(cv2.CAP_PROP_BUFFERSIZE, 0)
    fvs = FileVideoStream(stream_url, queue_size=1).start()
    # load the Haar cascade once instead of re-reading it for every frame
    face_cascade = cv2.CascadeClassifier(
        "./haarcascade_frontalface_default.xml")
    process_this_frame = True
    while True:
        # if process_this_frame:
        if True:
            # read_flag, img = capture.read()
            if not fvs.more():
                continue
            img = fvs.read()

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, minSize=(20, 20))
            svg_document = svgwrite.Drawing(size=("1280px", "720px"))
            svg_document.add(
                svg_document.rect(insert=(0, 0),
                                  size=("1280px", "720px"),
                                  stroke_width="10",
                                  stroke="green",
                                  fill="rgb(0,0,0)",
                                  fill_opacity=0))

            for (x, y, w, h) in faces:
                x = int(x)
                y = int(y)
                svg_document.add(
                    svg_document.rect(insert=(x, y),
                                      size=("{}px".format(w),
                                            "{}px".format(h)),
                                      stroke_width="10",
                                      stroke="yellow",
                                      fill="rgb(0,0,0)",
                                      fill_opacity=0))
            redis_db.set('overlay', svg_document.tostring())

        process_this_frame = not process_this_frame

        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
Example #20
def detect_blinks(videoFilePath):
    frameCount = 0
    blinkCount = 0

    faceDetector = dlib.get_frontal_face_detector()
    landmarksPredictor = dlib.shape_predictor(shapePredictorPath)

    leftEyeIdx = face_utils.FACIAL_LANDMARKS_IDXS['left_eye']
    rightEyeIdx = face_utils.FACIAL_LANDMARKS_IDXS['right_eye']

    if FILE_VIDEO_STREAM:
        vs = FileVideoStream(videoFilePath).start()
    else:
        vs = VideoStream().start()
    time.sleep(0.5)

    target_face_save_path = os.path.join(CURRENT_FOLDER_PATH,
                                         str(time.time()) + '.jpg')

    try:
        while True:
            if FILE_VIDEO_STREAM and not vs.more():
                break
            frame = vs.read()
            frame = rotate(frame, 90)
            frame = imutils.resize(frame, width=500)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            target_face = gray
            faces = faceDetector(gray, 0)

            for (i, face) in enumerate(faces):
                (x, y, w, h) = face_utils.rect_to_bb(face)

                landmarks = landmarksPredictor(gray, face)
                landmarks = face_utils.shape_to_np(landmarks)

                leftEye = landmarks[leftEyeIdx[0]:leftEyeIdx[1]]
                rightEye = landmarks[rightEyeIdx[0]:rightEyeIdx[1]]

                leftEAR = calculateEAR(leftEye)
                rightEAR = calculateEAR(rightEye)
                ear = (leftEAR + rightEAR) / 2.0
                if ear < EAR_THRESHOLD:
                    frameCount += 1
                else:
                    target_face = gray
                    if frameCount >= EAR_CONSEC_FRAMES:
                        blinkCount += 1
                    frameCount = 0
        cv2.imwrite(target_face_save_path, target_face)
    except Exception:
        blinkCount = 0
    vs.stop()

    return blinkCount, target_face_save_path
Example #21
def face_detection_in_video(path2video, seconds_per_frame=1):
    print(path2video)
    detected_faces = []
    face_embeddings = []
    facecodes = []

    print("[INFO] starting video file thread...")
    fvs = FileVideoStream(path2video)
    org_frame_rate = fvs.stream.get(5)  # cv2.CAP_PROP_FPS
    width = fvs.stream.get(3)  # cv2.CAP_PROP_FRAME_WIDTH (float)
    height = fvs.stream.get(4)  # cv2.CAP_PROP_FRAME_HEIGHT (float)
    no_of_frames_to_skip = org_frame_rate * seconds_per_frame
    no_of_frames_to_skip = no_of_frames_to_skip if no_of_frames_to_skip >= 1 else 1

    print("[INFO] original frame rate: {}".format(org_frame_rate))
    print("[INFO] no_of_frames_to_skip: {}".format(no_of_frames_to_skip))
    fvs.start()

    # start the FPS timer
    fps = FPS().start()

    frame_no = 1

    # loop over frames from the video file stream
    while fvs.more():
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale (while still retaining 3
        # channels)
        frame = fvs.read()

        if frame is not None:
            if frame_no % math.floor(no_of_frames_to_skip) == 0:
                data = detect_faces_from_frame(frame, frame_no, width, height)
                detected_faces.extend(data["faces"])
                face_embeddings.extend(data["face_embeddings"])
                facecodes.extend(data["facecodes"])
            frame_no += 1

            # update the FPS counter
            fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    print("[INFO] total faces detected: {} ".format(len(detected_faces)))
    print("[INFO] total frames: {} ".format(frame_no))

    return {
        "faces": detected_faces,
        "face_embeddings": face_embeddings,
        "facecodes": facecodes
    }
Example #22
def video_extract_features(vid_path, frame_number, end_frame):
    # FASTER video reading (decode in separate thread)
    fvs = FileVideoStream(vid_path, start_frame=frame_number).start()
    time.sleep(1.0)

    current_frame = frame_number - 1
    frameCount = end_frame - frame_number + 1

    roi = [0.25, 0.55, 0.35, 0.65]
    frame_width = int(fvs.get_width())
    frame_height = int(fvs.get_height())
    result_width = round(roi[3] * frame_width) - round(roi[2] * frame_width)
    result_height = round(roi[1] * frame_height) - round(roi[0] * frame_height)
    buf = np.empty((frameCount, result_height, result_width), np.dtype('uint8'))
    buf_canny = np.empty((frameCount, result_height, result_width), np.dtype('uint8'))
    hist_buf = np.empty((frameCount, 16))
    print_iter = 0

    while fvs.more():

        print_iter += 1
        # Capture frame-by-frame
        frame = fvs.read()
        current_frame += 1

        frame_roi = get_ROI(frame, roi)

        harris_result = get_harris_feature(frame_roi)

        hist_result = extract_frame_histogram(frame_roi)

        # canny_result = get_canny_feature(frame_roi)

        buf[current_frame - frame_number] = harris_result
        hist_buf[current_frame - frame_number] = hist_result
        # buf_canny[current_frame - frame_number] = canny_result

        # cv2.imshow('Frame', harris_result)
        if print_iter % 60 == 0:
            print(f'Progress: {100*(current_frame-frame_number)/(end_frame-frame_number)}%')


        # Press Q on keyboard to  exit
        if cv2.waitKey(10) & 0xFF == ord('q'):
            print(f'current_frame: {current_frame}')
            break
        if current_frame == end_frame:
            fvs.stop()
            break
    #save numpy matrix of feature frames
    np.save('library_match_1_segment_harris.npy', buf)
    np.save('library_match_1_segment_histogram.npy', hist_buf)
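get_ROI, get_harris_feature and extract_frame_histogram are project-specific helpers that are not included in this excerpt. Judging from how result_width and result_height are derived above, the ROI is apparently given as fractional [top, bottom, left, right] bounds, so the crop presumably looks roughly like this sketch:

def get_ROI(frame, roi):
    # roi = [top, bottom, left, right] as fractions of the frame size (assumption)
    h, w = frame.shape[:2]
    return frame[round(roi[0] * h):round(roi[1] * h),
                 round(roi[2] * w):round(roi[3] * w)]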
Example #23
def process_video(analyzed_videos, reference_digits, time_parsable, ts_box, vdir, video):
    print("[*] Processing video {} from {}".format(video, vdir.directory))
    if video in analyzed_videos:
        print("[*] Video has been processed. Checking if processing is complete...")
        last_processed_frame = get_last_processed_frame(video)
        print("[*] Last processed frame for this video is: ", last_processed_frame)
    else:
        last_processed_frame = None

    vs = FileVideoStream(os.path.join(vdir.directory, video)).start()

    # Reset frame number
    f_num = 0

    # Allow the buffer some time to fill
    time.sleep(2.0)

    while vs.more():
        frame = vs.read()

        f_num += 1
        if (last_processed_frame is not None) and (f_num <= last_processed_frame):
            print("[*] Current frame is {}. Waiting for {}...".format(f_num, last_processed_frame + 1))
            # Give video buffer time to fill so we don't overtake it
            time.sleep(0.01)
            continue
        else:
            try:
                # Process the timestamp area in the video
                frame_time, ts_box = compute_frame_time(frame, reference_digits, time_parsable, ts_box)
            except AttributeError:
                if frame is None:
                    print("[!] Frame was none.")
                    break

                print("Something went wrong. Trying again...")
                continue

            if frame_time is not None:
                time_parsable = True
            else:
                print("[!] Failed to process time, probably because the frame is distorted.")

            # Add the frame information to the logging database
            add_frame(directory=vdir,
                      video=video,
                      time=frame_time,
                      frame_number=f_num)

    print("{} is done being processed. Adding to database of processed videos...".format(video))
    add_processed_video(video=video, total_frames=f_num)
    vs.stop()
Example #24
def run_video(path, net):
    '''Runs the uploaded video through the juggle counter algorithm.'''

    confidence = 0.45
    (W, H) = (200, 200)
    direction = 1
    bounces = 0
    prev_cY = 0
    frame_num = 0
    prev_frame_num = -7
    first_detection = True
    print("Processing video...")
    vs = FileVideoStream(path).start()

    while vs.more():
        frames_per_read = 2
        for i in range(frames_per_read):
            frame = vs.read()
            frame_num += 1
        try:
            frame = cv2.resize(frame, (W, H))
        except cv2.error as e:
            print("...end of video")
            break

        detection = process_frame(frame, net, W, H)
        if detection[0, 0, 0, 2] > confidence:
            (cX, cY, bounces, direction, prev_cY, prev_frame_num,
             diameter_temp) = check_detection(frame_num, prev_frame_num, frame,
                                              detection, W, H, bounces,
                                              direction, prev_cY)
            if first_detection:
                diameter = diameter_temp
                first_detection = False
            cv2.putText(frame, str(bounces), (int(W * 0.86), int(W * 0.2)),
                        cv2.FONT_HERSHEY_SIMPLEX, int(W * 0.007), (0, 255, 0),
                        2)
            if 0.9 * diameter < diameter_temp < 1.1 * diameter:
                cv2.circle(frame, (cX, cY), int(W * 0.03), (0, 255, 0), -1)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # Only for debugging:
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    cv2.destroyAllWindows()
    vs.stop()

    return bounces
Example #25
def process_video(video, tolerance=0.6, model="hog", upsample=1):
    # process video file and produce clustering results
    process_frame = True
    frames_with_face = 0
    known_faces_encodings = list()
    data = list()

    print("[INFO] Video name: " + video)
    print("[INFO] Starting video processing...")
    fvs = FileVideoStream(video).start()
    time.sleep(1.0)

    # division by two because every second frame is processed
    total_video_frames = int(fvs.stream.get(cv2.CAP_PROP_FRAME_COUNT)) / 2

    fps = FPS().start()

    while (fvs.more()):
        # read and transform the frame
        frame = transform_to_rgb(fvs.read())

        if (process_frame):
            timestamp = fvs.stream.get(cv2.CAP_PROP_POS_MSEC)
            frame_position = fvs.stream.get(cv2.CAP_PROP_POS_FRAMES)

            face_locations = fcr.face_locations(frame, upsample, model)
            face_encodings = fcr.face_encodings(frame, face_locations)
            face_indexes = cluster_faces(known_faces_encodings, face_encodings,
                                         tolerance)

            if (len(face_indexes) > 0):
                frames_with_face += 1

            result = dict(timestamp=timestamp,
                          frame_position=frame_position,
                          face_indexes=face_indexes)
            data.append(result)

        process_frame = not process_frame

        # update the FPS counter
        fps.update()

    fps.stop()

    print("[INFO] Processing successfully finished")
    print("[INFO] Elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] Approx. FPS: {:.2f}".format(fps.fps()))

    return total_video_frames, frames_with_face, known_faces_encodings, data
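cluster_faces and transform_to_rgb are not shown in this excerpt, and fcr appears to be the face_recognition package. A hypothetical sketch of cluster_faces, assuming it matches each encoding against the nearest already-known face within tolerance and otherwise registers a new one, could look like:

import numpy as np

def cluster_faces(known_faces_encodings, face_encodings, tolerance):
    # hypothetical helper: return the index of the matching known face for each
    # encoding, appending encodings of unseen faces to known_faces_encodings
    face_indexes = []
    for encoding in face_encodings:
        if known_faces_encodings:
            distances = np.linalg.norm(
                np.asarray(known_faces_encodings) - encoding, axis=1)
            best = int(np.argmin(distances))
            if distances[best] <= tolerance:
                face_indexes.append(best)
                continue
        known_faces_encodings.append(encoding)
        face_indexes.append(len(known_faces_encodings) - 1)
    return face_indexes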
Example #26
def read_video(path_video: str) -> list:
    assert pathlib.Path(path_video).exists()
    print("Reading video")
    buf = []
    video_stream = FileVideoStream(path_video).start()
    count = 0
    while video_stream.more():
        count += 1
        frame = video_stream.read()
        if frame is None:
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        buf.append(frame)
        print(count)
    return buf
Example #27
def realtime(src, modelweightspath):

    if (type(src) == str):
        cap = FileVideoStream(src).start()
    elif (type(src) == int):
        cap = WebcamVideoStream(src).start()  # for fast fps read for real-time

    fps = FPS().start()
    #cap = cv2.VideoCapture(0)                               # for cv2 read
    h = 512
    w = 384
    seg_model = load_bonnet(3, h, w)
    seg_model.load_weights(modelweightspath)
    check = True

    while check:
        start = time.time()
        #ret,frame = cap.read()                             # for cv2 read
        frame = cap.read()  # for fast fps read webcamvideostream
        #print(frame.shape)                                 # frame is of: BGR type
        frame = cv2.resize(frame, (w, h), interpolation=cv2.INTER_AREA)
        cv2.imshow('Input', frame)
        print(frame.shape)
        IP = []
        ip = load_input(frame, h, w)
        IP.append(ip)
        IP = np.array(IP)
        pred = seg_model.predict(IP)
        prediction = pred.argmax(axis=-1)
        prediction = to_categorical(prediction, 3)
        prediction = np.reshape(prediction, (h, w, 3))
        prediction = prediction[:, :, [2, 1, 0]]
        cv2.imshow('Output', prediction)  # 2=> R 0=> B  frame => BGR
        end = time.time()
        fps.update()
        print("FrameRate:" + str(1 / (end - start)))
        c = cv2.waitKey(1)
        if c == 27:
            break
        if type(src) == str:  # check if a next frame exists in case of a video file
            check = cap.more()
    fps.stop()
    print("AVg FrameRate:" + str(fps.fps()))
    cap.stop()  # for fast fps read
    #cap.release()                                          # for cv2 read
    cv2.destroyAllWindows()
Example #28
def main(service_socket, image_file_name):

    #read events from stdin and push results on stdout

    # service_socket: in the format tcp://*:port

    context = zmq.Context()
    socket = context.socket(zmq.REP)
    # ZMQ server must be listening on request first
    socket.bind(service_socket)

    print("[INFO] starting video file thread...")
    fvs = FileVideoStream(image_file_name).start()
    time.sleep(1.0)

    while fvs.more():
        frame = fvs.read()
        if frame is None:
            break
        cv2.namedWindow('Video', cv2.WINDOW_NORMAL)
        cv2.imshow('Video', frame)
        cv2.resizeWindow('Video', 800, 600)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        img = np.array(frame)

        out_img = img

        # request from the client side
        request = socket.recv_json(flags=0)

        if request.get('corr_id', False):
            result = {
                'corr_id': request['corr_id'],
                'shape': out_img.shape,
                'dtype': str(out_img.dtype)
            }

            socket.send_json(result, flags=zmq.SNDMORE)
            socket.send(out_img, flags=0, copy=False, track=False)
        else:
            result = {
                'corr_id': request.get('corr_id'),
                'shape': None,
                'dtype': None
            }
            socket.send_json(result)
Example #29
def vplay():
    stream = askopenfilename()
    ld = AdvancedLaneDetectorWithMemory(opts, ipts, src_pts, dst_pts, 10, 50, 25)
    # import the necessary packages

    print("[INFO] starting video file thread...")
    fvs = FileVideoStream(stream).start()
    time.sleep(1.0)

    # start the FPS timer
    fps = FPS().start()

    # loop over frames from the video file stream
    while fvs.more():
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale (while still retaining 3
        # channels)
        frame = fvs.read()

        frame = imutils.resize(frame, width=450)
        frame = ld.process_image(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        #frame = np.dstack([frame])
        frame = np.dstack([frame, frame, frame])

        # show the frame and update the FPS counter
        frame = imutils.resize(frame, width=900)
        cv2.imshow("Frame", frame)
        cv2.waitKey(1)
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    fvs.stop()
Example #30
def num_blinks(video):

    vs = FileVideoStream(video).start()
    fileStream = True
    COUNTER = 0
    TOTAL = 0
    time.sleep(1.0)
    best_frame = None
    max_ear = -1
    while True:

        if fileStream and not vs.more():
            break
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        rects = detector(gray, 0)

        for rect in rects:

            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            ear = (leftEAR + rightEAR) / 2.0
            print(ear)
            if ear > max_ear:
                best_frame = gray
                max_ear = ear

            if ear < EYE_AR_THRESH:
                COUNTER += 1
            else:

                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1

                COUNTER = 0
    vs.stop()
    return TOTAL, best_frame
Example #31
import sys
import cv2
import time
from imutils.video import FileVideoStream

videoFile = sys.argv[1]
outputDir = sys.argv[2]

fvs = FileVideoStream(videoFile).start()
time.sleep(1.0)

frameIndex = 0

while (fvs.more()):
	frame = fvs.read()

	cv2.imshow("Frame", frame)
	fileName = outputDir + "\\frame_" + str(frameIndex) + ".tiff"
	cv2.imwrite(fileName, frame)

	frameIndex += 1

	cv2.waitKey(1)

cv2.destroyAllWindows()
fvs.stop()