Example #1
 def compute_attributes(self):
     input_stream = cv.VideoCapture(self.file_name)
     self.sample_rate = input_stream.get(cv.CAP_PROP_FPS)
     input_stream.release()
     # Use the multi-threaded FileVideoStream by Adrian of https://pyimagesearch.com
     input_stream = FileVideoStream(self.file_name).start()
     directograms = []
     directogram_times = []
     frame = input_stream.read()
     last_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
     self.shape = frame.shape
     while input_stream.more():
         if len(directograms) % 250 == 0:
             print('Video Processing: {:.2f}s'.format(
                 len(directograms) / self.sample_rate))
         frame = input_stream.read()
         if not input_stream.more():
             break
         next_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
         # These parameters were taken from a demo in the documentation
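         # positional args after None: pyr_scale=0.5, levels=3, winsize=15, iterations=3, poly_n=5, poly_sigma=1.2, flags=0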
         flow = cv.calcOpticalFlowFarneback(last_frame, next_frame, None,
                                            0.5, 3, 15, 3, 5, 1.2, 0)
         directograms.append(get_directogram(flow))
         directogram_times.append(len(directograms) / self.sample_rate)
         last_frame = next_frame
     self.envelope_times = np.array(directogram_times)
     self.envelopes = get_impact_envelopes(directograms,
                                           self.envelope_times)
     self.envelope_sample_rate = self.sample_rate
     input_stream.stop()
Example #2
def run_video():
    PATH_VIDEO = 'D:/Binus/data/videos/VID_20190915_123023.mp4'
    PATH_OUT_VIDEO = 'D:/Binus/data/videos/VID_20190915_123023_2.avi'
    fo = FaceOrchestrator()
    fs = FileVideoStream(PATH_VIDEO).start()
    time.sleep(1.0)
    frame = fs.read()
    h, w, __ = frame.shape
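    # write a Motion-JPEG (MJPG) AVI at 24 fps using the source frame size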
    out = cv2.VideoWriter(PATH_OUT_VIDEO,
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 24,
                          (w, h))
    i = 0
    while fs.more():
        i = i + 1
        print('Processing frame ' + str(i))
        try:
            frame = fs.read()
            fo.set_image_to_be_processed(frame)
            # detect faces
            fo.fd_method = 'AI'
            fo.fd_ai_min_score = 0.6
            fo.detect_faces()
            fo.align_faces(target_face_percent=0.28)
            # recognize faces
            # fo.fr_max_delta = 0.6
            # fo.fr_method = 'DELTA'
            fo.fr_min_probability = 0.5
            fo.fr_method = 'ML'
            frame_with_label, names = fo.recognize_faces()
            out.write(frame_with_label)
        except:
            pass
    fs.stop()
    out.release()
    print('Finish.')
Example #3
def read_feed():
    #cap = VideoStream(usePiCamera=True, resolution=(VIDEO_WIDTH, VIDEO_HEIGHT))
    cap = FileVideoStream('sample/fish3.mp4').start()

    time.sleep(2.0)

    frame = cap.read()
    global VIDEO_HEIGHT, VIDEO_WIDTH
    VIDEO_HEIGHT, VIDEO_WIDTH = frame.shape[:2]

    try:
        while True:
            frame = cap.read()
            jpeg_frame = cv2.imencode('.jpg', frame,
                                      [int(cv2.IMWRITE_JPEG_QUALITY), 80])[1]

            if VIDEO_IS_RECORDING:
                VIDEO_WRITER_QUEUE.put(jpeg_frame)

            #time.sleep(0.1)
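            # each JPEG is framed with the '--frame' multipart boundary, the usual pattern for an MJPEG HTTP stream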
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' +
                   jpeg_frame.tobytes() + b'\r\n')
    finally:
        cap.stop()
Example #4
def main(_argv):
    input_layer = tf.keras.layers.Input([FLAGS.size, FLAGS.size, 3])
    feature_maps = YOLOv4(input_layer, NUM_CLASS)
    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, 'data/YOLOv4-obj_1000.weights')

    vid = FileVideoStream(FLAGS.input).start()  # Reading input; start() launches the decode thread
    frame = vid.read()  # FileVideoStream.read() returns just the frame, not a (ret, frame) tuple

    fourcc = cv2.VideoWriter_fourcc('F', 'M', 'P', '4')
    out = cv2.VideoWriter(FLAGS.output, fourcc, 10.0,
                          (frame.shape[1], frame.shape[0]), True)

    plates = []

    n = 0
    Sum = 0
    while True:
        start = time.time()
        n += 1
        frame = vid.read()
        if frame is None:
            continue

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        bboxes = plateDetect(frame, FLAGS.size,
                             model)  # License plate detection
        for i in range(len(bboxes)):
            img = frame[int(bboxes[i][1]):int(bboxes[i][3]),
                        int(bboxes[i][0]):int(bboxes[i][2])]
            prediction_groups = pipeline.recognize(
                [img])  # Text detection and recognition on license plate
            string = ''
            for j in range(len(prediction_groups[0])):
                string = string + prediction_groups[0][j][0].upper()

            if platePattern(string) and string not in plates:
                plates.append(string)

        if len(plates) > 0:
            drawText(frame, plates)

        frame = utils.draw_bbox(
            frame, bboxes)  # Draws bounding box around license plate
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        Sum += time.time() - start
        print('Avg fps:', n / Sum)  # frames processed divided by total elapsed time

        out.write(frame)
        cv2.imshow("result", frame)
        if cv2.waitKey(1) == 27: break
    out.release()
    cv2.destroyAllWindows()
Example #5
    def standard_run(
        self,
        x1: int,
        x2: int,
        y1: int,
        y2: int,
        sub_ch: list,
        channel_len,
    ):

        fps = FPS().start()
        cap = FileVideoStream(self.filename).start()
        count = 0

        start = time.time()
        cycle_start = time.time()
        frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        while cap.more():
            #            print(count)
            frame = cap.read()
            if frame is None:
                break

            if count < 200:
                self.frames_buffer.append(frame)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            frame = frame[y1:y2, x1:x2]
            crop = self.mask.apply(frame)
            crop = cv2.GaussianBlur(crop, (7, 7), 3.0)

            _, crop = cv2.threshold(crop, 150, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)
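            # with THRESH_OTSU set, the 150 threshold value is ignored; Otsu's method chooses it automatically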
            contours, hierarchy = cv2.findContours(crop, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            for i in range(len(contours)):
                avg = np.mean(contours[i], axis=0)
                coord = (int(avg[0][0]), int(avg[0][1]))  # Coord is (x,y)
                ch_pos = int(math.floor((coord[0]) / channel_len))
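                # count this contour toward the channel whose horizontal band contains its centroid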

                try:
                    self.sum_ch1[ch_pos] += float(1)
                except:
                    pass

            count += 1
            fps.update()
        fps.stop()
        cycle_end = time.time()
        self.cycle_count += 1
        end = time.time()
        detect_benchmark = end - start
        print("Number of frames processed: ", count)
        print("Time taken for WBC counting:", detect_benchmark)
        print("[INFO] Each cycle time taken = %0.5fs" %
              ((cycle_end - cycle_start) / count))

        return fps
Example #6
File: fryer.py  Project: paramkpr/DankBot
def fry_gif(update, url, number_of_cycles, args):
    number_of_emojis = 1.5 if args['high-fat'] else 1 if args['low-fat'] else 0
    bulge_probability = 0.3 if args['heavy'] else 0.15 if args['light'] else 0
    magnitude = 4 if args['deep'] else 1 if args['shallow'] else 2

    name = update.message.from_user.first_name
    filename = '%s_%s_%s' % (update.message.chat_id, name,
                             update.message.message_id)
    filepath = bin_path + '/temp/' + filename
    caption = __get_caption(name, number_of_cycles, args)
    output = bin_path + '/temp/out_' + filename + '.mp4'

    gifbio = BytesIO()
    gifbio.name = filename + '.gif'
    fs = [__posterize, __sharpen, __increase_contrast, __colorize]
    shuffle(fs)

    if not __download_gif(url, filepath):
        return

    fvs = FileVideoStream(filepath + '.mp4').start()
    frame = fvs.read()
    height, width, _ = frame.shape

    try:
        fps = fvs.stream.get(CAP_PROP_FPS)  # FileVideoStream has no get(); query the underlying cv2.VideoCapture
    except:
        fps = 30
    out = VideoWriter(output, VideoWriter_fourcc(*'mp4v'), fps,
                      (width, height))
    out.write(
        fry_frame(frame, number_of_cycles, fs, number_of_emojis,
                  bulge_probability, magnitude, args))

    while fvs.more():
        try:
            temp = fry_frame(fvs.read(), number_of_cycles, fs,
                             number_of_emojis, bulge_probability, magnitude,
                             args)
        except Exception as e:
            break
        out.write(temp)

    fvs.stop()
    fvs.stream.release()
    out.release()
    update.message.reply_animation(open(output, 'rb'),
                                   caption=caption,
                                   quote=True)
    try:
        __upload_to_imgur(output, caption)
    except (Exception, BaseException) as e:
        print(e)
    remove(filepath + '.mp4')
Example #7
class ImageRouter(QtCore.QObject):
    source_is_file = False
    on_frame = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, filename=''):
        super().__init__()
        if filename == '':
            self.vs = VideoStream(src=0).start()
            self.source_is_file = False
        else:
            self.vs = FileVideoStream(filename).start()
            self.source_is_file = True

    def __del__(self):
        self.vs.stop()

    def tick(self):
        if self.source_is_file and not self.vs.more():
            return False
        frame = self.vs.read()
        if frame is None:
            return False
        self.on_frame.emit(frame)
        return True

    def run(self):
        print("[INFO] Started ImageRouter")
        while self.tick():
            pass
        print("[INFO] Stopped ImageRouter")
Example #8
def main(video_path):
    video_path = 'sample_video/car_stopping_shorter_version.mp4'  # NOTE: overrides the video_path argument
    fvs = FileVideoStream(video_path).start()
    time.sleep(1.0)
    fps = FPS().start()

    # loop over frames from the video file stream
    input_frames_list = []
    number = 0
    while fvs.more():
        number += 1
        frame = fvs.read()
        if (frame is None):
            continue

        cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        ann_frame = stream_frames_to_model(frame, number)
        cv2.imshow('frame', ann_frame)
        cv2.waitKey(1)
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    # do a bit of cleanup
    cv2.destroyAllWindows()
    fvs.stop()
Example #9
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        # self.video = cv2.VideoCapture(0)
        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py.
        self.video = FileVideoStream("cropvideo.mp4").start()

    def __del__(self):
        self.video.stop()  # FileVideoStream uses stop(), not release()

    def get_frame(self):

        image = self.video.read()
        faceCascade = cv2.CascadeClassifier('haarcascade_profileface.xml')
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
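        # Haar cascade parameters: scaleFactor=1.1, minNeighbors=4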
        faces = faceCascade.detectMultiScale(gray, 1.1, 4)

        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.

        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()
Example #10
def eye_close(image):
    path = "shape_predictor_68_face_landmarks.dat"
    predictor = dlib.shape_predictor(path)
    detector = dlib.get_frontal_face_detector()

    EYE_AR_THRESH = 0.4

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

    vs = FileVideoStream(image).start()
    time.sleep(2.0)
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 0)
    leftEAR = rightEAR = None  # remain None if no face is detected
    for rect in rects:
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
    vs.stop()
    return leftEAR, rightEAR
Example #11
class VideoStreamer(Camera):
    def __init__(self, src=0, use_pi=-1, resolution=480, framerate=30):

        super(VideoStreamer, self).__init__(use_pi=use_pi,
                                            resolution=resolution,
                                            framerate=framerate)

        # VideoStream class is used for live stream.
        # FileVideoStream class is used for streaming from a saved video.
        if isinstance(src, int):
            self.vs = VideoStream(src=src,
                                  usePiCamera=self.use_pi > 0,
                                  resolution=self.resolution,
                                  framerate=self.framerate).start()
        else:
            self.vs = FileVideoStream(path=src)
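            # the stream is not start()-ed here; read() below pulls frames directly from the underlying cv2.VideoCapture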

        # Let camera warm up
        time.sleep(1.0)

    # Returns the frame as a np array
    def read(self):
        if isinstance(self.vs, FileVideoStream):
            return self.vs.stream.read()[1]
        else:
            return self.vs.read()

    # Terminate the capture thread.
    def stop(self):
        self.vs.stop()
Example #12
File: main.py  Project: VerstraeteP/stitch
def process_video(file_name, n=1):
    """
    Iterate over the frames of a video file.
    :param file_name: filename
    :param n: process one out of every n frames (default 1: take all frames)
    :return: (list of frames, original fps of the video)
    """

    fvs = FileVideoStream(file_name).start()  #load video

    cam = cv2.VideoCapture(file_name)
    fps = cam.get(cv2.CAP_PROP_FPS)  #get original fps of video

    counter = 1
    frame_list = []
    teller = 0
    while fvs.running():
        if fvs.more():

            teller += 1
            frame = fvs.read()

            if frame is not None:

                frame_list.append(
                    cv2.resize(
                        frame,
                        (int(frame.shape[1] / 2), int(frame.shape[0] / 2)))
                )  #append frame to list and resize it: height/2, width/2

            counter += 1
        else:
            time.sleep(2)

    return (frame_list, fps)
Example #13
def run_detection(fast_mtcnn, filenames):
    frames = []
    frames_processed = 0
    faces_detected = 0
    batch_size = 60
    start = time.time()

    for filename in tqdm(filenames):

        v_cap = FileVideoStream(filename).start()
        v_len = int(v_cap.stream.get(cv2.CAP_PROP_FRAME_COUNT))

        for j in range(v_len):

            frame = v_cap.read()
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frames.append(frame)

            if len(frames) >= batch_size or j == v_len - 1:

                faces = fast_mtcnn(frames)
                for i in range(len(faces)):
                    file_name = str(j) + str(i) + '.jpg'
                # cv2.imwrite(file_name,faces[i])
                #print(faces)
                frames_processed += len(frames)
                faces_detected += len(faces)
                frames = []

                print(
                    f'Frames per second: {frames_processed / (time.time() - start):.3f},',
                    f'faces detected: {faces_detected}\r',
                    end='')

        v_cap.stop()
Example #14
def main():
    # Construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-m", "--model", required=True,
                    help="path to model file")
    ap.add_argument("-c", "--confidence", type=float, default=0.95,
                    help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # Initialize our centroid tracker and frame dimensions
    ct = CentroidTracker()

    # Load our serialized model from disk
    print("[INFO] Loading model...")
    model = get_model(args["model"])

    vs = FileVideoStream("/media/ave/ae722e82-1476-4374-acd8-a8a18ef8ae7a/Rana_vids/Nova_1/121/03-2018.05.25_06.24.23-17.mpg").start()
    time.sleep(2.0)

    # Loop over the frames from the video stream
    while vs.more():
        # Read the next frame from the video stream
        frame = vs.read()

        analyze_frame(frame, ct, model, args["confidence"])

    # Cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #15
    def read_frames(self):
        # start the file video stream thread and allow the buffer to
        # start to fill
        print("[INFO] starting video file thread...")
        args = self.argument_parser()
        output_path = self.cwd / Path(args.output_dir)
        output_path.mkdir(parents=True, exist_ok=True)
        fvs = FileVideoStream(args.video).start()
        time.sleep(1.0)

        # start the FPS timer
        fps = FPS().start()
        detector = mtcnn.MTCNN()
        fno = 0
        frames = VideoMeta(args.video).fps()
        #print(meta)

        # loop over frames from the video file stream
        while fvs.more():
            fno += 1
            print(fno)
            # grab the frame from the threaded video file stream, resize
            # it, and convert it to grayscale (while still retaining 3
            # channels)
            frame = fvs.read()
            if (fno % frames != 0):
                continue
            try:
                frame = imutils.resize(frame, width=450)
            except:
                break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # frame = np.dstack([frame, frame, frame])

            if args.detect == "haarcascade":
                results = self.face_cascade.detectMultiScale(frame, 1.3, 5)
                if len(results) != 0:

                    self.crop(frame, results[0],
                              str(output_path / Path(str(fno) + ".jpg")))
            else:
                results = detector.detect_faces(frame)

            # display the size of the queue on the frame
            # cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
            #            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

            # show the frame and update the FPS counter
            cv2.imshow("Frame", frame)
            cv2.waitKey(1)
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # do a bit of cleanup
        cv2.destroyAllWindows()
        fvs.stop()
Example #16
def main():
    # Get ROI from frames
    file_name = "/home/smart/WBC286 InvL-Pillars -35mbar 15fps 29-11-2019 v3.4.avi"
    #file_name = "C:/Users/Me/Desktop/capstone/WBC286 InvL-Pillars -350mbar 150fps 29-11-2019 v3.4.avi"
    cap = FileVideoStream(file_name).start()
    image = cap.read()
    print("***** PROCESSING ROI for RUN 1 ***** File: %s" % file_name)
    cap.stop()
    print("***** PROCESSING RUN 1 ***** File: %s" % file_name)
    print("Frame size: ", image.shape)
    Channels = 34
    #original size: [502,1402,3]
    # r = [84, 357, 1238, 130]
    r = [
        int(0.167 * image.shape[0]),
        int(0.25 * image.shape[1]),
        int(0.883 * image.shape[1]), 130
    ]
    x1, x2, y1, y2, sub_ch, channel_len = to_crop(image, r, Channels)

    # run count
    run_standard = standard(file_name, Channels)
    fps = run_standard.standard_run(x1, x2, y1, y2, sub_ch, channel_len)
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    rbc_counts = run_standard.rbc_detection()
    run_standard.process_results(rbc_counts)

    print(
        "----------------------------------------------------------------------"
    )
Example #17
def livevideocorrection(filepath, modelname):
    fvs = FileVideoStream(filepath, queue_size=128).start()
    desired_frames = 250
    frame_number = 0
    last_time = time.monotonic()
    while fvs.more() and frame_number <= desired_frames:
        frame = fvs.read()
        if frame_number % 10 == 0:
            current_time = time.monotonic()
            timedif = current_time - last_time
            FPS = 10 / timedif
            print('FPS:' + str(FPS))
            last_time = time.monotonic()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        prediction = (predict_one(frame,
                                  modelname).squeeze().detach().float().cpu())
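        # the model returns a CHW tensor; permute to HWC so OpenCV can display it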
        prediction = prediction.permute(1, 2, 0).numpy()
        prediction = cv2.cvtColor(prediction, cv2.COLOR_BGR2RGB)

        cv2.putText(prediction, "Queue Size: {}".format(fvs.Q.qsize()),
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.putText(prediction, ('FPS: ' + str(FPS)), (750, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.imshow('preview', prediction)
        frame_number += 1
        cv2.waitKey(1)

    cv2.destroyAllWindows()
    fvs.stop()
Example #18
def extract(input_path, output_folder, rate):
    STR_FRAME_PATH = "{}\\frame{}.jpg"
    video_args = {}
    video_args["video"] = input_path

    fvs = FileVideoStream(video_args["video"], queue_size=300).start()
    time.sleep(1.0)

    framecount = 0
    fps = FPS().start()

    while fvs.running():
        frame = fvs.read()
        img_raw = None

        framecount = framecount + 1

        if framecount % int(rate) == 0:
            print("Extracting frame {}".format(framecount), end="\r")
            cv2.imwrite(STR_FRAME_PATH.format(output_folder, str(framecount)),
                        frame)
        else:
            continue

        fps.update()

    fps.stop()
    fvs.stop()

    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
Example #19
    def scan_file(self, fname):
        vs = FileVideoStream(fname).start()

        while True:

            frame = vs.read()
            if frame is None:  # end of the video file
                break
            # frame = imutils.resize(frame)

            barcodes = pyzbar.decode(frame)

            for code in barcodes:
                (x, y, w, h) = code.rect
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)  # bottom-right corner is (x + w, y + h)

                bar_data = code.data.decode('utf-8')
                bar_type = code.type

                text = f'{bar_data} ({bar_type})'
                cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 0, 255), 2)

            cv2.imshow("Barcode Scanner", frame)
            key = cv2.waitKey(1) & 0xFF

            if key == ord('q'):
                break

        cv2.destroyAllWindows()
        vs.stop()
Example #20
    def UAMS(self):
        video_file = FileVideoStream(self.args["video"]).start()
        time.sleep(2.0)
        fps = FPS().start()

        while video_file.more():
            # Converting each frame to matrix of numbers
            try:
                self.frame = video_file.read()
                gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                gray = np.dstack([gray, gray, gray])

            except Exception as e:
                print(e)

            self.frame, self.faces, conf, detection, startX, y = self.cnn_op.CNNOperation(
                self.frame, self.net, self.args)
            try:
                msg = self.calculation(startX, y)
            except Exception as e:
                print(e)
            try:
                cv2.imshow("frame-this is main frame", self.frame)
            except Exception as e:
                print(e)
            fps.update()
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
        fps.stop()
        print("elapsed time : {:.2f}".format(fps.elapsed()))
        print("FPS  : {:.2f}".format(fps.fps()))
Example #21
def main():
    vid = (Path(__file__).parent / 'test.mp4').absolute()
    stream = FileVideoStream(str(vid), queue_size=100).start()
    time.sleep(1)

    while stream.more():
        img_data = stream.read()
        if img_data is None:
            break
        img = Image(img_data)
        transformed = Transformer(img)
        image = transformed.cannify()
        cropped_img = region(image)
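        # HoughLinesP: rho=2 px, theta=1 degree, accumulator threshold=100; minLineLength/maxLineGap drop short or broken segments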
        lines = cv2.HoughLinesP(cropped_img.raw,
                                2,
                                np.pi / 180,
                                100,
                                np.array([]),
                                minLineLength=40,
                                maxLineGap=5)
        avg_lines = utils.average_slope_intercept(img, lines)
        lines_image = img.display_lines(avg_lines)
        combined = img.combine(lines_image)
        cv2.imshow('', combined.raw)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
    stream.stop()
Example #22
def main(input_video_path, output_video_path, batch_size, async_workers_count):
    frame_id = 0
    video_path = 'sample_video/car_stopping_shorter_version.mp4'  # unused; frames are read from input_video_path below
    fvs = FileVideoStream(input_video_path).start()
    time.sleep(1.0)
    fps = FPS().start()
    in_holder_path, op_holder_path = return_io_frames_holder()
    print(in_holder_path, op_holder_path)
    # loop over frames from the video file stream
    input_frames_list = []
    while fvs.more():
        frame_id += 1
        frame = fvs.read()
        if (frame is None):
            continue

        cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.imwrite(f"{in_holder_path}/{frame_id}.jpeg", frame)

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    # do a bit of cleanup
    cv2.destroyAllWindows()
    fvs.stop()
    colonel_batch(in_holder_path, op_holder_path, batch_size,
                  async_workers_count)
    #op_holder_path = "/home/gr8-adakron/Adakron_bay/work/FastAPI/fastapi_exp/video_processing/outputs/bCe7mEE/"
    write_detected_video(output_video_path, op_holder_path)
Example #23
def livevideocorrection(filepath, modelname):
    fvs = FileVideoStream(filepath, queue_size=128).start()
    # ivs = InferenceDataStream(fvs, queue_size=25).start() # --> inference video stream reads from file video stream and forms queue of prepped images for inference
    desired_frames = 250
    frame_number = 0
    last_time = time.monotonic()
    while fvs.more() and frame_number <= desired_frames:
        frame = fvs.read()  # --> frame = ivs.read()
        if frame_number % 10 == 0:
            current_time = time.monotonic()
            timedif = current_time - last_time
            FPS = 10 / timedif
            print('FPS:' + str(FPS))
            last_time = time.monotonic()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        prediction = (
            predict_one(frame, frame_number).squeeze().detach().cpu()
        )  # --> prediction = predict_one(frame, frame_number) #why? because logic for preparing images is now done asyncronously in ivs.
        prediction = prediction.permute(1, 2, 0).numpy()
        prediction = cv2.cvtColor(prediction, cv2.COLOR_BGR2RGB)

        cv2.putText(prediction, "Queue Size: {}".format(fvs.Q.qsize()),
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.putText(prediction, ('FPS: ' + str(FPS)), (750, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.imshow('preview', prediction)
        frame_number += 1
        cv2.waitKey(1)

    cv2.destroyAllWindows()
    fvs.stop()
Example #24
    def face_verification(self, path):
        """
            Face verification from input file video Stream

            :param path: Location of the video
            :return  name: Recognized person's name
        """

        video_path = path
        result = True
        while (result):
            vs = FileVideoStream(video_path).start()
            frame = vs.read()

            frame = ndimage.rotate(frame, 180)
            frame = ndimage.rotate(frame, 90)
            cap_image = frame
            check = SPOOF_DETECTION().spoof_detection(video_path)
            result = False
            vs.stop()
        cv2.destroyAllWindows()

        if check == True:
            name = obj.face_recognize(cap_image)
            print('Recognized image {}'.format(name))
        else:
            print('SPOOFING ')
Example #25
class VideoSource (object):
    def __init__ (self, video_file, log, use_pi_camera = True, resolution=(320, 200), framerate = 30, night = False):
        self.filename = video_file
        self.log = log
        self.use_pi_camera  = use_pi_camera
        self.resolution = resolution
        self.framerate = framerate
        self.night = night
        self.fvs = None
        self.stream = None
        self._done = False

    def start (self):
        if self.filename is not None:
            self.log.debug('Video file: %s', self.filename)
            self.fvs = FileVideoStream(self.filename).start()
            self.stream = self.fvs.stream
            time.sleep(1.0)
        else: 
            if self.use_pi_camera:
                self.log.debug('Pi Camera (%d %d)', self.resolution[0], self.resolution[1])
                self.stream = VideoStream(src=0,
                    usePiCamera=True,                    
                    resolution=self.resolution,
                    framerate=self.framerate,
                    sensor_mode=5
                    ).start()

            else:
                self.log.debug('Web Camera')
                self.stream = cv2.VideoCapture(0)
                self.stream.set(cv2.CAP_PROP_BUFFERSIZE, 2)
                self.resolution = (
                    self.stream.get(cv2.CAP_PROP_FRAME_WIDTH),
                    self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)
                )
                self.framerate = self.stream.get(cv2.CAP_PROP_FPS)

        return self.resolution, self.framerate

    def read(self):
        if self.filename:
            frame = self.fvs.read()
        else:
            frame = self.stream.read()
        return frame

    def stop (self):
        if self.filename:
            self.fvs.stop()
        else:
            self.stream.stop()
        self._done = True

    def done(self):
        # check to see if video is still running
        running = self.fvs.running() if self.filename else True
        self._done = not running
        return self._done
Example #26
def detect_object(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    total = 0
    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        #       time.sleep(0.05)

        if frame is None:
            print(
                "INFO: unable to connect to a webcam, using file stream instead"
            )
            vs = FileVideoStream("static/test_video.mp4").start()
            frame = vs.read()
            continue
        frame = imutils.resize(frame, width=400)

        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]
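        # scale factor 0.007843 (~1/127.5) with the 127.5 mean maps pixel values to roughly [-1, 1]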
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        # pass the blob through the network and obtain the detections and predictions
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence > args["confidence"]:
                # extract the index of the class label from the
                # `detections`, then compute the (x, y)-coordinates of
                # the bounding box for the object
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # draw the prediction on the frame
                label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

        # acquire the lock, set the output frame, and release the lock
        with lock:
            outputFrame = frame.copy()
Example #27
    def frames():
        camera = FileVideoStream("video.mp4").start()  # placeholder path; the original omitted the required path and never started the stream
        time.sleep(1.0)

        # loop over frames from the video file stream
        while camera.more():
            frame = camera.read()
            yield frame
Example #28
def showVideo(app):
    fvs = FileVideoStream(app.vidDir).start()
    time.sleep(1.0)
    while fvs.more():
        frame = fvs.read()
        frame = cv2.resize(frame, (720, 480))
        cv2.imshow("Frame", frame)
        cv2.waitKey(1)
Example #29
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
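    # accumWeight is the weighting factor for the running-average background model (larger values adapt faster)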
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        time.sleep(0.05)
        if frame is None:
            print(
                "INFO: unable to connect to a webcam, using file stream instead"
            )
            vs = FileVideoStream("video/test_video.mp4").start()
            frame = vs.read()
            continue
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255),
                              2)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
Example #30
class CustomStream:
    def __init__(self, src=0, use_cv2=False):
        if use_cv2:
            self.obj = cv2.VideoCapture(src)
        elif src == 0:
            self.obj = WebcamVideoStream(src)
        elif src != 0:
            self.obj = FileVideoStream(src)

    def isOpened(self):
        if isinstance(self.obj, cv2.VideoCapture):
            return self.obj.isOpened()
        return self.obj.stream.isOpened()

    def start(self):
        self.obj.start()
        return self

    def update(self):
        self.obj.update()

    def read(self):
        if isinstance(self.obj, cv2.VideoCapture):
            return self.obj.read()
        return not self.obj.stopped, self.obj.read()

    def stop(self):
        self.obj.stop()
        if isinstance(self.obj, cv2.VideoCapture):
            self.obj.release()
        else:
            self.obj.stream.release()

    def set(self, propId, value):
        if isinstance(self.obj, cv2.VideoCapture):
            self.obj.set(propId, value)
            return
        self.obj.stream.set(propId, value)

    def get(self, propId):
        if isinstance(self.obj, cv2.VideoCapture):
            return self.obj.get(propId)
        return self.obj.stream.get(propId)
Example #31
# start the file video stream thread and allow the buffer to
# start to fill
print("[INFO] starting video file thread...")
fvs = FileVideoStream(args["video"], transform=filterFrame).start()
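# transform=filterFrame runs the filter in the reader thread, so frames come off the queue already processed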
time.sleep(1.0)

# start the FPS timer
fps = FPS().start()

# loop over frames from the video file stream
while fvs.running():
	# grab the frame from the threaded video file stream, resize
	# it, and convert it to grayscale (while still retaining 3
	# channels)
	frame = fvs.read()

	# Relocated filtering into producer thread with transform=filterFrame
	#  Python 2.7: FPS 92.11 -> 131.36
	#  Python 3.7: FPS 41.44 -> 50.11
	#frame = filterFrame(frame)

	# display the size of the queue on the frame
	cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
		(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)	

	# show the frame and update the FPS counter
	cv2.imshow("Frame", frame)

	cv2.waitKey(1)
	if fvs.Q.qsize() < 2:  # If we are low on frames, give time to producer