Example #1

def __init__(self, filename='videos/capture-0001.mp4'):
    self.filename = filename
    self.video_reader = VideoReader(filename)
    self.first_frame = self.video_reader.read()
    self.chosen_nematode_pos = []
    self.nematode_count = 0
    self.chosen_nematode_tot_distance = []
    self.colors = np.random.uniform(0, 255, (100, 3))  # one random draw color per tracked object
    self.threshold = 70

    # Cap the preview at 720p; the ratio is clamped to 1 so frames are never upscaled.
    self.max_display_resolution = (1280, 720)
    self.display_resize_ratio = min(
        self.max_display_resolution[0] / self.video_reader.width,
        self.max_display_resolution[1] / self.video_reader.height,
        1,
    )
    self.target_display_shape = (
        int(self.video_reader.width * self.display_resize_ratio),
        int(self.video_reader.height * self.display_resize_ratio),
    )

    # Blob-area bounds given relative to a 1253376-pixel reference frame;
    # ppa and elements_resize_ratio scale area and length quantities to the
    # actual video resolution.
    self.min_area = 5
    self.max_area = 20
    self.ppa = (self.video_reader.height *
                self.video_reader.width) / 1253376
    self.elements_resize_ratio = np.sqrt(
        (self.video_reader.height * self.video_reader.width) / 1253376)

    self.data = []
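A quick standalone check of the resize arithmetic above (the 1920x1080 source size is just an assumed example, not from the original code):

width, height = 1920, 1080
ratio = min(1280 / width, 720 / height, 1)          # -> 2/3, so the frame shrinks
target = (int(width * ratio), int(height * ratio))  # -> (1280, 720)
print(ratio, target)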
Example #2

def __init__(
    self,
    filename=r'D:\Projects\model_organism_helper\Nematoda\capture-0001.avi',
    resize_ratio=1.0,
    frame_step=1,
    movement_threshold=4,
    max_nematoda_count=100,
    kernel_size=None,
    display_scale=1.0,
):
    self.frame_step = frame_step
    self.resize_ratio = resize_ratio
    self.video_reader = VideoReader(filename, resize_ratio, frame_step)
    self.background_subtractor = None
    self.movement_threshold = movement_threshold
    self.kernel_size = kernel_size
    if self.kernel_size is None:
        # Default to ~1/32 of the shorter frame side, forced to an odd value
        # (rounding down), since OpenCV blur/morphology kernels are odd-sized.
        self.kernel_size = int(min(self.video_reader.target_shape) / 32)
        self.kernel_size = int(2 * (int((self.kernel_size - 1) / 2))) + 1

    self.max_nematoda_count = max_nematoda_count
    self.initialize_background_subtractor()
    # Clamp the scale so the shorter display side stays at or below 400 px.
    display_scale = min(display_scale,
                        400 / np.min(self.video_reader.target_shape))
    self.display_size_target = (
        int(self.video_reader.target_shape[0] * display_scale),
        int(self.video_reader.target_shape[1] * display_scale),
    )
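The odd-size rounding can be checked in isolation (the frame sizes below are arbitrary examples):

for shorter_side in (480, 512, 1080):
    k = int(shorter_side / 32)
    k = int(2 * (int((k - 1) / 2))) + 1
    print(shorter_side, k)  # 480 -> 15, 512 -> 15, 1080 -> 33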
Example #3

def __iter__(self):
    while True:
        # Drop the previous batch of URLs from the queue.
        self.data_manager.remove(self.batch_size)
        # Block until at least batch_size downloaded files are on disk.
        while len(self.data_manager.url_queue) < self.batch_size:
            time.sleep(1)
        # Wrap the newest batch_size clips in a fresh video reader.
        urls = self.data_manager.url_queue[-self.batch_size:]
        urls = [
            os.path.join(self.data_save_dir, url + '.mp4') for url in urls
        ]
        vid_reader = VideoReader(urls, self.transform, self.resize)
        yield vid_reader
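Hypothetical driver for the generator above (loader and process() are placeholders, not part of the original class):

for vid_reader in loader:      # each iteration yields a reader over a new batch of clips
    for frame in vid_reader:
        process(frame)         # stand-in for whatever consumes the frames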
Example #4
topic = args.topic
freq = args.fps
src_dir = args.src

rospy.init_node('slam_reader', anonymous=True)  # init the node before creating publishers
bridge = CvBridge()
publisher = rospy.Publisher(topic, Image, queue_size=10)

image_id = args.start
prefix_full = args.src + args.prefix
ext = args.ext

reader = None
if args.filetype == 'video':
    reader = VideoReader(src_dir)
else:
    reader = ImageReader(src_dir, args.prefix, image_id, ext)

master_publisher = None
if args.type == 'master':
    master_publisher = rospy.Publisher('/slam_reader/master', std_msgs.msg.Bool, queue_size=10)
    raw_input("Waiting")  # Python 2: block until the operator presses Enter
else:
    slave_subscriber = rospy.Subscriber('/slam_reader/master', std_msgs.msg.Bool, slave_cb)
    rospy.spin()

expected_delay = 1.0 / args.fps
prev_time = time.time()
while not rospy.is_shutdown():
    # Signal all slaves that they should publish their image.
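    # The snippet is truncated here; below is a plausible continuation based on
    # the setup above. reader.read() is an assumed API, not from the original.
    if master_publisher is not None:
        master_publisher.publish(std_msgs.msg.Bool(True))
    frame = reader.read()
    publisher.publish(bridge.cv2_to_imgmsg(frame, encoding='bgr8'))
    # Throttle to roughly args.fps.
    elapsed = time.time() - prev_time
    if elapsed < expected_delay:
        time.sleep(expected_delay - elapsed)
    prev_time = time.time()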
Example #5
def __init__(self, video_path, predictor, decimation=None):
    self.video = VideoReader(video_path, decimation)
    self.predictor = predictor
Example #6
def main():
    # Banner (the CP_* constants are color/formatting codes defined elsewhere).
    print('{}{:=<50}{}'.format(CP_Y, '', CP_C))
    print('{}**{}{:^46}{}**{}'.
          format(CP_Y, CP_R, 'Game Information Collector', CP_Y, CP_C))
    print('{}**{}{:^46}{}**{}'.
          format(CP_Y, CP_R, 'By: Abhishek Chaurasia', CP_Y, CP_C))
    print('{}{:=<50}{}'.format(CP_Y, '', CP_C))
    # Grab frames from screen or video
    # Replace it with any other frame grabber
    frame_grabber = VideoReader(args.video_path)

    # Initialization
    ocr = OCR(args.model, args.debug)
    items = {}
    n_items = 0

    # Load the ROI definitions; the first two lines of the file are headers.
    with open(args.key_info, 'r') as keyvalues:
        keyvalues.readline()
        keyvalues.readline()

        for line in keyvalues:
            item = line.split()
            # parsed info:    keyword | tx     | ty     | bx     | by
            items[n_items] = (item[0], item[2], item[4], item[6], item[8])
            n_items += 1

    ########################################
    # Ignore this section:
    # Important only when you care about printed values
    print('{:=<50}'.format(''))
    pad = (50//n_items) - 2
    for n_item in items:
        print_val(items[n_item][0], pad, n_item, len(items))
    print('\n{:-<50}'.format(''))
    ########################################

    # Get next frame
    while frame_grabber.next_frame():
        current_frame = frame_grabber.frame
        # Crop section of the frame containing value you are interested in
        for n_item in items:
            tx = int(items[n_item][1])
            ty = int(items[n_item][2])
            bx = int(items[n_item][3])
            by = int(items[n_item][4])
            key_part = current_frame[ty:by, tx:bx, :]

            # send the cropped area and get its value
            value = ocr.forward(key_part)

            # Label each individual ROI with its OCR value
            # (the bounding-box rectangle is left commented out).
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(current_frame, str(value),
                        (tx - 10, ty - 10), font, 1, (255, 255, 255), 1)
            # cv2.rectangle(current_frame, (tx, ty), (bx, by), (0, 255, 0), 1)
            print_val(value, pad, n_item, len(items))
        print("")

        if not args.debug:
            cv2.startWindowThread()
            cv2.namedWindow("Video")
        cv2.imshow('Video', current_frame)
        cv2.waitKey(1)  # give HighGUI a chance to render the frame
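The split-and-index parsing in main() implies a key_info layout along these lines (the first two lines are skipped; the exact contents are an assumption for illustration):

# key_info: ROI definitions (this header and the next line are skipped)
# keyword | tx | ty | bx | by
score | 10 | 20 | 110 | 60
ammo  | 10 | 80 | 110 | 120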
Example #7
def __init__(self):
    Detector.__init__(self)
    self.videoReader = VideoReader()
    self.detectedObjects = []
Example #8
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from video_reader import VideoReader
from frame_enhancer import LowLightEnhance
from face_detector import FaceDetector
from face_tagger import CentroidTracker
from video_writer import VideoWriter

video_reader = VideoReader()
low_light_enhancer = LowLightEnhance('snapshots/Epoch99.pth', 0)  # checkpoint and, presumably, device index
face_detector = FaceDetector(gpu_id=0)
face_tagger = CentroidTracker(maxDisappeared=25)  # drop a track after 25 missed frames

video_reader.setVideoPath(r'videos/video2.mp4')
video_reader.setFrameSavePath(r'savedframes')


def main():
    ret = True
    frame_dim = video_reader.getVideoDimension()
    video_writer = VideoWriter('abcd', frame_dim)
    while ret:
        ret, frame, frame_no = video_reader.getFrame()
        if not ret:
            break  # the reader is exhausted; stop before touching an empty frame
        rects = []
        # frame = low_light_enhancer.enhance(frame)

        faces = face_detector.detect(frame)
        frame, rects = face_detector.draw_boundary_box(frame)

        objects, maxAppeared = face_tagger.update(rects)
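        # The snippet is truncated here; a plausible continuation would draw the
        # tracker IDs and write the frame out. video_writer.write() is an
        # assumption about the custom VideoWriter class.
        for object_id, centroid in objects.items():
            cv2.putText(frame, 'ID {}'.format(object_id),
                        (int(centroid[0]), int(centroid[1])),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
        video_writer.write(frame)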
Example #9
import os
from video_reader import VideoReader
from video_producer import VideoProducer
import cProfile

# Capture settings; int() raises immediately if any of these variables is unset.
CAMERA_ID = os.getenv('CAMERA_ID')
FRAME_WIDTH = int(os.getenv('FRAME_WIDTH'))
FRAME_HEIGHT = int(os.getenv('FRAME_HEIGHT'))
FRAME_RATE = int(os.getenv('FRAME_RATE'))
FRAME_BUFFER_SIZE = int(os.getenv('FRAME_BUFFER_SIZE'))

TOPIC = os.getenv('KAFKA_TOPIC')
BOOTSTRAP_SERVERS = os.getenv('KAFKA_BOOTSTRAP_SERVERS')
CLIENT_ID = os.getenv('KAFKA_CLIENT_ID')

if __name__ == '__main__':

    reader = VideoReader(device_id=CAMERA_ID,
                         frame_size=(FRAME_WIDTH, FRAME_HEIGHT),
                         frame_rate=FRAME_RATE,
                         buffer_size=FRAME_BUFFER_SIZE)
    producer = VideoProducer(topic=TOPIC,
                             bootstrap_servers=BOOTSTRAP_SERVERS,
                             client_id=CLIENT_ID,
                             video_reader=reader)

    # Profile the capture/publish loop; stats are printed when produce() returns.
    cProfile.run("producer.produce()")
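cProfile.run can also sort the printed stats, for example by cumulative time:

cProfile.run("producer.produce()", sort="cumulative")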