Example #3
class Movie(object):
    def __init__(self, mov_path):
        self.mov_path = mov_path
        self.vr = VR(self.mov_path)
        self.frame_means = self.pull_frame_means()
    def pull_frame_means(self):
        means = []
        frame = self.vr.read()
        fi = 1
        while frame is not None:
            means.append(np.mean(frame))
            frame = self.vr.read()
            fi += 1
            print(fi)
            sys.stdout.flush()
            if fi > 2000:
                break
        return np.array(means)
    def get_trials(self, iti_min_s=1., iti_max_s=10., darkness_thresh=60):
        #currently assumes first trial starts, without darkness, at time 0
        iti_min_fr = iti_min_s * self.vr.frame_rate
        iti_max_fr = iti_max_s * self.vr.frame_rate
        is_dark = self.frame_means < darkness_thresh
        is_flip = is_dark[:-1].astype(int)+is_dark[1:].astype(int)
        dark_edges = np.argwhere(is_flip==1) + 1
        itis = []
        added = False
        for e1,e2 in zip(dark_edges[:-1],dark_edges[1:]):
            if added:
                added = False
                continue
            if e2-e1 > iti_max_fr:
                continue
            if e2-e1 < iti_min_fr:
                continue
            itis.append([e1,e2])
            added = True
        itis = np.array(itis) 
        trials = np.array([np.append(0,itis[:,1]), np.append(itis[:,0],-1)]).T
        return trials
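
A minimal usage sketch for the class above, assuming `VR` is the `VideoReader` alias this project imports and `session.mp4` is a hypothetical recording:

movie = Movie('session.mp4')  # hypothetical path
trials = movie.get_trials(iti_min_s=1., iti_max_s=10., darkness_thresh=60)
print('%d trials; each row is [start_frame, end_frame]' % len(trials))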
Example #4
class VideoDetector(Detector):
    def __init__(self):
        Detector.__init__(self)
        self.videoReader = VideoReader()
        self.detectedObjects = []

    def detect(self, frame):
        frame_path = "frame.png"
        cv2.imwrite(frame_path, frame)
        self.detectedObjects = Detector.detect(self, frame_path)
        os.remove(frame_path)
        return self.detectedObjects

    def start(self):
        print("start capture ...")
        while True:
            # Read frame
            ret, frame = self.videoReader.read()

            # Detect objects in frame
            detect = threading.Thread(target=VideoDetector.detect,
                                      args=(self, frame))
            detect.start()

            # read and show frames without detecting while detection is being performed
            while detect.is_alive():
                ret, frame = self.videoReader.read()

                # Draw bounding boxes in frame
                Detector.draw_bboxes(self.detectedObjects, frame)

                # Show frame
                cv2.imshow('Detection', frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    # Close device
                    self.videoReader.release()
                    cv2.destroyAllWindows()
                    return
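
A usage sketch, assuming the `Detector` base class loads its model on construction and `VideoReader()` opens the default capture device:

detector = VideoDetector()
detector.start()  # runs until 'q' is pressed in the display window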
Example #5
 def __iter__(self):
     while True:
         self.data_manager.remove(
             self.batch_size)  # remove the previous batch of URLs
         # confirm that we have at least batch_size files saved to disk
         while len(self.data_manager.url_queue) < self.batch_size:
             time.sleep(1)
         # retrieve the next batch_size URLs and create a video reader
         urls = self.data_manager.url_queue[-self.batch_size:]
         urls = [
             os.path.join(self.data_save_dir, url + '.mp4') for url in urls
         ]
         vid_reader = VideoReader(urls, self.transform, self.resize)
         yield vid_reader
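
A usage sketch, assuming this `__iter__` lives on a dataset-style class (hypothetically named `StreamingVideoDataset` here) whose `data_manager` tracks clips downloaded to `data_save_dir`:

dataset = StreamingVideoDataset()  # hypothetical owner of the __iter__ above
for vid_reader in dataset:
    handle_batch(vid_reader)       # hypothetical downstream consumer
    break                          # the generator never stops on its own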
Example #6
def main(args):
    ray.init()
    frame_number = 1

    video_reader = VideoReader.remote(args.video_path)
    rgb_converter = RGBConverter.remote()

    width, height, fps, frame_count = ray.get(
        video_reader.get_video_details.remote())

    print('Video Details:')
    print('*************************')
    print('Width: {}'.format(width))
    print('Height: {}'.format(height))
    print('FPS: {}'.format(fps))
    print('Frame Count: {}'.format(frame_count))
    print('*************************')

    video_reader.start.remote()
    rgb_converter.start.remote()

    time.sleep(0.5)

    while ray.get(video_reader.more.remote()):

        print('Started processing frame number {}'.format(frame_number))

        edisn_frame = ray.get(video_reader.read.remote())
        ray.get(rgb_converter.process_frame.remote(edisn_frame))
        del edisn_frame

        frame_number += 1
        time.sleep(0.001)
    print("calling destroy from RGBConverter")
    flag = ray.get(rgb_converter.destroy.remote())
    if flag:
        print("Destroy Process Complete... Ray Shutdown")
    ray.shutdown()
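
The `.remote()` calls above require `VideoReader` and `RGBConverter` to be declared as Ray actors. A minimal sketch of that shape (method bodies are hypothetical placeholders):

@ray.remote
class RGBConverter:
    def start(self):
        pass  # hypothetical: set up any per-actor state

    def process_frame(self, frame):
        return frame[..., ::-1]  # hypothetical BGR -> RGB channel flip

    def destroy(self):
        return True  # signal completion, as main() above expects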
Example #7
topic = args.topic
freq = args.fps
src_dir = args.src

bridge = CvBridge()
publisher = rospy.Publisher(topic, Image, queue_size=10)
rospy.init_node('slam_reader', anonymous=True)

image_id = args.start
prefix_full = args.src + args.prefix
ext = args.ext

reader = None
if args.filetype == 'video':
    reader = VideoReader(src_dir)
else:
    reader = ImageReader(src_dir, args.prefix, image_id, ext)

master_publisher = None
if args.type == 'master':
    master_publisher = rospy.Publisher('/slam_reader/master', std_msgs.msg.Bool, queue_size=10)
    input("Waiting")  # pause until the operator confirms
else:
    slave_subscriber = rospy.Subscriber('/slam_reader/master', std_msgs.msg.Bool, slave_cb)
    rospy.spin()

expected_delay = 1.0 / args.fps
prev_time = time.time()
while not rospy.is_shutdown():
    #Send signal to all slaves that they should send the image
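    # The original snippet is cut off here. A plausible loop body, assuming
    # reader.read() returns a BGR ndarray (or None at end of stream):
    if master_publisher is not None:
        master_publisher.publish(std_msgs.msg.Bool(True))
    frame = reader.read()
    if frame is None:
        break
    publisher.publish(bridge.cv2_to_imgmsg(frame, encoding='bgr8'))
    # throttle publishing to roughly args.fps
    time.sleep(max(0.0, expected_delay - (time.time() - prev_time)))
    prev_time = time.time()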
Example #9
class PersonDetector:

    PERSON_CID = 0

    def __init__(self, video_path, predictor, decimation=None):
        self.video = VideoReader(video_path, decimation)
        self.predictor = predictor

    def process_next_frame(self):
        retval = self.video.get_datetime_frame()
        if retval is not None:
            date, frame = retval
            outputs = self.predictor(frame)
            cid_mask = outputs['instances'].pred_classes == self.PERSON_CID
            cid_num = cid_mask.sum().item()  # total number of detections
            if cid_num:
                # copying required to detach numpy array from underlying Tensor's storage
                boxes = outputs['instances'].pred_boxes[cid_mask].tensor.cpu(
                ).numpy()
                scores = np.copy(outputs['instances'].scores[cid_mask].cpu().
                                 numpy().reshape(cid_num, 1))
                masks = outputs['instances'].pred_masks[cid_mask].cpu().numpy()
                person_masks = [
                    np.copy(PersonDetector.extract_person_mask(m, b))
                    for m, b in zip(masks, boxes)
                ]  # diff. sizes
                person_images = [
                    np.copy(ndarr)
                    for ndarr in PersonDetector.extract_person_images(
                        frame, boxes)
                ]
                boxes = [np.copy(ndarr) for ndarr in boxes]
                return date, frame, (person_masks, boxes, person_images,
                                     scores)
            else:
                return date, frame, None  # No detections
        else:
            return None  # No more frames

    @staticmethod
    def int_lims_from_box(box, frame_shape):
        start_x, start_y, end_x, end_y = box.astype(
            "int")  # truncate and convert to integers
        start_x, end_x = np.clip([start_x, end_x], 0, frame_shape[1])
        start_y, end_y = np.clip([start_y, end_y], 0, frame_shape[0])
        return start_x, start_y, end_x, end_y

    @staticmethod
    def extract_person_images(frame, boxes):
        person_images = []
        for box in boxes:
            start_x, start_y, end_x, end_y = PersonDetector.int_lims_from_box(
                box, frame.shape)
            image = frame[start_y:end_y, start_x:end_x]
            person_images.append(image)
        return person_images

    @staticmethod
    def extract_person_mask(mask, box):
        start_x, start_y, end_x, end_y = PersonDetector.int_lims_from_box(
            box, mask.shape)
        return mask[start_y:end_y, start_x:end_x]
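
A usage sketch, assuming `predictor` is a detectron2-style predictor built elsewhere (the `outputs['instances']` fields above match that API) and a hypothetical video path:

detector = PersonDetector('walkway.mp4', predictor, decimation=5)
result = detector.process_next_frame()
while result is not None:
    date, frame, detections = result
    if detections is not None:
        person_masks, boxes, person_images, scores = detections
    result = detector.process_next_frame()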
Example #11
def main():
    print('{}{:=<50}{}'.format(CP_Y, '', CP_C))
    print('{}**{}{:^46}{}**{}'.
            format(CP_Y, CP_R, 'Game Information Collector', CP_Y, CP_C))
    print('{}**{}{:^46}{}**{}'.
            format(CP_Y, CP_R, 'By: Abhishek Chaurasia', CP_Y, CP_C))
    print('{}{:=<50}{}'.format(CP_Y, '', CP_C))
    # Grab frames from screen or video
    # Replace it with any other frame grabber
    frame_grabber = VideoReader(args.video_path)

    # Initialization
    ocr = OCR(args.model, args.debug)
    items = {}
    n_items = 0

    keyvalues = open(args.key_info, 'r')
    # Ignore first two lines
    keyvalues.readline()
    keyvalues.readline()

    for line in keyvalues:
        item = line.split()
        # parsed info:    keyword | tx     | ty     | bx     | by
        items[n_items] = (item[0], item[2], item[4], item[6], item[8])
        n_items += 1

    ########################################
    # Ignore this section:
    # Important only when you care about printed values
    print('{:=<50}'.format(''))
    pad = (50//n_items) - 2
    for n_item in items:
        print_val(items[n_item][0], pad, n_item, len(items))
    print('\n{:-<50}'.format(''))
    ########################################

    # Get next frame
    while frame_grabber.next_frame():
        current_frame = frame_grabber.frame
        # Crop section of the frame containing value you are interested in
        for n_item in items:
            tx = int(items[n_item][1])
            ty = int(items[n_item][2])
            bx = int(items[n_item][3])
            by = int(items[n_item][4])
            key_part = current_frame[ty:by, tx:bx, :]

            # send the cropped area and get its value
            value = ocr.forward(key_part)

            # Create box around individual ROIs
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(current_frame, str(value),
                    (tx-10,ty-10), font, 1, (255,255,255), 1)
            # cv2.rectangle(current_frame, (tx, ty), (bx, by), (0, 255, 0), 1)
            print_val(value, pad, n_item, len(items))
        print("")

        if not args.debug:
            cv2.startWindowThread()
            cv2.namedWindow("Video")
        cv2.imshow('Video', current_frame)
        if args.debug:
            cv2.waitKey(1)
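
Since `items[n_items]` keeps tokens 0, 2, 4, 6 and 8 of each line, the key file presumably interleaves labels with pixel values. A hypothetical `key_info` file consistent with that parsing (the first two lines are skipped by the `readline()` calls above):

list of HUD regions (this line is ignored)
keyword tx ty bx by (this line is ignored)
score  tx 120 ty 40 bx 260 by 80
health tx 120 ty 90 bx 260 by 130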
Example #12
class NematodeTracker:
    def __init__(self, filename='videos/capture-0001.mp4'):
        self.filename = filename
        self.video_reader = VideoReader(filename)
        self.first_frame = self.video_reader.read()
        self.chosen_nematode_pos = []
        self.nematode_count = 0
        self.chosen_nematode_tot_distance = []
        self.colors = np.random.uniform(0, 255, (100, 3))
        self.threshold = 70

        self.max_display_resolution = (1280, 720)
        self.display_resize_ratio = min(
            self.max_display_resolution[0] / self.video_reader.width,
            self.max_display_resolution[1] / self.video_reader.height,
            1,
        )
        self.target_display_shape = (
            int(self.video_reader.width * self.display_resize_ratio),
            int(self.video_reader.height * self.display_resize_ratio),
        )

        self.min_area = 5  # relative area
        self.max_area = 20
        self.ppa = (self.video_reader.height *
                    self.video_reader.width) / 1253376
        self.elements_resize_ratio = np.sqrt(
            (self.video_reader.height * self.video_reader.width) / 1253376)

        self.data = []

    @staticmethod
    def on_mouse(event, x, y, _, param):
        choice, display_resize_ratio = param
        if event == cv2.EVENT_LBUTTONUP:  # left-click: add a selection
            choice.append(
                (int(x / display_resize_ratio), int(y / display_resize_ratio)))
        elif event == cv2.EVENT_RBUTTONUP:  # right-click: drop the nearest selection
            choice_idx = np.argmin(
                list(
                    map(
                        lambda p: (p[0][0] - p[1][0])**2 +
                        (p[0][1] - p[1][1])**2,
                        [(c, (int(x / display_resize_ratio),
                              int(y / display_resize_ratio)))
                         for c in choice])))
            choice.pop(choice_idx)

    @staticmethod
    def l2distance(param):
        pos1, pos2 = param
        return np.sqrt((pos1[0] - pos2[0])**2 + (pos1[1] - pos2[1])**2)

    @staticmethod
    def l2distance2(param):
        pos1, pos2 = param
        return (pos1[0] - pos2[0])**2 + (pos1[1] - pos2[1])**2

    @staticmethod
    def get_eccentricity(rect):
        d1 = rect[1][0]
        d2 = rect[1][1]
        dmax = max(d1, d2)
        dmin = min(d1, d2)
        return np.sqrt(dmax**2 - dmin**2) / dmax

    def find_nematode(self, frame):
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, gray_frame = cv2.threshold(gray_frame, self.threshold, 255,
                                        cv2.THRESH_BINARY_INV)
        display_frame = frame.copy()
        centers = []
        eccentricities = []
        # [-2:] keeps this working on both the OpenCV 3.x and 4.x APIs
        contours, _ = cv2.findContours(gray_frame, cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]
        if contours:
            for idx, contour in enumerate(contours):
                if self.min_area * self.ppa < cv2.contourArea(
                        contour) < self.max_area * self.ppa:
                    cv2.drawContours(display_frame, contours, idx, (0, 0, 255),
                                     int(max(1, self.elements_resize_ratio)))
                    m = cv2.moments(contour)
                    cx = int(m['m10'] / m['m00'])
                    cy = int(m['m01'] / m['m00'])
                    cv2.circle(display_frame, (cx, cy), 2, (0, 255, 0), -1)
                    centers.append((cx, cy))
                    ellipse = cv2.fitEllipseDirect(contour)
                    display_frame = cv2.ellipse(display_frame, ellipse,
                                                (0, 255, 0), 2)
                    eccentricities.append(self.get_eccentricity(ellipse))

        return display_frame, centers, eccentricities

    def init_threshold(self):
        cv2.namedWindow('tracker')

        # Create Track bar
        frame = self.first_frame
        cv2.imshow('tracker', cv2.resize(frame, self.target_display_shape))
        cv2.createTrackbar('threshold', 'tracker', self.threshold, 255,
                           nothing)

        while True:
            self.threshold = cv2.getTrackbarPos('threshold', 'tracker')
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            ret, gray_frame = cv2.threshold(gray_frame, self.threshold, 255,
                                            cv2.THRESH_BINARY_INV)
            display_frame = frame.copy()

            contours, _ = cv2.findContours(gray_frame, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_SIMPLE)[-2:]
            if contours:
                cv2.drawContours(display_frame, contours, -1, (0, 0, 255),
                                 int(max(1, self.elements_resize_ratio)))

            display_frame = cv2.putText(display_frame,
                                        'Press Enter To Continue...', (50, 50),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (255, 0, 0), 2)
            cv2.imshow(
                'tracker',
                cv2.resize(display_frame,
                           self.target_display_shape,
                           interpolation=cv2.INTER_AREA))
            k = cv2.waitKey(30) & 0xff
            if k in [27, 13, 32]:
                cv2.destroyAllWindows()
                break

    def choose_nematode(self):
        cv2.namedWindow('tracker')
        cv2.setMouseCallback(
            'tracker', self.on_mouse,
            (self.chosen_nematode_pos, self.display_resize_ratio))
        cv2.createTrackbar('minArea', 'tracker', self.min_area, 100, nothing)
        cv2.createTrackbar('maxArea', 'tracker', self.max_area, 100, nothing)
        frame = self.first_frame
        while True:
            self.min_area = max(5, cv2.getTrackbarPos('minArea', 'tracker')**2)
            self.max_area = max(5, cv2.getTrackbarPos('maxArea', 'tracker')**2)
            display_frame, centers, eccentricities = self.find_nematode(frame)
            if centers:
                for chosen_nematode_pos_idx in range(
                        len(self.chosen_nematode_pos)):
                    center_idx = int(
                        np.argmin(
                            list(
                                map(
                                    self.l2distance2,
                                    [(self.chosen_nematode_pos[
                                        chosen_nematode_pos_idx], center)
                                     for center in centers],
                                ))))
                    self.chosen_nematode_pos[
                        chosen_nematode_pos_idx] = centers[center_idx]
                    cv2.circle(
                        display_frame,
                        self.chosen_nematode_pos[chosen_nematode_pos_idx],
                        int(5 * self.elements_resize_ratio),
                        self.colors[chosen_nematode_pos_idx], -1)
                    cv2.putText(
                        display_frame,
                        str(chosen_nematode_pos_idx),
                        self.chosen_nematode_pos[chosen_nematode_pos_idx],
                        cv2.FONT_HERSHEY_SIMPLEX,
                        self.elements_resize_ratio,
                        self.colors[chosen_nematode_pos_idx],
                        int(2 * self.elements_resize_ratio),
                    )

            cv2.putText(display_frame, 'Press Enter To Start Tracking...',
                        (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
            cv2.imshow(
                'tracker',
                cv2.resize(display_frame,
                           self.target_display_shape,
                           interpolation=cv2.INTER_AREA))
            k = cv2.waitKey(30) & 0xff
            if k in [27, 13, 32]:
                self.chosen_nematode_tot_distance = np.zeros(
                    len(self.chosen_nematode_pos))
                cv2.destroyWindow('tracker')
                break

    def track_nematode(self):
        output_dir, name = os.path.split(self.filename)
        output_dir = os.path.join(output_dir, 'output')
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        wri = cv2.VideoWriter(
            os.path.join(output_dir, name),
            cv2.VideoWriter_fourcc('F', 'M', 'P', '4'),
            self.video_reader.fps,
            self.video_reader.target_shape,
        )
        path_layer = np.zeros_like(self.first_frame)
        for i in range(1, self.video_reader.frame_count):
            text_layer = np.zeros_like(self.first_frame)
            display_frame, cur_centers, eccentricities = self.find_nematode(
                self.video_reader.read())

            data_point = []
            for chosen_nematode_pos_idx in range(len(
                    self.chosen_nematode_pos)):
                if not cur_centers:
                    distance = np.inf
                    cur_center_idx = -1
                else:
                    cur_center_idx = int(
                        np.argmin(
                            list(
                                map(
                                    self.l2distance2,
                                    [(self.chosen_nematode_pos[
                                        chosen_nematode_pos_idx], cur_center)
                                     for cur_center in cur_centers],
                                ))))
                    distance = self.l2distance((
                        self.chosen_nematode_pos[chosen_nematode_pos_idx],
                        cur_centers[cur_center_idx],
                    ))

                # display and record data according to whether the selected nematode is tracked correctly
                if distance < max(self.video_reader.width,
                                  self.video_reader.height) / 10:
                    self.chosen_nematode_pos[
                        chosen_nematode_pos_idx] = cur_centers[cur_center_idx]
                    cv2.circle(
                        path_layer,
                        self.chosen_nematode_pos[chosen_nematode_pos_idx], 2,
                        self.colors[chosen_nematode_pos_idx], -1)
                    cv2.circle(
                        display_frame,
                        self.chosen_nematode_pos[chosen_nematode_pos_idx],
                        int(5 * self.elements_resize_ratio),
                        self.colors[chosen_nematode_pos_idx], -1)
                    cv2.putText(
                        text_layer,
                        '%d %d %.2f' %
                        (chosen_nematode_pos_idx, self.
                         chosen_nematode_tot_distance[chosen_nematode_pos_idx],
                         eccentricities[cur_center_idx]),
                        self.chosen_nematode_pos[chosen_nematode_pos_idx],
                        cv2.FONT_HERSHEY_SIMPLEX,
                        self.elements_resize_ratio,
                        self.colors[chosen_nematode_pos_idx],
                        int(2 * self.elements_resize_ratio),
                    )
                else:
                    distance = 0
                    cv2.putText(
                        path_layer,
                        '?',
                        self.chosen_nematode_pos[chosen_nematode_pos_idx],
                        cv2.FONT_HERSHEY_SIMPLEX,
                        self.elements_resize_ratio,
                        self.colors[chosen_nematode_pos_idx],
                        int(2 * self.elements_resize_ratio),
                    )

                self.chosen_nematode_tot_distance[
                    chosen_nematode_pos_idx] += distance
                data_point.append((
                    self.chosen_nematode_pos[chosen_nematode_pos_idx][0],
                    self.chosen_nematode_pos[chosen_nematode_pos_idx][1],
                    distance,
                    self.chosen_nematode_tot_distance[chosen_nematode_pos_idx],
                    # avoid indexing with -1 when nothing was detected this frame
                    eccentricities[cur_center_idx]
                    if cur_center_idx >= 0 else 0.0,
                ))
            self.data.append(data_point)

            # combine information layer with the original video
            display_frame = cv2.bitwise_xor(display_frame, path_layer,
                                            display_frame)
            display_frame = cv2.bitwise_xor(display_frame, text_layer,
                                            display_frame)
            cv2.putText(
                display_frame,
                'Total nematode cnt: %d' % len(self.chosen_nematode_pos),
                (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)

            cv2.imshow('tracker',
                       cv2.resize(display_frame, self.target_display_shape))
            wri.write(display_frame)
            k = cv2.waitKey(1) & 0xff
            if k in [27, 13, 32]:
                break

        # save recorded data
        data = np.array(self.data)
        data = data.reshape((len(data), -1))
        columns = []
        for i in range(len(self.chosen_nematode_pos)):
            columns.append('n%dx' % i)
            columns.append('n%dy' % i)
            columns.append('n%dspeed' % i)
            columns.append('n%ddistance' % i)
            columns.append('n%deccentricity' % i)
        df = pd.DataFrame(data=data, columns=columns)
        df.to_csv(os.path.join(output_dir, name + '.csv'))
        wri.release()
        cv2.destroyAllWindows()
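
A usage sketch for the tracker above (path hypothetical); each stage opens an interactive OpenCV window:

tracker = NematodeTracker('videos/capture-0001.mp4')
tracker.init_threshold()   # tune the binary threshold, then press Enter
tracker.choose_nematode()  # left-click selects, right-click deselects
tracker.track_nematode()   # writes the annotated video plus a per-frame CSV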
Example #14
class NematodaMovementDetector:
    def __init__(
        self,
        filename=r'D:\Projects\model_organism_helper\Nematoda\capture-0001.avi',
        resize_ratio=1.0,
        frame_step=1,
        movement_threshold=4,
        max_nematoda_count=100,
        kernel_size=None,
        display_scale=1.0,
    ):
        self.frame_step = frame_step
        self.resize_ratio = resize_ratio
        self.video_reader = VideoReader(filename, resize_ratio, frame_step)
        self.background_subtractor = None
        self.movement_threshold = movement_threshold
        self.kernel_size = kernel_size
        if self.kernel_size is None:
            self.kernel_size = int(min(self.video_reader.target_shape) / 32)
            self.kernel_size = int(2 * (int((self.kernel_size - 1) / 2))) + 1

        self.max_nematoda_count = max_nematoda_count
        self.initialize_background_subtractor()
        display_scale = min(display_scale,
                            400 / np.min(self.video_reader.target_shape))
        self.display_size_target = (
            int(self.video_reader.target_shape[0] * display_scale),
            int(self.video_reader.target_shape[1] * display_scale),
        )

    def initialize_background_subtractor(self):
        self.background_subtractor = cv2.createBackgroundSubtractorMOG2(
            history=10)
        for i in range(20):
            frame = self.video_reader.read()
            self.background_subtractor.apply(frame)
            # print('.', end='', flush=True)

    def get_contours(self, frame):
        foreground = cv2.absdiff(
            self.background_subtractor.getBackgroundImage(), frame)
        foreground = cv2.cvtColor(foreground, cv2.COLOR_BGR2GRAY)
        foreground = cv2.GaussianBlur(foreground,
                                      (self.kernel_size, self.kernel_size), 0)
        _, mask = cv2.threshold(foreground, self.movement_threshold, 255,
                                cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(mask, cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]
        return contours

    def config(self):
        print('Initializing...')
        frame = self.video_reader.read()
        cv2.namedWindow('video', cv2.WINDOW_AUTOSIZE)
        frame_d = cv2.resize(frame, self.display_size_target)
        labeled_frame_d = cv2.resize(frame, self.display_size_target)
        cv2.imshow('video', np.hstack([frame_d, labeled_frame_d]))
        cv2.createTrackbar('threshold', 'video', self.movement_threshold, 63,
                           nothing)
        cv2.createTrackbar('kernelSize', 'video', self.kernel_size,
                           self.kernel_size * 3, nothing)
        cv2.createTrackbar('frameStep', 'video', self.frame_step,
                           int(self.video_reader.frame_count / 20 - 1),
                           nothing)
        cv2.setTrackbarMin('frameStep', 'video', 1)
        reset_frame_step_countdown = -1
        while True:
            contours = self.get_contours(frame)
            labeled_frame = frame.copy()
            cv2.drawContours(labeled_frame, contours, -1, utils.COLOR['red'],
                             2, cv2.LINE_AA)
            frame_d = cv2.resize(frame, self.display_size_target)
            labeled_frame_d = cv2.resize(labeled_frame,
                                         self.display_size_target)
            cv2.imshow('video', np.hstack([frame_d, labeled_frame_d]))

            self.movement_threshold = cv2.getTrackbarPos('threshold', 'video')
            self.kernel_size = cv2.getTrackbarPos('kernelSize', 'video')
            self.kernel_size = int(2 * (int((self.kernel_size - 1) / 2))) + 1
            if self.frame_step != cv2.getTrackbarPos('frameStep', 'video'):
                self.frame_step = cv2.getTrackbarPos('frameStep', 'video')
                reset_frame_step_countdown = 20

            if reset_frame_step_countdown > 0:
                reset_frame_step_countdown -= 1
            if reset_frame_step_countdown == 0:
                self.video_reader.reset(frame_step=self.frame_step)
                self.initialize_background_subtractor()
                frame = self.video_reader.read()
                reset_frame_step_countdown = -1

            k = cv2.waitKey(30) & 0xff

            if k in [13, 32]:
                break
            elif k == 27:
                cv2.destroyAllWindows()
                exit()
        cv2.destroyAllWindows()
        self.video_reader.reset()
        print('Done')

    def process(self, online=False, output_filename=None):
        # initialize VideoWriter
        wri = None
        if output_filename is not None:
            wri = cv2.VideoWriter(
                output_filename,
                cv2.VideoWriter_fourcc('F', 'M', 'P', '4'),
                self.video_reader.fps,
                self.video_reader.target_shape,
            )

        _time = cv2.getTickCount()
        frame_count = np.zeros((self.max_nematoda_count, ))

        frame = self.video_reader.read()
        frame_idx = 0
        while frame is not None:
            self.background_subtractor.apply(frame)
            contours = self.get_contours(frame)

            if contours:
                if len(contours) < self.max_nematoda_count:
                    frame_count[len(contours)] += 1

            if online or wri is not None:
                labeled_frame = frame.copy()
                cv2.putText(labeled_frame, '%d' % (len(contours), ), (50, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, utils.COLOR['yellow'],
                            6)
                if contours:
                    cv2.drawContours(labeled_frame, contours, -1,
                                     utils.COLOR['red'], 2, cv2.LINE_AA)
                if wri is not None:
                    wri.write(labeled_frame)
                if online:
                    frame = cv2.resize(frame, self.display_size_target)
                    labeled_frame = cv2.resize(labeled_frame,
                                               self.display_size_target)
                    cv2.imshow('video', np.hstack([frame, labeled_frame]))
                    k = cv2.waitKey(1) & 0xff
                    if k == 27:
                        break

            frame = self.video_reader.read()
            frame_idx += 1

            # progress report
            if frame_idx % 50 == 0:
                print('%.2f' %
                      (frame_idx * 100.0 / self.video_reader.frame_count) +
                      '%',
                      end=' ')
                time = cv2.getTickCount()
                print(50 / (time - _time) * cv2.getTickFrequency(), 'fps')
                _time = time

        if wri is not None:
            wri.release()
        if online:
            cv2.destroyAllWindows()
            print(frame_count)
            print('prediction:', np.argmax(frame_count))
        return frame_count
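
A usage sketch (paths hypothetical): tune the parameters interactively, then count moving nematodes across the whole clip:

detector = NematodaMovementDetector('capture-0001.avi', resize_ratio=0.5)
detector.config()  # interactive threshold/kernel/frame-step tuning
counts = detector.process(online=True, output_filename='labeled.mp4')
print('most frequent count:', np.argmax(counts))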
Example #15
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from video_reader import VideoReader
from frame_enhancer import LowLightEnhance
from face_detector import FaceDetector
from face_tagger import CentroidTracker
from video_writer import VideoWriter

video_reader = VideoReader()
low_light_enhancer = LowLightEnhance('snapshots/Epoch99.pth', 0)
face_detector = FaceDetector(gpu_id=0)
face_tagger = CentroidTracker(maxDisappeared=25)

video_reader.setVideoPath(r'videos/video2.mp4')
video_reader.setFrameSavePath(r'savedframes')


def main():
    ret = True
    frame_dim = video_reader.getVideoDimension()
    video_writer = VideoWriter('abcd', frame_dim)
    while ret:
        ret, frame, frame_no = video_reader.getFrame()
        rects = []
        # frame = low_light_enhancer.enhance(frame)

        faces = face_detector.detect(frame)
        frame, rects = face_detector.draw_boundary_box(frame)

        objects, maxAppereds = face_tagger.update(rects)
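        # The original snippet is cut off here. A plausible continuation,
        # assuming update() returns {object_id: centroid} and the VideoWriter
        # wrapper exposes a write() method:
        for object_id, centroid in objects.items():
            cv2.putText(frame, 'ID %d' % object_id,
                        (int(centroid[0]), int(centroid[1]) - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        video_writer.write(frame)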
Example #16
import os
from video_reader import VideoReader
from video_producer import VideoProducer
import cProfile

CAMERA_ID = os.getenv('CAMERA_ID')
FRAME_WIDTH = int(os.getenv('FRAME_WIDTH'))
FRAME_HEIGHT = int(os.getenv('FRAME_HEIGHT'))
FRAME_RATE = int(os.getenv('FRAME_RATE'))
FRAME_BUFFER_SIZE = int(os.getenv('FRAME_BUFFER_SIZE'))

TOPIC = os.getenv('KAFKA_TOPIC')
BOOTSTRAP_SERVERS = os.getenv('KAFKA_BOOTSTRAP_SERVERS')
CLIENT_ID = os.getenv('KAFKA_CLIENT_ID')

if __name__ == '__main__':

    reader = VideoReader(device_id=CAMERA_ID,
                         frame_size=(FRAME_WIDTH, FRAME_HEIGHT),
                         frame_rate=FRAME_RATE,
                         buffer_size=FRAME_BUFFER_SIZE)
    producer = VideoProducer(topic=TOPIC,
                             bootstrap_servers=BOOTSTRAP_SERVERS,
                             client_id=CLIENT_ID,
                             video_reader=reader)

    cProfile.run("producer.produce()")
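
Every setting above is read from the environment at import time, so all of it must be defined before the script is launched. A sketch with example values only (any of these can of course differ in a real deployment):

import os

os.environ.update({
    'CAMERA_ID': '0',
    'FRAME_WIDTH': '640',
    'FRAME_HEIGHT': '480',
    'FRAME_RATE': '30',
    'FRAME_BUFFER_SIZE': '128',
    'KAFKA_TOPIC': 'video-frames',
    'KAFKA_BOOTSTRAP_SERVERS': 'localhost:9092',
    'KAFKA_CLIENT_ID': 'camera-0',
})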