Example #1
    def _rois_from_img(self,img):
        sorted_src_pts = self._find_target_coordinates(img)
        dst_points = np.array([(0,-1),
                               (0,0),
                               (-1,0)], dtype=np.float32)
        wrap_mat = cv2.getAffineTransform(dst_points, sorted_src_pts)

        rectangles = self._make_grid(self._n_cols, self._n_rows,
                                     self._top_margin, self._bottom_margin,
                                     self._left_margin,self._right_margin,
                                     self._horizontal_fill, self._vertical_fill)

        shift = np.dot(wrap_mat, [1,1,0]) - sorted_src_pts[1] # point 1 is the ref, at 0,0
        rois = []
        for i,r in enumerate(rectangles):
            r = np.append(r, np.zeros((4,1)), axis=1)
            mapped_rectangle = np.dot(wrap_mat, r.T).T
            mapped_rectangle -= shift
            ct = mapped_rectangle.reshape((1,4,2)).astype(np.int32)
            cv2.drawContours(img,[ct], -1, (255,0,0),1,LINE_AA)
            rois.append(ROI(ct, idx=i+1))

            # cv2.imshow("dbg",img)
            # cv2.waitKey(0)
        return rois
Example #2
    def _test_dbwriter(self, RWClass, *args, **kwargs):
        """
        This test hardcode ROIs and generate random results for a set of arbitrary variables.
        The goal is to be able to test and benchmark result write independently of any tracking

        :return:
        """
        # building 32 rois
        coordinates = np.array([(0, 0), (100, 0), (100, 100), (0, 100)])
        rois = [ROI(coordinates + i * 100, i) for i in range(1, 33)]
        rpg = RandomResultGenerator()

        with RWClass(rois=rois, *args, **kwargs) as rw:
            # n = 4000000 # 222h of data
            # n = 400000 # 22.2h of data
            n = 40000  # 2.22h of data
            import time
            t0 = time.time()
            for t in range(0, n):
                rt = t * 1000 // 5

                if t % (n // 100) == 0:

                    print("filling with dummy variables: %f percent" % (
                        100. * float(t) / float(n)))
                    print(time.time() - t0)
                for r in rois:
                    data = rpg.make_one_point()
                    rw.write(rt, r, [data])

                rw.flush(t)
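The benchmark above only relies on the writer being a context manager that exposes write(t, roi, data_rows) and flush(t). A minimal stand-in satisfying that interface, purely hypothetical and not the real ethoscope result writer, could look like:

class DummyResultWriter(object):
    """Hypothetical stand-in implementing the interface exercised by _test_dbwriter."""

    def __init__(self, rois, *args, **kwargs):
        self._rois = rois
        self._buffer = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.flush(None)

    def write(self, t, roi, data_rows):
        # accumulate one entry per ROI and time point (roi.idx assumed to be the ROI index)
        self._buffer.append((t, roi.idx, data_rows))

    def flush(self, t):
        # a real writer would commit to its backend here; this sketch just drops the buffer
        self._buffer = []

Passing DummyResultWriter as RWClass exercises the same loop without touching a database.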
Example #3
    def _rois_from_img(self, img):
        h, w = img.shape[0], img.shape[1]
        return [ROI(np.array([(0, 0),
                              (0, h - 1),
                              (w - 1, h - 1),
                              (w - 1, 0)]),
                    idx=1)]
Example #4
    def build(self, img):

        rois = []
        idx = 1

        for coord in self._coordinates:

            # each entry is a string of four "(x, y)" points; wrap it in brackets
            # and evaluate it into a list of tuples
            coord = "[" + coord + "]"
            coord = eval(coord)
            if len(coord) != 4:
                # skip malformed entries but keep idx in step with the input position
                idx += 1
                continue
            cnt = np.array(coord).reshape(4, 1, 2)
            rois.append(ROI(cnt, idx))
            idx += 1

        return rois
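build() above expects every entry of self._coordinates to be a string of four "(x, y)" points. A small sketch of the same parsing with a hypothetical input string, using ast.literal_eval instead of eval() for safety:

import ast
import numpy as np

coord = "(0, 0), (100, 0), (100, 100), (0, 100)"  # hypothetical coordinate string (assumed format)
points = ast.literal_eval("[" + coord + "]")      # -> [(0, 0), (100, 0), (100, 100), (0, 100)]
assert len(points) == 4
cnt = np.array(points).reshape(4, 1, 2)           # same contour shape as used by build()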
Example #5
    def setUp(self):

        self._result = []

        # head is moving
        # self._videos = ["../static_files/videos/whole_2020-05-09_12-00-07_014aad42625f433eb4bd2b44f811738e__1280x960@[email protected]"]
        # self._videos = ["../static_files/videos/whole_2020-05-09_12-00-07_014aad42625f433eb4bd2b44f811738e__1280x960@[email protected]"]
        # flipping wings
        self._videos = [
            "../static_files/videos/whole_2020-05-09_12-00-07_014aad42625f433eb4bd2b44f811738e__1280x960@[email protected]"
        ]
        # pretty still
        # self._videos = ["../static_files/videos/whole_2020-05-09_12-00-07_014aad42625f433eb4bd2b44f811738e__1280x960@[email protected]"]
        self.roi = None
        self.camera = FSLVirtualCamera(path=self._videos[0],
                                       bw=False,
                                       use_wall_clock=False)

        for frame_idx, (t_ms, img) in self.camera:
            print(frame_idx)

            # cv2.imshow("img", img)
            # cv2.waitKey(0)

            if self.roi is None:
                shape = img.shape
                shape = tuple([e - 1 for e in shape])

                self.roi = ROI(np.array([(0, 0), (shape[1], 0),
                                         shape[:2][::-1], (0, shape[0])]),
                               idx=18)
                self.tracker = RichAdaptiveBGModel(roi=self.roi)
                self.tracker.live_tracking = True
                self.tracker._old_pos = 0.0 + 0.0j
                self.tracker._null_dist = round(
                    np.log10(1. / float(img.shape[1])) * 1000)

                try:
                    datapoints = self.tracker.track(t_ms, img)
                except NoPositionError:
                    pass
            # print(img.shape)
            datapoints = self.tracker.track(t_ms, img)
            if datapoints:
                self._result.append((t_ms, datapoints))
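As a quick check of the _null_dist formula used in setUp(), assuming a 1280 px wide frame (the width suggested by the video file name):

import numpy as np

width = 1280
null_dist = round(np.log10(1. / float(width)) * 1000)  # log10(1/1280) is about -3.107
print(null_dist)  # about -3107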
Example #6
    def _rois_from_img(self, img):
        # transform to gray scale image if not already
        if len(self._mask.shape) == 3:
            self._mask = cv2.cvtColor(self._mask, cv2.COLOR_BGR2GRAY)

        thresh_mask = self._mask.copy()
        #cv2.namedWindow("ROIMask Copy", cv2.WINDOW_NORMAL)
        #cv2.resizeWindow("ROIMask Copy", 800, 600)
        #cv2.imshow("ROIMask Copy", thresh_mask)
        #cv2.waitKey(0)

        # set threshold for findContours(): everything that is not black (0) is above the threshold
        ret, thresh_mask = cv2.threshold(thresh_mask, 5, 255,
                                         cv2.THRESH_BINARY)
        if CV_VERSION == 3:
            # OpenCV 3 findContours() returns (image, contours, hierarchy)
            _, contours, _ = cv2.findContours(thresh_mask, RETR_EXTERNAL,
                                              CHAIN_APPROX_SIMPLE)
        else:
            contours, _ = cv2.findContours(thresh_mask, RETR_EXTERNAL,
                                           CHAIN_APPROX_SIMPLE)

        contour_cnt = len(contours)
        #logging.info("ImgMaskROIBuilder: found %s contours" % contour_cnt)
        tmp_mask = np.zeros_like(self._mask)
        for i, c in enumerate(contours):
            #logging.info("ROI Contour %s: %s", i, c);
            if len(c) >= 3:
                # only keep contours with at least 3 points
                cv2.drawContours(tmp_mask, [c], -1, 255)

                value = int(np.median(self._mask[tmp_mask > 0]))
                #print("ROI %s value: %s" % (i, value))
                #if (value == 255):
                #  value = None
                self._rois.append(ROI(c, i + 1, value))

        logging.info("ImgMaskROIBuilder: %s valid contours" % len(self._rois))
        if logging.getLogger().isEnabledFor(logging.DEBUG):
            cv2.namedWindow("ROIMask", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("ROIMask", 800, 600)
            cv2.imshow("ROIMask", tmp_mask)
            cv2.waitKey(0)

        return self._rois
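Example #6 reads ROIs from a greyscale mask in which each region is painted with a distinct non-black value. A self-contained sketch of the same threshold-and-findContours idea, without the ethoscope ROI class and assuming the two-value findContours return of OpenCV 2.x/4.x:

import cv2
import numpy as np

# synthetic mask: two rectangles painted with different grey values
mask = np.zeros((200, 300), dtype=np.uint8)
cv2.rectangle(mask, (20, 20), (120, 180), 100, -1)
cv2.rectangle(mask, (160, 20), (280, 180), 200, -1)

# everything brighter than 5 is considered part of a region
_, thresh = cv2.threshold(mask, 5, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

for i, c in enumerate(contours):
    tmp = np.zeros_like(mask)
    cv2.drawContours(tmp, [c], -1, 255, -1)
    value = int(np.median(mask[tmp > 0]))  # grey value painted inside this region
    print("region %d -> value %d" % (i + 1, value))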
Example #7
    def _rois_from_img(self, img):

        if len(self._mask.shape) == 3:
            self._mask = cv2.cvtColor(self._mask, cv2.COLOR_BGR2GRAY)

        contours, hiera = cv2.findContours(np.copy(self._mask),
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

        rois = []
        for i, c in enumerate(contours):
            tmp_mask = np.zeros_like(self._mask)
            cv2.drawContours(tmp_mask, [c], 0, 1)

            value = int(np.median(self._mask[tmp_mask > 0]))

            rois.append(ROI(c, i + 1, value))

        return rois
Example #8
    def get_roi(self):

        if self._region_id == 0:
            self._roi = None
            return

        with sqlite3.connect(self._dbfile, check_same_thread=False) as src:
            src_cur = src.cursor()
            command = "SELECT x, y, w, h from ROI_MAP WHERE roi_idx = %d" % self._region_id
            src_cur.execute(command)
            xpos, ypos, width, height = next(iter(src_cur))
            polygon = [
                [xpos, ypos],  # tl
                [xpos + width, ypos],  # tr
                [xpos + width, ypos + height],  # br
                [xpos, ypos + height]  # bl
            ]
            roi = ROI(polygon, self._region_id)
            self._roi = roi
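get_roi() assumes a ROI_MAP table holding one bounding box per roi_idx. A minimal sketch of that assumed schema, using a parameterized query instead of string interpolation:

import sqlite3

with sqlite3.connect(":memory:") as db:
    cur = db.cursor()
    cur.execute("CREATE TABLE ROI_MAP (roi_idx INTEGER, x INTEGER, y INTEGER, w INTEGER, h INTEGER)")
    cur.execute("INSERT INTO ROI_MAP VALUES (1, 10, 20, 100, 50)")

    cur.execute("SELECT x, y, w, h FROM ROI_MAP WHERE roi_idx = ?", (1,))
    x, y, w, h = cur.fetchone()
    polygon = [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]  # tl, tr, br, bl
    print(polygon)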
Example #9
    def _rois_from_img(self, img):
        '''
        Fit ROIs to the provided img
        '''

        reference_points = self._find_target_coordinates(img)

        # point 1 is the reference point at coords (A, B); point 0 will be at (A, y) and point 2 at (x, B)
        # we then transform the ROIs on the assumption that those points are aligned perpendicularly in this way
        dst_points = np.array([(0, -1), (0, 0), (-1, 0)], dtype=np.float32)

        wrap_mat = cv2.getAffineTransform(dst_points, reference_points)

        rectangles = self._make_grid(self._n_cols, self._n_rows,
                                     self._top_margin, self._bottom_margin,
                                     self._left_margin, self._right_margin,
                                     self._horizontal_fill,
                                     self._vertical_fill)

        shift = np.dot(wrap_mat, [
            1, 1, 0
        ]) - reference_points[1]  # point 1 is the ref which we have set at 0,0

        rois = []
        for i, r in enumerate(rectangles):
            r = np.append(r, np.zeros((4, 1)), axis=1)
            mapped_rectangle = np.dot(wrap_mat, r.T).T
            mapped_rectangle -= shift
            ct = mapped_rectangle.reshape((1, 4, 2)).astype(np.int32)
            cv2.drawContours(img, [ct], -1, (255, 0, 0), 1, LINE_AA)
            rois.append(ROI(ct, idx=i + 1))

        # rois is an array of ROI objects
        # reference_points is an array containing the absolute coordinates of the three reference targets

        return reference_points, rois
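The three detected reference points are matched to the unit corners (0, -1), (0, 0) and (-1, 0), so cv2.getAffineTransform yields a matrix mapping grid coordinates into image coordinates. A short sketch with made-up reference points (hypothetical values, for illustration only):

import cv2
import numpy as np

# made-up detected targets, in the same order as the dst points above
reference_points = np.array([(100., 400.), (100., 100.), (500., 100.)], dtype=np.float32)
dst_points = np.array([(0, -1), (0, 0), (-1, 0)], dtype=np.float32)

wrap_mat = cv2.getAffineTransform(dst_points, reference_points)

# map one point expressed in grid coordinates into image coordinates
grid_point = np.array([-0.5, -0.5, 1.0])  # homogeneous (x, y, 1)
print(np.dot(wrap_mat, grid_point))       # [300. 250.]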
Example #10
    def _rois_from_img(self, img):

        self._img = img
        if os.path.exists(self._rois_pickle_file):
            import pickle
            with open(self._rois_pickle_file, "rb") as fh:
                rois = pickle.load(fh)
            return rois

        else:
            if os.path.exists(self._target_coord_file):
                with open(self._target_coord_file, "r") as fh:
                    data = fh.read()

                # each dot is in a newline
                data = data.split("\n")[:3]
                # each dot is made by two numbers separated by comma
                src_points = [tuple([int(f) for f in e.split(",")]) for e in data]
                sorted_src_pts = self._sort_src_pts(src_points)

            else:
                try:
                    sorted_src_pts = self._find_target_coordinates(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), self._find_blobs)
                except EthoscopeException as e:
                    # raise e
                    logging.warning("Fall back to find_blobs_new")
                    sorted_src_pts = self._find_target_coordinates(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), self._find_blobs_new)
                # sorted_src_pts = self._find_target_coordinates(img)
                except Exception as e:
                    raise e

            dst_points = np.array([(0,-1),
                                   (0,0),
                                   (-1,0)], dtype=np.float32)
            wrap_mat = cv2.getAffineTransform(dst_points, sorted_src_pts)

            rectangles = self._make_grid(self._n_cols, self._n_rows,
                                         self._top_margin, self._bottom_margin,
                                         self._left_margin,self._right_margin,
                                         self._horizontal_fill, self._vertical_fill,
                                         self._inside_pad, self._outside_pad)

            shift = np.dot(wrap_mat, [1, 1, 0]) - sorted_src_pts[1] # point 1 is the ref, at 0,0
            rois = []
            side = "left"
            point = self._sorted_src_pts[2]

            for i, r in enumerate(rectangles):
                if i > 9:
                    # after the first ten rectangles, switch side and reference point
                    side = "right"
                    point = self._sorted_src_pts[1]

                r = np.append(r, np.zeros((4, 1)), axis=1)
                mapped_rectangle = np.dot(wrap_mat, r.T).T
                mapped_rectangle -= shift
                ct = mapped_rectangle.astype(np.int32)
                # logging.warning(i)
                ct, _, _, _ = refine_contour(ct, img, rotate=False)
                ct = pull_contour_h(ct, point, side)
                cv2.drawContours(img, [ct.reshape((1, 4, 2))], -1, (255, 0, 0), 1, LINE_AA)
                rois.append(ROI(ct, idx=i+1))

        return rois
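When the target coordinate file exists, Example #10 expects one dot per line, each written as two comma-separated integers. A hypothetical file content and the same parsing logic outside the class:

data = "120,860\n118,96\n910,93\n"  # hypothetical x,y targets, one per line
lines = data.split("\n")[:3]        # the first three lines hold the three targets
src_points = [tuple(int(f) for f in e.split(",")) for e in lines]
print(src_points)                   # [(120, 860), (118, 96), (910, 93)]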
Example #11
    def _rois_from_img(self, img, camera=None, debug=False):

        grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        grey = cv2.cvtColor(grey, cv2.COLOR_GRAY2BGR)

        self._orig = grey
        cv2.imwrite(
            os.path.join(os.environ["HOME"], "accum_rois_from_img.png"), img)

        # rotate the image so ROIs are horizontal
        rotated, M = self._rotate_img(img)
        logging.info("DETECTED ARENA")
        # segment the ROIs out of the rotated image
        if camera is not None and not isinstance(camera, np.ndarray):
            camera.set_roi_builder()
            time.sleep(5)
            accum = self.fetch_frames(camera, mode="roi_builder")
            cv2.imwrite(os.path.join(os.environ["HOME"], "accum.png"), accum)
            img = accum

        rotated = cv2.warpAffine(grey,
                                 M,
                                 grey.shape[:2][::-1],
                                 flags=cv2.INTER_CUBIC,
                                 borderMode=cv2.BORDER_REPLICATE)

        bin_rotated = self._segment_rois(rotated, debug=debug)[:, :, 0]

        center_plot = np.stack((bin_rotated, ) * 3, axis=2)

        try:
            logging.info("Splitting ROIs")
            contours = self._split_rois(bin_rotated, grey)
        except EthoscopeException as e:
            bin_rotated = self._segment_rois(rotated,
                                             mint=self._mint,
                                             maxt=self._maxt,
                                             half_t=110,
                                             debug=debug)[:, :, 0]
            center_plot = np.stack((bin_rotated, ) * 3, axis=2)
            contours = self._split_rois(bin_rotated, grey)

        logging.info("ROI Detection successful")

        centers = []
        widths = []
        heights = []
        centers_left = []
        centers_right = []
        angles = []
        rects = []
        for i, ct in enumerate(contours):

            # rect = cv2.boundingRect(ct)
            # x,y,w,h = rect
            ## create a contour from the rect
            ## rect is a list of 4 numbers: x,y on top left corner and width and height of the rectangle
            ## roi is a list of 4 tuples: the x,y coordinates of the 4 squares of the rectangle
            # roi = np.array([(x,y), (x+w, y), (x+w,y+h), (x, y+h)])
            rect = cv2.minAreaRect(ct)
            xy, wh, angle = rect
            rects.append(rect)
            w = np.max(wh)
            h = np.min(wh)
            roi = cv2.boxPoints(rect).astype(np.int32)

            center = (np.mean([e[0]
                               for e in roi]), np.mean([e[1] for e in roi]))
            center = tuple(int(e) for e in center)
            centers.append(center)
            angles.append(angle)
            roi.reshape((4, 1, 2))
            cv2.circle(center_plot, center, 10, (0, 255, 0), -1)

            widths.append(w)
            heights.append(h)
            left, _ = find_quadrant(bin_rotated.shape, center)
            if left:
                centers_left.append(center)
            else:
                centers_right.append(center)

        if debug:
            cv2.imshow("center_plot", center_plot)
            cv2.waitKey(0)

        median_x_left = np.median([e[0] for e in centers_left])
        median_x_right = np.median([e[0] for e in centers_right])

        rois = []

        arena_width = self._sorted_src_pts[1, 0] - self._sorted_src_pts[2, 0]
        arena_height = self._sorted_src_pts[0, 1] - self._sorted_src_pts[1, 1]

        long_side = int(self._long_side_fraction * arena_width)
        short_side = int(self._short_side_fraction * arena_width)
        # height =  int(0.7*(arena_height*0.8/10))
        height = 0.8 * np.median(heights)

        # first ROIs whose side is left and then those whose top left corner y coordinate is lowest
        centers = sorted(centers,
                         key=lambda x:
                         (not find_quadrant(bin_rotated.shape, x)[0], x[1]))

        for i, center in enumerate(centers):
            left, _ = find_quadrant(bin_rotated.shape, center)
            side = 'left' if left else 'right'
            angle = angles[i]

            segmented_contour = contours[i]

            # corrected_roi = cv2.boxPoints(rects[i])

            if left:
                corrected_roi = center2rect((median_x_left, center[1]),
                                            height,
                                            left=long_side,
                                            right=short_side,
                                            angle=angle)
                inner_roi = center2rect((median_x_left, center[1]),
                                        height,
                                        left=long_side / 2,
                                        right=short_side / 3,
                                        angle=angle)
            else:
                corrected_roi = center2rect((median_x_right, center[1]),
                                            height,
                                            left=short_side,
                                            right=long_side,
                                            angle=angle)
                inner_roi = center2rect((median_x_right, center[1]),
                                        height,
                                        left=short_side / 3,
                                        right=long_side / 2,
                                        angle=angle)

            final_contour, grey, max_angle, max_pixel = refine_contour(
                corrected_roi, grey)

            ####
            # give it the shape expected by programs downstream
            ct = final_contour.reshape((1, 4, 2)).astype(np.int32)
            # cv2.drawContours(img,[ct], -1, (255,0,0),1,LINE_AA)
            # initialize a ROI object to be returned to the control thread
            # with all the other detected ROIs in the rois list
            rois.append(ROI(ct, idx=i + 1, side=side))

        logging.info("DETECTED ROIS")

        if debug:
            cv2.imshow("grey", grey)
            cv2.waitKey(0)

        # if self._debug or debug or True:
        #     for roi in rois:
        #         tl = (roi.rectangle[0], roi.rectangle[1])
        #         br = (roi.rectangle[0] + roi.rectangle[2], roi.rectangle[1] + roi.rectangle[3])
        #         cv2.rectangle(bin_rotated,tl,br, 128, 2)

        #     cv2.imshow("img_rotated", rotated)
        #     cv2.imshow("bin_rotated", bin_rotated)
        #     cv2.imshow("bin_rotated_contours", bin_rotated)
        #     cv2.waitKey(0)

        return rotated, M, rois
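Example #11 relies on a find_quadrant helper to decide whether a ROI centre falls in the left or the right half of the frame. Its implementation is not shown here; a minimal stand-in consistent with how it is called above (an assumption, not the actual ethoscope code) would be:

def find_quadrant(shape, center):
    """Return (is_left, is_top) for an (x, y) centre within an image of the given shape."""
    h, w = shape[:2]
    x, y = center
    return x < w / 2, y < h / 2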
    import time

    import numpy as np

    from ethoscope.core.roi import ROI
    from ethoscope.hardware.interfaces.interfaces import HardwareConnection

    def never_moving():
        return False

    hc = HardwareConnection(RobustSleepDepriver._HardwareInterfaceClass,
                            do_warm_up=False)

    sd = RobustSleepDepriver(
        hc,
        velocity_correction_coef=0.01,
        min_inactive_time=10,  # s
        pulse_duration=1000,  #ms
        date_range="")
    sd._has_moved = never_moving
    sd._t0 = 0

    roi = ROI(polygon=np.array([[0, 10], [10, 10], [10, 0], [0, 0]]), idx=1)
    tracker = AdaptiveBGModel(roi=roi)
    tracker._last_time_point = 30000  #ms

    sd.bind_tracker(tracker)
    print("Applying")
    interact, result = sd.apply()
    print(interact)
    print(result)
    while len(hc._instructions) != 0:
        time.sleep(1)
    hc.stop()