Esempio n. 1
0
def check_ruler_points():
    """Visually verify the annotated ruler endpoints for each video.

    Reads per-video ruler endpoint annotations from
    ``../output/ruler_points.csv``, estimates a similarity transform that
    maps a fixed 720x360 destination strip onto the annotated ruler
    segment, and shows the original frame (with the ruler drawn on top)
    above the rectified crop for manual inspection.
    """
    points = pd.read_csv('../output/ruler_points.csv')
    for _, row in points.iterrows():
        video_id = row.video_id
        # scipy.misc.imread was deprecated in SciPy 1.0 and removed in
        # 1.2; matplotlib's imread reads the frame just as well and
        # matplotlib is already used below.
        img = plt.imread(os.path.join(IMAGES_DIR, video_id, "0001.jpg"))

        dst_w = 720
        dst_h = 360
        # Annotated ruler endpoints in the source frame.
        ruler_points = np.array([[row.ruler_x0, row.ruler_y0],
                                 [row.ruler_x1, row.ruler_y1]])
        # Where those endpoints should land in the rectified crop:
        # vertically centered, with a 10% horizontal margin on each side.
        img_points = np.array([[dst_w * 0.1, dst_h / 2],
                               [dst_w * 0.9, dst_h / 2]])

        # skimage.transform.warp expects the crop -> frame mapping,
        # hence src = crop coordinates, dst = frame coordinates.
        tform = SimilarityTransform()
        tform.estimate(dst=ruler_points, src=img_points)
        crop = skimage.transform.warp(img,
                                      tform,
                                      mode='edge',
                                      order=3,
                                      output_shape=(dst_h, dst_w))

        print('ruler:\n', ruler_points)
        print('img:\n', img_points)

        # Round-trip sanity checks of the estimated transform.
        print('ruler from img:\n', tform(img_points))
        print('img from ruler:\n', tform.inverse(ruler_points))
        print('scale', tform.scale)

        plt.subplot(2, 1, 1)
        plt.imshow(img)
        plt.plot([row.ruler_x0, row.ruler_x1], [row.ruler_y0, row.ruler_y1])

        plt.subplot(2, 1, 2)
        plt.imshow(crop)
        plt.show()
Esempio n. 2
0
    def rotate_selection(self, event):
        """Rotate the selected canvas items around the rotation center.

        The rotation angle is the angle swept by the mouse pointer --
        from its position at the previous drag event to its current
        position -- as seen from the pivot point.  The pivot is
        ``self.rotation_center`` when set, otherwise the centroid of
        the selection.
        """
        items = self.get_selection()
        bboxes = np.array([self.canvas.coords(item) for item in items])
        # Midpoint of each item's bounding box: (x0, y0) + half extent.
        centers = bboxes[..., :2] + (bboxes[..., 2:] - bboxes[..., :2]) / 2

        pivot = np.mean(centers, axis=0)
        if self.rotation_center is not None:
            pivot = self.rotation_center

        # (0, 0) acts as the "no previous position" sentinel; in that
        # case start the sweep from the current pointer position.
        prev = np.array([self._drag_data["x"], self._drag_data["y"]])
        if np.all(prev == np.array([0, 0])):
            prev = np.array([event.x, event.y])
        vec_prev = prev - pivot

        cur = np.array([event.x, event.y])
        vec_cur = cur - pivot

        # Signed angle from the current pointer direction to the
        # previous one, measured around the pivot.
        angle = (np.arctan2(vec_prev[1], vec_prev[0])
                 - np.arctan2(vec_cur[1], vec_cur[0]))

        transform = SimilarityTransform(rotation=angle)
        rotated = transform.inverse(centers - pivot) + pivot

        for item, before, after in zip(items, centers, rotated):
            self.canvas.move(item, after[0] - before[0], after[1] - before[1])

        self._drag_data["x"] = event.x
        self._drag_data["y"] = event.y
Esempio n. 3
0
 def reshape_stim(self, stim):
     """Resample an image/video stimulus onto the implant's electrodes.

     Fits the electrode locations to a rectangular grid, downscales the
     (grayscale) stimulus to that grid size, and samples one activation
     value per electrode.

     Parameters
     ----------
     stim : ImageStimulus or VideoStimulus
         The stimulus to reshape.

     Returns
     -------
     Stimulus
         One activation value per electrode, preserving the stimulus'
         time axis and metadata.

     Raises
     ------
     ValueError
         If ``stim`` is not an image/video stimulus, or if the inferred
         grid would be too large to allocate.
     """
     if isinstance(stim, (ImageStimulus, VideoStimulus)):
         # In the more general case, the implant might not have a 'shape'
         # attribute or have a hex grid. Idea:
         # - Fit electrode locations to a rectangular grid
         # - Downscale the image to that grid size
         # - Index into grid to determine electrode activation
         data = stim.rgb2gray()
         if hasattr(self.earray, 'rot'):
             # We need to rotate the array & image first, otherwise we may
             # end up with an infinitesimally small (dx,dy); for example,
             # when a rectangular grid is rotated by 1deg:
             tf = SimilarityTransform(rotation=np.deg2rad(self.earray.rot))
             x, y = np.array([
                 tf.inverse([e.x, e.y]) for e in self.electrode_objects
             ]).squeeze().T
             data = data.rotate(self.earray.rot)
         else:
             x = [e.x for e in self.electrode_objects]
             y = [e.y for e in self.electrode_objects]
         # Determine grid step by finding the greatest common denominator:
         dx = abs(reduce(lambda a, b: c_gcd(a, b), np.diff(x)))
         dy = abs(reduce(lambda a, b: c_gcd(a, b), np.diff(y)))
         # Build a new rectangular grid:
         try:
             grid = Grid2D((np.min(x), np.max(x)), (np.min(y), np.max(y)),
                           step=(dx, dy))
         except MemoryError:
             raise ValueError("Automatic stimulus reshaping failed. You "
                              "will need to resize the stimulus yourself "
                              "so that there is one activation value per "
                              "electrode.")
         # For each electrode, find the closest pixel on the grid:
         kdtree = cKDTree(np.vstack((grid.x.ravel(), grid.y.ravel())).T)
         _, e_idx = kdtree.query(np.vstack((x, y)).T)
         data = data.resize(grid.x.shape).data[e_idx, ...].squeeze()
         # Sample the stimulus at the correct pixel locations:
         return Stimulus(data,
                         electrodes=self.electrode_names,
                         time=stim.time,
                         metadata=stim.metadata)
     # NOTE(review): this message describes an electrode-count mismatch,
     # but the branch is reached for *any* non-image/video stimulus --
     # presumably a remnant of a broader dispatch; confirm upstream.
     # (The original trailing `return stim` after this raise was
     # unreachable and has been removed.)
     err_str = ("Number of electrodes in the stimulus (%d) "
                "does not match the number of electrodes in "
                "the implant (%d)." %
                (len(stim.electrodes), self.n_electrodes))
     raise ValueError(err_str)