Ejemplo n.º 1
0
    def _normalize_rect(rect_or_start, end=None):
        """ Normalize a rectangle so its first point is actually the top left.

        :param rect_or_start: (int, int, <int, int>) Either a full left, top,
            right, bottom rectangle, or just the left/top start point when
            `end` is given
        :param end: (None or (int, int)) Right and bottom
        :return: when `end` is given, a (Vec, Vec) pair of normalized
            start/end points; otherwise the normalized rectangle itself

        """

        if end is not None:
            corners = (rect_or_start[0], rect_or_start[1], end[0], end[1])
            norm = util.normalize_rect(corners)
            return Vec(norm[:2]), Vec(norm[2:4])
        return util.normalize_rect(rect_or_start)
Ejemplo n.º 2
0
    def _normalize_rect(rect_or_start, end=None):
        """ Normalize the given rectangle by making sure top/left is actually top left.

        :param rect_or_start: (int, int, <int, int>) Left and top (and,
            when `end` is None, also right and bottom)
        :param end: (None or (int, int)) Right and bottom
        :return: a (Vec, Vec) start/end pair when `end` is given, otherwise
            the normalized rectangle

        """

        if end is None:
            return util.normalize_rect(rect_or_start)

        rect = util.normalize_rect(
            (rect_or_start[0], rect_or_start[1], end[0], end[1]))
        return Vec(rect[:2]), Vec(rect[2:4])
Ejemplo n.º 3
0
    def _normalize(rect):
        """ Normalize the given rectangle by making sure top/left etc. is actually top left

        This method might be overridden.

        :param rect: rectangle to normalize (presumably l, t, r, b, as taken
            by util.normalize_rect — confirm against the caller)
        :return: the normalized rectangle, as returned by util.normalize_rect

        """

        return util.normalize_rect(rect)
Ejemplo n.º 4
0
 def get_physical_sel(self):
     """ Return the current selection as a normalized physical rectangle.

     :return: (tuple of 4 floats or None) positions in m, or None when no
         complete selection exists
     """
     if not (self.w_start_pos and self.w_end_pos):
         return None

     start = self.cnvs.world_to_physical_pos(self.w_start_pos)
     end = self.cnvs.world_to_physical_pos(self.w_end_pos)
     return util.normalize_rect(start + end)
Ejemplo n.º 5
0
 def _calc_world_pos(self):
     """ Update the world position so it reflects the current view position """
     if self.v_start_pos and self.v_end_pos:
         # Offset of the buffer centre relative to its top-left corner
         offset = [d // 2 for d in self.cnvs._bmp_buffer_size]
         start = self.cnvs.view_to_world(self.v_start_pos, offset)
         end = self.cnvs.view_to_world(self.v_end_pos, offset)
         rect = list(util.normalize_rect(start + end))
         self.w_start_pos = rect[:2]
         self.w_end_pos = rect[2:4]
Ejemplo n.º 6
0
 def set_physical_sel(self, rect):
     """ Set the selection from a physical rectangle.

     :param rect: (tuple of 4 floats or None) t, l, b, r positions in m;
         None clears the current selection
     """
     if rect is None:
         self.clear_selection()
         return

     start = self.cnvs.physical_to_world_pos(rect[:2])
     end = self.cnvs.physical_to_world_pos(rect[2:4])
     w_pos = util.normalize_rect(start + end)
     self.w_start_pos = w_pos[:2]
     self.w_end_pos = w_pos[2:4]
     self._calc_view_pos()
Ejemplo n.º 7
0
 def _calc_view_pos(self):
     """ Update the view position so it reflects the current world position """
     if not self.w_start_pos or not self.w_end_pos:
         logging.warning("Asking to convert non-existing world positions")
         return

     # Offset of the buffer centre relative to its top-left corner
     offset = [d // 2 for d in self.cnvs._bmp_buffer_size]
     start = self.cnvs.world_to_view(self.w_start_pos, offset)
     end = self.cnvs.world_to_view(self.w_end_pos, offset)
     rect = list(util.normalize_rect(start + end))
     self.v_start_pos = rect[:2]
     self.v_end_pos = rect[2:4]
     self._calc_edges()
Ejemplo n.º 8
0
def estimateTiledAcquisitionTime(stream, stage, area):
    """ Estimate the total acquisition time for the given area, in s. """
    # TODO: fix function to limit the acquisition area so that the FoV is taken into account.
    # t_estim = estimateTiledAcquisitionTime(stream, stage, area, overlap=0)

    # For now, hard-code the time spent per tile and derive the total from
    # the number of tiles needed to cover the area.
    fov_x = TILE_FOV_X
    fov_y = TILE_FOV_X * TILE_RES[1] / TILE_RES[0]
    l, t, r, b = util.normalize_rect(area)
    nx = math.ceil(abs((r - l) / fov_x))
    ny = math.ceil(abs((b - t) / fov_y))

    # TODO: compensate for longer dwell times => should be a A+Bx formula?
    return nx * ny * TIME_PER_TILE_1US  # s
Ejemplo n.º 9
0
    def _calc_edges(self):
        """ Compute the inner and outer edges of the selection, offset
        inwards/outwards by the hover margin
        """
        l, t, r, b = util.normalize_rect(self.v_start_pos + self.v_end_pos)
        m = self.hover_margin

        self.edges = {
            "i_l": l + m,
            "o_r": r + m,
            "i_t": t + m,
            "o_b": b + m,
            "o_l": l - m,
            "i_r": r - m,
            "o_t": t - m,
            "i_b": b - m,
        }
Ejemplo n.º 10
0
    def _calc_edges(self):
        """ Recompute the selection's inner and outer edges according to the
        hover margin
        """
        rect = util.normalize_rect(self.v_start_pos + self.v_end_pos)
        grown = [v + self.hover_margin for v in rect]
        shrunk = [v - self.hover_margin for v in rect]

        self.edges = {
            "i_l": grown[0], "i_t": grown[1], "o_r": grown[2], "o_b": grown[3],
            "o_l": shrunk[0], "o_t": shrunk[1], "i_r": shrunk[2], "i_b": shrunk[3],
        }
Ejemplo n.º 11
0
def estimateTiledAcquisitionTime(stream, stage, area):
    """
    Estimate the time needed to acquire a full overview image. The number of
    tiles needed for the requested area is derived from the configured dwell
    time and resolution (number of pixels).

    Note: "estimateTiledAcquisitionTime()" of the "_tiledacq.py" module cannot be used for a couple of reasons:
    Firstly, the e-beam settings are not the current ones, but the one from the fastem_conf.
    Secondly, the xt_client acquisition time is quite a bit longer than the settings suggests.
    Therefore, we have an ad-hoc method here.

    :param stream: (SEMstream) The stream used for the acquisition.
    :param stage: (actuator.MultiplexActuator) The stage in the sample carrier coordinate system.
        The x and y axes are aligned with the x and y axes of the ebeam scanner. UNUSED: Can be used to take
        speed of axes into account for time estimation.
    :param area: (float, float, float, float) xmin, ymin, xmax, ymax coordinates of the overview region
        in the sample carrier coordinate system.

    :return: The estimated total acquisition time for the overview image in seconds.
    """
    # Scanner settings used during overview imaging
    config = fastem_conf.SCANNER_CONFIG[fastem_conf.OVERVIEW_MODE]
    res = config['resolution']
    hfov = config['horizontalFoV']

    # Tile FoV: vertical size follows from the resolution aspect ratio
    fov = (hfov, hfov * res[1] / res[0])
    xmin, ymin, xmax, ymax = util.normalize_rect(area)

    # Number of tiles needed in each direction
    nx = math.ceil(abs((xmax - xmin) / fov[0]))
    ny = math.ceil(abs((ymax - ymin) / fov[1]))

    # Time per tile: pixel count * dwell time, plus some overhead per tile
    # for e.g. stage movement
    overhead = 1  # [s]
    acq_time_tile = res[0] * res[1] * stream.emitter.dwellTime.value + overhead  # [s]

    # Time for the total overview image
    return nx * ny * acq_time_tile  # [s]
Ejemplo n.º 12
0
    def stop_selection(self):
        """ End the creation of the current selection """

        logging.debug("Stopping selection")

        if max(self.get_height(), self.get_width()) < gui.SELECTION_MINIMUM:
            logging.debug("Selection too small")
            self.clear_selection()
            return

        # Store the start position as top-left and the end position as
        # bottom-right
        v_pos = util.normalize_rect(self.v_start_pos + self.v_end_pos)
        self.v_start_pos, self.v_end_pos = v_pos[:2], v_pos[2:4]

        self._calc_edges()
        self.dragging = False
        self.edit = False
        self.edit_edge = None
Ejemplo n.º 13
0
    def stop_selection(self):
        """ End the creation of the current selection """

        logging.debug("Stopping selection")

        too_small = max(self.get_height(), self.get_width()) < gui.SELECTION_MINIMUM
        if too_small:
            logging.debug("Selection too small")
            self.clear_selection()
        else:
            # Normalize so that start is the top-left corner and end the
            # bottom-right one
            v_pos = util.normalize_rect(self.v_start_pos + self.v_end_pos)
            self.v_start_pos = v_pos[:2]
            self.v_end_pos = v_pos[2:4]

            self._calc_edges()
            self.dragging = False
            self.edit = False
            self.edit_edge = None
Ejemplo n.º 14
0
    def Draw(self, ctx, shift=(0, 0), scale=1.0):
        """ Draw the selection rectangle, with a physical-size label while
        it is being dragged or edited.

        :param ctx: (cairo context) Context to draw on
        :param shift: ((float, float)) Forwarded to update_from_buffer,
            together with the scale
        :param scale: (float) Forwarded to update_from_buffer

        """

        # Nothing to draw unless both selection corners are known
        if self.w_start_pos and self.w_end_pos:
            # Offset of the buffer centre relative to its top-left corner
            offset = [v // 2 for v in self.cnvs._bmp_buffer_size]
            # Selection corners in buffer coordinates, normalized to l, t, r, b
            b_pos = (self.cnvs.world_to_buffer(self.w_start_pos, offset) +
                     self.cnvs.world_to_buffer(self.w_end_pos, offset))
            b_pos = util.normalize_rect(b_pos)
            self.update_from_buffer(b_pos[:2], b_pos[2:4], shift + (scale,))

            #logging.warn("%s %s", shift, world_to_buffer_pos(shift))
            # (x, y, width, height); the half-pixel offset keeps the stroke
            # crisp on pixel-aligned coordinates
            rect = (b_pos[0] + 0.5, b_pos[1] + 0.5,
                    b_pos[2] - b_pos[0], b_pos[3] - b_pos[1])

            # draws a light black background for the rectangle
            ctx.set_line_width(2.5)
            ctx.set_source_rgba(0, 0, 0, 0.5)
            ctx.rectangle(*rect)
            ctx.stroke()

            # draws the dotted line on top, in the selection colour
            ctx.set_line_width(2)
            ctx.set_dash([3,])
            ctx.set_line_join(cairo.LINE_JOIN_MITER)
            ctx.set_source_rgba(*self.colour)
            ctx.rectangle(*rect)
            ctx.stroke()

            # Label: show the selection's physical size next to the
            # bottom-right corner while dragging or editing

            if (self.dragging or self.edit) and self.cnvs.microscope_view:
                w, h = self.cnvs.selection_to_real_size(
                                            self.w_start_pos,
                                            self.w_end_pos
                )
                w = units.readable_str(w, 'm', sig=2)
                h = units.readable_str(h, 'm', sig=2)
                size_lbl = u"{} x {}".format(w, h)

                pos = (b_pos[2] + 10, b_pos[3] + 5)

                self.position_label.pos = pos
                self.position_label.text = size_lbl
                self._write_labels(ctx)
Ejemplo n.º 15
0
    def convert_roi_phys_to_ccd(self, roi):
        """
        Convert the ROI in physical coordinates into a CCD ROI (in pixels)

        :param roi: (4 floats) ltrb positions in m
        :return: (4 ints or None) ltrb positions in pixels, or None if the
            ROI lies completely outside of the CCD field of view
        """
        ccd_rect = self.get_ccd_fov()
        logging.debug("CCD FoV = %s", ccd_rect)
        # Physical width and height of the CCD FoV
        phys_width = (ccd_rect[2] - ccd_rect[0], ccd_rect[3] - ccd_rect[1])

        # convert to a proportional ROI (relative to the CCD FoV)
        proi = (
            (roi[0] - ccd_rect[0]) / phys_width[0],
            (roi[1] - ccd_rect[1]) / phys_width[1],
            (roi[2] - ccd_rect[0]) / phys_width[0],
            (roi[3] - ccd_rect[1]) / phys_width[1],
        )
        # ensure it's in ltrb order
        proi = util.normalize_rect(proi)

        # convert to pixel values, rounding to slightly bigger area
        shape = self.ccd.shape[0:2]
        pxroi = (
            int(proi[0] * shape[0]),
            int(proi[1] * shape[1]),
            int(math.ceil(proi[2] * shape[0])),
            int(math.ceil(proi[3] * shape[1])),
        )

        # Limit the ROI to the one visible in the FoV
        trunc_roi = util.rect_intersect(pxroi, (0, 0) + shape)
        if trunc_roi is None:
            return None
        if trunc_roi != pxroi:
            logging.warning(
                "CCD FoV doesn't cover the whole ROI, it would need "
                "a ROI of %s in CCD referential.", pxroi)

        return trunc_roi
Ejemplo n.º 16
0
    def convert_roi_phys_to_ccd(self, roi):
        """
        Convert the ROI in physical coordinates into a CCD ROI (in pixels)

        :param roi: (4 floats) ltrb positions in m
        :return: (4 ints or None) ltrb positions in pixels, or None if the
            ROI lies completely outside of the CCD field of view
        """
        ccd_rect = self.get_ccd_fov()
        logging.debug("CCD FoV = %s", ccd_rect)
        fov_w = ccd_rect[2] - ccd_rect[0]
        fov_h = ccd_rect[3] - ccd_rect[1]

        # Express the ROI proportionally to the CCD FoV, in ltrb order
        proi = util.normalize_rect((
            (roi[0] - ccd_rect[0]) / fov_w,
            (roi[1] - ccd_rect[1]) / fov_h,
            (roi[2] - ccd_rect[0]) / fov_w,
            (roi[3] - ccd_rect[1]) / fov_h,
        ))

        # Scale to pixel values, rounding outwards to a slightly bigger area
        shape = self.ccd.shape[0:2]
        pxroi = (int(proi[0] * shape[0]),
                 int(proi[1] * shape[1]),
                 int(math.ceil(proi[2] * shape[0])),
                 int(math.ceil(proi[3] * shape[1])),
                 )

        # Keep only the part of the ROI visible in the FoV
        trunc_roi = util.rect_intersect(pxroi, (0, 0) + shape)
        if trunc_roi is None:
            return None
        if trunc_roi != pxroi:
            logging.warning("CCD FoV doesn't cover the whole ROI, it would need "
                            "a ROI of %s in CCD referential.", pxroi)

        return trunc_roi
Ejemplo n.º 17
0
    def _draw_grid(self, dc_buffer):
        """ Draw the selection as a grid of repetition (x, y) cells.

        If the grid would be denser than one line per pixel, a semi
        transparent rectangle is drawn instead.

        :param dc_buffer: (wx.DC) Device context of the buffer to draw on

        """
        ctx = wx.lib.wxcairo.ContextFromDC(dc_buffer)
        # Calculate the offset of the center of the buffer relative to the
        # top left of the buffer
        offset = tuple(v // 2 for v in self.cnvs._bmp_buffer_size)

        # The start and end position, in buffer coordinates. The return
        # values may extend beyond the actual buffer when zoomed in.
        b_pos = (self.cnvs.world_to_buffer(self.w_start_pos, offset) +
                 self.cnvs.world_to_buffer(self.w_end_pos, offset))
        b_pos = util.normalize_rect(b_pos)
        # logging.debug("start and end buffer pos: %s", b_pos)

        # Calculate the width and height in buffer pixels. Again, this may
        # be wider and higher than the actual buffer.
        width = b_pos[2] - b_pos[0]
        height = b_pos[3] - b_pos[1]

        # logging.debug("width and height: %s %s", width, height)

        # Clip the start and end positions using the actual buffer size
        start_x, start_y = self.cnvs.clip_to_buffer(b_pos[:2])
        end_x, end_y = self.cnvs.clip_to_buffer(b_pos[2:4])

        # logging.debug(
            # "clipped start and end: %s", (start_x, start_y, end_x, end_y))

        rep_x, rep_y = self.repetition

        # The step size in pixels
        step_x = width / rep_x
        step_y = height / rep_y

        r, g, b, _ = self.colour

        # If there are more repetitions in either direction than there
        # are pixels, just fill a semi transparent rectangle
        if width < rep_x or height < rep_y:
            ctx.set_source_rgba(r, g, b, 0.5)
            ctx.rectangle(
                start_x, start_y,
                int(end_x - start_x), int(end_y - start_y))
            ctx.fill()
        else:
            ctx.set_source_rgba(r, g, b, 0.9)
            ctx.set_line_width(1)
            # ctx.set_antialias(cairo.ANTIALIAS_DEFAULT)

            # The number of repetitions that fits into the buffer clipped
            # selection
            buf_rep_x = int(round((end_x - start_x) / step_x))
            buf_rep_y = int(round((end_y - start_y) / step_y))
            # Shift of the first line caused by the clipping, so that the
            # grid stays aligned with the (possibly off-buffer) selection
            buf_shift_x = (b_pos[0] - start_x) % step_x
            buf_shift_y = (b_pos[1] - start_y) % step_y

            # Vertical lines
            for i in range(1, buf_rep_x):
                ctx.move_to(start_x - buf_shift_x + i * step_x, start_y)
                ctx.line_to(start_x - buf_shift_x + i * step_x, end_y)

            # Horizontal lines
            for i in range(1, buf_rep_y):
                ctx.move_to(start_x, start_y - buf_shift_y + i * step_y)
                ctx.line_to(end_x, start_y - buf_shift_y + i * step_y)

            ctx.stroke()
Ejemplo n.º 18
0
    def _draw_points(self, dc_buffer):
        """ Draw the selection as a grid of repetition (x, y) point bitmaps.

        The rendered grid is cached as a bitmap and reused as long as the
        clipped buffer position of the selection does not change. If the dots
        would be denser than one 3x3 bitmap per cell, a semi transparent
        rectangle is drawn instead.

        :param dc_buffer: (wx.DC) Device context of the buffer to draw on

        """
        ctx = wx.lib.wxcairo.ContextFromDC(dc_buffer)
        # Calculate the offset of the center of the buffer relative to the
        # top left of the buffer
        offset = tuple(v // 2 for v in self.cnvs._bmp_buffer_size)

        # The start and end position, in buffer coordinates. The return
        # values may extend beyond the actual buffer when zoomed in.
        b_pos = (self.cnvs.world_to_buffer(self.w_start_pos, offset) +
                 self.cnvs.world_to_buffer(self.w_end_pos, offset))
        b_pos = util.normalize_rect(b_pos)
        # logging.debug("start and end buffer pos: %s", b_pos)

        # Calculate the width and height in buffer pixels. Again, this may
        # be wider and higher than the actual buffer.
        width = b_pos[2] - b_pos[0]
        height = b_pos[3] - b_pos[1]

        # logging.debug("width and height: %s %s", width, height)

        # Clip the start and end positions using the actual buffer size
        start_x, start_y = self.cnvs.clip_to_buffer(b_pos[:2])
        end_x, end_y = self.cnvs.clip_to_buffer(b_pos[2:4])

        # logging.debug(
        #     "clipped start and end: %s", (start_x, start_y, end_x, end_y))

        rep_x, rep_y = self.repetition

        # The step size in pixels
        step_x = width / rep_x
        step_y = height / rep_y

        if width // 3 < rep_x or height // 3 < rep_y:
            # If we cannot fit enough 3x3 bitmaps into either direction,
            # then we just fill a rectangle
            logging.debug("simple fill")
            r, g, b, _ = self.colour
            ctx.set_source_rgba(r, g, b, 0.5)
            ctx.rectangle(
                start_x, start_y,
                int(end_x - start_x), int(end_y - start_y))
            ctx.fill()
            ctx.stroke()
        else:
            # check whether the cache is still valid
            cl_pos = (start_x, start_y, end_x, end_y)
            if not self._bmp or self._bmp_bpos != cl_pos:
                # Cache the image as it's quite a lot of computations
                half_step_x = step_x / 2
                half_step_y = step_y / 2

                # The number of repetitions that fits into the buffer
                # clipped selection
                buf_rep_x = int((end_x - start_x) / step_x)
                buf_rep_y = int((end_y - start_y) / step_y)
                # TODO: need to take into account shift, like drawGrid
                logging.debug(
                        "Rendering %sx%s points",
                        buf_rep_x,
                        buf_rep_y
                )

                point = img.getdotBitmap()
                point_dc = wx.MemoryDC()
                point_dc.SelectObject(point)
                point.SetMaskColour(wx.BLACK)

                # First render one horizontal strip of dots...
                horz_dc = wx.MemoryDC()
                horz_bmp = wx.EmptyBitmap(int(end_x - start_x), 3)
                horz_dc.SelectObject(horz_bmp)
                horz_dc.SetBackground(wx.BLACK_BRUSH)
                horz_dc.Clear()

                blit = horz_dc.Blit
                for i in range(buf_rep_x):
                    x = i * step_x + half_step_x
                    blit(x, 0, 3, 3, point_dc, 0, 0)

                # ... then blit that strip once per row into the full bitmap
                total_dc = wx.MemoryDC()
                self._bmp = wx.EmptyBitmap(
                                int(end_x - start_x),
                                int(end_y - start_y))
                total_dc.SelectObject(self._bmp)
                total_dc.SetBackground(wx.BLACK_BRUSH)
                total_dc.Clear()

                blit = total_dc.Blit
                for j in range(buf_rep_y):
                    y = j * step_y + half_step_y
                    blit(0, y, int(end_x - start_x), 3, horz_dc, 0, 0)

                self._bmp.SetMaskColour(wx.BLACK)
                self._bmp_bpos = cl_pos

            dc_buffer.DrawBitmapPoint(self._bmp,
                    wx.Point(int(start_x), int(start_y)),
                    useMask=True)