Example #1
    def analysis_status_changed(self, *_) -> None:
        analysis = self._analysis
        if analysis is None: return

        image = analysis.image
        if image is not None:
            self.bg_artist.set_array(image)
            self.bg_artist.props.extents = Rect2(0, 0, image.shape[1],
                                                 image.shape[0])
            self.canvas.set_content_size(image.shape[1], image.shape[0])
            self.canvas.zoom(0.0)
        else:
            self.bg_artist.clear_data()

        left_angle = analysis.left_angle
        left_contact = analysis.left_contact
        if left_angle is not None and left_contact is not None:
            self.left_angle_artist.props.delta_angle = left_angle
            self.left_angle_artist.props.x = left_contact.x
            self.left_angle_artist.props.y = left_contact.y
        else:
            self.left_angle_artist.props.delta_angle = math.nan

        right_angle = analysis.right_angle
        right_contact = analysis.right_contact
        if right_angle is not None and right_contact is not None:
            self.right_angle_artist.props.delta_angle = -right_angle
            self.right_angle_artist.props.x = right_contact.x
            self.right_angle_artist.props.y = right_contact.y
        else:
            self.right_angle_artist.props.delta_angle = math.nan

        line = analysis.baseline
        self.baseline_artist.props.line = line
Example #2
    def draw(self, cr: cairo.Context) -> None:
        line = self._line
        stroke_color = self._stroke_color
        stroke_width = self._stroke_width
        scale_strokes = self._scale_strokes

        if line is None:
            return

        if line.pt0 == line.pt1:
            return

        clip_extents = Rect2(cr.clip_extents())

        start = line.eval(x=clip_extents.x0)
        end = line.eval(x=clip_extents.x1)

        if not clip_extents.contains(start):
            start = line.eval(y=clip_extents.y0 if start.y < clip_extents.y0
                              else clip_extents.y1)

        if not clip_extents.contains(end):
            end = line.eval(y=clip_extents.y0 if end.y < clip_extents.y0
                            else clip_extents.y1)

        cr.move_to(*start)
        cr.line_to(*end)

        cr.save()
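        # If strokes are scale-independent, reset to the identity matrix so
        # cairo interprets the stroke width below in device pixels rather than
        # in the canvas' (possibly zoomed) user-space units.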
        if scale_strokes:
            cr.identity_matrix()
        cr.set_source_rgb(*stroke_color)
        cr.set_line_width(stroke_width)
        cr.stroke()
        cr.restore()
Example #3
def expand_rect(rect: Rect2[float], size: float) -> Rect2[float]:
    return Rect2(
        x0=rect.x0 - size,
        y0=rect.y0 - size,
        x1=rect.x1 + size,
        y1=rect.y1 + size,
    )
Example #4
def draw_line(image: np.ndarray,
              line: Line2,
              color: Tuple[float, float, float],
              thickness: int = 1) -> None:
    image_extents = Rect2(position=(0, 0), size=image.shape[1::-1])

    start_point = line.eval(x=image_extents.x0)
    end_point = line.eval(x=image_extents.x1)

    if not image_extents.contains(start_point):
        if start_point.y < image_extents.y0:
            y_to_eval = image_extents.y0
        else:
            y_to_eval = image_extents.y1
        start_point = line.eval(y=y_to_eval)

    if not image_extents.contains(end_point):
        if end_point.y < image_extents.y0:
            y_to_eval = image_extents.y0
        else:
            y_to_eval = image_extents.y1
        end_point = line.eval(y=y_to_eval)

    cv2.line(image,
             pt1=tuple(start_point.map(int)),
             pt2=tuple(end_point.map(int)),
             color=color,
             thickness=thickness)
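
Examples #2 and #4 follow the same pattern for rendering an unbounded Line2 across a finite area: evaluate the line at the left and right edges, then re-evaluate any endpoint that falls outside the vertical range at the top or bottom edge instead. Below is a minimal, self-contained sketch of just that clipping step, using a hypothetical slope/intercept line in place of the project's Line2 (illustration only; assumes a non-vertical line that actually crosses the region):

from typing import NamedTuple, Tuple

class SlopeLine(NamedTuple):
    # Hypothetical stand-in for Line2: y = m*x + c.
    m: float
    c: float

    def eval_x(self, x: float) -> Tuple[float, float]:
        return (x, self.m * x + self.c)

    def eval_y(self, y: float) -> Tuple[float, float]:
        return ((y - self.c) / self.m, y)

def clip_line_to_rect(line: SlopeLine,
                      x0: float, y0: float,
                      x1: float, y1: float
                      ) -> Tuple[Tuple[float, float], Tuple[float, float]]:
    # Evaluate at the left/right edges, then fix up any endpoint that lies
    # outside the vertical range, mirroring Examples #2 and #4.
    start = line.eval_x(x0)
    end = line.eval_x(x1)

    if not (y0 <= start[1] <= y1):
        start = line.eval_y(y0 if start[1] < y0 else y1)
    if not (y0 <= end[1] <= y1):
        end = line.eval_y(y0 if end[1] < y0 else y1)

    return start, end

# A steep line drawn across a 100x100 region:
print(clip_line_to_rect(SlopeLine(m=3.0, c=-50.0), 0.0, 0.0, 100.0, 100.0))
# ((16.666..., 0.0), (50.0, 100.0))
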
Example #5
    def _get_region_clip(self) -> Optional[Rect2[int]]:
        image_size_hint = self._image_acquisition.get_image_size_hint()
        if image_size_hint is None:
            return None

        return Rect2(
            position=(0, 0),
            size=image_size_hint,
        )
Example #6
    def _update_dragging_indicator(
            self, current_cursor_pos: Tuple[float, float]) -> None:
        if not self._model.is_defining:
            self.view.bn_dragging.set(None)
            return

        self.view.bn_dragging.set(
            Rect2(
                pt0=self._model.begin_define_pos,
                pt1=current_cursor_pos,
            ))
Example #7
    def set_background_image(self, image: Optional[np.ndarray]) -> None:
        if image is None:
            self._bg_artist.clear_data()
            return

        width = image.shape[1]
        height = image.shape[0]
        self._bg_artist.extents = Rect2(0, 0, width, height)
        self._bg_artist.set_array(image)
        self._canvas.set_content_size(width, height)

        # Set zoom to minimum, i.e. scale image so it always fits.
        self._canvas.zoom(0)
Example #8
    def analysis_status_changed(self) -> None:
        if self._analysis is None: return

        status = self._analysis.bn_status.get()
        drop_region = self._analysis.bn_drop_region.get()

        if status is PendantAnalysisJob.Status.WAITING_FOR_IMAGE:
            self.image_artist.clear_data()
        else:
            image = self._analysis.bn_image.get()
            if drop_region is not None:
                image = image[drop_region.y0:drop_region.y1+1, drop_region.x0:drop_region.x1+1]
            self.image_artist.props.extents = Rect2(0, 0, image.shape[1], image.shape[0])
            self.image_artist.set_array(image)

            self.canvas.set_content_size(image.shape[1], image.shape[0])
            self.canvas.zoom(0.0)

        if status is PendantAnalysisJob.Status.WAITING_FOR_IMAGE \
                or status is PendantAnalysisJob.Status.EXTRACTING_FEATURES:
            self.drop_points_artist.clear_data()
        else:
            drop_points = self._analysis.bn_drop_profile_extract.get().copy()
            if drop_region is not None:
                drop_points -= drop_region.position
            data = np.zeros(image.shape[:2], np.uint32)
            data[tuple(drop_points.T)[::-1]] = 0xff8080ff
            self.drop_points_artist.props.extents = Rect2(0, 0, data.shape[1], data.shape[0])
            self.drop_points_artist.set_data(data, cairo.Format.ARGB32, data.shape[1], data.shape[0])

        if status is not PendantAnalysisJob.Status.FINISHED:
            self.drop_fit_artist.props.polyline = None
        else:
            fit = self._analysis.bn_drop_profile_fit.get().copy()
            if drop_region is not None:
                fit -= drop_region.position
            self.drop_fit_artist.props.polyline = fit
Example #9
    def commit_define(self, end_pos: Vector2[float]) -> None:
        assert self.is_defining
        start_pos = self._begin_define_pos
        self._begin_define_pos = None

        region = Rect2(
            pt0=start_pos,
            pt1=end_pos,
        ).map(int)

        clip = self._bn_clip.get()

        if clip is not None:
            region = Rect2(
                x0=clamp(region.x0, clip.x0, clip.x1),
                y0=clamp(region.y0, clip.y0, clip.y1),
                x1=clamp(region.x1, clip.x0, clip.x1),
                y1=clamp(region.y1, clip.y0, clip.y1),
            )

        if region.w == 0 or region.h == 0:
            return

        self.bn_region.set(region)
Example #10
def draw_angle_marker(image: np.ndarray, vertex_pos: Vector2[float],
                      start_angle: float, delta_angle: float, radius: float,
                      color: Tuple[float, float, float]) -> None:
    if not Rect2(position=(0, 0),
                 size=image.shape[1::-1]).contains(vertex_pos):
        # Vertex is outside of the image, ignore.
        return

    end_angle = start_angle + delta_angle

    start_pos = vertex_pos
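    # Negate the angle: image y coordinates increase downwards, so a
    # conventional counter-clockwise angle has to be flipped when drawn.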
    delta_pos = radius * Vector2(math.cos(-end_angle), math.sin(-end_angle))
    end_pos = start_pos + delta_pos

    cv2.line(image,
             pt1=tuple(start_pos.map(int)),
             pt2=tuple(end_pos.map(int)),
             color=color,
             thickness=1)
Example #11
    def draw(self, cr: cairo.Context) -> None:
        polyline = self._polyline
        stroke_width = self._stroke_width
        stroke_color = self._stroke_color

        if polyline is None or len(polyline) == 0:
            self._last_drawn_region = None
            return

        if self._path_cache is not None:
            cr.append_path(self._path_cache)
        else:
            self._show_polyline(cr, polyline)
            self._path_cache = cr.copy_path()
        extents = Rect2(cr.path_extents())

        # Size of one device pixel in user-space units (the cairo matrix maps
        # user space to device space; ignoring rotation, xx and yy are the
        # device-pixels-per-unit scale factors).
        dx = 1 / cr.get_matrix().xx
        dy = 1 / cr.get_matrix().yy

        cr.save()

        if self._scale_strokes:
            stroke_scale = max(dx, dy)
            cr.identity_matrix()
        else:
            stroke_scale = 1.0

        cr.set_line_width(stroke_width)
        cr.set_source_rgba(*stroke_color)
        cr.stroke()

        cr.restore()

        extents = expand_rect(extents, max(stroke_width * stroke_scale, dx,
                                           dy))
        self._last_drawn_region = cairo.Region(
            cairo.RectangleInt(
                int(math.floor(extents.x)),
                int(math.floor(extents.y)),
                int(math.ceil(extents.w)),
                int(math.ceil(extents.h)),
            ))
Example #12
def needle_guess(data: np.ndarray) -> Sequence[float]:
    params = np.empty(len(NeedleParam))
    data = data.astype(float)

    extents = Rect2(data.min(axis=1), data.max(axis=1))
    diagonal = int(math.ceil((extents.w**2 + extents.h**2)**0.5))
    data -= np.reshape(extents.center, (2, 1))
    votes = hough(data, diagonal)

    needles = np.zeros(shape=(votes.shape[0], 3))
    for i in range(votes.shape[0]):
        peaks, props = scipy.signal.find_peaks(votes[i], prominence=0)
        if len(peaks) < 2: continue
        ix = np.argsort(props['prominences'])[::-1]
        peak1_i, peak2_i = peaks[ix[:2]]
        prom1, prom2 = props['prominences'][ix[:2]]
        if prom2 < prom1 / 2: continue

        peak1 = ((peak1_i - 1) / (len(votes[i]) - 3) - 0.5) * diagonal
        peak2 = ((peak2_i - 1) / (len(votes[i]) - 3) - 0.5) * diagonal

        needles[i][0] = (peak1 + peak2) / 2
        needles[i][1] = math.fabs(peak1 - peak2) / 2
        needles[i][2] = prom1 + prom2

    scores = scipy.ndimage.gaussian_filter(needles[:, 2],
                                           sigma=10,
                                           mode='wrap')
    needle_i = scores.argmax()

    theta = -np.pi / 2 + (needle_i / len(needles)) * np.pi
    rho, radius = needles[needle_i][:2]

    # The Hough transform was run on centred data (extents.center was
    # subtracted above), so shift rho back into the original coordinate frame.
    rho_offset = np.cos(theta) * extents.xc + np.sin(theta) * extents.yc
    rho += rho_offset

    params[NeedleParam.ROTATION] = theta
    params[NeedleParam.RHO] = rho
    params[NeedleParam.RADIUS] = radius

    return params
Example #13
    def set_labels(self, labels: Optional[np.ndarray]) -> None:
        if labels is None:
            self._features_artist.clear_data()
            return

        data = colorize_labels(
            labels,
            colors=np.array(
                [
                    0x00000000,
                    0xff8080ff,  # Drop edges
                    0xff8080ff,  # Needle edges
                ],
                dtype=np.uint32).view(np.uint8).reshape(-1, 4),
        )

        width = labels.shape[1]
        height = labels.shape[0]

        self._features_artist.extents = Rect2(0, 0, width, height)
        self._features_artist.set_data(data, cairo.Format.ARGB32, width,
                                       height)
Example #14
    def set_features(self, features: Optional[ConanFeatures]) -> None:
        if features is None:
            self._features_artist.clear_data()
            return

        if features.labels is None:
            self._features_artist.clear_data()
            return

        data = colorize_labels(
            features.labels,
            colors=np.array([
                0x00000000,
                0xffbbbbff,  # All edges
                0xff0000ff,  # Drop edges
            ], dtype=np.uint32).view(np.uint8).reshape(-1, 4),
        )

        width = features.labels.shape[1]
        height = features.labels.shape[0]

        self._features_artist.extents = Rect2(0, 0, width, height)
        self._features_artist.set_data(data, cairo.Format.ARGB32, width, height)
Example #15
def extract_contact_angle_features(
    image,
    baseline: Optional[Line2],
    inverted: bool,
    *,
    roi: Optional[Rect2[int]] = None,
    thresh: float = 0.5,
    labels: bool = False,
) -> ContactAngleFeatures:
    if roi is None:
        roi = Rect2(0, 0, image.shape[1] - 1, image.shape[0] - 1)

    # Clip roi to within image extents.
    roi = Rect2(
        max(0, roi.x0),
        max(0, roi.y0),
        min(image.shape[1], roi.x1),
        min(image.shape[0], roi.y1),
    )

    subimage = image[roi.y0:roi.y1 + 1, roi.x0:roi.x1 + 1]
    if baseline is not None:
        baseline -= roi.position

        if baseline.pt1.x < baseline.pt0.x:
            baseline = Line2(baseline.pt1, baseline.pt0)

        if inverted:
            up = baseline.perp
            right = baseline.unit
            origin = np.array(baseline.pt0)
        else:
            # The origin is at the top in image coordinates; we analyse the
            # drop with the origin at the bottom since that is easier to
            # reason about.
            up = -baseline.perp
            right = baseline.unit
            origin = np.array(baseline.pt0)

    if len(subimage.shape) > 2:
        subimage = cv2.cvtColor(subimage, cv2.COLOR_RGB2GRAY)

    blur = cv2.GaussianBlur(subimage, ksize=(5, 5), sigmaX=0)
    dx = cv2.Scharr(blur, cv2.CV_16S, dx=1, dy=0)
    dy = cv2.Scharr(blur, cv2.CV_16S, dx=0, dy=1)

    # Use magnitude of gradient squared to get sharper edges.
    mask = (dx.astype(float)**2 + dy.astype(float)**2)
    mask = np.sqrt(mask)
    mask = (mask / mask.max() * (2**8 - 1)).astype(np.uint8)

    # Ignore weak gradients.
    mask[mask < thresh * mask.max()] = 0

    cv2.adaptiveThreshold(mask,
                          maxValue=1,
                          adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                          thresholdType=cv2.THRESH_BINARY,
                          blockSize=5,
                          C=0,
                          dst=mask)

    if labels:
        # Hack: Thin edges using cv2.Canny()
        grad_max = (abs(dx) + abs(dy)).max()
        edges = cv2.Canny(mask * dx, mask * dy, grad_max * thresh / 2,
                          grad_max * thresh)
        edge_points = np.array(edges.nonzero()[::-1])
    else:
        edges = None
        edge_points = np.empty((2, 0), dtype=int)

    if baseline is not None:
        mask_ij = np.array(mask.nonzero())
        y = up @ (mask_ij[::-1] - origin.reshape(2, 1))

        ix = y.argsort()
        mask_ij = mask_ij[:, ix]
        y = y[ix]

        # Ignore edges below and within 2 pixels of the baseline.
        stop = np.searchsorted(y, 2.0, side='right')
        mask[tuple(mask_ij[:, :stop])] = 0

        # Use the two largest edges in the image (the needle separates the
        # drop edge into two).
        _, cc_labels, cc_stats, _ = cv2.connectedComponentsWithStats(
            mask, connectivity=4)
        ix = np.argsort(cc_stats[:, cv2.CC_STAT_WIDTH] *
                        cc_stats[:, cv2.CC_STAT_HEIGHT])[::-1]
        ix = ix[:3]
        # Label 0 is the background.
        ix = ix[ix != 0]

        if len(ix) >= 2:
            cc_extents0 = Rect2(
                x=cc_stats[ix[0], cv2.CC_STAT_LEFT],
                y=cc_stats[ix[0], cv2.CC_STAT_TOP],
                w=cc_stats[ix[0], cv2.CC_STAT_WIDTH],
                h=cc_stats[ix[0], cv2.CC_STAT_HEIGHT],
            )
            cc_extents1 = Rect2(
                x=cc_stats[ix[1], cv2.CC_STAT_LEFT],
                y=cc_stats[ix[1], cv2.CC_STAT_TOP],
                w=cc_stats[ix[1], cv2.CC_STAT_WIDTH],
                h=cc_stats[ix[1], cv2.CC_STAT_HEIGHT],
            )

            # If one connected component is contained within the other, or one
            # is much larger than the other, just use the outer/larger one.
            if cc_extents1.x0 > cc_extents0.x0 \
                    and cc_extents1.y0 > cc_extents0.y0 \
                    and cc_extents1.x1 < cc_extents0.x1 \
                    and cc_extents1.y1 < cc_extents0.y1 \
                    or cc_extents0.w*cc_extents0.h > 10*cc_extents1.w*cc_extents1.h:
                mask &= cc_labels == ix[0]
            else:
                mask &= (cc_labels == ix[0]) | (cc_labels == ix[1])
        elif len(ix) == 1:
            mask &= cc_labels == ix[0]

        # Hack: Thin edges using cv2.Canny()
        if edges is None:
            grad_max = (abs(dx) + abs(dy)).max()
            drop_edges = cv2.Canny(mask * dx, mask * dy, grad_max * thresh / 2,
                                   grad_max * thresh)
        else:
            drop_edges = edges & mask

        drop_points = np.array(drop_edges.nonzero()[::-1])

        if drop_points.shape[1] > 0:
            mask = np.zeros(drop_points.shape[1], dtype=bool)

            # Decompose into along-baseline (x) and above-baseline (y)
            # components.
            x, y = [right, up] @ (drop_points - origin.reshape(2, 1))

            # Sort in ascending y coordinate.
            ix = y.argsort()
            x, y = x[ix], y[ix]
            drop_points = drop_points[:, ix]

            # Divide into 2 pixel high level sets.
            levels = np.histogram_bin_edges(y, bins=max(1, int(y.max() / 2)))
            levels_ix = (0, *np.searchsorted(y, levels[1:], side='right'))

            # Find left and right-most edges in pairs of level sets.
            for start, stop in zip(levels_ix,
                                   levels_ix[min(2, len(levels_ix) - 1):]):
                level_set = x[start:stop]

                # Left-to-right index.
                ltr_ix = level_set.argsort()

                # Split into clusters where x distances are less than 2*sqrt(2) ~ 2.828.
                # We use 2*sqrt(2) instead of sqrt(2) to allow for single pixel gaps.
                contiguous_groups = np.split(
                    ltr_ix,
                    (np.diff(level_set[ltr_ix]) > 2.828).nonzero()[0] + 1,
                )
                mask[start + contiguous_groups[0]] = True
                mask[start + contiguous_groups[-1]] = True

            drop_points = drop_points[:, mask]
    else:
        drop_points = np.empty((2, 0), dtype=int)

    edge_points = edge_points + np.reshape(roi.position, (2, 1))
    drop_points = drop_points + np.reshape(roi.position, (2, 1))

    if labels:
        labels_array = np.zeros(image.shape[:2], np.uint8)
        labels_array[tuple(edge_points)[::-1]] = 1
        labels_array[tuple(drop_points)[::-1]] = 2
    else:
        labels_array = None

    return ContactAngleFeatures(
        labels=labels_array,
        drop_points=drop_points,
    )
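
The examples above construct Rect2 in several ways (corner coordinates, pt0/pt1, position/size, x/y/w/h) and read back attributes such as x0, y0, x1, y1, w, h, position, center and contains(). The following is a rough, illustration-only sketch of the interface they appear to assume; it is not the library's actual implementation, which among other things is generic over int/float and also provides map(), xc/yc, Vector2-valued results, and a single flexible constructor:

from dataclasses import dataclass
from typing import Tuple

@dataclass(frozen=True)
class Rect2Sketch:
    # Normalised corner coordinates with x0 <= x1 and y0 <= y1.
    x0: float
    y0: float
    x1: float
    y1: float

    @classmethod
    def from_pts(cls, pt0: Tuple[float, float],
                 pt1: Tuple[float, float]) -> 'Rect2Sketch':
        (x0, x1), (y0, y1) = sorted((pt0[0], pt1[0])), sorted((pt0[1], pt1[1]))
        return cls(x0, y0, x1, y1)

    @classmethod
    def from_position_size(cls, position: Tuple[float, float],
                           size: Tuple[float, float]) -> 'Rect2Sketch':
        return cls(position[0], position[1],
                   position[0] + size[0], position[1] + size[1])

    @property
    def w(self) -> float:
        return self.x1 - self.x0

    @property
    def h(self) -> float:
        return self.y1 - self.y0

    @property
    def position(self) -> Tuple[float, float]:
        return (self.x0, self.y0)

    @property
    def center(self) -> Tuple[float, float]:
        return ((self.x0 + self.x1) / 2, (self.y0 + self.y1) / 2)

    def contains(self, pt: Tuple[float, float]) -> bool:
        return self.x0 <= pt[0] <= self.x1 and self.y0 <= pt[1] <= self.y1

# e.g. Rect2Sketch.from_position_size((0, 0), (640, 480)).contains((100, 100))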