Code Example #1
def test_subdivide_polygon():
    new_square1 = square
    new_square2 = square[:-1]
    new_square3 = square[:-1]
    # test iterative subdivision
    for _ in range(10):
        square1, square2, square3 = new_square1, new_square2, new_square3
        # test different B-Spline degrees
        for degree in range(1, 7):
            mask_len = len(_SUBDIVISION_MASKS[degree][0])
            # test circular
            new_square1 = subdivide_polygon(square1, degree)
            np.testing.assert_array_equal(new_square1[-1], new_square1[0])
            np.testing.assert_equal(new_square1.shape[0],
                                    2 * square1.shape[0] - 1)
            # test non-circular
            new_square2 = subdivide_polygon(square2, degree)
            np.testing.assert_equal(new_square2.shape[0],
                                    2 * (square2.shape[0] - mask_len + 1))
            # test non-circular, preserve_ends
            new_square3 = subdivide_polygon(square3, degree, True)
            np.testing.assert_equal(new_square3[0], square3[0])
            np.testing.assert_equal(new_square3[-1], square3[-1])

            np.testing.assert_equal(new_square3.shape[0],
                                    2 * (square3.shape[0] - mask_len + 2))
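
The test above relies on scikit-image's own `square` fixture and the private `_SUBDIVISION_MASKS` table, neither of which is shown here. A minimal self-contained sketch of the same circular / non-circular / preserve_ends behaviour, using an illustrative square of my own:

import numpy as np
from skimage.measure import subdivide_polygon

# A closed square: the last vertex repeats the first, so the polygon is circular.
square = np.array([[0., 0.], [0., 1.], [1., 1.], [1., 0.], [0., 0.]])

closed = subdivide_polygon(square, degree=2)                          # stays closed
open_poly = subdivide_polygon(square[:-1], degree=2)                  # treated as an open polyline
pinned = subdivide_polygon(square[:-1], degree=2, preserve_ends=True)

print(closed.shape, open_poly.shape, pinned.shape)
print(np.allclose(closed[0], closed[-1]))   # True: first and last vertex still coincide
print(np.allclose(pinned[0], square[0]))    # True: endpoints are preserved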
Code Example #2
File: segbaby.py  Project: Mschikay/FCIS-babycam
    def drawEdge(self, newmask, edge, width=30, degree=7):
        '''
        This function:
        1. expands the segmentation area
        2. smooths the edge
        3. blends the foreground into the background based on the mask
        '''
        # @edge: image onto which the result is drawn and returned
        # @width: paint the edge with a stroke @width pixels wide
        # @degree: B-spline degree used to turn the polyline into a curve
        i, contours, h = cv2.findContours(newmask, cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_NONE)
        longest = 0  # index of the largest contour (the one containing the baby)
        max_len = 0
        for i in range(len(contours)):
            if len(contours[i]) > max_len:
                max_len = len(contours[i])
                longest = i
        maxcontour = contours[longest]
        hull = cv2.convexHull(
            maxcontour
        )  # smoothing: find the min encircling convex based on max contours
        l = len(hull)
        hull = hull.reshape(l, 2)
        hull = np.row_stack((hull, [hull[0][0],
                                    hull[0][1]]))  # loop-locked points
        for _ in range(1):
            curve = measure.subdivide_polygon(
                hull, degree=degree, preserve_ends=True)  # polyline to curve

        lcurve = len(curve)
        curve = (curve.reshape(lcurve, 1, 2)).astype(int)
        cv2.drawContours(edge, [curve], 0, (255, 255, 255),
                         -1)  # paint inside area
        cv2.drawContours(edge, [curve], -1, (255, 255, 255),
                         width)  # paint outside edge
        return edge
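
A hedged, standalone sketch of the same mask-smoothing pipeline (largest contour, convex hull, B-spline subdivision, then a filled and thickened redraw), written against the OpenCV 4.x `findContours` return signature rather than the 3.x one used above; `smooth_mask_edge` is a made-up name:

import cv2
import numpy as np
from skimage import measure

def smooth_mask_edge(mask, width=30, degree=7):
    # OpenCV 4.x returns (contours, hierarchy)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    biggest = max(contours, key=len)                   # largest contour by point count
    hull = cv2.convexHull(biggest).reshape(-1, 2)      # convex outline of that contour
    hull = np.vstack([hull, hull[:1]])                 # close the loop
    curve = measure.subdivide_polygon(hull, degree=degree, preserve_ends=True)
    curve = curve.reshape(-1, 1, 2).astype(np.int32)
    out = np.zeros_like(mask)
    cv2.drawContours(out, [curve], 0, 255, -1)         # fill the smoothed region
    cv2.drawContours(out, [curve], -1, 255, width)     # thicken its border
    return out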
Code Example #3
def simplify(centers, plot=True):
    fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(sz1, sz2))
    simp_fault = []
    comp_fault = []
    for center in centers:
        coords = []
        for line in center:
            coord = line.coords
            coords.append(coord)
        coords = num.array(coords)
        geom_object = num.array([(coords[i][m])
                                 for i in range(num.shape(coords)[0])
                                 for m in range(num.shape(coords[i])[0])])
        new_geom_object = geom_object.copy()
        for _ in range(1):
            new_geom_object = subdivide_polygon(new_geom_object,
                                                degree=2,
                                                preserve_ends=True)
            new_geom_object = approximate_polygon(new_geom_object,
                                                  tolerance=0.8)
        comp_fault.append(new_geom_object)

        appr_geom_object = geom_object.copy()
        # heavily simplify the original polygon with the Douglas-Peucker algorithm
        appr_geom_object = approximate_polygon(appr_geom_object, tolerance=10)

        ax1.scatter(appr_geom_object[:, 1], appr_geom_object[:, 0])
        simp_fault.append(appr_geom_object[:, :])

        ax2.scatter(new_geom_object[:, 1], new_geom_object[:, 0])
    ax1.set_title('Simple line')
    ax2.set_title('More complexity')
    plt.close()
    return simp_fault, comp_fault
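
`simplify` above depends on the caller's `centers` geometry objects, the `num` alias for NumPy, and the plotting globals `sz1`/`sz2`. The core smooth-then-simplify step can be sketched on a plain coordinate array like this (hedged; values are illustrative):

import numpy as np
from skimage.measure import approximate_polygon, subdivide_polygon

line = np.array([[0., 0.], [1., 3.], [2., 0.5], [3., 4.], [4., 0.]])

smooth = line.copy()
smooth = subdivide_polygon(smooth, degree=2, preserve_ends=True)   # add B-spline detail
smooth = approximate_polygon(smooth, tolerance=0.8)                # then prune with Douglas-Peucker

coarse = approximate_polygon(line, tolerance=10)                   # heavy simplification of the raw line
print(len(line), len(smooth), len(coarse))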
Code Example #4
def handle_highlight():
    if request.method == 'POST':
        f = request.files['file']
        bgname = "data/%s_%s.png" % (request.values["id"],
                                     request.values["floor"])
        fname = "data/%s_%s.png" % (request.values["id"],
                                    request.values["shopId"])
        mname = "data/m_%s_%s.png" % (request.values["id"],
                                      request.values["shopId"])
        f.save(fname)

        xy = (int(request.values["offsetLeft"]),
              int(request.values["offsetTop"]))
        save_mask_image(bgname, fname, mname, xy)

        dst = io.imread(fname, as_grey=True)
        contours = measure.find_contours(dst, 0.5)
        cords = np.concatenate(contours)

        new_img = measure.subdivide_polygon(cords,
                                            degree=2,
                                            preserve_ends=True)
        appr_img = measure.approximate_polygon(new_img, tolerance=1)

        return json.dumps(appr_img.tolist(), cls=NumpyEncoder)
Code Example #5
File: geojson_from_mpl.py  Project: GabsRSA/stageemi
def return_smoother_poly(poly, **kwargs):
    new_poly = poly.copy()
    for _ in range(kwargs.get("level", 5)):
        new_poly = subdivide_polygon(new_poly,
                                     degree=kwargs.get("degree", 2),
                                     preserve_ends=True)

    return new_poly
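
A possible call of `return_smoother_poly`, with a placeholder square polygon:

import numpy as np

poly = np.array([[0., 0.], [4., 0.], [4., 4.], [0., 4.], [0., 0.]])
smoothed = return_smoother_poly(poly, level=3, degree=2)
print(poly.shape, '->', smoothed.shape)   # the polygon gains vertices at every subdivision pass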
Code Example #6
    def find_roi(self):
        """
        Finds the ROI based on a gaussian filter and threshold.
        Using this mask gives similar results to the Icy mask
        :return: cont: Numpy array of all the points determining the shape of the ROI
        :return: roivolume: Area of the detected ROI
        :return: masked_detections: list of tuples (binary image, base image) with only the spots inside the ROI
        """
        # Filtered image for masking: gaussian filter + threshold + fill holes
        filt = np.copy(self.image[0])
        for ch in range(1, len(self.image)):
            filt += self.image[ch]
        filt = (filters.gaussian(filt, 10))  # Higher sigma for smoother edges

        threshold = np.mean(filt) * (100 / ROI_THRESHOLD)
        filt[filt < threshold] = 0
        filt[filt >= threshold] = 1
        filt = morphology.binary_closing(filt)
        # filt = morphology.binary_fill_holes(filt)

        # Keep only areas with a significant volume
        labels = label(filt, connectivity=2)
        label_props = regionprops(labels)
        mask = np.copy(labels)
        arealist = []
        for i in range(len(label_props)):
            arealist.append(label_props[i].area)
        for i in range(len(label_props)):
            if label_props[i].area < np.mean(arealist):
                mask[mask == i + 1] = 0
        mask[mask > 0] = 1

        roivolume = len(mask[np.nonzero(mask)])

        # Find contours around the mask. Padding is used to properly find contours near maximum edges
        pad_mask = np.pad(mask, ((0, 2), (0, 2)),
                          mode='constant',
                          constant_values=0)
        cont = find_contours(pad_mask,
                             level=0,
                             fully_connected='high',
                             positive_orientation='high')

        print('Number of regions in ROI:', len(cont))
        print('Volume of ROI:', roivolume)

        # Subdivide polygon for smoother contours and more precise points for distance to boundary
        for c in range(len(cont)):
            cont[c] = subdivide_polygon(cont[c], 7)

        # Binary image of spots inside the ROI
        masked_detections = [(self.detections[i][0] * mask, self.image[i], i)
                             for i in range(len(self.detections))]

        for c in range(len(cont)):
            cont[c] = np.array(cont[c])

        return cont, roivolume, masked_detections
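
The Gaussian-filter-plus-threshold masking described in the docstring can be sketched in isolation roughly like this (hedged; `ROI_THRESHOLD` is assumed to be 50 and the input image is synthetic):

import numpy as np
from skimage import filters, morphology

img = np.zeros((64, 64))
img[20:45, 20:45] = 2.0                   # bright block standing in for the signal
filt = filters.gaussian(img, 10)          # high sigma gives smooth ROI edges
threshold = filt.mean() * (100 / 50)      # ROI_THRESHOLD assumed to be 50 here
mask = morphology.binary_closing(filt >= threshold)
print(mask.sum(), 'pixels kept in the ROI mask')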
Code Example #7
def slice_roi_contours(mask_2d,
                       sop,
                       ipp,
                       trans_matrix,
                       roi_name,
                       contour_type=cv2.RETR_TREE,
                       chain_mode=cv2.CHAIN_APPROX_NONE,
                       smooth_polygon_times=2,
                       smooth_polgygon_degree=3):
    """
    针对每张mask,基于相应的转换矩阵,获得轮廓点信息。
    :param mask_2d:
    :param sop: slice的唯一标识号
    :param ipp:
    :param trans_matrix: 图像坐标到物理坐标的变换矩阵
    :param roi_name:
    :param contour_type:
    :param chain_mode:
    :param smooth_polygon_times:
    :param smooth_polgygon_degree:
    :return:
    """

    label_list = []
    # plt.imshow(mask_2d)
    points_list = find_fontours(mask_2d,
                                contour_type=contour_type,
                                chain_mode=chain_mode)
    # Correct approach: store the contour points of each connected region separately,
    # so that multiple connected regions are not chaotically linked together in the viewer.
    trans_matrix = grid2world4slice(trans_matrix, ipp)
    for k in range(len(points_list)):
        obj_k_points = points_list[k][:, 0, :]
        """如果没有如下操作,前端显示中将出现重大缺口"""
        # 在n*2的坐标点最后,添加初始的点,让整个连通区域闭合。
        obj_k_points = np.concatenate(
            [obj_k_points, obj_k_points[0, :].reshape(1, -1)], axis=0)
        if obj_k_points.shape[0] > 5:
            for _ in range(smooth_polygon_times):
                obj_k_points = subdivide_polygon(obj_k_points,
                                                 degree=smooth_polgygon_degree)
        obj_contour_n4 = np.ones(
            (obj_k_points.shape[0], 4))  # build an n*4 matrix for the subsequent transformation
        obj_contour_n4[:, :2] = obj_k_points
        obj_contour_n4[:, 2] = 0
        obj_contour_n4_world_coor = np.array(
            np.dot(obj_contour_n4, trans_matrix))
        obj_contour_n4_world_coor = np.round(obj_contour_n4_world_coor,
                                             decimals=4)
        # obj_contour_n4_world_coor[:,2] = ipp
        slice_output = SliceContours(obj_contour_n4_world_coor[:, :3], sop,
                                     roi_name)
        label_list.append(slice_output)
    return label_list
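
The n*4 homogeneous-coordinate step in the middle of `slice_roi_contours` is the usual way to push 2-D contour points through a 4x4 image-to-world affine matrix. A hedged, standalone illustration with a made-up translation-and-scaling matrix in place of the project's DICOM-derived one:

import numpy as np

points_2d = np.array([[10., 20.], [11., 20.], [11., 21.]])   # contour points in image coordinates

# toy image-to-world matrix: scale by the pixel spacing, translate by the slice origin
trans_matrix = np.array([[0.5, 0.0, 0.0, 0.0],
                         [0.0, 0.5, 0.0, 0.0],
                         [0.0, 0.0, 1.0, 0.0],
                         [5.0, 7.0, -12.0, 1.0]])

homog = np.ones((points_2d.shape[0], 4))   # n*4 rows of [x, y, z=0, 1]
homog[:, :2] = points_2d
homog[:, 2] = 0
world = homog @ trans_matrix               # row-vector convention, as in the code above
print(np.round(world[:, :3], 4))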
Code Example #8
def subd_polygon(new_object):
    """
    Uses scikitimage approximate_polygon function to subdivide polygons
    from a mask of floodfill output

    @parms new_object output mask from floodfill
    """
    contour = find_contours(new_object, 0)[0]
    subd_polygon_coords = subdivide_polygon(contour,
                                            degree=1,
                                            preserve_ends=True)
    return (subd_polygon_coords)
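
A hedged usage sketch for `subd_polygon`, with a synthetic square blob standing in for a real flood-fill mask:

import numpy as np

mask = np.zeros((20, 20), dtype=float)
mask[5:15, 5:15] = 1.0                 # square blob standing in for a flood-fill result

coords = subd_polygon(mask)
print(coords.shape)                    # subdivided contour of the blob's outline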
Code Example #9
def to_contours():
    for root, dirs, files in os.walk('data2'):
        for fname in files:
            dst = io.imread('data2/' + fname, as_grey=True)
            contours = measure.find_contours(dst, 0.5)
            cords = np.concatenate(contours)

            new_img = measure.subdivide_polygon(cords,
                                                degree=2,
                                                preserve_ends=True)
            appr_img = measure.approximate_polygon(new_img, tolerance=1)

            print(fname, len(appr_img.tolist()))
Code Example #10
def subdivide_polygon_closed(input_image,
                             tolerance=conf.polygon_subdivision_tolerance,
                             simplify=False):
    contours_list = measure.find_contours(input_image, 0.2)
    coords = []
    for contour in contours_list:
        p = measure.subdivide_polygon(contour,
                                      degree=conf.b_spline_degree,
                                      preserve_ends=True)
        if simplify:
            coords.append(measure.approximate_polygon(p, tolerance=tolerance))
        else:
            coords.append(p)
    return contours_list, coords
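
`subdivide_polygon_closed` pulls `polygon_subdivision_tolerance` and `b_spline_degree` from a project-level `conf` module that is not shown. A self-contained, hedged equivalent with those two values inlined as plain arguments:

import numpy as np
from skimage import measure

def subdivide_polygon_closed_standalone(input_image, tolerance=2.0, degree=2, simplify=False):
    # tolerance and degree are assumed stand-ins for the conf values
    contours_list = measure.find_contours(input_image, 0.2)
    coords = []
    for contour in contours_list:
        p = measure.subdivide_polygon(contour, degree=degree, preserve_ends=True)
        coords.append(measure.approximate_polygon(p, tolerance=tolerance) if simplify else p)
    return contours_list, coords

img = np.zeros((30, 30))
img[8:22, 8:22] = 1.0
_, smoothed = subdivide_polygon_closed_standalone(img, simplify=True)
print(len(smoothed), smoothed[0].shape)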
Code Example #11
    def find_roi_contour(self):
        """
        Find the points creating the outline of the mask to measure distance to boundary
        :return: Numpy array of points defining the contour
        """
        # Find contours around the mask. Padding is used to properly find contours near maximum edges
        pad_mask = np.pad(self.roi_mask, ((0, 2), (0, 2)), mode='constant', constant_values=0)
        roi_contour = find_contours(pad_mask, level=0, fully_connected='high', positive_orientation='high')

        # Subdivide polygon for smoother contours and more precise points for distance to boundary
        for c in range(len(roi_contour)):
            roi_contour[c] = subdivide_polygon(roi_contour[c], 7)

        return roi_contour
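
The zero-padding before `find_contours` is what lets the contour close around regions touching the array's bottom or right edge; a hedged, minimal demonstration of that effect (using level 0.5 rather than the level-0/'high' combination above):

import numpy as np
from skimage.measure import find_contours, subdivide_polygon

mask = np.zeros((10, 10))
mask[6:, 6:] = 1                       # region touching the bottom-right corner

open_contours = find_contours(mask, level=0.5)
padded = np.pad(mask, ((0, 2), (0, 2)), mode='constant', constant_values=0)
closed_contours = find_contours(padded, level=0.5)

# the padded version yields a closed loop that can then be smoothed
smooth = subdivide_polygon(closed_contours[0], 7)
print(len(open_contours[0]), len(closed_contours[0]), len(smooth))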
Code Example #12
def extract_polygons(im: Image.Image, bounds: Dict[str, Any]) -> Image.Image:
    """
    Yields the subimages of image im defined in the list of bounding polygons
    with baselines preserving order.

    Args:
        im: Input image
        bounds: A list of dicts in baseline:
            ```
            {'type': 'baselines',
             'lines': [{'baseline': [[x_0, y_0], ... [x_n, y_n]],
                        'boundary': [[x_0, y_0], ... [x_n, y_n]]},
                       ....]
            }
            ```
            or bounding box format:
            ```
            {'boxes': [[x_0, y_0, x_1, y_1], ...],
             'text_direction': 'horizontal-lr'}
            ```

    Yields:
        The extracted subimage
    """
    if 'type' in bounds and bounds['type'] == 'baselines':
        # select proper interpolation scheme depending on shape
        if im.mode == '1':
            order = 0
            im = im.convert('L')
        else:
            order = 1
        im = np.array(im)

        for line in bounds['lines']:
            if line['boundary'] is None:
                raise KrakenInputException('No boundary given for line')
            pl = np.array(line['boundary'])
            baseline = np.array(line['baseline'])
            c_min, c_max = int(pl[:, 0].min()), int(pl[:, 0].max())
            r_min, r_max = int(pl[:, 1].min()), int(pl[:, 1].max())

            if (pl < 0).any() or (pl.max(axis=0)[::-1] >= im.shape[:2]).any():
                raise KrakenInputException(
                    'Line polygon outside of image bounds')
            if (baseline < 0).any() or (baseline.max(axis=0)[::-1] >=
                                        im.shape[:2]).any():
                raise KrakenInputException('Baseline outside of image bounds')

            # fast path for straight baselines requiring only rotation
            if len(baseline) == 2:
                baseline = baseline.astype(float)
                # calculate direction vector
                lengths = np.linalg.norm(np.diff(baseline.T), axis=0)
                p_dir = np.mean(np.diff(baseline.T) * lengths / lengths.sum(),
                                axis=1)
                p_dir = (p_dir.T / np.sqrt(np.sum(p_dir**2, axis=-1)))
                angle = np.arctan2(p_dir[1], p_dir[0])
                patch = im[r_min:r_max + 1, c_min:c_max + 1].copy()
                offset_polygon = pl - (c_min, r_min)
                r, c = draw.polygon(offset_polygon[:, 1], offset_polygon[:, 0])
                mask = np.zeros(patch.shape[:2], dtype=bool)
                mask[r, c] = True
                patch[mask != True] = 0
                extrema = offset_polygon[(0, -1), :]
                # scale line image to max 600 pixel width
                tform, rotated_patch = _rotate(patch,
                                               angle,
                                               center=extrema[0],
                                               scale=1.0,
                                               cval=0)
                i = Image.fromarray(rotated_patch.astype('uint8'))
            # normal slow path with piecewise affine transformation
            else:
                if len(pl) > 50:
                    pl = approximate_polygon(pl, 2)
                full_polygon = subdivide_polygon(pl, preserve_ends=True)
                pl = geom.MultiPoint(full_polygon)

                bl = zip(baseline[:-1:], baseline[1::])
                bl = [geom.LineString(x) for x in bl]
                cum_lens = np.cumsum([0] + [line.length for line in bl])
                # distance of intercept from start point and number of line segment
                control_pts = []
                for point in pl.geoms:
                    npoint = np.array(point.coords)[0]
                    line_idx, dist, intercept = min(
                        ((idx, line.project(point),
                          np.array(
                              line.interpolate(line.project(point)).coords))
                         for idx, line in enumerate(bl)),
                        key=lambda x: np.linalg.norm(npoint - x[2]))
                    # absolute distance from start of line
                    line_dist = cum_lens[line_idx] + dist
                    intercept = np.array(intercept)
                    # side of line the point is at
                    side = np.linalg.det(
                        np.array([[
                            baseline[line_idx + 1][0] - baseline[line_idx][0],
                            npoint[0] - baseline[line_idx][0]
                        ],
                                  [
                                      baseline[line_idx + 1][1] -
                                      baseline[line_idx][1],
                                      npoint[1] - baseline[line_idx][1]
                                  ]]))
                    side = np.sign(side)
                    # signed perpendicular distance from the rectified distance
                    per_dist = side * np.linalg.norm(npoint - intercept)
                    control_pts.append((line_dist, per_dist))
                # calculate baseline destination points
                bl_dst_pts = baseline[0] + np.dstack(
                    (cum_lens, np.zeros_like(cum_lens)))[0]
                # calculate bounding polygon destination points
                pol_dst_pts = np.array([
                    baseline[0] + (line_dist, per_dist)
                    for line_dist, per_dist in control_pts
                ])
                # extract bounding box patch
                c_dst_min, c_dst_max = int(pol_dst_pts[:, 0].min()), int(
                    pol_dst_pts[:, 0].max())
                r_dst_min, r_dst_max = int(pol_dst_pts[:, 1].min()), int(
                    pol_dst_pts[:, 1].max())
                output_shape = np.around(
                    (r_dst_max - r_dst_min + 1, c_dst_max - c_dst_min + 1))
                patch = im[r_min:r_max + 1, c_min:c_max + 1].copy()
                # offset src points by patch shape
                offset_polygon = full_polygon - (c_min, r_min)
                offset_baseline = baseline - (c_min, r_min)
                # offset dst point by dst polygon shape
                offset_bl_dst_pts = bl_dst_pts - (c_dst_min, r_dst_min)
                offset_pol_dst_pts = pol_dst_pts - (c_dst_min, r_dst_min)
                # mask out points outside bounding polygon
                mask = np.zeros(patch.shape[:2], dtype=bool)
                r, c = draw.polygon(offset_polygon[:, 1], offset_polygon[:, 0])
                mask[r, c] = True
                patch[mask != True] = 0
                # estimate piecewise transform
                src_points = np.concatenate((offset_baseline, offset_polygon))
                dst_points = np.concatenate(
                    (offset_bl_dst_pts, offset_pol_dst_pts))
                tform = PiecewiseAffineTransform()
                tform.estimate(src_points, dst_points)
                o = warp(patch,
                         tform.inverse,
                         output_shape=output_shape,
                         preserve_range=True,
                         order=order)
                i = Image.fromarray(o.astype('uint8'))
            yield i.crop(i.getbbox()), line
    else:
        if bounds['text_direction'].startswith('vertical'):
            angle = 90
        else:
            angle = 0
        for box in bounds['boxes']:
            if isinstance(box, tuple):
                box = list(box)
            if (box < [0, 0, 0, 0] or box[::2] >= [im.size[0], im.size[0]]
                    or box[1::2] >= [im.size[1], im.size[1]]):
                logger.error('bbox {} is outside of image bounds {}'.format(
                    box, im.size))
                raise KrakenInputException('Line outside of image bounds')
            yield im.crop(box).rotate(angle, expand=True), box
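
A hedged usage sketch for `extract_polygons` in the baseline mode documented above; the file name and coordinates are placeholders:

from PIL import Image

page = Image.open('page.png')                       # hypothetical scanned page
bounds = {'type': 'baselines',
          'lines': [{'baseline': [[100, 120], [400, 118]],
                     'boundary': [[90, 90], [410, 90], [410, 145], [90, 145]]}]}

for idx, (line_img, line) in enumerate(extract_polygons(page, bounds)):
    line_img.save(f'line_{idx}.png')                # one cropped, rectified image per line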
Code Example #13
File: Image_Detection.py  Project: turnerhusa/TP_EAS
edge_roberts = roberts(image)
edge_sobel = prewitt(image)

edges = skimage.feature.canny(
    image=image,
    sigma=sigma,
    low_threshold=low_threshold,
    high_threshold=high_threshold,
)

contours = measure.find_contours(edges, .08)

new_contour = contours[0]
for _ in range(5):
    new_contour = subdivide_polygon(new_contour, degree=2, preserve_ends=True)

print(str(len(contours)) + ", " + str(len(contours[0])))
# contours_unsort = measure.find_contours(edges, 0.8)

# dtype = [('x',float), ('y',float)]
# contours = np.ndarray(contours_unsort, dtype=dtype)
# contours.sort(order='x')

newContour_stack = LifoQueue()
numOfContours = len(contours)
for n in range(0, numOfContours):
    #contours[n] = np.array(( [contours[n][0][0], contours[n][0][1]], [contours[n][-1][0], contours[n][-1][1]] ))
    if (n > 0):
        #insert connection line
        newContour_stack.put(
Code Example #14
        dd = skimage.measure.regionprops(labels)
        print(len(dd))
        dst = skimage.color.label2rgb(labels)  # show each label in a different color
        print('regions number:', labels.max() + 1)  # number of connected regions (labels start at 0)
        fig, axi2 = plt.subplots(nrows=1, ncols=1)

        for region in dd:
            # take regions with large enough areas
            if region.area >= 1000:
                # draw rectangle around segmented coins
                minr, minc, maxr, maxc = region.bbox
                rect = cv2.rectangle(dst, (minc, minr), (maxc, maxr),
                                     (0, 0, 255), 2)
                # print('xxx:', region.coords)
                new_xx = subdivide_polygon(region.coords,
                                           degree=5,
                                           preserve_ends=True)
                coords = approximate_polygon(new_xx, tolerance=40)
                print('yyy:', coords)

                axi2.plot(coords[:, 1], coords[:, 0], '-g', linewidth=2)
        cv2.imshow('cc', dst)
        plt.show()

        # approximate / simplify coordinates of the
        # for contour in find_contours(xx, 0):
        #     coords = approximate_polygon(contour, tolerance=2.5)
        #     coords2 = approximate_polygon(contour, tolerance=39.5)
        #     print("Number of coordinates:", len(contour), len(coords), len(coords2))

    cv2.imshow('xx', xx)
Code Example #15
File: plot_polygon.py  Project: yuxi5002/skimage
import numpy as np
import matplotlib.pyplot as plt

from skimage.measure import approximate_polygon, subdivide_polygon

hand = np.array([[1.64516129, 1.16145833],
                 [1.64516129, 1.59375],
                 [1.35080645, 1.921875],
                 [1.375, 2.18229167],
                 [1.68548387, 1.9375],
                 [1.60887097, 2.55208333],
                 [1.68548387, 2.69791667],
                 [1.76209677, 2.56770833],
                 [1.83064516, 1.97395833],
                 [1.89516129, 2.75],
                 [1.9516129, 2.84895833],
                 [2.01209677, 2.76041667],
                 [1.99193548, 1.99479167],
                 [2.11290323, 2.63020833],
                 [2.2016129, 2.734375],
                 [2.25403226, 2.60416667],
                 [2.14919355, 1.953125],
                 [2.30645161, 2.36979167],
                 [2.39112903, 2.36979167],
                 [2.41532258, 2.1875],
                 [2.1733871, 1.703125],
                 [2.07782258, 1.16666667]])

# subdivide polygon using 2nd degree B-Splines
new_hand = hand.copy()
for _ in range(5):
    new_hand = subdivide_polygon(new_hand, degree=2, preserve_ends=True)

# approximate subdivided polygon with Douglas-Peucker algorithm
appr_hand = approximate_polygon(new_hand, tolerance=0.02)

print("Number of coordinates:", len(hand), len(new_hand), len(appr_hand))

fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(9, 4))

ax1.plot(hand[:, 0], hand[:, 1])
ax1.plot(new_hand[:, 0], new_hand[:, 1])
ax1.plot(appr_hand[:, 0], appr_hand[:, 1])


# create two ellipses in image
img = np.zeros((800, 800), 'int32')
Code Example #16
def draw_line(img_path, save_path, xy_list):
    '''
    Draw the outline of each object and fill it with color.
    img_path: path of the input image
    save_path: path where the rendered result is saved
    xy_list: list of segmentation polygons, one sequence of (x, y) points per object
    '''
    from PIL import Image
    from pylab import imshow, save
    from pylab import array
    from pylab import plot
    from pylab import title
    import numpy as np
    import matplotlib.pyplot as plt
    from skimage import measure, data, color
    import cv2

    # read the image into an array
    im = array(Image.open(img_path))

    # subtract the image from itself to make it black (the background); the foreground
    # is then painted from the segmentation coordinates in xy_list
    im = im - im
    imshow(im)

    fig = plt.gcf()
    fig.set_facecolor('black')

    # some images in the COCO dataset are single-channel
    if len(im.shape) == 3:
        height, width, channels = im.shape
    elif len(im.shape) == 2:
        height, width = im.shape


    # with dpi=300, the saved image size equals height*width pixels
    fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
    plt.margins(0, 0)
    for i in range(len(xy_list)):
        hand = np.array(xy_list[i])
        new_hand = hand.copy()
        for _ in range(5):
            new_hand = measure.subdivide_polygon(new_hand, degree=2)
        appr_hand = measure.approximate_polygon(new_hand, tolerance=0.02)

        # draw the segmentation boundary
        plt.plot(new_hand[:, 0],
                 new_hand[:, 1],
                 'r',
                 linewidth=0.5,
                 color='white')
        plt.xticks([])
        plt.yticks([])
        plt.axis('off')
        # fill the region with color
        plt.fill(new_hand[:, 0], new_hand[:, 1], facecolor='white', alpha=1)
    plt.close()

    fig.savefig(save_path,
                format='jpg',
                transparent=True,
                dpi=300,
                pad_inches=0)
    del (xy_list)
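
A hedged usage sketch for `draw_line`; the paths and the single example polygon are placeholders (each entry of `xy_list` is one object's sequence of (x, y) vertices):

seg = [[10, 10], [60, 12], [80, 50], [30, 70], [12, 40]]   # one object's polygon
draw_line('input.jpg', 'mask_out.jpg', [seg])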
Code Example #17
File: op4.py  Project: GanYuLin/opencv-project
hand = np.array([[1.64516129, 1.16145833], [1.64516129, 1.59375],
                 [1.35080645, 1.921875], [1.375, 2.18229167],
                 [1.68548387, 1.9375], [1.60887097, 2.55208333],
                 [1.68548387, 2.69791667], [1.76209677, 2.56770833],
                 [1.83064516, 1.97395833], [1.89516129, 2.75],
                 [1.9516129, 2.84895833], [2.01209677, 2.76041667],
                 [1.99193548, 1.99479167], [2.11290323, 2.63020833],
                 [2.2016129, 2.734375], [2.25403226, 2.60416667],
                 [2.14919355, 1.953125], [2.30645161, 2.36979167],
                 [2.39112903, 2.36979167], [2.41532258, 2.1875],
                 [2.1733871, 1.703125], [2.07782258, 1.16666667]])

# subdivide the polygon using 2nd-degree B-splines
new_hand = hand.copy()
for _ in range(5):
    new_hand = measure.subdivide_polygon(new_hand, degree=2)

# approximate subdivided polygon with Douglas-Peucker algorithm
appr_hand = measure.approximate_polygon(new_hand, tolerance=0.02)

print("Number of coordinates:", len(hand), len(new_hand), len(appr_hand))

fig, axes = plt.subplots(2, 2, figsize=(9, 8))
ax0, ax1, ax2, ax3 = axes.ravel()

ax0.plot(hand[:, 0], hand[:, 1], 'r')
ax0.set_title('original hand')
# plt.show()
ax1.plot(new_hand[:, 0], new_hand[:, 1], 'g')
ax1.set_title('subdivide_polygon')
# plt.show()
Code Example #18
File: segmentation.py  Project: free-variation/kraken
def extract_polygons(im: Image.Image, bounds: Dict[str, Any]) -> Image:
    """
    Yields the subimages of image im defined in the list of bounding polygons
    with baselines preserving order.

    Args:
        im (PIL.Image.Image): Input image
        bounds (list): A list of tuples (x1, y1, x2, y2)

    Yields:
        (PIL.Image) the extracted subimage
    """
    if 'type' in bounds and bounds['type'] == 'baselines':
        old_settings = np.seterr(all='ignore')

        siz = np.array(im.size, dtype=np.float)
        # select proper interpolation scheme depending on shape
        if im.mode == '1':
            order = 0
            im = im.convert('L')
        else:
            order = 1
        im = np.array(im)

        for line in bounds['lines']:
            if not line['boundary']: continue  # AHT
            pl = np.array(line['boundary'])
            #print(im.shape,pl); sys.exit(0) # AHT
            pl = np.clip(pl, [0, 0], [im.shape[1] - 1, im.shape[0] - 1])  #AHT
            baseline = np.array(line['baseline'])
            c_min, c_max = int(pl[:, 0].min()), int(pl[:, 0].max())
            r_min, r_max = int(pl[:, 1].min()), int(pl[:, 1].max())

            if (pl < 0).any() or (pl.max(axis=0)[::-1] >= im.shape[:2]).any():
                raise KrakenInputException(
                    f'Line polygon outside of image bounds')
            if (baseline < 0).any() or (baseline.max(axis=0)[::-1] >=
                                        im.shape[:2]).any():
                raise KrakenInputException('Baseline outside of image bounds')

            # fast path for straight baselines requiring only rotation
            if len(baseline) == 2:
                baseline = baseline.astype(np.float)
                # calculate direction vector
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore', RuntimeWarning)
                    slope, _, _, _, _ = linregress(baseline[:, 0], baseline[:,
                                                                            1])
                if np.isnan(slope):
                    p_dir = np.array([
                        0.,
                        np.sign(np.diff(baseline[(0, -1), 1])).item() * 1.
                    ])
                else:
                    p_dir = np.array([
                        1,
                        np.sign(np.diff(baseline[(0, -1), 0])).item() * slope
                    ])
                    p_dir = (p_dir.T / np.sqrt(np.sum(p_dir**2, axis=-1)))
                angle = np.arctan2(p_dir[1], p_dir[0])
                patch = im[r_min:r_max + 1, c_min:c_max + 1].copy()
                offset_polygon = pl - (c_min, r_min)
                r, c = draw.polygon(offset_polygon[:, 1], offset_polygon[:, 0])
                mask = np.zeros(patch.shape[:2], dtype=np.bool)
                mask[r, c] = True
                #patch[mask != True] = 0
                patch[mask != True] = 255  # AHT
                extrema = offset_polygon[(0, -1), :]
                # scale line image to max 600 pixel width
                tform, rotated_patch = _rotate(patch,
                                               angle,
                                               center=extrema[0],
                                               scale=1.0,
                                               cval=0)
                i = Image.fromarray(rotated_patch.astype('uint8'))
            # normal slow path with piecewise affine transformation
            else:
                if len(pl) > 50:
                    pl = approximate_polygon(pl, 2)
                full_polygon = subdivide_polygon(pl, preserve_ends=True)
                pl = geom.MultiPoint(full_polygon)

                bl = zip(baseline[:-1:], baseline[1::])
                bl = [geom.LineString(x) for x in bl]
                cum_lens = np.cumsum([0] + [l.length for l in bl])
                # distance of intercept from start point and number of line segment
                control_pts = []
                for point in pl.geoms:
                    npoint = np.array(point)
                    line_idx, dist, intercept = min(
                        ((idx, line.project(point),
                          np.array(line.interpolate(line.project(point))))
                         for idx, line in enumerate(bl)),
                        key=lambda x: np.linalg.norm(npoint - x[2]))
                    # absolute distance from start of line
                    line_dist = cum_lens[line_idx] + dist
                    intercept = np.array(intercept)
                    # side of line the point is at
                    side = np.linalg.det(
                        np.array([[
                            baseline[line_idx + 1][0] - baseline[line_idx][0],
                            npoint[0] - baseline[line_idx][0]
                        ],
                                  [
                                      baseline[line_idx + 1][1] -
                                      baseline[line_idx][1],
                                      npoint[1] - baseline[line_idx][1]
                                  ]]))
                    side = np.sign(side)
                    # signed perpendicular distance from the rectified distance
                    per_dist = side * np.linalg.norm(npoint - intercept)
                    control_pts.append((line_dist, per_dist))
                # calculate baseline destination points
                bl_dst_pts = baseline[0] + np.dstack(
                    (cum_lens, np.zeros_like(cum_lens)))[0]
                # calculate bounding polygon destination points
                pol_dst_pts = np.array([
                    baseline[0] + (line_dist, per_dist)
                    for line_dist, per_dist in control_pts
                ])
                # extract bounding box patch
                c_dst_min, c_dst_max = int(pol_dst_pts[:, 0].min()), int(
                    pol_dst_pts[:, 0].max())
                r_dst_min, r_dst_max = int(pol_dst_pts[:, 1].min()), int(
                    pol_dst_pts[:, 1].max())
                output_shape = np.around(
                    (r_dst_max - r_dst_min + 1, c_dst_max - c_dst_min + 1))
                patch = im[r_min:r_max + 1, c_min:c_max + 1].copy()
                # offset src points by patch shape
                offset_polygon = full_polygon - (c_min, r_min)
                offset_baseline = baseline - (c_min, r_min)
                # offset dst point by dst polygon shape
                offset_bl_dst_pts = bl_dst_pts - (c_dst_min, r_dst_min)
                offset_pol_dst_pts = pol_dst_pts - (c_dst_min, r_dst_min)
                # mask out points outside bounding polygon
                mask = np.zeros(patch.shape[:2], dtype=np.bool)
                r, c = draw.polygon(offset_polygon[:, 1], offset_polygon[:, 0])
                mask[r, c] = True
                #patch[mask != True] = 0
                patch[mask != True] = 255  # AHT
                # estimate piecewise transform
                src_points = np.concatenate((offset_baseline, offset_polygon))
                dst_points = np.concatenate(
                    (offset_bl_dst_pts, offset_pol_dst_pts))
                tform = PiecewiseAffineTransform()
                tform.estimate(src_points, dst_points)
                #o = warp(patch, tform.inverse, output_shape=output_shape, preserve_range=True, order=order)
                o = warp(patch,
                         tform.inverse,
                         output_shape=output_shape,
                         preserve_range=True,
                         order=order,
                         mode='edge')  # AHT
                i = Image.fromarray(o.astype('uint8'))
            comp_BB = ImageOps.invert(i.convert('L')).getbbox()  # AHT
            #yield i.crop(i.getbbox()), line
            yield i.crop(comp_BB), line  # AHT
    else:
        if bounds['text_direction'].startswith('vertical'):
            angle = 90
        else:
            angle = 0
        for box in bounds['boxes']:
            if isinstance(box, tuple):
                box = list(box)
            if (box < [0, 0, 0, 0] or box[::2] >= [im.size[0], im.size[0]]
                    or box[1::2] >= [im.size[1], im.size[1]]):
                logger.error('bbox {} is outside of image bounds {}'.format(
                    box, im.size))
                raise KrakenInputException('Line outside of image bounds')
            yield im.crop(box).rotate(angle, expand=True), box
Code Example #19
File: load_data.py  Project: AlliedToasters/rim_trace
 def draw_rim(self, crop_off=.25, res=224, rot=0, disp=(0, 0)):
     img = self.cropped
     dim = self.cropped.size[0]
     coords = self.coords
     out = []
     for pnt in coords:
         point = (pnt[1], pnt[0])
         as_radians = rot * math.pi / 180
         point = self.rotate_around_point(point,
                                          as_radians,
                                          origin=(dim / 2, dim / 2))
         out.append((point[1], point[0]))
     coords = out
     img = img.rotate(rot)
     crop_off = int(crop_off * img.size[0])
     img_post_size = img.size[0] - (2 * crop_off)
     res_factor = res / img_post_size
     disp0 = disp[0] / res_factor
     disp1 = disp[1] / res_factor
     img = img.crop(
         (crop_off - disp1, crop_off - disp0,
          img.size[0] - crop_off - disp1, img.size[1] - crop_off - disp0))
     img = img.resize((res, res), resample=PIL.Image.BILINEAR)
     arr = np.array(img)
     coords = [(((x[0] - crop_off) * res_factor) + disp[0],
                ((x[1] - crop_off) * res_factor) + disp[1]) for x in coords]
     target = np.zeros((res, res))
     coords = np.array([list(x) for x in coords])
     coord_groups = []
     mean_d = np.mean(self.d)
     std = np.std(self.d)
     crds = []
     thresh = 2.5
     for i, d in enumerate(self.d):
         if i == 0:
             if d < thresh * mean_d:
                 crds.append(coords[-1])
         if d < thresh * mean_d:
             crds.append(coords[i])
         elif len(crds) > 1:
             crds = np.array([list(x) for x in crds])
             coord_groups.append(crds)
             crds = []
         else:
             pass
     crds = np.array([list(x) for x in crds])
     coord_groups.append(crds)
     for crds in coord_groups:
         new_coords = crds.copy()
         for _ in range(5):
             new_coords = subdivide_polygon(new_coords,
                                            degree=2,
                                            preserve_ends=True)
         crds = new_coords
         rounded = set()
         for crd in crds:
             nxt = (int(round(crd[0])), int(round(crd[1])))
             rounded.add(nxt)
         pxls = (np.array([x[0] for x in rounded]),
                 np.array([x[1] for x in rounded]))
         target[pxls] = 255
     return arr, target
Code Example #20
def labels():
    bg_name = "data2/B0FFFF6TJG_1.png"
    data = io.imread(bg_name)
    data_rgb = color.rgba2rgb(data)
    data_gray = color.rgb2gray(data_rgb)  # drop transparent layer

    # crop image
    mask = ~(data_gray == 1)
    mask_points = np.argwhere(mask)
    top,left,bottom,right = np.min(mask_points[:,0]), np.min(mask_points[:,1]), \
        np.max(mask_points[:,0]), np.max(mask_points[:,1])

    top = max(top - 10, 0)
    left = max(left - 10, 0)
    bottom = min(bottom + 10, data.shape[0])
    right = min(right + 10, data.shape[1])

    print("top,left,bottom,right", top, left, bottom, right)

    image = data_gray[top:bottom, left:right].copy()
    mask = mask[top:bottom, left:right]
    # print(image[0])

    # (np.isclose(image,0.0)) |
    """
    mask = ((np.isclose(image,0.928392)) | (np.isclose(image,0.844302)) | 
            (np.isclose(image,0.872443)) | (np.isclose(image,0.976991)) |
            (np.isclose(image,0.868027)) | (np.isclose(image,0.868027)) |
            (np.isclose(image,0.901961)) | (np.isclose(image,0.899898)) | 
            (np.isclose(image,0.884697)) |
            (np.isclose(image,0.901674)) | (np.isclose(image,0.8058))
            )
    """
    #mask = ~(image == 1)
    image[mask] = 0
    image[~mask] = 1

    #edgs = feature.canny(image, sigma=3)

    #labels = measure.label(edgs, connectivity=1)  # label connected regions
    #dst = color.label2rgb(labels)  # show each label in a different color
    #print('regions number:', labels.max() + 1)  # number of connected regions (labels start at 0)

    #dst = morphology.convex_hull_object(edgs)

    contours = measure.find_contours(image, 0.5)
    #cords = np.concatenate(contours)

    cordarr = []
    for cords in contours:
        appr_img = measure.subdivide_polygon(cords,
                                             degree=2,
                                             preserve_ends=True)
        appr_img = measure.approximate_polygon(appr_img, tolerance=1)
        appr_img += np.array([top, left])
        cordarr.append(appr_img.tolist())
        print("appr_img:", len(appr_img.tolist()))

    print("cordarr:", len(cordarr))

    f, (ax0, ax1) = plt.subplots(2, figsize=(15, 10))
    ax0.imshow(data)
    ax0.set_title('Input image')
    ax1.imshow(image)
    ax1.set_title('After mask')
    plt.show()