Example #1
def vectorize_to_v4_label(
        segmentation_map,
        legend: Dict[int, str],
        max_num_points: Optional[int] = 50) -> DefaultDict[str, List[dict]]:
    """Converts a segmentation map into polygons.

    Given a raster pixel-wise array of predictions in `segmentation_map`,
    this function converts it into vectorized polygons suitable for use as
    predictions in Labelbox image-segmentation front ends.

    A pixel value of 0 is used to denote background pixels and
    the remaining pixel values are interpreted following the
    `legend` argument.

    Args:
        segmentation_map: A width x height array.
        legend: A dictionary mapping pixel values
            used in `segmentation_map` to semantic
            class names.
        max_num_points: The maximum number of points in the simplified path.
            If `None`, then no path simplification is performed.

    Returns:
        A dictionary suitable for use as a `prediction`
        in the image-segmentation v4 frontend after calling `json.dumps`.
    """
    assert len(segmentation_map.shape) == 2, \
        'Segmentation maps must be numpy arrays with shape (width, height)'
    label: DefaultDict[str, List[dict]] = collections.defaultdict(lambda: [])
    for polygon, pixel_value in rasterio.features.shapes(segmentation_map):
        pixel_value = int(pixel_value)
        # ignore background (denoted by pixel value 0)
        if pixel_value in legend and pixel_value != 0:
            xy_list = polygon['coordinates'][0]

            if max_num_points:
                epsilon = 0.001
                xy_list = simplify_coords(xy_list, epsilon)
                while len(xy_list) > max_num_points:
                    epsilon *= 2
                    xy_list = simplify_coords(xy_list, epsilon)

            geometry = []
            for point in xy_list:
                geometry.append({'x': int(point[0]), 'y': int(point[1])})

            class_name = legend[pixel_value]
            label[class_name].append({'geometry': geometry})
    return label
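A minimal usage sketch for the function above, assuming numpy, rasterio and the imports the snippet relies on are available; the 4x4 map and the 'building' legend are made up for illustration:

import numpy as np

# Hypothetical 4x4 segmentation map: background (0) plus a 2x2 block of class 1.
segmentation_map = np.zeros((4, 4), dtype=np.uint8)
segmentation_map[1:3, 1:3] = 1

label = vectorize_to_v4_label(segmentation_map, legend={1: 'building'})
# label['building'] holds a list of {'geometry': [{'x': ..., 'y': ...}, ...]} entries,
# ready to be serialized with json.dumps.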
Example #2
def normalize_resample_simplify(strokes, epsilon=1.0, resample_spacing=1.0):
    if len(strokes) == 0:
        raise ValueError('empty image')

    # find min and max
    amin = None
    amax = None
    for x, y, _ in strokes:
        cur_min = [np.min(x), np.min(y)]
        cur_max = [np.max(x), np.max(y)]
        amin = cur_min if amin is None else np.min([amin, cur_min], axis=0)
        amax = cur_max if amax is None else np.max([amax, cur_max], axis=0)

    # drop any drawings that are linear along one axis
    arange = np.array(amax) - np.array(amin)
    if np.min(arange) == 0:
        raise ValueError('bad range of values')

    arange = np.max(arange)
    output = []
    for x, y, _ in strokes:
        xy = np.array([x, y], dtype=float).T
        xy -= amin
        xy *= 255.
        xy /= arange
        resampled = resample(xy[:, 0], xy[:, 1], resample_spacing)
        simplified = simplify_coords(resampled, epsilon)
        xy = np.around(simplified).astype(np.uint8)
        output.append(xy.T.tolist())

    return output
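A hedged usage sketch for normalize_resample_simplify, assuming numpy, simplify_coords and the resample helper it calls are importable; the single QuickDraw-style stroke is invented for illustration:

# One stroke given as (xs, ys, timestamps); the third element is ignored.
strokes = [([0, 40, 80, 120], [0, 60, 10, 70], [0, 1, 2, 3])]
simplified = normalize_resample_simplify(strokes, epsilon=1.0, resample_spacing=1.0)
# Each entry of `simplified` is an [xs, ys] pair of lists rescaled into the 0-255 range.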
Example #3
def geojson_get_height_graph_data(data: GeoJSONRequest):
    _coords = [xy[0:2] for xy in data.dict()['geometry']['coordinates']]
    simplified_track = simplify_coords(_coords, epsilon=0.00003)
    simplified_track = resample_track_list(simplified_track, 300)
    track_elevation = [hi.get_height(y, x) for x, y in simplified_track]
    _coordinates = [(round(pt['lon'], 6), round(pt['lat'],
                                                6), pt['altitude_m'])
                    for pt in track_elevation]
    _lc_values = [
        lc.get_value_at_position(_lat, _lon)
        for _lon, _lat, _altitude_m in _coordinates
    ]
    _features = []
    _track = [_coordinates[0]]
    _current_value = _lc_values[0]
    _number_coords = len(_lc_values)
    for _i in range(1, _number_coords):
        _track.append(_coordinates[_i])
        if _current_value != _lc_values[_i] or _i == _number_coords - 1:
            _feature = Feature(
                geometry=LineString(_track),
                properties={"attributeType": str(_current_value)})
            _features.append(_feature)
            _track = [_coordinates[_i]]
            _current_value = _lc_values[_i]
    _land_cover_feature_collection = FeatureCollection(
        _features, properties={"summary": "land cover"})
    return [_land_cover_feature_collection]
Example #4
def convert_mask_to_polygon(mask,
                            max_polygon_points=100,
                            score_threshold=0.5,
                            max_refinement_iterations=25,
                            edge_safety_padding=1):
    """Convert a numpy mask to a polygon outline in normalized coordinates.

    :param mask: Pixel mask, where each pixel has an object (float) score in [0, 1], in size ([1, height, width])
    :type: mask: <class 'numpy.array'>
    :param max_polygon_points: Maximum number of (x, y) coordinate pairs in polygon
    :type: max_polygon_points: Int
    :param score_threshold: Score cutoff for considering a pixel as in object.
    :type: score_threshold: Float
    :param max_refinement_iterations: Maximum number of times to refine the polygon
    trying to reduce the number of pixels to meet max polygon points.
    :type: max_refinement_iterations: Int
    :param edge_safety_padding: Number of pixels to pad the mask with
    :type edge_safety_padding: Int
    :return: normalized polygon coordinates
    :rtype: list of list
    """
    # Convert to numpy bitmask
    mask = mask[0]
    mask_array = np.array((mask > score_threshold), dtype=np.uint8)
    image_shape = mask_array.shape

    # Pad the mask to avoid errors at the edge of the mask
    embedded_mask = np.zeros((image_shape[0] + 2 * edge_safety_padding,
                              image_shape[1] + 2 * edge_safety_padding),
                             dtype=np.uint8)
    embedded_mask[edge_safety_padding:image_shape[0] + edge_safety_padding,
                  edge_safety_padding:image_shape[1] +
                  edge_safety_padding] = mask_array

    # Find Image Contours
    contours = measure.find_contours(embedded_mask, 0.5)
    simplified_contours = []

    for contour in contours:

        # Iteratively reduce polygon points, if necessary
        if max_polygon_points is not None:
            simplify_factor = 0
            while (len(contour) > max_polygon_points
                   and simplify_factor < max_refinement_iterations):
                contour = simplify_coords(contour, simplify_factor)
                simplify_factor += 1

        # Convert to [x, y, x, y, ....] coordinates and correct for padding
        unwrapped_contour = [0] * (2 * len(contour))
        unwrapped_contour[::2] = np.ceil(contour[:, 1]) - edge_safety_padding
        unwrapped_contour[1::2] = np.ceil(contour[:, 0]) - edge_safety_padding

        simplified_contours.append(unwrapped_contour)

    return _normalize_contour(simplified_contours, image_shape)
Example #5
def simplify_coordinator(coord_curve, epsilon=0.0001):
    """

    Args:
        coord_curve (list[list[float, float]]): a list of lat, lon coordinates
        epsilon (float):
    Returns:
        list[list[float, float]]
    """
    coord_curve = np.asarray(coord_curve, order='C')
    return simplify_coords(coord_curve, epsilon)
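A short usage sketch, assuming numpy and simplify_coords are imported as in the snippet above; the coordinates are illustrative:

curve = [[48.1371, 11.5754], [48.1372, 11.5755], [48.1380, 11.5760]]
simplified = simplify_coordinator(curve, epsilon=0.0001)
# With this tolerance the nearly collinear middle point should be dropped,
# leaving only the endpoints of the curve.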
Example #6
def data_chart2(dictionary, key):
    start_graph = datetime.now()

    acc2 = key['acc']
    date_from_space = key['date_from']
    date_to_space = key['date_to']
    days_delta = key['days_delta']
    pk_d = dictionary
    dev_rom_id = Devices.objects.filter(dev_id=pk_d).values_list('dev_rom_id',
                                                                 flat=True)[0]
    combined = []
    cursor = connection.cursor()

    query = ("SELECT "
             "maapi_dev_rom_{rom_id}_values.dev_timestamp, "
             "maapi_dev_rom_{rom_id}_values.dev_value "
             "FROM maapi_dev_rom_{rom_id}_values  "
             "WHERE "
             "dev_id={id} "
             "AND dev_timestamp >= '{date_from_space}' "
             "AND dev_timestamp <= '{date_to_space}'  ".format(
                 acc2=acc2,
                 rom_id=dev_rom_id.replace("-", "_"),
                 id=pk_d,
                 date_from_space=date_from_space,
                 date_to_space=date_to_space))
    cursor.execute(query)

    ff = cursor.fetchall()
    f_max = -1000000000000000
    f_min = 100000000000000
    f_avg = 0
    stop_graph = datetime.now()
    start_graph2 = datetime.now()
    if len(list(ff)) <= 0:
        return (0, 0)
    else:
        for f in ff:
            date = int(calendar.timegm((f[0]).timetuple())) * 1000
            value = round(f[1], 1)
            if f_max <= value: f_max = value
            if f_min >= value: f_min = value
            f_avg += value
            combined.append([int(date), value])
        stop_graph2 = datetime.now()
        start_stop = ((stop_graph - start_graph).microseconds) / 1000
        st = ((stop_graph2 - start_graph2).microseconds) / 1000
        fff = simplify_coords(combined, 0.1)
        print(fff)
        return json.dumps(fff), start_stop, st, f_min, f_max, round(
            f_avg / len(list(ff)), 2)
Example #7
def Simplify(x, y, stdev, smoothing, MinAmplitude, Sparam):
    Xs = []
    Ys = []
    Yr = []
    if np.std(y) > stdev:
        Y = runningMean(y, smoothing)
        if max(Y) - min(Y) > MinAmplitude:
            Yr = (np.array(Y) - min(Y)) / (max(Y) - min(Y))  # rescale between 0 and 1
            coords = list(zip(x, Yr))
            simplified = np.array(simplify_coords(coords, Sparam))
            simplified = simplified[simplified[:, 0].argsort()]
            Xs = simplified[:, 0]
            Ys = simplified[:, 1]
            for i in range(0, len(Xs) - 2):
                if (abs(Xs[i + 2] - Xs[i]) < 1000 and abs(Ys[i + 2] - Ys[i]) < 0.3
                        and (abs(Ys[i + 2] - Ys[i + 1]) > 0.6 or abs(Ys[i] - Ys[i + 1]) > 0.6)):
                    Ys[i + 1] = (Ys[i + 2] + Ys[i]) / 2
    return Xs, Ys, Yr
Example #8
    def getcontours(self, pil_image, contour_simplify):
        pil_image = self.find_edges(pil_image)
        IM1 = pil_image.copy()
        IM2 = pil_image.rotate(-90,
                               expand=True).transpose(Image.FLIP_LEFT_RIGHT)
        dots1 = self.getdots(IM1)
        contours1 = self.connectdots(dots1)
        dots2 = self.getdots(IM2)
        contours2 = self.connectdots(dots2)

        for i in range(len(contours2)):
            contours2[i] = [(c[1], c[0]) for c in contours2[i]]
        contours = contours1 + contours2

        for i in range(len(contours)):
            for j in range(len(contours)):
                if len(contours[i]) > 0 and len(contours[j]) > 0:
                    if self.distsum(contours[j][0], contours[i][-1]) < 8:
                        contours[i] = contours[i] + contours[j]
                        contours[j] = []

        for i in range(len(contours)):
            contours[i] = [
                contours[i][j] for j in range(0, len(contours[i]), 8)
            ]

        contours = [c for c in contours if len(c) > 1]

        for i in range(0, len(contours)):
            contours[i] = [(v[0], v[1]) for v in contours[i]]

        for i in range(0, len(contours)):
            for j in range(0, len(contours[i])):
                contours[i][j] = int(contours[i][j][0] +
                                     10 * noise.pnoise3(i * 0.5, j * 0.1, 1)
                                     ), int(contours[i][j][1] + 10 *
                                            noise.pnoise3(i * 0.5, j * 0.1, 2))

        simple_contours = [
            simplify_coords(c, contour_simplify) for c in contours
        ]

        return simple_contours
Example #9
def smoothen_triplegs(triplegs, method='douglas-peucker', epsilon=1.0):
    """reduces number of points while retaining structure of tripleg
    Parameters
    ----------
    triplegs: shapely file
        triplegs to be reduced
    method: method used to smoothen
        only one method available so far
    epsilon: float
        slack parameter, higher epsilon means removing more points
    """
    input_copy = copy.deepcopy(triplegs)
    input_copy.geom = [
        LineString(
            ast.literal_eval(
                str(simplify_coords(input_copy.geom[i].coords, epsilon))))
        for i in range(len(input_copy.geom))
    ]
    return input_copy
Example #10
def image_idx_Tobox_curve(image, valid):

    idx_valid = np.where(valid)[0] + 1
    bbox_lt = []
    for i in idx_valid:

        _, contours, _ = cv2.findContours((image == i).astype(np.uint8, copy=False),
                                          cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        c = max(contours, key=cv2.contourArea).reshape(-1, 2).astype(np.float32, copy=False)

        c = simplify_coords(c, 1.0)
        if c.shape[0] < 3:
            continue
        Polygon_c = Polygon(c)

        if not Polygon_c.is_valid:

            Polygon_c = Polygon_c.buffer(0)
            if not Polygon_c.is_valid:
                pass

            if type(Polygon_c) is shapely.geometry.multipolygon.MultiPolygon:

                continue
            try:
                c = np.array(list(Polygon_c.exterior.coords),
                             dtype=np.float32).reshape(-1, 2)
            except:

                continue

        contours = [c]
        bbox_lt.append(contours)

    return bbox_lt
Example #11
def draw(contours):
    """
    This function controls your mouse and draws the calculated contours

    Move mouse quickly to the top-left to stop this function
    """
    t0 = time.time()
    for n, contour in enumerate(contours):
        if time.time() - t0 < 80:
            # simplify contours for faster and easier drawing
            contour = simplify_coords(contour, 2.0)

            # 468 and 250 are x,y coordinates for top-left of the drawing space
            pa.moveTo(contour[0][1] + 468, contour[0][0] + 250)

            # draw as much as possible before 80 seconds!
            # to make it look more human, add pauses and speed randomizers
            for x in contour[1:]:
                if time.time() - t0 < 80:
                    pa.dragTo(x[1] + 468, x[0] + 250, 0)
        else:
            break
Example #12
def prepare_graph_edges(graph: nx.Graph) -> List:
    node, nodes = graph.node, graph.nodes()
    all_coords = []
    # draw edges by pts
    for (s, e) in graph.edges():
        for k in range(len(graph[s][e])):
            ps = graph[s][e][k]['pts']
            coords = []
            start = (int(nodes[s]['o'][1]), int(nodes[s]['o'][0]))
            all_points = set()

            for i in range(1, len(ps)):
                pt1 = (int(ps[i - 1][1]), int(ps[i - 1][0]))
                pt2 = (int(ps[i][1]), int(ps[i][0]))
                if pt1 not in all_points and pt2 not in all_points:
                    coords.append(pt1)
                    all_points.add(pt1)
                    coords.append(pt2)
                    all_points.add(pt2)
            end = (int(nodes[e]['o'][1]), int(nodes[e]['o'][0]))

            same_order = True
            if len(coords) > 1:
                same_order = np.math.hypot(
                    start[0] - coords[0][0],
                    start[1] - coords[0][1]) <= np.math.hypot(
                        end[0] - coords[0][0], end[1] - coords[0][1])
            if same_order:
                coords.insert(0, start)
                coords.append(end)
            else:
                coords.insert(0, end)
                coords.append(start)
            coords = simplify_coords(coords, 2.0)
            all_coords.append(coords)
    return all_coords
Example #13
def generate_contours(masks, simplify=True):
    """
    Generates contours around masks output by the model
    and also simplifies them using the Ramer-Douglas-Peucker algorithm
    Inputs:
        masks: a numpy array with the masks for each of the instances detected (stacked along first dimension)
    Outputs:
        all_contours: a list of polygons corresponding to each binary mask
    
    NOTE: While tensorflow stacks images along third dimension, pytorch stacks them along first dimension
    so this is NOT exactly the same function as the one used previously in TF-based detection-service

    @author Dinis Gokaydin <*****@*****.**>
    """

    all_contours = []

    N = masks.shape[0]

    for i in range(N):

        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        mask = masks[i, :, :]
        padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2),
                               dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = mask
        contours = find_contours(padded_mask, 0.5)

        if simplify:
            for ind, contour in enumerate(contours):
                contours[ind] = simplify_coords(contour, 5)

        all_contours.append(contours)

    return all_contours
Example #14
def ramer_douglas_peucker(df, epsilon=0.00005):
    arr = df[['lon', 'lat']].values.copy(order='C')
    df = pd.DataFrame(simplify_coords(arr, epsilon), columns=['lon', 'lat'])
    return df
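A brief usage sketch assuming pandas, numpy and simplify_coords are already imported; the GPS fixes are made up:

import pandas as pd

track = pd.DataFrame({'lon': [13.3777, 13.3778, 13.3790],
                      'lat': [52.5163, 52.5164, 52.5170]})
simplified = ramer_douglas_peucker(track, epsilon=0.00005)
# `simplified` is a new DataFrame containing only the points RDP keeps.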
Example #15
# this tests numpy array simplification using RDP
# 216804 --> 3061 points (98.5% reduction)
# 50ms per RDP operation on MBA Core i7

from simplification.cutil import simplify_coords
import json
import numpy as np

with open("simplification/test/coords_complex.json", "r") as f:
    coords = np.array(json.load(f))
for x in range(50):
    simplify_coords(coords, 14.0)
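For comparison, the same package also exposes a Visvalingam-Whyatt variant; a hedged sketch reusing the coords loaded above (the epsilon here is an area threshold, so the value below is only a guess and not comparable to the RDP tolerance of 14.0):

from simplification.cutil import simplify_coords_vw

# Visvalingam-Whyatt drops points by effective triangle area rather than
# perpendicular distance, so its epsilon is in squared coordinate units.
vw_result = simplify_coords_vw(coords, 1.0)
print(len(coords), "->", len(vw_result), "points")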
Example #16
def get_simplified_track(data: SimplifyRequest):
    input_track = [[_t.lon, _t.lat] for _t in data.track]
    simplified_track = simplify_coords(input_track, epsilon=data.epsilon)
    output_track = [{'lat': y, 'lon': x} for x, y in simplified_track]
    return output_track
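The SimplifyRequest model is not shown above; a hypothetical Pydantic definition consistent with the attributes the endpoint reads (data.track items with lat/lon, plus data.epsilon) might look like this:

from typing import List
from pydantic import BaseModel

class TrackPoint(BaseModel):
    lat: float
    lon: float

class SimplifyRequest(BaseModel):
    track: List[TrackPoint]
    epsilon: float = 0.0001

request = SimplifyRequest(track=[{'lat': 47.0, 'lon': 11.0},
                                 {'lat': 47.0001, 'lon': 11.0001},
                                 {'lat': 47.01, 'lon': 11.01}])
print(get_simplified_track(request))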
Example #17
def simpl(stroke):
    return simplify_coords([[s.x, s.y] for s in stroke.segments], 2.0)
Example #18
def to_line_strings(mask,
                    sigma=0.5,
                    threashold=0.3,
                    small_obj_size=300,
                    dilation=1):
    mask = gaussian(mask, sigma=sigma)
    mask = mask[..., 0]
    mask[mask < threashold] = 0
    mask[mask >= threashold] = 1
    mask = np.array(mask, dtype="uint8")
    mask = mask[:1300, :1300]
    mask = cv2.copyMakeBorder(mask, 8, 8, 8, 8, cv2.BORDER_REPLICATE)
    if dilation > 0:
        mask = binary_dilation(mask, iterations=dilation)
    mask, _ = ndimage.label(mask)
    mask = remove_small_objects(mask, small_obj_size)
    mask[mask > 0] = 1

    ske = np.array(skeletonize(mask), dtype="uint8")
    ske = ske[8:-8, 8:-8]
    graph = sknw.build_sknw(ske, multi=True)
    line_strings = []
    lines = []
    all_coords = []
    node, nodes = graph.node, graph.nodes()
    # draw edges by pts
    for (s, e) in graph.edges():
        for k in range(len(graph[s][e])):
            ps = graph[s][e][k]['pts']
            coords = []
            start = (int(nodes[s]['o'][1]), int(nodes[s]['o'][0]))
            all_points = set()

            for i in range(1, len(ps)):
                pt1 = (int(ps[i - 1][1]), int(ps[i - 1][0]))
                pt2 = (int(ps[i][1]), int(ps[i][0]))
                if pt1 not in all_points and pt2 not in all_points:
                    coords.append(pt1)
                    all_points.add(pt1)
                    coords.append(pt2)
                    all_points.add(pt2)
            end = (int(nodes[e]['o'][1]), int(nodes[e]['o'][0]))

            same_order = True
            if len(coords) > 1:
                same_order = np.math.hypot(
                    start[0] - coords[0][0],
                    start[1] - coords[0][1]) <= np.math.hypot(
                        end[0] - coords[0][0], end[1] - coords[0][1])
            if same_order:
                coords.insert(0, start)
                coords.append(end)
            else:
                coords.insert(0, end)
                coords.append(start)
            coords = simplify_coords(coords, 2.0)
            all_coords.append(coords)

    for coords in all_coords:
        if len(coords) > 0:
            line_obj = LineString(coords)
            lines.append(line_obj)
            line_string_wkt = line_obj.wkt
            line_strings.append(line_string_wkt)
    new_lines = remove_duplicates(lines)
    new_lines = filter_lines(new_lines, calculate_node_count(new_lines))
    line_strings = [l.wkt for l in new_lines]
    return line_strings
Example #19
File: drawing.py  Project: nptit/kaggle
def simplify_strokes(strokes, epsilon):
    # Ramer-Douglas-Peucker algorithm.
    new_strokes = [simplify_coords(s, epsilon) for s in strokes]
    return new_strokes
Example #20
    def main(self, data, simplify_factor=None):
        """
        Hashmap function resolves bookkeeping results to object arcs.

        The hashmap function is the fifth step in the topology computation.
        The following sequence is adopted:
        1. extract
        2. join
        3. cut 
        4. dedup   
        5. hashmap   
 
        Developing Notes:
        * PostGIS Tips for Power Users: http://2010.foss4g.org/presentations/3369.pdf
        """

        # make data available within class
        self.data = data

        # resolve bookkeeping to arcs in objects, including backward check of arcs
        list(self.resolve_objects("arcs", self.data["objects"]))

        # parse the linestrings into list of coordinates
        # only if linestrings are quantized, apply delta encoding.
        if "transform" in data.keys():
            if simplify_factor is not None:
                if simplify_factor >= 1:
                    for idx, ls in enumerate(data["linestrings"]):
                        self.data["linestrings"][idx] = cutil.simplify_coords(
                            np.array(ls), simplify_factor)
                    self.simp = True

            for idx, ls in enumerate(data["linestrings"]):
                if self.simp:
                    ls = ls.astype(int)
                else:
                    ls = np.array(ls).astype(int)
                ls_p1 = copy.copy(ls[0])
                ls -= np.roll(ls, 1, axis=0)
                ls[0] = ls_p1
                self.data["linestrings"][idx] = ls.tolist()

        else:
            if simplify_factor is not None:
                if simplify_factor >= 1:
                    for idx, ls in enumerate(data["linestrings"]):
                        self.data["linestrings"][idx] = cutil.simplify_coords(
                            np.array(ls), simplify_factor).tolist()
            else:
                for idx, ls in enumerate(data["linestrings"]):
                    self.data["linestrings"][idx] = np.array(ls).tolist()

        objects = {}
        objects["geometries"] = []
        objects["type"] = "GeometryCollection"
        for idx, feature in enumerate(data["objects"]):
            feat = data["objects"][feature]

            if "geometries" in feat:
                feat["type"] = feat["geometries"][0]["type"]

            if feat["type"] == "Polygon":
                if "geometries" in feat:
                    f_arc = feat["geometries"][0]["arcs"]
                else:
                    f_arc = feat["arcs"]

                feat["arcs"] = f_arc

            if feat["type"] == "MultiPolygon":
                if "geometries" in feat:
                    f_arcs = feat["geometries"][0]["arcs"]
                else:
                    f_arcs = feat["arcs"]
                feat["arcs"] = [[arc] for arc in f_arcs]

            feat.pop("geometries", None)
            objects["geometries"].append(feat)

        data["objects"] = {}
        data["objects"]["data"] = objects

        # prepare to return object
        data = self.data
        data["arcs"] = data["linestrings"]
        del data["linestrings"]
        del data["junctions"]
        del data["bookkeeping_geoms"]
        del data["bookkeeping_duplicates"]
        del data["bookkeeping_arcs"]
        del data["bookkeeping_shared_arcs"]

        return data
Example #21
def to_line_strings(mask, sigma=0.5, threashold=0.3, small_obj_size=300, dilation=1):
    mask = gaussian(mask, sigma=sigma)
    mask = mask[..., 0]
    mask[mask < threashold] = 0
    mask[mask >= threashold] = 1
    mask = np.array(mask, dtype="uint8")
    mask = mask[:1300, :1300]
    mask = cv2.copyMakeBorder(mask, 8, 8, 8, 8, cv2.BORDER_REPLICATE)
    if dilation > 0:
        mask = binary_dilation(mask, iterations=dilation)
    mask, _ = ndimage.label(mask)
    mask = remove_small_objects(mask, small_obj_size)
    mask[mask > 0] = 1

    ske = np.array(skeletonize(mask), dtype="uint8")
    ske = ske[8:-8, 8:-8]
    graph = sknw.build_sknw(ske, multi=True)
    line_strings = []
    lines = []
    all_coords = []
    node, nodes = graph.node, graph.nodes()
    # draw edges by pts
    for (s, e) in graph.edges():
        for k in range(len(graph[s][e])):
            ps = graph[s][e][k]['pts']
            coords = []
            start = (int(nodes[s]['o'][1]), int(nodes[s]['o'][0]))
            all_points = set()

            for i in range(1, len(ps)):
                pt1 = (int(ps[i - 1][1]), int(ps[i - 1][0]))
                pt2 = (int(ps[i][1]), int(ps[i][0]))
                if pt1 not in all_points and pt2 not in all_points:
                    coords.append(pt1)
                    all_points.add(pt1)
                    coords.append(pt2)
                    all_points.add(pt2)
            end = (int(nodes[e]['o'][1]), int(nodes[e]['o'][0]))

            same_order = True
            if len(coords) > 1:
                same_order = np.math.hypot(start[0] - coords[0][0], start[1] - coords[0][1]) <= np.math.hypot(end[0] - coords[0][0], end[1] - coords[0][1])
            if same_order:
                coords.insert(0, start)
                coords.append(end)
            else:
                coords.insert(0, end)
                coords.append(start)
            coords = simplify_coords(coords, 2.0)
            all_coords.append(coords)

    for coords in all_coords:
        if len(coords) > 0:
            line_obj = LineString(coords)
            lines.append(line_obj)
            line_string_wkt = line_obj.wkt
            line_strings.append(line_string_wkt)
    new_lines = remove_duplicates(lines)
    new_lines = filter_lines(new_lines, calculate_node_count(new_lines))
    line_strings = [l.wkt for l in new_lines]
    return line_strings
Example #22
def vectorize(blob):
    (skel, dist), path = blob

    logger.info(f"Vectorizing {path}...")
    graph = sknw.build_sknw(skel)
    df = gpd.GeoDataFrame(columns=['geometry', 'width'])

    ul, lr = get_geotiff_bbox(path)
    dy = ul[0] - lr[0]
    dx = lr[1] - ul[1]

    for s, e in graph.edges():
        H, W = dist.shape
        widths = [
            dist[tuple(pt)] for pt in graph[s][e]['pts'].astype(np.int32)
        ]
        width = np.percentile(widths, 33) / H * dy

        # simplify each edge's point chain in place
        graph[s][e]['pts'] = simplify_coords(graph[s][e]['pts'], 2.0)

    # Pop short stubs
    pops = []
    for (s, e) in graph.edges():
        if graph[s][e]['weight'] < 10 and (len(graph[s]) == 1
                                           or len(graph[e]) == 1):
            pops += [(s, e)]
    for s, e in pops:
        graph.remove_edge(s, e)

    # Pop orphaned nodes
    pops = []
    for node in graph.nodes():
        if len(graph[node]) == 0:
            pops += [node]
    for node in pops:
        graph.remove_node(node)

    # Bind nearby point pairs
    for n1, n2 in combinations(graph.node, 2):
        diff = la.norm(graph.node[n1]['o'] - graph.node[n2]['o'])
        if diff < 20 and n2 not in graph[n1]:
            pts = np.zeros((3, 2))
            pts[0, :] = graph.node[n1]['o']
            pts[1, :] = (graph.node[n1]['o'] + graph.node[n2]['o']) / 2
            pts[2, :] = graph.node[n2]['o']
            graph.add_edge(n1, n2, pts=pts, weight=diff)

    # Bind points along a line
    for n1 in graph.node:
        pushes = []
        for n2 in graph[n1]:
            for n3 in graph.node:
                vec1 = graph.node[n1]['o'] - graph.node[n2]['o']
                vec2 = graph.node[n3]['o'] - graph.node[n1]['o']
                diff = la.norm(vec2)
                vec1 /= la.norm(vec1)
                vec2 /= diff
                try:
                    angle = np.arccos(vec1 @ vec2) * 180 / np.pi
                except:
                    logger.critical("Could not compute points on vector!")
                    angle = 90
                if abs(angle % 360) < 2 and diff < 150:
                    pts = np.zeros((3, 2))
                    pts[0, :] = graph.node[n1]['o']
                    pts[1, :] = (graph.node[n1]['o'] + graph.node[n3]['o']) / 2
                    pts[2, :] = graph.node[n3]['o']
                    pushes += [(n1, n3, pts, diff)]
        for n1, n2, pts, diff in pushes:
            graph.add_edge(n1, n2, pts=pts, weight=diff)

    for s, e in graph.edges():
        N = len(graph[s][e]['pts']) + 2
        pts = np.zeros((N, 2), dtype=np.float64)
        pts[0] = graph.node[s]['o']
        pts[-1] = graph.node[e]['o']
        pts[1:-1] = graph[s][e]['pts']

        pts[:, 0] = 1 - pts[:, 0] / H
        pts[:, 0] *= dy
        pts[:, 0] += lr[0]

        pts[:, 1] = pts[:, 1] / W
        pts[:, 1] *= dx
        pts[:, 1] += ul[1]

        df = df.append({
            'geometry': LineString(pts[:, ::-1]),
            'width': width
        },
                       ignore_index=True)
    return df
Example #23
    def simplify(self):
        new_list = list()
        for linestring in self.linestring_list:
            if len(linestring) > 2:
                new_list.append(simplify_coords(linestring, epsilon=1.0))
        self.linestring_list = new_list
Example #24
File: pagerender.py  Project: gbatiz/remy
    def simpl(stroke, tolerance=10.0):
        return simplify_coords([[s.x, s.y] for s in stroke.segments],
                               tolerance)
Example #25
    def randomWalk(self, image):
        """
        actual calculations
        :param image: bitmap to squigglify
        :return:
        """
        self.removeOldGraphicsItems()
        group = QGraphicsItemGroup()
        no_of_walks = self.parent.noOfWalksWalkify.value()
        coordinates = {}
        self.applyThreshold(image)
        for w in range(no_of_walks):
            x, y = self.find_darkest(image)
            x, y, color = self.find_darkest_neighbor(image, x, y)
            coordinates[w] = np.array([[x, y]])
            no_of_line_segments = self.parent.noOfLineSegmentsWalkify.value()
            adjustbrightness = self.parent.localBrightnessAdjustmentWalkify.value(
            )
            stroke_width = self.parent.lineWidthWalkify.value()
            for s in range(0, no_of_line_segments):
                dx, dy, dc = self.find_darkest_neighbor(image, x, y)
                self.lighten_area_around(image, adjustbrightness, dx, dy)
                x, y = dx, dy
                coordinates[w] = np.append(coordinates[w], [[x, y]], axis=0)

        for w in range(no_of_walks):
            coordinates[w] = simplify_coords(
                coordinates[w],
                self.parent.polylineSimplificationToleranceWalkify.value())

        for w in range(no_of_walks):
            path = QPainterPath()
            in_the_middle_of_a_quad = False
            for idx, c in enumerate(coordinates[w]):
                quad = self.parent.useSmootherShapesWalkify.checkState(
                ) == Qt.Checked
                if not quad:
                    if idx == 0:
                        path.moveTo(coordinates[w][idx][0],
                                    coordinates[w][idx][1])
                    else:
                        path.lineTo(coordinates[w][idx][0],
                                    coordinates[w][idx][1])
                else:
                    if idx == 0:
                        path.moveTo(coordinates[w][idx][0],
                                    coordinates[w][idx][1])
                    elif idx % 2 == 1:
                        middlex, middley = coordinates[w][idx][0], coordinates[
                            w][idx][1]
                        in_the_middle_of_a_quad = True
                    else:
                        path.quadTo(middlex, middley, coordinates[w][idx][0],
                                    coordinates[w][idx][1])
                        in_the_middle_of_a_quad = False

            if in_the_middle_of_a_quad:
                path.lineTo(middlex, middley)

            item = QGraphicsPathItem(path)
            pen = QPen()
            pen.setWidth(stroke_width)
            item.setPen(pen)
            group.addToGroup(item)

        self.addNewGraphicsItems(group)
Example #26
File: robotGrid.py  Project: sdss/kaiju
    def getPathPair(
        self, speed=2, smoothPoints=5, collisionShrink=0.05, pathDelay=1,
        epsilon=None
    ):
        """
        Get paths in format that jaeger expects.  No checking is done, so
        whoever calls this should check things on the RobotGrid
        and decide what to do next.

        Parameters
        -----------
        speed: float
            RPM at output, how fast robots move, max speed is 3
        smoothPoints: int
            window width for smoothing a path's velocity profile,
            in units of steps.  Smooths out fast switching between forward
            and reverse moves on the axes.
        collisionShrink: float
            mm, how much to decrease the collision buffer to allow
            for smoothing
        pathDelay: float
            seconds, how far in the future to put the first point; this
            allows a robot to "catch up" to the expected starting point
            for the path, if it's not there already.
        epsilon: float, optional
            tolerance passed to simplify_coords when simplifying the smoothed
            paths; defaults to the grid's own epsilon when None.

        Returns
        ---------
        toDestination: dict
            alpha/beta points in time for all robots.  Path begins at robot
            grid's initialized state, moving toward destination state.

        fromDestination: dict
            alpha/beta points in time for all robots.  Path begins at robot
            grid's destination state, moving toward the initialized state.
            This is just simply a reversed version of toDestination.
        """
        if epsilon is None:
            epsilon = self.epsilon

        toDestination = {}
        fromDestination = {}

        for r in self.robotDict.values():
            # if robot is offline, don't get a path for it
            if r.isOffline:
                continue

            ap = [x[1] for x in r.alphaPath]
            bp = [x[1] for x in r.betaPath]
            # buffer ends
            ap = np.array([[ap[0]]*smoothPoints + ap + [ap[-1]]*smoothPoints]).flatten()
            bp = np.array([[bp[0]]*smoothPoints + bp + [bp[-1]]*smoothPoints]).flatten()
            steps = np.arange(len(ap))
            # smooth
            aps = savgol_filter(ap, smoothPoints, polyorder=3)
            bps = savgol_filter(bp, smoothPoints, polyorder=3)
            # import pdb; pdb.set_trace()
            # simplify
            apss = simplify_coords(np.array([steps, aps]).T, epsilon)
            bpss = simplify_coords(np.array([steps, bps]).T, epsilon)
            # linearly interpolate back to original density
            # (for collision checking after smoothing/simplifying)
            apssi = np.interp(steps, apss[:,0], apss[:,1])
            bpssi = np.interp(steps, bpss[:,0], bpss[:,1])
            # set on the robot object for checking for collisions
            r.interpSimplifiedAlphaPath = [[t,x] for t,x in zip(steps, apssi)]
            r.interpSimplifiedBetaPath = [[t,x] for t,x in zip(steps, bpssi)]


            armPathToDest = {}
            armPathFromDest = {}

            for axis, data in zip(["alpha", "beta"], [apss, bpss]):
                # to destination path
                times = data[:, 0] * self.stepSize / (speed * 360 / 60.)
                angle = data[:, 1]
                armPathToDest[axis] = [(pos, time + pathDelay) for pos, time in zip(angle, times)]

                # from destination path, a reverse of the same thing
                timesR = np.abs(times - times[-1])[::-1]
                angleR = angle[::-1]
                armPathFromDest[axis] = [(pos, time + pathDelay) for pos, time in zip(angleR, timesR)]

            toDestination[r.id] = armPathToDest
            fromDestination[r.id] = armPathFromDest

        # self.shrinkCollisionBuffer(collisionShrink)
        # self.verifySmoothed(len(steps))
        # self.growCollisionBuffer(collisionShrink)

        return toDestination, fromDestination
Example #27
def get_edge(i_url, bbox):

    err_val = 0
    img = cv2.imread(i_url)
    #cv2.cvtColor(img, img, cv2.COLOR_RGB2BGR)

    x0 = max(int(bbox[0]), 0)
    y0 = max(int(bbox[1]), 0)
    w = max(int(bbox[2]), 0)
    h = max(int(bbox[3]), 0)

    img = img[y0:y0 + h, x0:x0 + w]

    # img= cv2.copyMakeBorder(img,0,0,10,10,cv2.BORDER_REPLICATE)
    img = cv2.resize(img, (256, 960))
    img = torch.from_numpy(img)

    # img = torch.cat(img)
    try:
        img = img.view(-1, 256, 960, 3)
    except:
        err_val = 1
        return i_url, [], [], err_val

    img = torch.transpose(img, 1, 3)
    img = torch.transpose(img, 2, 3)
    img = img.float()

    edge_logits, tg2, class_prob = model(img.to(device))
    edge_logits = torch.sigmoid(edge_logits)

    edge_logits = edge_logits[0, 0, :, :].cpu().detach().numpy()
    # print(len(edge_logits))
    arrs1 = np.zeros((24, 90), np.uint8)

    for j in range(len(edge_logits)):
        for k in range((len(edge_logits[j]))):
            j1 = math.floor(j)
            k1 = math.floor(k)
            if edge_logits[j][k] > 0.6:
                arrs1[j1 + 2][k1 + 5] = 255

    borders = np.zeros((24, 90), np.uint8)
    kernel5 = np.ones((3, 3), np.uint8)

    arrs1 = cv2.morphologyEx(arrs1, cv2.MORPH_CLOSE, kernel5)

    contours, hierarchy = cv2.findContours(arrs1, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(borders, contours, -1, 255, 1)

    arrs1 = np.zeros((24, 90), np.float32)
    for j in range(len(borders)):
        for k in range((len(borders[j]))):
            j1 = math.floor(j)
            k1 = math.floor(k)
            if borders[j][k] > 180:
                arrs1[j1][k1] = 1.0

    arrs1 = torch.from_numpy(arrs1)
    hull, err_val = get_hull(arrs1)
    if (err_val == 1):
        return i_url, [], [], err_val

    hull = np.asarray(hull)

    hull = simplify_coords(hull, 0.1)

    hull = hull.tolist()

    original_hull = convert_hull_to_cv(hull, w, h)

    total_points = 200
    # original_hull = uniformsample(np.asarray(original_hull), total_points).astype(int)
    original_hull = np.asarray(original_hull).astype(int)
    all_points_x = np.int32((original_hull[:, 0] + x0)).tolist()
    all_points_y = np.int32((original_hull[:, 1] + y0)).tolist()

    # all_points_x = simplify_coords_vw(all_points_x, 30.0)
    # all_points_y = simplify_coords_vw(all_points_y, 30.0)

    return i_url, all_points_x, all_points_y, err_val