Code example #1
File: utils.py  Project: petrabarus/angkot
def to_geojson(obj):
    import geojson
    from shapely.geometry import asMultiLineString

    if obj.route is not None:
        p = asMultiLineString(obj.route)
        # serialize and re-parse so the geometry becomes a plain GeoJSON mapping
        geometry = geojson.loads(geojson.dumps(p))
    else:
        geometry = {
            'type': 'MultiLineString',
            'coordinates': []
        }

    return {
        'type': 'Feature',
        'properties': {
            'province': obj.province,
            'city': obj.city,
            'company': obj.company,
            'number': obj.number,
            'origin': obj.origin,
            'destination': obj.destination,
            'accept': [],
        },
        'geometry': geometry
    }
Code example #2
File: rendered_scene.py  Project: LoicGoulefert/lines
    def mls(self) -> MultiLineString:
        """
        Return vector data as a shapely.geometry.MultiLineString object

        :return: the MultiLineString object
        """
        if self._mls is None:
            self._mls = asMultiLineString(self._vectors)
        return self._mls
Code example #3
    def test_numpy_adapter(self):
        from numpy import array
        from numpy.testing import assert_array_equal

        # Adapt a sequence of Numpy arrays to a multilinestring
        a = [array(((1.0, 2.0), (3.0, 4.0)))]
        geoma = asMultiLineString(a)
        assert_array_equal(geoma.context, [array([[1., 2.], [3., 4.]])])
        self.assertEqual(dump_coords(geoma), [[(1.0, 2.0), (3.0, 4.0)]])
Code example #4
File: ops.py  Project: rtorresca/Shapely
    def linemerge(self, lines):
        """Merges all connected lines from a source

        The source may be a MultiLineString, a sequence of LineString objects,
        or a sequence of objects than can be adapted to LineStrings.  Returns a
        LineString or MultiLineString when lines are not contiguous.
        """
        source = None
        if hasattr(lines, 'type') and lines.type == 'MultiLineString':
            source = lines
        elif hasattr(lines, '__iter__'):
            try:
                source = asMultiLineString([ls.coords for ls in lines])
            except AttributeError:
                source = asMultiLineString(lines)
        if source is None:
            raise ValueError("Cannot linemerge %s" % lines)
        result = lgeos.GEOSLineMerge(source._geom)
        return geom_factory(result)
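
A minimal usage sketch of the merge behaviour described above (assuming only that Shapely is installed; the coordinates are invented for illustration):

from shapely.geometry import MultiLineString
from shapely.ops import linemerge

# Touching segments merge into a single LineString
merged = linemerge(MultiLineString([[(0, 0), (1, 1)], [(1, 1), (2, 2)]]))
print(merged.geom_type)  # "LineString"

# Disjoint segments come back as a MultiLineString
unmerged = linemerge(MultiLineString([[(0, 0), (1, 1)], [(5, 5), (6, 6)]]))
print(unmerged.geom_type)  # "MultiLineString"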
Code example #5
File: ops.py  Project: gepcel/Shapely
    def linemerge(self, lines):
        """Merges all connected lines from a source

        The source may be a MultiLineString, a sequence of LineString objects,
        or a sequence of objects than can be adapted to LineStrings.  Returns a
        LineString or MultiLineString when lines are not contiguous.
        """
        source = None
        if hasattr(lines, "type") and lines.type == "MultiLineString":
            source = lines
        elif hasattr(lines, "__iter__"):
            try:
                source = asMultiLineString([ls.coords for ls in lines])
            except AttributeError:
                source = asMultiLineString(lines)
        if source is None:
            raise ValueError("Cannot linemerge %s" % lines)
        result = lgeos.GEOSLineMerge(source._geom)
        return geom_factory(result)
Code example #6
    def test_numpy(self):

        from numpy import array
        from numpy.testing import assert_array_equal

        # Construct from a numpy array
        geom = MultiLineString([array(((0.0, 0.0), (1.0, 2.0)))])
        self.assertIsInstance(geom, MultiLineString)
        self.assertEqual(len(geom.geoms), 1)
        self.assertEqual(dump_coords(geom), [[(0.0, 0.0), (1.0, 2.0)]])

        # Adapt a sequence of Numpy arrays to a multilinestring
        a = [array(((1.0, 2.0), (3.0, 4.0)))]
        geoma = asMultiLineString(a)
        assert_array_equal(geoma.context, [array([[1., 2.], [3., 4.]])])
        self.assertEqual(dump_coords(geoma), [[(1.0, 2.0), (3.0, 4.0)]])
Code example #7
File: hatched.py  Project: mintyPT/hatched
def hatch(
    file_path: str,
    hatch_pitch: int = 5,
    levels: Tuple[int, int, int] = (64, 128, 192),
    blur_radius: int = 10,
    image_scale: float = 1.0,
    interpolation: int = cv2.INTER_LINEAR,
    h_mirror: bool = False,
    invert: bool = False,
    circular: bool = False,
) -> None:
    """
    Create hatched shading vectors for an image, display them and save them to an SVG file.
    :param file_path: input image path
    :param hatch_pitch: hatching pitch in pixels (corresponds to the densest possible hatching)
    :param levels: pixel values of the three thresholds separating black, dark, light and white (0-255)
    :param blur_radius: blurring radius to apply on the input image (0 to disable)
    :param image_scale: scale factor to apply on the image before processing
    :param interpolation: interpolation to apply for scaling (typically either
        `cv2.INTER_LINEAR` or `cv2.INTER_NEAREST`)
    :param h_mirror: apply horizontal mirror on the image if True
    :param invert: invert pixel value of the input image before processing (in this case, the
        level thresholds are inverted as well)
    :param circular: use circular hatching instead of diagonal
    :return:
    """

    # Load the image, resize it and apply blur
    img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
    scale_x = int(img.shape[1] * image_scale)
    scale_y = int(img.shape[0] * image_scale)
    img = cv2.resize(img, (scale_x, scale_y), interpolation=interpolation)
    if blur_radius > 0:
        img = cv2.blur(img, (blur_radius, blur_radius))
    h, w = img.shape

    if h_mirror:
        img = np.flip(img, axis=1)

    if invert:
        img = 255 - img
        levels = tuple(255 - i for i in reversed(levels))

    # border for contours to be closed shapes
    r = np.zeros(shape=(img.shape[0] + 2, img.shape[1] + 2))
    r[1:-1, 1:-1] = img

    # Find contours at the three threshold levels
    black_cnt = measure.find_contours(r, levels[0])
    dark_cnt = measure.find_contours(r, levels[1])
    light_cnt = measure.find_contours(r, levels[2])

    light_mls = asMultiLineString(np.empty(shape=(0, 2, 2)))
    dark_mls = asMultiLineString(np.empty(shape=(0, 2, 2)))
    black_mls = asMultiLineString(np.empty(shape=(0, 2, 2)))

    try:
        black_p = build_mask(black_cnt)
        dark_p = build_mask(dark_cnt)
        light_p = build_mask(light_cnt)

        if circular:
            build_func = build_circular_hatch
        else:
            build_func = build_hatch

        light_lines = build_func(4 * hatch_pitch, 0, w, h)
        dark_lines = build_func(4 * hatch_pitch, 2 * hatch_pitch, w, h)
        black_lines = build_func(2 * hatch_pitch, hatch_pitch, w, h)

        frame = Polygon([(3, 3), (w - 6, 3), (w - 6, h - 6), (3, h - 6)])

        light_mls = shapely.ops.linemerge(
            asMultiLineString(light_lines).difference(light_p).intersection(
                frame))
        dark_mls = shapely.ops.linemerge(
            asMultiLineString(dark_lines).difference(dark_p).intersection(
                frame))
        black_mls = shapely.ops.linemerge(
            asMultiLineString(black_lines).difference(black_p).intersection(
                frame))
    except Exception as exc:
        print(f"Error: {exc}")

    # save vector data to svg file
    save_to_svg(
        os.path.splitext(file_path)[0] + ".svg", w, h,
        [light_mls, dark_mls, black_mls])

    # Plot everything
    # ===============

    plt.subplot(1, 2, 1)
    plt.imshow(img, cmap=plt.cm.gray)

    # noinspection PyShadowingNames
    def plot_cnt(contours, spec):
        for cnt in contours:
            plt.plot(cnt[:, 1], cnt[:, 0], spec, linewidth=2)

    plot_cnt(black_cnt, "b-")
    plot_cnt(dark_cnt, "g-")
    plot_cnt(light_cnt, "r-")

    plt.subplot(1, 2, 2)

    if invert:
        # plt.style.use('dark_background')
        plt.gca().set_facecolor((0, 0, 0))
        spec = "w-"
    else:
        spec = "k-"

    for mls in [light_mls, dark_mls, black_mls]:
        for ls in mls:
            plt.plot(ls.xy[0], h - np.array(ls.xy[1]), spec, lw=0.3)

    # for ls in light_p.boundary:
    #     plt.plot(ls.xy[0], h - np.array(ls.xy[1]), "r-", lw=0.3)

    plt.axis("equal")
    plt.xticks([])
    plt.yticks([])

    plt.show()
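
For reference, a hypothetical call (the image path is a placeholder; every keyword is taken from the signature above). The function writes an SVG next to the input image and opens a matplotlib preview:

# "portrait.jpg" is a made-up path used only for illustration.
hatch(
    "portrait.jpg",
    hatch_pitch=5,
    levels=(64, 128, 192),
    blur_radius=10,
    image_scale=0.5,
    circular=False,
)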
Code example #8
def calc_dist(in_pts_file, in_net_file, out_path, out_name):
    
    # Create shapefile from csv file
    start_time = time.time()
    shapeout = in_pts_file.replace(".csv", "_fiona.shp")
    yourschema = {'geometry': 'Point',
                  'properties': {'point_id': 'int', 'long': 'float', 'lat': 'float'}}
    try:
        with fiona.open(shapeout, 'w', crs=fiona.crs.from_epsg(4326),
                        driver='ESRI Shapefile', schema=yourschema) as output:
            reader = pd.read_csv(in_pts_file)
            count = 0
            for index, row in reader.iterrows():
                # geometry       
                tmp_point = sg.Point(float(row['long']), float(row['lat']))
                # attributes
                prop = {'point_id': int(count),'long': float(row['long']),'lat': float(row['lat'])}
                # write the row (geometry + attributes in GeoJSON format)
                output.write({'geometry': sg.mapping(tmp_point), 'properties':prop})
                count += 1
            del row, reader
            output.close()
            print(output.closed)
    except Exception:
        print(output.closed)
    print("---Creating a shapefile takes %s seconds for %s points---" % ((time.time() - start_time),count))
    
    # Convert crs
    start_time = time.time()
    points = geopandas.read_file(shapeout)
    # change CRS to epsg 6350: NAD83
    points = points.to_crs({'init': 'epsg:6350'})
    points.to_file(shapeout.replace(".shp", "_NAD83.shp"))
    print("---Projecting a shapefile takes %s seconds for %sMB file---" % ((time.time() - start_time),
                                                                          int(os.path.getsize(shapeout)*1e-6)))
    
    # Read network file
    start_time = time.time()
    network = geopandas.read_file(in_net_file)
    # change CRS to epsg 6350: NAD83
    network = network.to_crs({'init': 'epsg:6350'})
    network.to_file(in_net_file.replace(".shp", "_NAD83.shp"))
    print("---Projecting a shapefile takes %s seconds for %sMB file---" % ((time.time() - start_time),
                                                                          int(os.path.getsize(in_net_file)*1e-6)))
    net_lines = network['geometry']
    line_output = asMultiLineString(net_lines)
    net_shply = sg.MultiLineString(line_output)
    print("Network converted to Shapely geometry object.")
    
    # Calculate distance
    start_time = time.time()
    distance = []
    count = 0
    with fiona.open(shapeout.replace(".shp", "_NAD83.shp")) as coords:
        for feature in coords:
            geom = sg.shape(feature["geometry"])
            distance.append( geom.distance(net_shply) )
            count += 1
    print("---Distance calculation takes %s seconds for %s points and %sMB network file---" 
          % ((time.time() - start_time),count,int(os.path.getsize(in_net_file)*1e-6)))
    
    # Store distance to csv
    with open(os.path.join(out_path, out_name), 'w', newline='') as myfile:
        w = csv.writer(myfile, delimiter=',')
        w.writerows(zip(distance))
    print("Distance calculated and stored!")
Code example #9
def test_multilinestring_adapter_deprecated():
    coords = [[[5.0, 6.0], [7.0, 8.0]]]
    with pytest.warns(ShapelyDeprecationWarning, match="proxy geometries"):
        asMultiLineString(coords)
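
The proxy adapters tested here were deprecated in Shapely 1.8 and removed in Shapely 2.0; the same coordinate sequence can be passed directly to the MultiLineString constructor, as the test_numpy examples above already do. A minimal sketch:

from shapely.geometry import MultiLineString

coords = [[[5.0, 6.0], [7.0, 8.0]]]
# The constructor copies the coordinates instead of proxying the original object.
mls = MultiLineString(coords)
print(mls.wkt)  # MULTILINESTRING ((5 6, 7 8))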
Code example #10
File: cut.py  Project: olehb/topojson
    def _cutter(self, data):
        """
        Entry point for the class Cut.

        The cut function is the third step in the topology computation.
        The following sequence is adopted:
        1. extract
        2. join
        3. cut
        4. dedup
        5. hashmap

        Parameters
        ----------
        data : dict
            object created by the method topojson.join.

        Returns
        -------
        dict
            object updated and expanded with
            - updated key: linestrings
            - new key: bookkeeping_duplicates
            - new key: bookkeeping_linestrings
        """

        if data["junctions"]:
            # split each feature given the intersections
            # prepare the junctions as a 2d coordinate array
            mp = data["junctions"]
            if isinstance(mp, geometry.Point):
                mp = geometry.MultiPoint([mp])

            # create spatial index on junctions
            tree_splitter = STRtree(mp)
            slist = []
            # junctions only occur at existing coordinates of the linestrings
            if self.options.shared_coords:
                for ls in data["linestrings"]:
                    line, splitter = np_array_bbox_points_line(ls, tree_splitter)
                    # prev function returns None for splitter if there is nothing to split
                    if splitter is not None:
                        slines = fast_split(line, splitter)
                        slist.append(list(geometry.asMultiLineString(slines)))
                    else:
                        slist.append([ls])

            # junctions can fall between existing coords of a linestring
            else:
                for ls in data["linestrings"]:
                    # slines = split(ls, mp)
                    line, splitter = insert_coords_in_line(ls, tree_splitter)
                    # prev function returns None for splitter if there is nothing to split
                    if splitter is not None:
                        slines = fast_split(line, splitter)
                        slist.append(list(geometry.MultiLineString(slines)))
                    else:
                        slist.append([ls])

            # flatten the split linestrings, create the bookkeeping_geoms array
            # and find duplicates
            self._segments_list, bk_array = self._flatten_and_index(slist)
            self._duplicates = find_duplicates(self._segments_list)
            self._bookkeeping_linestrings = bk_array.astype(float)

        elif data["bookkeeping_geoms"]:
            bk_array = np_array_from_lists(data["bookkeeping_geoms"]).ravel()
            bk_array = np.expand_dims(
                bk_array[~np.isnan(bk_array)].astype(np.int64), axis=1
            )
            self._segments_list = data["linestrings"]
            self._duplicates = find_duplicates(data["linestrings"])
            self._bookkeeping_linestrings = bk_array

        else:
            self._segments_list = data["linestrings"]

        # prepare to return object
        data["linestrings"] = self._segments_list
        data["bookkeeping_duplicates"] = self._duplicates
        data["bookkeeping_linestrings"] = self._bookkeeping_linestrings

        return data
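
The splitting that fast_split performs above can be illustrated with plain Shapely, assuming the junction points are already known (the shapely.ops.split call and the coordinates below are only an illustration, not this package's actual code path):

from shapely.geometry import LineString, MultiPoint
from shapely.ops import split

line = LineString([(0, 0), (1, 1), (2, 2), (3, 3)])
junctions = MultiPoint([(1, 1), (2, 2)])

# Split the linestring at the junction points, as the cut step does.
pieces = split(line, junctions)
print(len(pieces.geoms))  # 3 segments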
Code example #11
    def joiner(self, data):
        """
        Entry point for the class Join. This function identifies junctions
        (intersection points) of shared paths.

        The join function is the second step in the topology computation.
        The following sequence is adopted:
        1. extract
        2. join
        3. cut
        4. dedup
        5. hashmap

        Detects the junctions of shared paths from the specified hash of linestrings.

        After decomposing all geometric objects into linestrings, it is necessary to
        detect the junctions, or start- and end-points, of shared paths so these paths
        can be 'merged' in the next step. 'Merged' is quoted because in fact only one
        of the shared paths is kept and the other is removed.

        Parameters
        ----------
        data : dict
            object created by the method topojson.extract.
        quant_factor : int, optional (default: None)
            quantization factor, used to constrain float numbers to integer values.
            - Use 1e4 for 5-digit values (00001-99999)
            - Use 1e6 for 7-digit values (0000001-9999999)

        Returns
        -------
        dict
            object expanded with
            - new key: junctions
            - new key: transform (if quant_factor is not None)
        """

        # presimplify linestrings if required
        if self.options.presimplify > 0:
            # set default if not specifically given in the options
            if type(self.options.presimplify) == bool:
                simplify_factor = 2
            else:
                simplify_factor = self.options.presimplify

            data["linestrings"] = simplify(
                data["linestrings"],
                simplify_factor,
                algorithm=self.options.simplify_algorithm,
                package=self.options.simplify_with,
                input_as="linestring",
            )

        # compute the bounding box of input geometry
        lsbs = geometry.asMultiLineString(data["linestrings"]).bounds
        ptbs = geometry.asMultiPoint(data["coordinates"]).bounds
        data["bbox"] = compare_bounds(lsbs, ptbs)

        # prequantize linestrings if required
        if self.options.prequantize > 0:
            # set default if not specifically given in the options
            if type(self.options.prequantize) == bool:
                quant_factor = 1e6
            else:
                quant_factor = self.options.prequantize

            data["linestrings"], data["transform"] = quantize(
                data["linestrings"], data["bbox"], quant_factor)

            data["coordinates"], data["transform"] = quantize(
                data["coordinates"], data["bbox"], quant_factor)

        if not self.options.topology or not data["linestrings"]:
            data["junctions"] = self.junctions
            return data

        if self.options.shared_paths == "coords":

            def _get_verts(geom):
                # get coords of each LineString
                return [x for x in geom.coords]

            geoms = {}
            junctions = []

            for ls in data["linestrings"]:
                verts = _get_verts(ls)
                for i, vert in enumerate(verts):
                    ran = geoms.pop(vert, None)
                    neighs = sorted([
                        verts[i - 1], verts[i + 1 if i < len(verts) - 1 else 0]
                    ])
                    if ran and ran != neighs:
                        junctions.append(vert)
                    geoms[vert] = neighs

            self.junctions = [geometry.Point(xy) for xy in set(junctions)]
        else:

            # create list with unique combinations of lines using a rdtree
            line_combs = select_unique_combs(data["linestrings"])

            # iterate over index combinations
            for i1, i2 in line_combs:
                g1 = data["linestrings"][i1]
                g2 = data["linestrings"][i2]

                # check if the geometries are equal;
                # equal means the geometry objects coincide with each other.
                # a rotated polygon or a reversed linestring are both considered equal.
                if not g1.equals(g2):
                    # geoms are unique, let's find junctions
                    self.shared_segs(g1, g2)

            # self.segments are nested lists of LineStrings, get coordinates of each nest
            s_coords = []
            for segment in self.segments:
                s_coords.extend([[(x.xy[0][y], x.xy[1][y]) for x in segment
                                  for y in range(len(x.xy[0]))]])
                # s_coords.extend([[y for x in segment for y in list(x.coords)]])

            # only keep junctions that appear only once in each segment (nested list)
            # coordinates that appear multiple times are not junctions
            for coords in s_coords:
                self.junctions.extend([
                    geometry.Point(i) for i in coords if coords.count(i) == 1
                ])

            # junctions can appear multiple times in multiple segments, remove duplicates
            self.junctions = [
                loads(xy) for xy in list(set([x.wkb for x in self.junctions]))
            ]

        # prepare to return object
        data["junctions"] = self.junctions

        return data
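
The shared_segs step above looks for sections where two linestrings overlap; Shapely's shapely.ops.shared_paths primitive shows what such a shared section looks like. A small sketch with invented coordinates (not this package's actual implementation):

from shapely.geometry import LineString
from shapely.ops import shared_paths

g1 = LineString([(0, 0), (1, 0), (2, 0), (3, 0)])
g2 = LineString([(1, 0), (2, 0), (4, 0)])

# GeometryCollection of two MultiLineStrings: sections shared in the same
# direction and sections shared in the opposite direction.
forward, backward = shared_paths(g1, g2).geoms
print(forward.wkt)   # e.g. MULTILINESTRING ((1 0, 2 0))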
Code example #12
def _build_hatch(
    img: np.ndarray,
    hatch_pitch: float = 5.0,
    levels: Tuple[int, int, int] = (64, 128, 192),
    circular: bool = False,
    invert: bool = False,
) -> Tuple[MultiLineString, Any, Any, Any]:
    if invert:
        levels = tuple(255 - i for i in reversed(levels))

    h, w = img.shape

    # border for contours to be closed shapes
    r = np.zeros(shape=(img.shape[0] + 2, img.shape[1] + 2))
    r[1:-1, 1:-1] = img

    # Find contours at the three threshold levels
    black_cnt = measure.find_contours(r, levels[0])
    dark_cnt = measure.find_contours(r, levels[1])
    light_cnt = measure.find_contours(r, levels[2])

    light_mls = asMultiLineString(np.empty(shape=(0, 2, 2)))
    dark_mls = asMultiLineString(np.empty(shape=(0, 2, 2)))
    black_mls = asMultiLineString(np.empty(shape=(0, 2, 2)))

    try:
        black_p = _build_mask(black_cnt)
        dark_p = _build_mask(dark_cnt)
        light_p = _build_mask(light_cnt)

        if circular:
            build_func = _build_circular_hatch
        else:
            build_func = _build_diagonal_hatch

        if not circular:
            # correct offset to ensure desired distance between hatches
            hatch_pitch /= math.cos(math.pi / 4)

        light_lines = build_func(4 * hatch_pitch, 0, w, h)
        dark_lines = build_func(4 * hatch_pitch, 2 * hatch_pitch, w, h)
        black_lines = build_func(2 * hatch_pitch, hatch_pitch, w, h)

        frame = Polygon([(3, 3), (w - 6, 3), (w - 6, h - 6), (3, h - 6)])

        light_mls = shapely.ops.linemerge(
            asMultiLineString(light_lines).difference(light_p).intersection(
                frame))
        dark_mls = shapely.ops.linemerge(
            asMultiLineString(dark_lines).difference(dark_p).intersection(
                frame))
        black_mls = shapely.ops.linemerge(
            asMultiLineString(black_lines).difference(black_p).intersection(
                frame))
    except Exception as exc:
        print(f"Error: {exc}")

    return (
        MultiLineString([ls for ls in light_mls] + [ls for ls in dark_mls] +
                        [ls for ls in black_mls]),
        black_cnt,
        dark_cnt,
        light_cnt,
    )