Code Example #1
def createIntermediateImage(translated, intermediateShape, pixels):
	# collect the (x, y) vertices from the first row of the translated matrix
	intermediate = tuple((x[0], x[1]) for x in translated.tolist()[0])

	poly_unordered = MultiPoint(intermediate).convex_hull

	img_width, img_height = pixels

	intermediate_img = Image.new('1', (img_width, img_height))
	pixels = intermediate_img.load()  # rebinds the `pixels` argument to the pixel-access object
	white = 1  # (255,255,255) in RGB mode
	black = 0  # (0,0,0) in RGB mode

	for i in range(img_width):
		for j in range(img_height):
			point = Point(i, j)
			if poly_unordered.contains(point):
				pixels[i, j] = white  # formerly grey
			else:
				pixels[i, j] = black

	# contains() excludes the boundary, so paint the hull vertices explicitly
	for i in intermediate:
		pixels[i[0], i[1]] = white

	intermediate_img.save(intermediateShape)
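A hedged invocation sketch: it assumes `translated` is a 1xNx2 NumPy array (so that `translated.tolist()[0]` yields a list of (x, y) pairs) along with the PIL/shapely imports the snippet relies on.

import numpy as np
from PIL import Image
from shapely.geometry import MultiPoint, Point

translated = np.array([[[10, 10], [50, 15], [35, 55]]])  # hypothetical 1 x N x 2 vertex array
createIntermediateImage(translated, 'intermediate.png', (64, 64))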
Code Example #2
File: tesselect.py  Project: antonsergeev/tesselect
def PreciseLabels(data_shape, argmin_xy, out_edges, mask_center):
    """
    Fuction
    """
    mesh_x, mesh_y = np.meshgrid(np.arange(data_shape[1]), np.arange(data_shape[0]))
    coords = np.vstack((mesh_x.ravel(), mesh_y.ravel())).T
    coords = MultiPoint(coords)
    label_data_prec = np.zeros(data_shape, dtype=int)
    
    num = np.sum(mask_center)  # number of precise labels
    percentage = np.rint(np.linspace(0,num,21)).astype(int)
    count = 0  # number of calculated labels
    print('Calculated: ', end='')
    
    for i, outs in enumerate(out_edges):
        if mask_center[i]:
            poly = MultiPoint(argmin_xy.T[outs]).convex_hull
            inpoints = [point for point in coords if poly.contains(point)]
            for point in inpoints:
                label_data_prec[point.y, point.x] = i + 1
            
            if count in percentage:
                print('{}%... '.format(np.argwhere(percentage==count)[0,0]*5), end='')
            elif count == num - 1:
                print('100%')
            count += 1
    
    return label_data_prec
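A hedged aside: the per-point `poly.contains` test dominates the runtime here; shapely's prepared geometries speed up repeated containment tests against the same hull. A minimal sketch reusing the `poly` and `coords` names from the function above:

from shapely.prepared import prep

prepared = prep(poly)  # one-off preparation of the hull
inpoints = [point for point in coords if prepared.contains(point)]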
Code Example #3
File: test_tools.py  Project: geopandas/geopandas
class TestTools:
    def setup_method(self):
        self.p1 = Point(0, 0)
        self.p2 = Point(1, 1)
        self.p3 = Point(2, 2)
        self.mpc = MultiPoint([self.p1, self.p2, self.p3])

        self.mp1 = MultiPoint([self.p1, self.p2])
        self.line1 = LineString([(3, 3), (4, 4)])

    def test_collect_single(self):
        result = collect(self.p1)
        assert self.p1.equals(result)

    def test_collect_single_force_multi(self):
        result = collect(self.p1, multi=True)
        expected = MultiPoint([self.p1])
        assert expected.equals(result)

    def test_collect_multi(self):
        result = collect(self.mp1)
        assert self.mp1.equals(result)

    def test_collect_multi_force_multi(self):
        result = collect(self.mp1, multi=True)
        assert self.mp1.equals(result)

    def test_collect_list(self):
        result = collect([self.p1, self.p2, self.p3])
        assert self.mpc.equals(result)

    def test_collect_GeoSeries(self):
        s = GeoSeries([self.p1, self.p2, self.p3])
        result = collect(s)
        assert self.mpc.equals(result)

    def test_collect_mixed_types(self):
        with pytest.raises(ValueError):
            collect([self.p1, self.line1])

    def test_collect_mixed_multi(self):
        with pytest.raises(ValueError):
            collect([self.mpc, self.mp1])

    def test_epsg_from_crs(self):
        assert epsg_from_crs({'init': 'epsg:4326'}) == 4326
        assert epsg_from_crs({'init': 'EPSG:4326'}) == 4326
        assert epsg_from_crs('+init=epsg:4326') == 4326

    @pytest.mark.skipif(
        LooseVersion(pyproj.__version__) >= LooseVersion('2.0.0'),
        reason="explicit_crs_from_epsg depends on parsing data files of "
               "proj.4 < 6 / pyproj < 2 ")
    def test_explicit_crs_from_epsg(self):
        expected = {'no_defs': True, 'proj': 'longlat', 'datum': 'WGS84', 'init': 'epsg:4326'}
        assert explicit_crs_from_epsg(epsg=4326) == expected
        assert explicit_crs_from_epsg(epsg='4326') == expected
        assert explicit_crs_from_epsg(crs={'init': 'epsg:4326'}) == expected
        assert explicit_crs_from_epsg(crs="+init=epsg:4326") == expected
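For reference, a minimal sketch of the behavior these tests pin down, assuming geopandas.tools.collect and the shapely geometries used above:

from shapely.geometry import MultiPoint, Point
from geopandas.tools import collect

pts = [Point(0, 0), Point(1, 1)]
assert collect(pts).equals(MultiPoint(pts))                  # list -> MultiPoint
assert collect(Point(0, 0)).equals(Point(0, 0))              # single geometry passes through
assert collect(Point(0, 0), multi=True).equals(MultiPoint([Point(0, 0)]))  # forced multi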
Code Example #4
File: test_tools.py  Project: geopandas/geopandas
    def setup_method(self):
        self.p1 = Point(0, 0)
        self.p2 = Point(1, 1)
        self.p3 = Point(2, 2)
        self.mpc = MultiPoint([self.p1, self.p2, self.p3])

        self.mp1 = MultiPoint([self.p1, self.p2])
        self.line1 = LineString([(3, 3), (4, 4)])
Code Example #5
File: spatial.py  Project: rockychen-dpaw/gokart
def extractPoints(geom):
    if isinstance(geom, (Point, MultiPoint)):
        return geom
    elif isinstance(geom, GeometryCollection):
        result = None
        for g in geom:
            p = extractPoints(g)
            if not p:
                continue
            elif not result:
                result = p
            elif isinstance(result, MultiPoint):
                result = [geom1 for geom1 in result.geoms]
                if isinstance(p, Point):
                    result.append(p)
                    result = MultiPoint(result)
                else:
                    for geom1 in p.geoms:
                        result.append(geom1)
                    result = MultiPoint(result)
            else:
                if isinstance(p, Point):
                    result = MultiPoint([result, p])
                else:
                    result = [result]
                    for geom1 in p.geoms:
                        result.append(geom1)
                    result = MultiPoint(result)
        return result
    else:
        return None
Code Example #6
    def Find_Average_Hexagon_Color(self, hex_coords, im):
        import math
        from shapely.geometry import Point, MultiPoint

        coords = list(hex_coords)

        # Default RGB values to black opaque and pixel counter to zero.
        rgb = [0, 0, 0]
        count = 0

        # Calculate hexagon bounding box.
        minx = min(coords[::2])
        maxx = max(coords[::2])
        miny = min(coords[1::2])
        maxy = max(coords[1::2])

        bbox_coords = [minx, miny, maxx, miny, maxx, maxy, minx, maxy]

        # Calculate polygon center.
        midx = (minx + maxx) / 2.0
        midy = (miny + maxy) / 2.0

        coords[::2]  = [(self.scale * (x - midx)) + midx for x in coords[::2]]
        coords[1::2] = [(self.scale * (y - midy)) + midy for y in coords[1::2]]

        subhex_coords = list(zip(coords[::2], coords[1::2]))

        subhex_hull = MultiPoint(subhex_coords).convex_hull

        # Flatten subhex list of tuples to conventional list for plotting.
        subhex_coords = list(sum(subhex_coords, ()))

        for x in range(int(math.floor(minx)), int(math.ceil(maxx))):
            for y in range(int(math.floor(miny)), int(math.ceil(maxy))):
                mypt = Point(x, y)
                if(subhex_hull.contains(mypt)):
                    r, g, b = im.getpixel(tuple([x, y]))
                    rgb[0] += r
                    rgb[1] += g
                    rgb[2] += b
                    count  += 1

        # Guard against hulls so small that no pixel center falls inside.
        if count:
            rgb = [channel / count for channel in rgb]

        rgb_color = tuple([int(i) for i in rgb])

        return bbox_coords, subhex_coords, rgb_color
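A hedged invocation sketch: the method reads a `scale` attribute from its instance, so the holder class below is hypothetical, as are the image and hexagon coordinates.

from PIL import Image

class HexSampler:
    scale = 0.9  # shrink vertices toward the center to avoid edge pixels
    Find_Average_Hexagon_Color = Find_Average_Hexagon_Color  # the method above

im = Image.new('RGB', (100, 100), (30, 60, 90))
hex_coords = [50, 10, 85, 30, 85, 70, 50, 90, 15, 70, 15, 30]  # flat x0,y0,x1,y1,... list
bbox, subhex, rgb = HexSampler().Find_Average_Hexagon_Color(hex_coords, im)
print(rgb)  # (30, 60, 90) for a uniformly colored image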
Code Example #7
class TestTools(unittest.TestCase):
    def setUp(self):
        self.p1 = Point(0,0)
        self.p2 = Point(1,1)
        self.p3 = Point(2,2)
        self.mpc = MultiPoint([self.p1, self.p2, self.p3])

        self.mp1 = MultiPoint([self.p1, self.p2])
        self.line1 = LineString([(3,3), (4,4)])

    def test_collect_single(self):
        result = collect(self.p1)
        self.assertTrue(self.p1.equals(result))

    def test_collect_single_force_multi(self):
        result = collect(self.p1, multi=True)
        expected = MultiPoint([self.p1])
        self.assertTrue(expected.equals(result))

    def test_collect_multi(self):
        result = collect(self.mp1)
        self.assertTrue(self.mp1.equals(result))

    def test_collect_multi_force_multi(self):
        result = collect(self.mp1, multi=True)
        self.assertTrue(self.mp1.equals(result))

    def test_collect_list(self):
        result = collect([self.p1, self.p2, self.p3])
        self.assertTrue(self.mpc.equals(result))

    def test_collect_GeoSeries(self):
        s = GeoSeries([self.p1, self.p2, self.p3])
        result = collect(s)
        self.assertTrue(self.mpc.equals(result))

    def test_collect_mixed_types(self):
        with self.assertRaises(ValueError):
            collect([self.p1, self.line1])

    def test_collect_mixed_multi(self):
        with self.assertRaises(ValueError):
            collect([self.mpc, self.mp1])
Code Example #8
File: clip_images.py  Project: robintw/LandsatUtils
def clip_images(directory, points, dist):

    buffer_dist = dist

    files = os.listdir(directory)
    regex = re.compile('.tif$', re.IGNORECASE)
    files = [os.path.join(directory, f) for f in files if regex.search(f)]

    from_crs = Proj({'init': 'epsg:4326'})

    with rasterio.open(files[0]) as src:
        dest_crs = Proj(src.crs)

    # points = ['57.232,-2.971']
    points = [x.split(',') for x in points]
    points = [[float(x), float(y)] for y, x in points]
    points = [transform(from_crs, dest_crs, x, y) for x, y in points]

    if len(points) > 1:
        points = MultiPoint(points)
        boundary = points.bounds
        points = [[boundary[0], boundary[1]], [boundary[2], boundary[3]]]
    else:
        points = Point(points[0][0], points[0][1])
        area = points.buffer(buffer_dist)
        boundary = area.bounds
        points = [[boundary[0], boundary[1]], [boundary[2], boundary[3]]]

    [[ulx, lry], [lrx, uly]] = points

    for image in files:
        output_image_new = image + '.tmp'

        # gdalwarp -te expects the extent as xmin ymin xmax ymax
        command_new = 'gdalwarp -overwrite -of GTiff '\
                      '-te %s %s %s %s %s %s' % (ulx, lry, lrx, uly,
                                                 image,
                                                 output_image_new)

        call(command_new, shell=True)

        os.remove(image)
        os.rename(output_image_new, image)
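A hedged invocation sketch (paths hypothetical): per the commented-out example inside the function, each entry of points is a 'lat,lon' string, and dist is a buffer distance in the rasters' projected units.

clip_images('/data/landsat_scenes', ['57.232,-2.971'], dist=5000)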
Code Example #9
def get_neighborhood_from_kml(results, kml_path):
  test_lat, test_lon = (results['geometry']['location']['lat'],
                        results['geometry']['location']['lng'])
  test_point = Point(float(test_lat), float(test_lon))
  kml = etree.parse(kml_path)
  placemarks = kml.findall('//kml:Placemark', namespaces=NSMAP)
  found_neighborhood = None
  for placemark in placemarks:
    el = placemark.find(COORDINATES, namespaces=NSMAP)
    coords = []
    for coord in el.text.split(' '):
      lon, lat, _ = coord.split(',')  # KML coordinate order is lon,lat[,alt]
      coords.append((float(lat), float(lon)))
    poly = MultiPoint(coords).convex_hull
    if poly.contains(test_point):
      name = placemark.find('kml:name', namespaces=NSMAP)
      found_neighborhood = name.text
  if found_neighborhood is None:
    return get_neighborhood(results)
  return found_neighborhood
Code Example #10
def get_neighborhood_from_kml(lat_lon, kml_path):
  test_lat, test_lon = lat_lon
  test_point = Point(float(test_lat), float(test_lon))
  kml = etree.parse(kml_path)
  found_neighborhood = None
  for el in kml.findall('.//kml:coordinates', namespaces=NSMAP):
    coords = []
    for coord in el.text.split(' '):
      lon, lat, _ = coord.split(',')  # KML coordinate order is lon,lat[,alt]
      coords.append((float(lat), float(lon)))
    poly = MultiPoint(coords).convex_hull
    if poly.contains(test_point):
      placemark = el.getparent().getparent().getparent().getparent()
      if 'MultiGeometry' in str(placemark.tag):
        placemark = placemark.getparent()
      val = placemark.find('kml:ExtendedData/kml:Data[@name=\'32CitiesNa\']/kml:value', namespaces=NSMAP)
      if val is not None:
        found_neighborhood = val.text
  return found_neighborhood
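Both variants approximate each neighborhood with the convex hull of its boundary vertices, so concave outlines are over-covered: a point inside a notch of the true polygon still tests as inside the hull. A small illustration:

from shapely.geometry import MultiPoint, Point, Polygon

boundary = [(0, 0), (4, 0), (4, 4), (2, 1), (0, 4)]  # concave, notched outline
true_poly = Polygon(boundary)
hull = MultiPoint(boundary).convex_hull

probe = Point(2, 2)                # sits inside the notch
print(true_poly.contains(probe))   # False
print(hull.contains(probe))        # True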
Code Example #11
File: seismic_network.py  Project: nimanshr/toolbox
class SeismicNetwork(object):
    def __init__(self, net_lats, net_lons):
        poly_x, poly_y = pyproj.transform(wgs84, pj_laea, net_lons, net_lats)
        self.polygon = MultiPoint(list(zip(poly_x, poly_y))).convex_hull  # list() needed on Python 3

    def contains(self, lat, lon):
        x, y = pyproj.transform(wgs84, pj_laea, lon, lat)
        return self.polygon.contains(Point(x, y))

    def inside_network(self, epi_lats, epi_lons):
        """
        This function returns epicenter coordinates located inside a seismic
        station network. The point-in-polygon problem is solved based on ray
        casting method.

        :param epi_lats: Latitudes of earthquake epicenters.
        :param epi_lons: Longitudes of earthquake epicenters.

        :type epi_lats: numpy.array, list/tuple or scalar
        :type epi_lons: numpy.array, list/tuple or scalar

        :returns:
            Epicenter coordinates located within network. The first and second
            columns are latitude and longitude, respectively.
        :rtype: numpy.array
        """
        epi_x, epi_y = pyproj.transform(wgs84, pj_laea, epi_lons, epi_lats)
        r = []
        for i, (x, y) in enumerate(zip(epi_x, epi_y)):
            epicenter = Point(x, y)
            if epicenter.within(self.polygon):
                r.append((epi_lats[i], epi_lons[i]))
        return np.array(r)
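A hedged usage sketch: wgs84 and pj_laea are the module-level pyproj projections the class assumes (a geographic CRS plus a local equal-area projection, via the legacy pyproj.transform API used in the class).

import numpy as np
import pyproj
from shapely.geometry import MultiPoint, Point

wgs84 = pyproj.Proj(proj='latlong', datum='WGS84')
pj_laea = pyproj.Proj(proj='laea', lat_0=64.0, lon_0=-21.0)  # hypothetical network center

net = SeismicNetwork(net_lats=[63.5, 64.5, 64.5, 63.5],
                     net_lons=[-22.0, -22.0, -20.0, -20.0])
print(net.contains(64.0, -21.0))  # single-epicenter test
inside = net.inside_network(np.array([64.0, 66.0]), np.array([-21.0, -18.0]))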
Code Example #12
    def __init__(self, data_folder, results_folder, casename, miso_data):
        print("loading plotting data...")
        self.miso_map = miso_data.miso_map
        self.iso_map = miso_data.iso_map
        self.states_map = miso_data.states_map
        self.utilities_map = miso_data.utilities_map
        self.casename = casename

        miso_data.utility_territory_mapping()
        self.map_dict = miso_data.map_dict
        assert isinstance(casename, str)
        # data loads
        miso_map = pd.read_excel(
            join(data_folder, "NREL-Seams Model (MISO).xlsx"), sheet_name="Mapping"
        )
        miso_loads = pd.read_excel(
            join(data_folder, "NREL-Seams Model (MISO).xlsx"), sheet_name="Load"
        )
        miso_tx = pd.read_excel(
            join(data_folder, "NREL-Seams Model (MISO).xlsx"),
            sheet_name="Transmission",
        )

        # new loads
        miso_busmap = pd.read_csv(
            os.path.join(data_folder, "MISO_data", "Bus Mapping Extra Data.csv")
        )
        miso_bus_zone = pd.read_excel(
            join(data_folder, "MISO_data", "Bus_to_SeamsRegion.xlsx")
        )
        miso_busmap = miso_busmap.merge(miso_bus_zone, left_on="Name", right_on="Bus")
        miso_busmap = miso_busmap[
            ~miso_busmap["Seams Region"].isin(["PJM-C", "CSWS+", "MDU", "MN-NW"])
        ]
        miso_busmap = miso_busmap.rename(columns={"Seams Region": "Seams_Region"})
        miso_seam_zone = pd.DataFrame(columns=["Seams_Region", "geometry"])
        for i in list(miso_busmap.Seams_Region.unique()):
            tmp = miso_busmap[miso_busmap.Seams_Region == i]
            tmp_Lon = list(tmp.Lon)
            tmp_Lat = list(tmp.Lat)
            Seams_loc = MultiPoint(list(zip(tmp_Lon, tmp_Lat)))
            miso_seam_zone = miso_seam_zone.append(
                [{"Seams_Region": i, "geometry": Seams_loc,}], ignore_index=True,
            )
        miso_seam_zone_gdf = gpd.GeoDataFrame(miso_seam_zone)
        miso_seam_zone_gdf["centroid"] = miso_seam_zone_gdf.centroid
        miso_seam_zone_gdf = miso_seam_zone_gdf.set_geometry("centroid")
        miso_seam_zone_gdf.crs = "EPSG:4326"
        self.miso_seam_zone_gdf = miso_seam_zone_gdf  # for later use

        # results loads
        region_lole = pd.read_csv(
            join(results_folder, casename + "regionlole.csv"), header=None
        )
        region_eue = pd.read_csv(
            join(results_folder, casename + "regioneue.csv"), header=None
        )
        region_period_eue = pd.read_csv(
            join(results_folder, casename + "regionperiodeue.csv"), header=None
        )
        period_eue = pd.read_csv(
            join(results_folder, casename + "periodeue.csv"), header=None
        )
        period_lolp = pd.read_csv(
            join(results_folder, casename + "periodlolp.csv"), header=None
        )

        utilization = pd.read_csv(
            join(results_folder, casename + "utilizations.csv"), header=None
        )

        flow = pd.read_csv(join(results_folder, casename + "flows.csv"), header=None)

        # clean and reformat some of the loaded info
        region_lole.index, region_eue.index = (
            list(miso_map["CEP Bus ID"]),
            list(miso_map["CEP Bus ID"]),
        )
        region_lole.columns, region_eue.columns = ["LOLE"], ["EUE"]
        region_df = pd.concat([region_lole, region_eue], axis=1)
        tmps = len(region_period_eue.columns)
        region_df["load"] = list(miso_loads.iloc[:tmps, 1:].sum(axis=0))
        region_df["names"] = miso_map["CEP Bus Name"].values
        # clean and reformat transmission info

        # create attributes of stuff we want later
        self.results_folder = results_folder
        self.miso_map = miso_map
        self.miso_loads = miso_loads
        self.miso_tx = miso_tx
        self.region_df = region_df
        self.region_lole = region_lole
        self.region_eue = region_eue
        self.region_period_eue = region_period_eue
        self.period_eue = period_eue
        self.period_lolp = period_lolp
        self.utilization = utilization
        self.flow = flow
        print("...plotting data loaded")
Code Example #13
def get_the_most_possible_value(valueArea, ocr_result_content):
    ''' Step 1: initialize the returned result '''
    find_value_content_dic = []  # records of the boxes that pass the IoU match
    ''' Normalize the value area, in preparation for the IoU computation '''
    # flatten the quadrilateral's four corners into [x0, y0, x1, y1, ...]
    line1 = [
        valueArea[0][0], valueArea[0][1], valueArea[1][0], valueArea[1][1],
        valueArea[2][0], valueArea[2][1], valueArea[3][0], valueArea[3][1]
    ]
    a = np.array(line1).reshape(4, 2)  # 4x2 array of corner coordinates
    # Shapely quadrilateral; convex_hull reorders the corners as
    # top-left, bottom-left, bottom-right, top-right, back to top-left.
    poly1 = Polygon(a).convex_hull
    ''' Walk the OCR results looking for boxes that match the value area '''
    # print(Polygon(a).convex_hull)  # uncomment to inspect the hull
    for i in range(len(ocr_result_content)):
        content_dic = ocr_result_content[i]
        # confidence = content_dic['confidence']
        # text = content_dic['text']
        text_box_position = content_dic['text_box_position']
        points = text_box_position
        ''' Normalize the OCR box region for the IoU computation '''
        line2 = [
            points[0][0], points[0][1], points[1][0], points[1][1],
            points[2][0], points[2][1], points[3][0], points[3][1]
        ]
        b = np.array(line2).reshape(4, 2)
        poly2 = Polygon(b).convex_hull
        #         print(Polygon(b).convex_hull)
        ''' Merge both boxes' corners to compute the enclosing-hull area '''
        union_poly = np.concatenate((a, b))  # both boxes' corners as an 8x2 array
        # print(union_poly)
        # print(MultiPoint(union_poly).convex_hull)  # smallest convex polygon enclosing both boxes
        ''' Compute the IoU '''
        if not poly1.intersects(poly2):  # the two quadrilaterals do not intersect
            iou = 0
        else:
            try:
                inter_area = poly1.intersection(poly2).area  # intersection area
                #                 print(inter_area)
                # union_area = poly1.area + poly2.area - inter_area
                union_area = MultiPoint(union_poly).convex_hull.area
                #                 print(union_area)
                if union_area == 0:
                    iou = 0
                    # iou = float(inter_area) / (union_area-inter_area)  # wrong
                else:
                    iou = float(inter_area) / union_area
                # iou = float(inter_area) / (poly1.area + poly2.area - inter_area)
                # The source offers two IoU definitions: the first divides the
                # intersection by the area of the smallest convex polygon enclosing
                # both quadrilaterals; the second is the usual intersection / union
                # used for rectangular boxes.
            except shapely.geos.TopologicalError:
                # print('shapely.geos.TopologicalError occurred, iou set to 0')
                iou = 0
        ''' Use the IoU to decide where the matched box belongs '''
        if iou > 0.7:
            if len(find_value_content_dic) <= 0:
                find_value_content_dic.append(content_dic)
            else:
                insert_index = -1
                for point_id in range(len(find_value_content_dic)):
                    # insert_index = point_id
                    value_box_position = find_value_content_dic[point_id][
                        'text_box_position']
                    if text_box_position[0][0] > value_box_position[0][0]:
                        iou_h = (min(text_box_position[3][1],
                                     value_box_position[2][1]) -
                                 max(text_box_position[0][1],
                                     value_box_position[1][1])) / (
                                         max(text_box_position[3][1],
                                             value_box_position[2][1]) -
                                         min(text_box_position[0][1],
                                             value_box_position[1][1]))
                    else:
                        iou_h = (min(value_box_position[3][1],
                                     text_box_position[2][1]) -
                                 max(value_box_position[0][1],
                                     text_box_position[1][1])) / (
                                         max(value_box_position[3][1],
                                             text_box_position[2][1]) -
                                         min(value_box_position[0][1],
                                             text_box_position[1][1]))

                    if iou_h > 0.2:
                        if text_box_position[0][0] < value_box_position[0][0]:
                            insert_index = point_id
                            break
                        else:
                            pass
                    elif text_box_position[0][1] < value_box_position[0][1]:
                        insert_index = point_id
                        break
                    else:
                        pass
                        # insert_index =
                if insert_index < 0:
                    find_value_content_dic.append(content_dic)
                else:
                    find_value_content_dic.insert(insert_index, content_dic)

    #     print("index_value, score_index", index_value, score_index)

    return find_value_content_dic
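A minimal standalone sketch of the IoU logic above, with two overlapping axis-aligned squares standing in for OCR boxes; it shows the two IoU definitions mentioned in the comments:

import numpy as np
from shapely.geometry import MultiPoint, Polygon

a = np.array([[0, 0], [0, 2], [2, 2], [2, 0]])   # first quadrilateral
b = np.array([[1, 1], [1, 3], [3, 3], [3, 1]])   # second, overlapping it
poly1, poly2 = Polygon(a).convex_hull, Polygon(b).convex_hull

inter = poly1.intersection(poly2).area                           # 1.0
enclosing = MultiPoint(np.concatenate((a, b))).convex_hull.area  # 8.0
union = poly1.area + poly2.area - inter                          # 7.0
print(inter / enclosing)  # first definition: intersection / enclosing hull
print(inter / union)      # second definition: classic IoU, ~0.143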
Code Example #14
import pandas as pd
from shapely.geometry import MultiPoint

# read the csv file containing borderline longitudes and latitudes for all Indian districts
df = pd.read_csv(r"C:\Users\sam\IndiaMap\Ind_adm2_Points.csv")

district = ""
geoList = []
result_df = pd.DataFrame(
    data=None, columns=['State', 'District', 'Latitude', 'Longitude'])

for index, row in df.iterrows():
    # check if this is a new district value
    if district and (district != df.iloc[index]['District']):
        # calculate centroid for previous district
        points = MultiPoint(geoList)
        # save the state, district, long-lat and centroid to new dataframe
        result_df = result_df.append(
            {
                'State': df['State'].iloc[index - 1],
                'District': df['District'].iloc[index - 1],
                'Latitude': points.centroid.x,
                'Longitude': points.centroid.y
            },
            ignore_index=True)
        # reset the point list for the next district
        del geoList[:]
    # save this new district's name
    district = df.iloc[index]['District']
    # add this long lat info to later calculate centroid
    geoList.append((df.iloc[index]['Latitude'], df.iloc[index]['Longitude']))
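Note that the loop only flushes a district when the next one begins, so the final district never reaches result_df; a hedged fix is to repeat the centroid step once after the loop:

# flush the last district after the loop (same fields as inside the loop)
if geoList:
    points = MultiPoint(geoList)
    result_df = result_df.append(
        {
            'State': df['State'].iloc[-1],
            'District': df['District'].iloc[-1],
            'Latitude': points.centroid.x,
            'Longitude': points.centroid.y
        },
        ignore_index=True)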
Code Example #15
from matplotlib import pyplot
from shapely.geometry import MultiPoint

from descartes.patch import PolygonPatch

from figures import SIZE

fig = pyplot.figure(1, figsize=SIZE, dpi=90)
fig.set_frameon(True)

# 1
ax = fig.add_subplot(121)

points2 = MultiPoint([(0, 0), (2, 2)])
for p in points2:
    ax.plot(p.x, p.y, 'o', color='#999999')
hull2 = points2.convex_hull
x, y = hull2.xy
ax.plot(x, y, color='#6699cc', linewidth=3, alpha=0.5, zorder=2)

ax.set_title('a) N = 2')

xrange = [-1, 4]
yrange = [-1, 3]
ax.set_xlim(*xrange)
ax.set_xticks(list(range(*xrange)) + [xrange[-1]])  # list() needed on Python 3
ax.set_ylim(*yrange)
ax.set_yticks(list(range(*yrange)) + [yrange[-1]])
ax.set_aspect(1)

#2
Code Example #16
def main():
    #-- start MPI communicator
    comm = MPI.COMM_WORLD

    #-- Read the system arguments listed after the program
    long_options = ['help','directory=','region=','verbose','mode=']
    optlist,arglist = getopt.getopt(sys.argv[1:],'hD:R:VM:',long_options)

    #-- working data directory for location of RGI files
    base_dir = os.getcwd()
    #-- region of Randolph Glacier Inventory to run
    RGI_REGION = 17
    #-- verbosity settings
    VERBOSE = False
    #-- permissions mode of the local files (number in octal)
    MODE = 0o775
    for opt, arg in optlist:
        if opt in ('-h','--help'):
            usage() if (comm.rank==0) else None
            sys.exit()
        elif opt in ("-D","--directory"):
            base_dir = os.path.expanduser(arg)
        elif opt in ("-R","--region"):
            RGI_REGION = int(arg)
        elif opt in ("-V","--verbose"):
            #-- output module information for process
            info(comm.rank,comm.size)
            VERBOSE = True
        elif opt in ("-M","--mode"):
            MODE = int(arg, 8)

    #-- enter HDF5 file as system argument
    if not arglist:
        raise IOError('No input file entered as system arguments')
    #-- tilde-expansion of listed input file
    FILE = os.path.expanduser(arglist[0])

    #-- read data from input file
    print('{0} -->'.format(FILE)) if (VERBOSE and (comm.rank==0)) else None
    #-- Open the HDF5 file for reading
    fileID = h5py.File(FILE, 'r', driver='mpio', comm=comm)
    DIRECTORY = os.path.dirname(FILE)
    #-- extract parameters from ICESat-2 ATLAS HDF5 file name
    rx = re.compile(r'(ATL\d{2})_(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})_'
        r'(\d{4})(\d{2})(\d{2})_(\d{3})_(\d{2})(.*?).h5$',re.VERBOSE)
    PRD,YY,MM,DD,HH,MN,SS,TRK,CYCL,GRAN,RL,VERS,AUX = rx.findall(FILE).pop()

    #-- read data on rank 0
    if (comm.rank == 0):
        #-- read RGI for region and create shapely polygon objects
        poly_dict,RGI_file = load_glacier_inventory(base_dir,RGI_REGION)
    else:
        #-- create empty object for list of shapely objects
        poly_dict = None
        RGI_file = None

    #-- Broadcast Shapely polygon objects
    poly_dict = comm.bcast(poly_dict, root=0)
    RGI_file = comm.bcast(RGI_file, root=0)
    #-- RGI version and name
    RGI_VERSION,RGI_NAME = re.findall(r'\d_rgi(\d+)_(.*?)$',RGI_file).pop()

    #-- read each input beam within the file
    IS2_atl06_beams = []
    for gtx in [k for k in fileID.keys() if bool(re.match(r'gt\d[lr]',k))]:
        #-- check if subsetted beam contains land ice data
        try:
            fileID[gtx]['land_ice_segments']['segment_id']
        except KeyError:
            pass
        else:
            IS2_atl06_beams.append(gtx)

    #-- number of GPS seconds between the GPS epoch
    #-- and ATLAS Standard Data Product (SDP) epoch
    atlas_sdp_gps_epoch = fileID['ancillary_data']['atlas_sdp_gps_epoch'][:]

    #-- copy variables for outputting to HDF5 file
    IS2_atl06_mask = {}
    IS2_atl06_fill = {}
    IS2_atl06_mask_attrs = {}
    #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
    #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)
    #-- Add this value to delta time parameters to compute full gps_seconds
    IS2_atl06_mask['ancillary_data'] = {}
    IS2_atl06_mask_attrs['ancillary_data'] = {}
    for key in ['atlas_sdp_gps_epoch']:
        #-- get each HDF5 variable
        IS2_atl06_mask['ancillary_data'][key] = fileID['ancillary_data'][key][:]
        #-- Getting attributes of group and included variables
        IS2_atl06_mask_attrs['ancillary_data'][key] = {}
        for att_name,att_val in fileID['ancillary_data'][key].attrs.items():
            IS2_atl06_mask_attrs['ancillary_data'][key][att_name] = att_val

    #-- for each input beam within the file
    for gtx in sorted(IS2_atl06_beams):
        #-- output data dictionaries for beam
        IS2_atl06_mask[gtx] = dict(land_ice_segments={})
        IS2_atl06_fill[gtx] = dict(land_ice_segments={})
        IS2_atl06_mask_attrs[gtx] = dict(land_ice_segments={})

        #-- number of segments
        segment_id = fileID[gtx]['land_ice_segments']['segment_id'][:]
        n_seg, = fileID[gtx]['land_ice_segments']['segment_id'].shape
        #-- invalid value for beam
        fv = fileID[gtx]['land_ice_segments']['h_li'].fillvalue

        #-- define indices to run for specific process
        ind = np.arange(comm.Get_rank(), n_seg, comm.Get_size(), dtype=int)

        #-- extract delta time
        delta_time = np.ma.array(fileID[gtx]['land_ice_segments']['delta_time'][:],
            mask=(fileID[gtx]['land_ice_segments']['delta_time'][:]==fv),
            fill_value=fv)
        #-- extract lat/lon
        longitude = np.ma.array(fileID[gtx]['land_ice_segments']['longitude'][:],
            mask=(fileID[gtx]['land_ice_segments']['longitude'][:]==fv),
            fill_value=fv)
        latitude = np.ma.array(fileID[gtx]['land_ice_segments']['latitude'][:],
            mask=(fileID[gtx]['land_ice_segments']['latitude'][:]==fv),
            fill_value=fv)

        #-- convert reduced lat/lon to shapely multipoint object
        xy_point = MultiPoint(list(zip(longitude[ind],latitude[ind])))

        #-- create distributed intersection map for calculation
        distributed_map = np.zeros((n_seg),dtype=bool)
        distributed_RGIId = np.zeros((n_seg),dtype='|S14')
        #-- create empty intersection map array for receiving
        associated_map = np.zeros((n_seg),dtype=bool)
        associated_RGIId = np.zeros((n_seg),dtype='|S14')
        for key,poly_obj in poly_dict.items():
            #-- finds if points are encapsulated (within RGI polygon)
            int_test = poly_obj.intersects(xy_point)
            if int_test:
                #-- extract intersected points
                int_map = list(map(poly_obj.intersects,xy_point))
                int_indices, = np.nonzero(int_map)
                #-- set distributed_map indices to True for intersected points
                distributed_map[ind[int_indices]] = True
                distributed_RGIId[ind[int_indices]] = key
        #-- communicate output MPI matrices between ranks
        #-- operation is a logical "or" across the elements.
        comm.Allreduce(sendbuf=[distributed_map, MPI.BOOL], \
            recvbuf=[associated_map, MPI.BOOL], op=MPI.LOR)
        #-- operation is a element summation.
        comm.Allreduce(sendbuf=[distributed_RGIId, MPI.CHAR], \
            recvbuf=[associated_RGIId, MPI.CHAR], op=MPI.SUM)
        distributed_map = None
        distributed_RGIId = None
        #-- wait for all processes to finish calculation
        comm.Barrier()

        #-- group attributes for beam
        IS2_atl06_mask_attrs[gtx]['Description'] = fileID[gtx].attrs['Description']
        IS2_atl06_mask_attrs[gtx]['atlas_pce'] = fileID[gtx].attrs['atlas_pce']
        IS2_atl06_mask_attrs[gtx]['atlas_beam_type'] = fileID[gtx].attrs['atlas_beam_type']
        IS2_atl06_mask_attrs[gtx]['groundtrack_id'] = fileID[gtx].attrs['groundtrack_id']
        IS2_atl06_mask_attrs[gtx]['atmosphere_profile'] = fileID[gtx].attrs['atmosphere_profile']
        IS2_atl06_mask_attrs[gtx]['atlas_spot_number'] = fileID[gtx].attrs['atlas_spot_number']
        IS2_atl06_mask_attrs[gtx]['sc_orientation'] = fileID[gtx].attrs['sc_orientation']
        #-- group attributes for land_ice_segments
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['Description'] = ("The land_ice_segments group "
            "contains the primary set of derived products. This includes geolocation, height, and "
            "standard error and quality measures for each segment. This group is sparse, meaning "
            "that parameters are provided only for pairs of segments for which at least one beam "
            "has a valid surface-height measurement.")
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['data_rate'] = ("Data within this group are "
            "sparse.  Data values are provided only for those ICESat-2 20m segments where at "
            "least one beam has a valid land ice height measurement.")

        #-- geolocation, time and segment ID
        #-- delta time
        IS2_atl06_mask[gtx]['land_ice_segments']['delta_time'] = delta_time
        IS2_atl06_fill[gtx]['land_ice_segments']['delta_time'] = delta_time.fill_value
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['delta_time'] = {}
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['delta_time']['units'] = "seconds since 2018-01-01"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['delta_time']['long_name'] = "Elapsed GPS seconds"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['delta_time']['standard_name'] = "time"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['delta_time']['calendar'] = "standard"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['delta_time']['description'] = ("Number of GPS "
            "seconds since the ATLAS SDP epoch. The ATLAS Standard Data Products (SDP) epoch offset "
            "is defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS seconds "
            "between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP epoch. By "
            "adding the offset contained within atlas_sdp_gps_epoch to delta time parameters, the "
            "time in gps_seconds relative to the GPS epoch can be computed.")
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['delta_time']['coordinates'] = \
            "segment_id latitude longitude"
        #-- latitude
        IS2_atl06_mask[gtx]['land_ice_segments']['latitude'] = latitude
        IS2_atl06_fill[gtx]['land_ice_segments']['latitude'] = latitude.fill_value
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['latitude'] = {}
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['latitude']['units'] = "degrees_north"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['latitude']['contentType'] = "physicalMeasurement"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['latitude']['long_name'] = "Latitude"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['latitude']['standard_name'] = "latitude"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['latitude']['description'] = ("Latitude of "
            "segment center")
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['latitude']['valid_min'] = -90.0
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['latitude']['valid_max'] = 90.0
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['latitude']['coordinates'] = \
            "segment_id delta_time longitude"
        #-- longitude
        IS2_atl06_mask[gtx]['land_ice_segments']['longitude'] = longitude
        IS2_atl06_fill[gtx]['land_ice_segments']['longitude'] = longitude.fill_value
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['longitude'] = {}
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['longitude']['units'] = "degrees_east"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['longitude']['contentType'] = "physicalMeasurement"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['longitude']['long_name'] = "Longitude"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['longitude']['standard_name'] = "longitude"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['longitude']['description'] = ("Longitude of "
            "segment center")
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['longitude']['valid_min'] = -180.0
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['longitude']['valid_max'] = 180.0
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['longitude']['coordinates'] = \
            "segment_id delta_time latitude"
        #-- segment ID
        IS2_atl06_mask[gtx]['land_ice_segments']['segment_id'] = segment_id
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['segment_id'] = {}
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['segment_id']['units'] = "1"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['segment_id']['contentType'] = "referenceInformation"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['segment_id']['long_name'] = "Along-track segment ID number"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['segment_id']['description'] = ("A 7 digit number "
            "identifying the along-track geolocation segment number.  These are sequential, starting with "
            "1 for the first segment after an ascending equatorial crossing node. Equal to the segment_id for "
            "the second of the two 20m ATL03 segments included in the 40m ATL06 segment")
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['segment_id']['coordinates'] = \
            "delta_time latitude longitude"

        #-- subsetting variables
        IS2_atl06_mask[gtx]['land_ice_segments']['subsetting'] = {}
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting'] = {}
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting']['Description'] = ("The subsetting group "
            "contains parameters used to reduce land ice segments to specific regions of interest.")
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting']['data_rate'] = ("Data within this group "
            "are stored at the land_ice_segments segment rate.")

        #-- output mask to HDF5
        key = RGI_NAME.replace('_',' ')
        IS2_atl06_mask[gtx]['land_ice_segments']['subsetting'][RGI_NAME] = associated_map
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting'][RGI_NAME] = {}
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting'][RGI_NAME]['contentType'] = "referenceInformation"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting'][RGI_NAME]['long_name'] = '{0} Mask'.format(key)
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting'][RGI_NAME]['description'] = ('Mask calculated '
            'using the {0} region from the Randolph Glacier Inventory.').format(key)
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting'][RGI_NAME]['source'] = \
            'RGIv{0}'.format(RGI_VERSION)
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting'][RGI_NAME]['reference'] = \
            'https://www.glims.org/RGI/'
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting'][RGI_NAME]['coordinates'] = \
            "../segment_id ../delta_time ../latitude ../longitude"
        #-- output RGI identifier
        IS2_atl06_mask[gtx]['land_ice_segments']['subsetting']['RGIId'] = associated_RGIId
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting']['RGIId'] = {}
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting']['RGIId']['contentType'] = "referenceInformation"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting']['RGIId']['long_name'] = "RGI Identifier"
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting']['RGIId']['description'] = ('Identification '
            'code within version {0} of the Randolph Glacier Inventory (RGI).').format(RGI_VERSION)
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting']['RGIId']['source'] = \
            'RGIv{0}'.format(RGI_VERSION)
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting']['RGIId']['reference'] = \
            'https://www.glims.org/RGI/'
        IS2_atl06_mask_attrs[gtx]['land_ice_segments']['subsetting']['RGIId']['coordinates'] = \
            "../segment_id ../delta_time ../latitude ../longitude"
        #-- wait for all processes to finish calculation
        comm.Barrier()

    #-- parallel h5py I/O does not support compression filters at this time
    if (comm.rank == 0) and associated_map.any():
        #-- output HDF5 file with RGI masks
        args = (PRD,RGI_VERSION,RGI_NAME,YY,MM,DD,HH,MN,SS,TRK,CYCL,GRAN,RL,VERS,AUX)
        file_format='{0}_RGI{1}_{2}_{3}{4}{5}{6}{7}{8}_{9}{10}{11}_{12}_{13}{14}.h5'
        #-- print file information
        print('\t{0}'.format(file_format.format(*args))) if VERBOSE else None
        HDF5_ATL06_mask_write(IS2_atl06_mask, IS2_atl06_mask_attrs, CLOBBER=True,
            INPUT=os.path.basename(FILE), FILL_VALUE=IS2_atl06_fill,
            FILENAME=os.path.join(DIRECTORY,file_format.format(*args)))
        #-- change the permissions mode
        os.chmod(os.path.join(DIRECTORY,file_format.format(*args)), MODE)
    #-- close the input file
    fileID.close()
Code Example #17
File: sid_afs_stats.py  Project: loniitkina/sid
    dt = datetime.strptime(date, "%Y%m%dT%H%M%S")

    #load data
    container = np.load(i, allow_pickle=True)
    pindex = container['pindex']
    tripts = container['tripts']
    
    #get all nodes of triangles in LKFs
    lkf_tri = [ tripts[p] for p in pindex ]
    lkf_nods = [val for sublist in lkf_tri for val in sublist]
    print('This pair has # LKF nodes',len(lkf_nods))
    #lkf_nods_extra = lkf_nods.copy()

    #a region covering all the area where there is data (following image pair edges)
    all_nods = [val for sublist in tripts for val in sublist]
    region = MultiPoint(all_nods).convex_hull
    
    #buffer size in meters
    print('buffer size',bf)
    
    #alpha for triangulation in concave hull
    print('max triangulation distance',1/alpha)
    
    #make polygons of all triangles
    tmp = [ Shapely_Polygon(p) for p in lkf_tri ]
    poly_tri = unary_union(tmp)
    
    #buffer around them in hope they will merge
    poly_buff = poly_tri.buffer(bf)
    
    #check what is contained in individual polygons of this multipolygon
Code Example #18
File: views.py  Project: navagis-sid/copy
def get_centermost_point(cluster):
	centroid = MultiPoint(cluster).centroid  # compute the centroid once
	centroid = (centroid.x, centroid.y)
	centermost_point = min(cluster, key=lambda point: great_circle(point, centroid).m)
	res = {'count': len(cluster), 'point': centermost_point, 'cluster': cluster}
	return res
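A hedged usage sketch: great_circle is assumed to come from geopy.distance, and the cluster is a list of (lat, lon) tuples, such as the members of one DBSCAN cluster.

from geopy.distance import great_circle
from shapely.geometry import MultiPoint

cluster = [(52.51, 13.40), (52.52, 13.41), (52.50, 13.39)]
res = get_centermost_point(cluster)
print(res['point'], res['count'])  # member closest to the cluster centroid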
Code Example #19
river = sys.argv[1]

geojson = json.load(
    open("../../data/us-postal-history/consistent/postoffices.geojson"))
points = []
for point in geojson['features']:
    projected = ProjectedFeature(point['geometry'], projection='wgs84').lcc
    points.append((projected.x, projected.y))
post_offices = QuadTree(points)

river_line_meters = ProjectedFeature(
    json.load(open("../../data/rivers/%s/river.geojson" %
                   river))['features'][0]['geometry'], 'wgs84').lcc
bridges = json.load(open("../../data/rivers/%s/bridges.geojson" %
                         river))['features']
bridge_collection = ProjectedFeature(MultiPoint(
    [shape(bridge['geometry']) for bridge in bridges]),
                                     projection='wgs84').lcc
segments = [
    dict(river_mile=point['river_mile'], geometry=disc_5km(point['geometry']))
    for point in get_every_10k_points(river_line_meters)
]
water_body_fc = json.load(open("input/%s/river_poly.geojson" % river))
water_body = ProjectedFeature(
    feature_collection_to_multipolygon(water_body_fc),
    projection='epsg3975').lcc
predicted_crossing_points = MultiPoint([
    ProjectedFeature(shape(point['geometry']), projection='epsg3975').lcc
    for point in json.load(
        open("input/%s/predicted_crossing_points.geojson" % river))['features']
])
Code Example #20
from shapely.geometry import MultiPoint
from shapely.ops import triangulate, unary_union

def triangulate_points(points):
    if isinstance(points, list):
        pp = MultiPoint(points)
    elif isinstance(points, MultiPoint):
        pp = points
    else:
        raise TypeError('points must be a list of coordinates or a MultiPoint')
    return unary_union(triangulate(pp))
Code Example #21
from numpy import random
from shapely.geometry import MultiPoint

def random_points(n_points=10, x_min=-10, x_max=10, y_min=-10, y_max=10):
    assert x_min < x_max, 'x_min must be lower than x_max'
    assert y_min < y_max, 'y_min must be lower than y_max'
    x = random.rand(n_points) * (x_max - x_min) + x_min
    y = random.rand(n_points) * (y_max - y_min) + y_min
    return MultiPoint([(xi, yi) for xi, yi in zip(x, y)])
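A hedged sketch combining the two helpers above: build a random point cloud, triangulate it, and take the union of the resulting Delaunay triangles.

pts = random_points(n_points=20)
tri_union = triangulate_points(pts)
print(tri_union.area)  # the union of Delaunay triangles fills the convex hull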
Code Example #22
for postcode in postcodes_arr:

    points = []
    for point in gf.loc[gf.postcode == postcode].geometry:
        points.append(point)

    if ALGORITHM == 'ALPHA':
        alpha_geometry, edge_points = alpha_shape.alpha_shape(points, 1000)
        gf_postcode_poly = gf_postcode_poly.append(
            {
                'geometry': alpha_geometry,
                'postcode': postcode
            },
            ignore_index=True)
    elif ALGORITHM == 'CONVEX':
        multipoint = MultiPoint(points)
        gf_postcode_poly = gf_postcode_poly.append(
            {
                'geometry': multipoint.convex_hull,
                'postcode': postcode
            },
            ignore_index=True)

# A color series, one for each postal code
colors = sns.hls_palette(len(postcodes_arr))
colormap = ListedColormap(colors)
base = gs.plot(color='blue')

# Plot the postal codes and the polygons
gf.plot(ax=base,
        marker="*",
Code Example #23
File: test_geom.py  Project: rostock/geocodr
def test_multipoint():
    mp = MultiPoint([(0, 0), (5, 5), (10, 0)])
    assert point_on_geom(mp) == Point(5, 5)
Code Example #24
def pcf2d(array_positions,
          bins_distances,
          coord_border=None,
          coord_holes=None,
          fast_method=False,
          show_timing=False,
          plot=False,
          full_output=False):
    r"""
    Computes the 2D radial distribution function (pair correlation function) 
    g(r) for a set of points, corrected to take account of the boundary effects.
    
    Parameters:
        
        - array_positions (numpy array, shape Nx2): 
            the (x,y) coordinates of N points.
                
        - bins_distances (numpy array, shape Mx1): 
            a monotonically increasing array of bin edges defining the values 
            of r for which g(r) is going to be computed.
        
    Optional parameters :
        
        - coord_border (numpy array, shape Lx2): 
            the (x,y) coordinates of L points, defining the boundary enclosing 
            the area of interest to compute the g(r).
            Points in array_positions that are outside the area of interest are 
            automatically excluded.
            !! The list of coordinates must be "valid" in the sense used by the
            shapely library: linking all the points in order should result in a
            simple polygon with no line intersecting each other. !!
            
            Ex: Assuming one wants a square border of area 1
                [[0,0],[0,1],[1,1],[1,0]] is valid (square shape)
                [[0,0],[0,1],[1,0],[1,1]] is not valid (bow tie shape)
                
            If no value is provided, the smallest convex Polygon containing all 
            the points in array_positions is computed using convex_hull. 
            (default value: None)
            
        - coord_holes (list of numpy arrays):
            a list of the (x,y) coordinates of points forming "holes" in the
            area of interest (useful if one is using a geometry with obstacles 
            or exclusion zones where no particles can be found).
            It is possible to define several holes (as long as they do not 
            intersect each other).
            
            Ex: Assuming the area of interest is a square and one wants
                to remove a smaller square at the center
                coord_border=[[0,0],[0,1],[1,1],[1,0]]
                coord_holes=[[[0.2,0.2],[0.2,0.8],[0.8,0.8],[0.8,0.2]]]
            
            If no value is provided, the area of interest will be a simple
            polygon with no hole.
            (default value: None)
        
        - fast_method (bool):
            if True, all the points whose distance to the boundary is less than
            the longest distance in bins_distances are excluded from the g(r)
            computation, and only the points sufficiently far away from the 
            boundary are considered. This method is faster, but might exclude a
            lot of points.
            if False, the code computes for each point its distance to the 
            boundary, then computes a normalization factor for the points that 
            are too close to the boundary. This is the default method that 
            correctly takes account of all points, but might be time consuming.
            (default value: False)
            
        - show_timing (bool):
            if True, the code will print outputs showing timings at different
            stages of the computation (to let one know what operations are the
            most time consuming).
            (default value: False)

        - plot (bool): 
            if True, shows the points that were kept, and the computed g(r). 
            (default value: False).
            
        - full_output (bool):
            if True, the function also returns the "raw" distribution of 
            distances between the points PDF(r), the new array of coordinates 
            with only the points that were considered for computing g(r), the 
            distance of each point to the closest boundary of the area of 
            interest, the normalization factors, and the estimated density of 
            points in the area of interest.
            (default value: False). 
    
    Outputs:
    
        - g(r): a 1x(M-1) numpy array (where M is the length of bins_distances)
        - r: a 1x(M-1) numpy array (where M is the length of bins_distances)
    
    Optional output:
        
        - PDF(r): a 1x(M-1) numpy array
        - new_array_positions: a 2xN numpy array
        - distance_to_boundary: a 1xN numpy array
        - normalization_factor: a Nx(M-1) numpy array
        - estimated_density: a float
        
        (where N is the number of points in the area of interest and M is the 
         length of bins_distances)
    
    """

    if show_timing == True:
        t0 = time.time()

    if coord_border is None:
        positions_of_interest = MultiPoint(
            array_positions)  #all the points are considered
        area_of_interest = positions_of_interest.convex_hull  #the boundary is the convex_hull of all the points
    else:
        if coord_holes is None:
            area_of_interest = Polygon(
                shell=coord_border)  #definition of the boundary
        else:
            area_of_interest = Polygon(
                shell=coord_border,
                holes=coord_holes)  #definition of the boundary

        if not area_of_interest.is_valid:
            print(
                'The list of coordinates you provided for the border is not valid (see help for the definition of a valid coord_border).'
            )
            return

        positions_of_interest = area_of_interest.intersection(
            MultiPoint(array_positions)
        )  #only the points inside the area of interest are considered
        array_positions = np.array(
            positions_of_interest
        )  #redefinition of "array_positions" with only the points inside the area of interest (time consuming operation)

    if show_timing == True:
        t1 = time.time() - t0
        print(
            'Creating boundary polygon and array of points inside took %f s' %
            t1)

    nb_part = len(array_positions)  #number of particles
    densite = nb_part / (area_of_interest.area)  #average density of particles
    border_area = area_of_interest.boundary  #the boundary (line) of the area of interest (polygon)

    rings = [[] for i in range(len(bins_distances) - 1)]
    ring_area = np.zeros(len(bins_distances) - 1)  #true ring areas
    ring_area_approx = np.zeros(
        len(bins_distances) -
        1)  #approximate ring areas (useful for normalization calculation)

    #For each distance bin, defines the ring (difference between the disk of radius r[jj+1] and the disk of radius r[jj])
    #and computes the area of those rings.
    for jj in range(len(bins_distances) - 1):

        inner_circle = Point([0, 0]).buffer(bins_distances[jj])
        outer_circle = Point([0, 0]).buffer(bins_distances[jj + 1])
        rings[jj] = outer_circle.difference(inner_circle)

        ring_area_approx[jj] = rings[jj].area
        ring_area[jj] = np.pi * (bins_distances[jj + 1]**2 -
                                 bins_distances[jj]**2)

    if show_timing == True:
        t2 = time.time() - t0
        print('Creating all ring polygons took %f s' % (t2 - t1))

    g_of_r = np.zeros(len(bins_distances) - 1)
    g_of_r_normalized = np.zeros(len(bins_distances) - 1)

    #For each point, computes its distance to the boundary, and for each bin computes the normalization factor
    #(the area of "the intersection of the ring and the area of interest", divided by the ring area)
    normalisation = np.ones(
        (nb_part, len(bins_distances) -
         1))  #normalization factors to take account of the boundaries
    dist_to_border = np.zeros(nb_part)

    if fast_method == True:

        for ii in range(nb_part):
            dist_to_border[ii] = positions_of_interest[ii].distance(
                border_area)  #distance of current point to boundary

        far_enough = np.where(dist_to_border > bins_distances[-1])[
            0]  #indexes of points far enough from the boundary
        array_positions = array_positions[
            far_enough, :]  #points too close to the boundary are excluded
        nb_part = len(array_positions)  #the new number of points

        if full_output == True:
            dist_to_border = dist_to_border[
                far_enough]  #points too close to the boundary are excluded
            normalisation = np.ones(
                (nb_part, len(bins_distances) -
                 1))  #just so that the matrix has the right size

    else:

        for ii in range(nb_part):
            dist_to_border[ii] = positions_of_interest[ii].distance(
                border_area)  #distance of point ii to boundary

            if dist_to_border[ii] <= bins_distances[
                    0]:  #special case the point is too close to the boundary for every r
                for jj in range(len(bins_distances) - 1):
                    normalisation[ii, jj] = (area_of_interest.intersection(
                        translate(rings[jj],
                                  xoff=positions_of_interest[ii].xy[0][0],
                                  yoff=positions_of_interest[ii].xy[1]
                                  [0])).area) / ring_area_approx[jj]
            else:
                for jj in (
                        np.where(bins_distances > dist_to_border[ii])[0] - 1
                ):  #the normalization factor needs only to be computed for a subset of r
                    normalisation[ii, jj] = (area_of_interest.intersection(
                        translate(rings[jj],
                                  xoff=positions_of_interest[ii].xy[0][0],
                                  yoff=positions_of_interest[ii].xy[1]
                                  [0])).area) / ring_area_approx[jj]

    if show_timing == True:
        t3 = time.time() - t0
        print('Computing normalization factors took %f s' % (t3 - t2))

    #For each point, computes the distance to others, then compute the g(r) by binning
    for ii in range(nb_part):
        #coordinates of the current point
        x_loc = array_positions[ii, 0]
        y_loc = array_positions[ii, 1]

        dist_to_loc = np.sqrt(
            (array_positions[:, 0] - x_loc)**2 +
            (array_positions[:, 1] - y_loc)**
            2)  #distance of the current point to each other point
        dist_to_loc[
            ii] = np.inf  #the distance from a point to itself is always zero (it is therefore excluded from the computation)

        g_of_r = g_of_r + np.histogram(dist_to_loc, bins=bins_distances)[
            0]  #computes the histogram of distances to the current point
        g_of_r_normalized = g_of_r_normalized + np.histogram(
            dist_to_loc, bins=bins_distances
        )[0] / (
            ring_area * normalisation[ii, :]
        )  #computes the histogram of distances to the current point normalized by the area of the intersection of the ring and the area of interest

    if show_timing == True:
        t4 = time.time() - t0
        print('Computing g(r) took %f s' % (t4 - t3))

    g_of_r = g_of_r / nb_part  #computes PDF(r)
    g_of_r_normalized = g_of_r_normalized / (nb_part * densite)  #computes g(r)

    radii = (bins_distances[1::] +
             bins_distances[0:-1]) / 2  #computes the values of "r"

    if plot == True:
        plt.figure()
        plt.scatter(array_positions[:, 0], array_positions[:, 1])
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('Points kept to compute g(r)')

        plt.figure()
        plt.plot(radii, g_of_r_normalized)
        plt.xlabel('r')
        plt.ylabel('g(r)')
        plt.title('Radial Distribution Function')

    if full_output:
        results = (g_of_r_normalized, radii, g_of_r, array_positions,
                   dist_to_border, normalisation, densite)
    else:
        results = (g_of_r_normalized, radii)

    if show_timing:
        t5 = time.time() - t0
        print('Total time: %f s for %i points' % (t5, nb_part))

    return results
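
For reference, stripped of the Shapely boundary correction used above, the core of this computation is a distance histogram normalized by annulus area and number density. A minimal self-contained sketch, with illustrative names and random points in an assumed 10x10 box:

import numpy as np

rng = np.random.default_rng(0)
pts = rng.uniform(0.0, 10.0, size=(500, 2))  # hypothetical point pattern in a 10x10 box
bins = np.linspace(0.5, 3.0, 26)             # plays the role of bins_distances above
density = len(pts) / 100.0                   # number density of the box

g = np.zeros(len(bins) - 1)
for i in range(len(pts)):
    d = np.hypot(pts[:, 0] - pts[i, 0], pts[:, 1] - pts[i, 1])
    d[i] = np.inf                            # exclude the point itself, as above
    g += np.histogram(d, bins=bins)[0]

ring_area = np.pi * (bins[1:]**2 - bins[:-1]**2)  # exact annulus areas
g = g / (len(pts) * ring_area * density)     # -> g(r) close to 1 for an uncorrelated pattern
radii = (bins[1:] + bins[:-1]) / 2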
Code example #25
    def water(self, origin, res, raster, tin, extents, stage):
        """ Function that flattens the water bodies that are present within the specified raster. Uses all local
        polygons in shapefile format that are available in the specified folder in the config, theoretically not limited
        to water. Retrieves the polygons within the bounding box of the raster to interpolate the median value for this
        polygon. Then overlays these values in the correct position in the raster to create flattened areas on the
        raster.

        :param origin: List containing the coordinates of the top left corner of the raster
        :param res: List containing the x and y resolution of the raster
        :param raster: Numpy array containing the content of the raster (x, y, z)
        :param tin: startin.DT() object containing all relevant LAS points for interpolating values of polygons
        :param extents: List containing the extents of the raster as [[minx, maxx], [miny, maxy]]
        :return: Numpy array containing raster with flattened areas where polygons were found
        """
        print('\n{0}: Starting to flatten water bodies'.format(
            multiprocessing.current_process().name, ))

        x0 = extents[0][0]
        x1 = extents[0][1]
        y0 = extents[1][0]
        y1 = extents[1][1]

        bbox = [[x0, x1], [y0, y1]]

        input_vectors = []

        for polygon in self._polygons:
            vec = vector_prepare(bbox=bbox, filepath=polygon)
            if len(vec) != 0:
                input_vectors.append(vec)

        if stage == Stages.INTERPOLATED_DTM:  # Only flatten buildings if it's DTM
            try:
                vec = wfs_prepare(bbox=bbox,
                                  url=self._wfs_url[0],
                                  layer=self._wfs_url[1])

                if len(vec) != 0:
                    input_vectors.append(vec)

            except Exception:  # WFS server might be down
                pass

        if len(input_vectors) > 0 and tin is not None:
            xs = np.linspace(x0, x1, res[0])
            ys = np.linspace(y0, y1, res[1])
            xg, yg = np.meshgrid(xs, ys)

            cell_centers = np.vstack(
                (xg.ravel(), yg.ravel(), raster.ravel())).transpose()

            data = cell_centers[cell_centers[:, 2] != NO_DATA]
            data_hull = MultiPoint(data).convex_hull

            shapes = []

            for polygons in input_vectors:
                for polygon in polygons:

                    els = []

                    for vertex in polygon.exterior.coords:

                        if Point(vertex).within(data_hull):
                            try:
                                els += [
                                    tin.interpolate_laplace(
                                        vertex[0], vertex[1])
                                ]

                            except OSError:  # Apparently we can sometimes still be outside CH
                                pass

                    for interior in polygon.interiors:
                        for vertex in interior.coords:

                            if Point(vertex).within(data_hull):
                                try:
                                    els += [
                                        tin.interpolate_laplace(
                                            vertex[0], vertex[1])
                                    ]

                                except OSError:  # Apparently we can sometimes still be outside CH
                                    pass

                    if len(els) > 0:
                        shapes.append((polygon, np.median(els)))

            if len(shapes) > 0:
                transform = rasterio.transform.from_origin(
                    west=origin[0],
                    north=origin[1],
                    xsize=self._raster_cell_size,
                    ysize=self._raster_cell_size)

                raster_polygons = rasterize(shapes=shapes,
                                            out_shape=raster.shape,
                                            fill=NO_DATA,
                                            transform=transform)

                for yi in range(res[1]):
                    for xi in range(res[0]):
                        if raster_polygons[yi, xi] != NO_DATA:
                            raster[yi, xi] = raster_polygons[yi, xi]

        return raster
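
The rasterize-and-overlay step at the end is the heart of the method. A minimal sketch under assumed inputs (toy geometry, an assumed NO_DATA constant, and np.where as a vectorized equivalent of the nested loop above):

import numpy as np
from rasterio.features import rasterize
from rasterio.transform import from_origin
from shapely.geometry import Polygon

NO_DATA = -9999                    # assumed nodata marker
raster = np.full((100, 100), 5.0)  # hypothetical 100x100 DTM with 0.5 m cells
transform = from_origin(west=0.0, north=50.0, xsize=0.5, ysize=0.5)

# one water polygon paired with its interpolated median elevation
shapes = [(Polygon([(10, 10), (30, 10), (30, 25), (10, 25)]), 2.75)]
burned = rasterize(shapes=shapes, out_shape=raster.shape,
                   fill=NO_DATA, transform=transform)

# overlay: replace raster cells wherever a polygon value was burned in
raster = np.where(burned != NO_DATA, burned, raster)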
Code example #26
hull_attacker = ConvexHull(u1_pos)
hull_attacker = Polygon(
    u1_pos[hull_attacker.vertices]).minimum_rotated_rectangle.boundary

u2_pos = np.array((u2.get_soldiers_pos(False)))
hull_defender = ConvexHull(u2_pos)
hull_defender = Polygon(u2_pos[hull_defender.vertices])

nps = nearest_points(hull_attacker, hull_defender)

plt.scatter(*np.array(u1.get_soldiers_pos(False)).T, color="green", alpha=0.5)
plt.scatter(*np.array(u2.get_soldiers_pos(False)).T, color="red", alpha=0.5)
plt.scatter(nps[0].x, nps[0].y, color="green")
plt.scatter(nps[1].x, nps[1].y, color="red")

u1_pos = MultiPoint(u2.get_soldiers_pos(False))
u2_pos = MultiPoint(u1.get_soldiers_pos(False))

u2_rect = u2_pos.minimum_rotated_rectangle.boundary

nps = nearest_points(u1_pos, u2_rect)

x0, y0 = u2_rect.coords.xy
u2_rect = LineString([(nps[1].x, nps[1].y)] +
                     [(x, y) for x, y in zip(x0[:-1], y0[:-1])])

num_points0 = 10
size = u2_rect.length / num_points0  # assumed spacing: 'size' was undefined in the original excerpt
new_points = [u2_rect.interpolate(i * size) for i in range(num_points0)]
MultiPoint(new_points + u2.get_soldiers_pos(False))

plt.scatter(*np.array(u1.get_soldiers_pos(False)).T, color="green", alpha=0.5)
Code example #27
 def enveloppe_convexe(self):
     multipt = MultiPoint(self.points)
     return multipt.convex_hull
Code example #28
def get_centermost_point(cluster):
    centroid = (MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
    centermost_point = min(cluster,
                           key=lambda point: great_circle(point, centroid).m)
    return tuple(centermost_point)
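
A hedged usage sketch; the cluster below is a hypothetical stand-in for what a clustering step such as DBSCAN would produce as (lat, lon) pairs:

from geopy.distance import great_circle
from shapely.geometry import MultiPoint

cluster = [(40.7128, -74.0060), (40.7138, -74.0065), (40.7120, -74.0050)]
print(get_centermost_point(cluster))  # member of the cluster closest to its centroid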
Code example #29
File: _voronoi.py Project: ajitjohnson/scimap
def voronoi(adata,
            color_by=None,
            colors=None,
            x_coordinate='X_centroid',
            y_coordinate='Y_centroid',
            imageid='imageid',
            subset=None,
            x_lim=None,
            y_lim=None,
            voronoi_edge_color='black',
            voronoi_line_width=0.1,
            voronoi_alpha=0.5,
            size_max=np.inf,
            overlay_points=None,
            overlay_points_categories=None,
            overlay_drop_categories=None,
            overlay_points_colors=None,
            overlay_point_size=5,
            overlay_point_alpha=1,
            overlay_point_shape=".",
            plot_legend=True,
            legend_size=6,
            **kwargs):
    """
Parameters:
    adata : Anndata object

    color_by : string, optional  
        Color the voronoi diagram based on a categorical variable (e.g. cell types or neighbourhoods).
        Pass the name of the column which contains the categorical variable.

    colors : string or Dict, optional  
        Custom coloring the voronoi diagram. The parameter accepts `sns color palettes` or a python dictionary
        mapping the categorical variable with the required color.

    x_coordinate : string, required  
        Column name containing the x-coordinates values.

    y_coordinate : string, required  
        Column name containing the y-coordinates values.

    imageid : string, optional  
        Column name of the column containing the image id.

    subset : string, optional  
        imageid of a single image to be subsetted for plotting.

    voronoi_edge_color : string, optional  
        A Matplotlib color for marking the edges of the voronoi. 
        If `facecolor` is passed, the edge color will always be the same as the face color.

    voronoi_line_width : float, optional  
        The linewidth of the marker edges. Note: The default edgecolors is 'face'. You may want to change this as well. 

    voronoi_alpha : float, optional  
        The alpha blending value, between 0 (transparent) and 1 (opaque).

    x_lim : list, optional  
        Pass the x-coordinates range [x1,x2].

    y_lim : list, optional  
        Pass the y-coordinates range [y1,y2].

    overlay_points : string, optional  
        It is possible to overlay a scatter plot on top of the voronoi diagram.
        Pass the name of the column which contains the categorical variable to be overlayed.

    overlay_points_categories : list, optional  
        If the column passed to `overlay_points` contains multiple categories but the user is only
        interested in a subset of them, those specific names can be passed as a list. By default all
        categories will be overlayed on the voronoi diagram.

    overlay_drop_categories : list, optional  
        Similar to `overlay_points_categories`, provided for convenience when a large number of
        categories is present. The user can drop a set of categories.

    overlay_points_colors : string or dict, optional  
        Similar to `colors`.  
        User can pass in a  
        a) solid color (like `black`)  
        b) sns palettes name (like `Set1`)  
        c) python dictionary mapping the categories with custom colors

    overlay_point_size : float, optional  
        Overlay scatter plot point size.

    overlay_point_alpha : float, optional  
        The alpha blending value for the overlay, between 0 (transparent) and 1 (opaque).

    overlay_point_shape : string, optional  
        The marker style. Can be either an instance of the marker class or the text shorthand for a particular marker.

    plot_legend : bool, optional  
        Define if the figure legend should be plotted.  
        Please note the figure legend may be out of view and you may need to resize the image to see it, especially 
        the legend for the scatter plot which will be on the left side of the plot.

    legend_size : float, optional  
        Resize the legend if needed.

Example:
    
    ```python
    sm.pl.voronoi(adata, color_by='phenotype', colors=None, 
             x_coordinate='X_position', y_coordinate='Y_position',
             imageid='ImageId',subset=None,
             voronoi_edge_color = 'black',voronoi_line_width = 0.2, 
             voronoi_alpha = 0.5, size_max=np.inf,
             overlay_points='phenotype', overlay_points_categories=None, 
             overlay_drop_categories=None,
             overlay_point_size = 5, overlay_point_alpha= 1, 
             overlay_point_shape=".", plot_legend=False, legend_size=6)
    
    ```

    """

    # create the data frame needed
    data = adata.obs

    # Subset the image of interest
    if subset is not None:
        data = data[data[imageid] == subset]

    # subset coordinates if needed
    if x_lim is not None:
        x1 = x_lim[0]
        if len(x_lim) < 2:
            x2 = max(data[x_coordinate])
        else:
            x2 = x_lim[1]
    if y_lim is not None:
        y1 = y_lim[0]
        if len(y_lim) < 2:
            y2 = min(data[y_coordinate])
        else:
            y2 = y_lim[1]
    # do the actual subsetting
    if x_lim is not None:
        data = data[data[x_coordinate] >= x1]
        data = data[data[x_coordinate] <= x2]
    if y_lim is not None:
        data = data[data[y_coordinate] <= y1]
        data = data[data[y_coordinate] >= y2]

    # create an extra column with index information
    data['index_info'] = np.arange(data.shape[0])

    # generate the x and y coordinates
    points = data[[x_coordinate, y_coordinate]].values

    # invert the Y-axis
    points[:, 1] = max(points[:, 1]) - points[:, 1]

    # Generate colors
    if color_by is None:
        colors = np.repeat('#e5e5e5', len(data))
#    elif color_by is None and colors is not None:
#        if isinstance(colors,str):
#            colors = np.repeat(colors, len(data))
    elif color_by is not None and colors is None:
        # auto color the samples
        if len(np.unique(data[color_by])) <= 9:
            c = sns.color_palette('Set1')[0:len(np.unique(data[color_by]))]
        if len(np.unique(data[color_by])) > 9 and len(np.unique(
                data[color_by])) <= 20:
            c = sns.color_palette('tab20')[0:len(np.unique(data[color_by]))]
        if len(np.unique(data[color_by])) > 20:
            # For large categories generate random colors
            np.random.seed(0)
            c = np.random.rand(len(np.unique(data[color_by])), 3).tolist()
        # merge colors with phenotypes/ categories of interest
        p = np.unique(data[color_by])
        c_p = dict(zip(p, c))
        # map to colors
        colors = list(map(c_p.get, list(data[color_by].values)))
    elif color_by is not None and colors is not None:
        # check if colors is a dictionary or a sns color scale
        if isinstance(colors, str):
            if len(sns.color_palette(colors)) < len(np.unique(data[color_by])):
                raise ValueError(
                    str(colors) + ' includes a maximum of ' +
                    str(len(sns.color_palette(colors))) +
                    ' colors, while your data needs ' +
                    str(len(np.unique(data[color_by]))) + ' colors')
            else:
                c = sns.color_palette(colors)[0:len(np.unique(data[color_by]))]
                # merge colors with phenotypes/ categories of interest
                p = np.unique(data[color_by])
                c_p = dict(zip(p, c))
        if isinstance(colors, dict):
            if len(colors) < len(np.unique(data[color_by])):
                raise ValueError(
                    'Color mapping is not provided for all categories. Please check'
                )
            else:
                c_p = colors
        # map to colors
        colors = list(map(c_p.get, list(data[color_by].values)))

    # create the voronoi object
    vor = Voronoi(points)

    # trim the object
    regions, vertices = voronoi_finite_polygons_2d(vor)

    # plotting
    pts = MultiPoint([Point(i) for i in points])
    mask = pts.convex_hull
    new_vertices = []
    if not isinstance(voronoi_alpha, list):
        voronoi_alpha = [voronoi_alpha] * len(points)
    areas = []
    for i, (region, alph) in enumerate(zip(regions, voronoi_alpha)):
        polygon = vertices[region]
        shape = list(polygon.shape)
        shape[0] += 1
        p = Polygon(np.append(polygon,
                              polygon[0]).reshape(*shape)).intersection(mask)
        areas += [p.area]
        if p.area < size_max:
            poly = np.array(
                list(
                    zip(p.boundary.coords.xy[0][:-1],
                        p.boundary.coords.xy[1][:-1])))
            new_vertices.append(poly)
            if voronoi_edge_color == 'facecolor':
                plt.fill(*zip(*poly),
                         alpha=alph,
                         edgecolor=colors[i],
                         linewidth=voronoi_line_width,
                         facecolor=colors[i])
                plt.xticks([])
                plt.yticks([])
            else:
                plt.fill(*zip(*poly),
                         alpha=alph,
                         edgecolor=voronoi_edge_color,
                         linewidth=voronoi_line_width,
                         facecolor=colors[i])
                plt.xticks([])
                plt.yticks([])
                #plt.xlim([1097.5,1414.5])
                #plt.ylim([167.3,464.1])

    # Add scatter on top of the voronoi if user requests
    if overlay_points is not None:
        if overlay_points_categories is None:
            d = data
        if overlay_points_categories is not None:
            # convert to list if needed (cells to keep)
            if isinstance(overlay_points_categories, str):
                overlay_points_categories = [overlay_points_categories]
            # subset cells needed
            d = data[data[overlay_points].isin(overlay_points_categories)]
        if overlay_drop_categories is not None:
            # convert to list if needed (cells to drop)
            if isinstance(overlay_drop_categories, str):
                overlay_drop_categories = [overlay_drop_categories]
            # subset cells needed
            d = d[~d[overlay_points].isin(overlay_drop_categories)]

        # Find the x and y coordinates for the overlay category
        #points_scatter = d[[x_coordinate,y_coordinate]].values
        points_scatter = points[d.index_info.values]

        # invert the Y-axis
        #points_scatter[:,1] = max(points_scatter[:,1])-points_scatter[:,1]

        # Generate colors for the scatter plot
        if overlay_points_colors is None and color_by == overlay_points:
            # Borrow color from voronoi
            wanted_keys = np.unique(d[overlay_points])  # The keys to extract
            c_p_scatter = dict((k, c_p[k]) for k in wanted_keys if k in c_p)
        elif overlay_points_colors is None and color_by != overlay_points:
            # Randomly generate colors for all the categories in scatter plot
            # auto color the samples
            if len(np.unique(d[overlay_points])) <= 9:
                c_scatter = sns.color_palette(
                    'Set1')[0:len(np.unique(d[overlay_points]))]
            if len(np.unique(d[overlay_points])) > 9 and len(
                    np.unique(d[overlay_points])) <= 20:
                c_scatter = sns.color_palette(
                    'tab20')[0:len(np.unique(d[overlay_points]))]
            if len(np.unique(d[overlay_points])) > 20:
                # For large categories generate random colors
                np.random.seed(1)
                c_scatter = np.random.rand(len(np.unique(d[overlay_points])),
                                           3).tolist()
            # merge colors with phenotypes/ categories of interest
            p_scatter = np.unique(d[overlay_points])
            c_p_scatter = dict(zip(p_scatter, c_scatter))
        elif overlay_points_colors is not None:
            # check if overlay_points_colors is a palette
            if isinstance(overlay_points_colors, str):
                try:
                    c_scatter = sns.color_palette(overlay_points_colors)[
                        0:len(np.unique(d[overlay_points]))]
                    if len(sns.color_palette(overlay_points_colors)) < len(
                            np.unique(d[overlay_points])):
                        raise ValueError(
                            str(overlay_points_colors) +
                            ' palette includes a maximum of ' +
                            str(len(sns.color_palette(
                                overlay_points_colors))) +
                            ' colors, while your data (overlay_points_colors) needs '
                            + str(len(np.unique(d[overlay_points]))) +
                            ' colors')
                except Exception:  # not a recognised palette name; fall back to a single solid color
                    c_scatter = np.repeat(
                        overlay_points_colors, len(np.unique(
                            d[overlay_points])))  #[overlay_points_colors]
                # create a dict
                p_scatter = np.unique(d[overlay_points])
                c_p_scatter = dict(zip(p_scatter, c_scatter))
            if isinstance(overlay_points_colors, dict):
                if len(overlay_points_colors) < len(
                        np.unique(d[overlay_points])):
                    raise ValueError(
                        'Color mapping is not provided for all categories. Please check overlay_points_colors'
                    )
                else:
                    c_p_scatter = overlay_points_colors
        # map to colors
        colors_scatter = list(
            map(c_p_scatter.get, list(d[overlay_points].values)))

        #plt.scatter(x = points_scatter[:,0], y = points_scatter[:,1], s= overlay_point_size, alpha= overlay_point_alpha, c= colors_scatter, marker=overlay_point_shape)
        plt.scatter(x=points_scatter[:, 0],
                    y=points_scatter[:, 1],
                    s=overlay_point_size,
                    alpha=overlay_point_alpha,
                    c=colors_scatter,
                    marker=overlay_point_shape,
                    **kwargs)
        plt.xticks([])
        plt.yticks([])

    if plot_legend:
        # Add legend to voronoi
        patchList = []
        for key in c_p:
            data_key = mpatches.Patch(color=c_p[key], label=key)
            patchList.append(data_key)

        first_legend = plt.legend(handles=patchList,
                                  bbox_to_anchor=(1.05, 1),
                                  loc=2,
                                  borderaxespad=0.,
                                  prop={'size': legend_size})
        plt.tight_layout()
        # Add the legend manually to the current Axes.
        plt.gca().add_artist(first_legend)

        if overlay_points is not None:
            # Add legend to scatter
            patchList_scatter = []
            for key in c_p_scatter:
                data_key_scatter = mpatches.Patch(color=c_p_scatter[key],
                                                  label=key)
                patchList_scatter.append(data_key_scatter)

            plt.legend(handles=patchList_scatter,
                       bbox_to_anchor=(-0.05, 1),
                       loc=1,
                       borderaxespad=0.,
                       prop={'size': legend_size})
Code example #30
logging.basicConfig(filename='/data/home/faw513/tokunaga-workflow/log.log',
                    level=logging.INFO)

logging.info('entering loop')

for i, filename in enumerate(file_list):

    logging.info('File {} of {}'.format(i + 1, len(file_list)))

    points = []

    with open(filename) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)
        for row in reader:
            points.append(Point(float(row[1]), float(row[0])))

    concave_hull, edge_points = alpha_shape(MultiPoint(points), alpha=10.5)

    stats = zonal_stats(concave_hull,
                        '/data/Geog-c2s2/CHELSA_bio10_12.tif',
                        stats="mean")

    toku_id = filename.split('toku_network_')[1][:-4]

    precips[toku_id] = stats[0]['mean']

logging.info('writing json')
with open('/data/Geog-c2s2/toku/toku-data.json', 'w') as outfile:
    json.dump(precips, outfile)
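
For context, zonal_stats from rasterstats accepts a Shapely geometry directly, so the pattern above can be exercised without the alpha-shape step. A minimal sketch with a convex hull as a stand-in and the raster path from the script above:

from rasterstats import zonal_stats
from shapely.geometry import MultiPoint

hull = MultiPoint([(0.1, 51.4), (0.2, 51.5), (0.3, 51.4)]).convex_hull
stats = zonal_stats(hull, '/data/Geog-c2s2/CHELSA_bio10_12.tif', stats='mean')
print(stats[0]['mean'])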
Code example #31
File: citibike.py Project: AntArch/SciPy2013
jsonfile = os.path.join(datadir, 'manhattan_island_proj.json')
citibikefile = os.path.join(datadir, 'citibike.json')
manhattan = shape(geojson.load(open(jsonfile)))
man_arr = np.asarray(manhattan.exterior)

# load citbike station locations, transform to map coordinates,
# and filter for only those in Manhattan
c = json.load(open(citibikefile))
stations = [(x['longitude'], x['latitude']) for x in c['stationBeanList']]
lon, lat = zip(*stations)
nyp = Proj('+datum=NAD83 +lat_0=40.1666666667 +lat_1=40.6666666667 '
           '+lat_2=41.0333333333 +lon_0=-74 +no_defs +proj=lcc +units=us-ft '
           '+x_0=300000 +y_0', preserve_units=True)
wgs84 = Proj(init='epsg:4326')
x, y = transform(wgs84, nyp, lon, lat)
points = MultiPoint(list(zip(x, y)))  # list() materializes the iterator under Python 3
points = MultiPoint([p for p in points.geoms if manhattan.contains(p)])
pt_arr = np.asarray(points)

# make a small buffer that approximates 59th Street
mp = MultiPoint([Point([978887, 224975]), Point([1009023, 207566])])
s59 = LineString(mp).buffer(0.5)
sp = manhattan.difference(s59)
lower_manhattan = sp.geoms[1]
man_arr = np.asarray(manhattan.exterior)
# TODO: calculate area fractions below 59th Street

# draw buffers around bike stations with 1, 2, and 3 block radius
block = 260 # Manhattan city block (feet)
buffer = points.buffer(1 * block)
one_block = buffer.intersection(manhattan)
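
The closing buffer-and-intersect step lends itself to coverage fractions (e.g. one_block.area / manhattan.area). A minimal sketch of the same idea with toy geometry:

from shapely.geometry import MultiPoint, Polygon

region = Polygon([(0, 0), (100, 0), (100, 100), (0, 100)])
stations_toy = MultiPoint([(20, 20), (50, 50), (80, 30)])
served = stations_toy.buffer(15).intersection(region)
print(round(served.area / region.area, 3))  # fraction of the region within 15 units of a station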
Code example #32
    def plot_formatted_shapefile_with_locations(self, list_of_new_points):
        """
		Plot both existing and recommended office locations on formatted State shapefile.

		Parameters
		----------
		list_of_new_points : list
			List of Longtitude & latitude for location recommendations.
		"""
        fig = plt.figure()
        ax = fig.add_subplot(111, facecolor='w', frame_on=False)  # 'axisbg' was renamed 'facecolor' in matplotlib 2.x

        dev = self._state_shapefile.scatter(
            [geom.x for geom in self._state_points],
            [geom.y for geom in self._state_points],
            20,
            marker='o',
            lw=.25,
            facecolor='#33ccff',
            edgecolor='w',
            alpha=0.9,
            antialiased=True,
            label='Current Office locations',
            zorder=3)


        map_points = pd.Series([Point(self._state_shapefile(mapped_x, mapped_y)) for mapped_x, mapped_y in \
           zip(list_of_new_points['Long'], list_of_new_points['Lat'])])
        _new_state_points = MultiPoint(list(map_points.values))
        dev = self._state_shapefile.scatter(
            [geom.x for geom in _new_state_points],
            [geom.y for geom in _new_state_points],
            20,
            marker='o',
            lw=.25,
            facecolor='#41ff16',
            edgecolor='w',
            alpha=0.9,
            antialiased=True,
            zorder=3,
            label='Recommendations')

        # plot office locations by adding the PatchCollection to the axes instance
        ax.add_collection(
            PatchCollection(self._df_map['districts'].values,
                            match_original=True))

        # Draw a map scale
        self._state_shapefile.drawmapscale(self._coords[0] + 0.08,
                                           self._coords[1] + 0.015,
                                           self._coords[0],
                                           self._coords[1],
                                           10.,
                                           barstyle='fancy',
                                           labelstyle='simple',
                                           fillcolor1='w',
                                           fillcolor2='#555555',
                                           fontcolor='#555555',
                                           zorder=5)

        plt.title("Shubham Housing Finance Office Locations, {State}".format(
            State=self._state))
        plt.tight_layout()
        plt.legend(loc='best')

        fig.set_size_inches(10, 10)
        plt.xlabel('Longitude')
        plt.ylabel('Latitude')

        plt.savefig('data/{State}_visualized.png'.format(State=self._state),
                    dpi=100,
                    transparent=True)  # 'alpha' is not a savefig kwarg; 'transparent' is presumably intended

        plt.show()
Code example #33
def filter_stations(area_of_influence, bounds, stations):
    total_points = MultiPoint(
        [Point(x, y) for x, y in stations[['X', 'Y']].to_numpy()])
    intersection = bounds.buffer(area_of_influence).intersection(total_points)
    # iterate .geoms explicitly: direct iteration over a MultiPoint was removed in Shapely 2.0
    return stations[[intersection.contains(point) for point in total_points.geoms]]
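
A hedged usage sketch, assuming a small stations DataFrame with X/Y columns and a square study area:

import pandas as pd
from shapely.geometry import MultiPoint, Point, Polygon

stations = pd.DataFrame({'X': [1.0, 5.0, 20.0], 'Y': [1.0, 5.0, 20.0]})
bounds = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])

kept = filter_stations(area_of_influence=2.0, bounds=bounds, stations=stations)
print(kept)  # rows whose points fall inside bounds.buffer(2.0)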
Code example #34
File: ffncg.py Project: GarfieldEr007/CRCPython
def main():  
    gdal.AllRegister()
    path = auxil.select_directory('Choose input directory')
    if path:
        os.chdir(path)        
#  input image    
    infile = auxil.select_infile(title='Choose image file') 
    if infile:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)     
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
        projection = inDataset.GetProjection()
        geotransform = inDataset.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform) 
        else:
            print 'No geotransform available'
            return       
        imsr = osr.SpatialReference()  
        imsr.ImportFromWkt(projection)      
    else:
        return  
    pos =  auxil.select_pos(bands)  
    if not pos:
        return
    N = len(pos) 
    rasterBands = [] 
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b)) 
#  training data (shapefile)      
    trnfile = auxil.select_infile(filt='.shp',title='Choose train shapefile')
    if trnfile:
        trnDriver = ogr.GetDriverByName('ESRI Shapefile')
        trnDatasource = trnDriver.Open(trnfile,0)
        trnLayer = trnDatasource.GetLayer() 
        trnsr = trnLayer.GetSpatialRef()             
    else:
        return
#  hidden neurons
    L = auxil.select_integer(8,'number of hidden neurons')    
    if not L:
        return
#  outfile
    outfile, fmt = auxil.select_outfilefmt()   
    if not outfile:
        return     
#  coordinate transformation from training to image projection   
    ct= osr.CoordinateTransformation(trnsr,imsr) 
#  number of classes    
    feature = trnLayer.GetNextFeature() 
    while feature:
        classid = feature.GetField('CLASS_ID')
        feature = trnLayer.GetNextFeature() 
    trnLayer.ResetReading()    
    K = int(classid)+1       
    print '========================='
    print '       ffncg'
    print '========================='
    print time.asctime()    
    print 'image:    '+infile
    print 'training: '+trnfile          
#  loop through the polygons    
    Gs = [] # train observations
    ls = [] # class labels
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = feature.GetField('CLASS_ID')
        l = [0 for i in range(K)]
        l[int(classid)] = 1.0
        polygon = feature.GetGeometryRef()
#      transform to same projection as image        
        polygon.Transform(ct)  
#      convert to a Shapely object            
        poly = shapely.wkt.loads(polygon.ExportToWkt())
#      transform the boundary to pixel coords in numpy        
        bdry = np.array(poly.boundary) 
        bdry[:,0] = bdry[:,0]-gt[0]
        bdry[:,1] = bdry[:,1]-gt[3]
        GT = np.mat([[gt[1],gt[2]],[gt[4],gt[5]]])
        bdry = bdry*np.linalg.inv(GT) 
#      polygon in pixel coords        
        polygon1 = asPolygon(bdry)
#      raster over the bounding rectangle        
        minx,miny,maxx,maxy = map(int,list(polygon1.bounds))  
        pts = [] 
        for i in range(minx,maxx+1):
            for j in range(miny,maxy+1): 
                pts.append((i,j))             
        multipt =  MultiPoint(pts)   
#      intersection as list              
        intersection = np.array(multipt.intersection(polygon1),dtype=np.int).tolist()
#      cut out the bounded image cube               
        cube = np.zeros((maxy-miny+1,maxx-minx+1,len(rasterBands)))
        k=0
        for band in rasterBands:
            cube[:,:,k] = band.ReadAsArray(minx,miny,maxx-minx+1,maxy-miny+1)
            k += 1
#      get the training vectors
        for (x,y) in intersection:         
            Gs.append(cube[y-miny,x-minx,:])
            ls.append(l)   
        polygon = None
        polygon1 = None            
        feature.Destroy()  
    trnDatasource.Destroy() 
    m = len(ls)       
    print str(m) + ' training pixel vectors were read in' 
    Gs = np.array(Gs) 
    ls = np.array(ls)
#  stretch the pixel vectors to [-1,1]
    maxx = np.max(Gs,0)
    minx = np.min(Gs,0)
    for j in range(N):
        Gs[:,j] = 2*(Gs[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0 
#  random permutation of training data
    idx = np.random.permutation(m)
    Gs = Gs[idx,:] 
    ls = ls[idx,:]     
#  setup output dataset 
    driver = gdal.GetDriverByName(fmt)    
    outDataset = driver.Create(outfile,cols,rows,1,GDT_Byte) 
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection) 
    outBand = outDataset.GetRasterBand(1) 
#  train on 9/10 training examples         
    Gstrn = Gs[0:9*m//10,:]
    lstrn = ls[0:9*m//10,:]
    affn = Ffncg(Gstrn,lstrn,L)
    print 'training on %i pixel vectors...' % np.shape(Gstrn)[0]
    start = time.time()
    cost = affn.train(epochs=epochs)
    print 'elapsed time %s' %str(time.time()-start) 
    if cost is not None:
#        cost = np.log10(cost)  
        ymax = np.max(cost)
        ymin = np.min(cost) 
        xmax = len(cost)      
        plt.plot(range(xmax),cost,'k')
        plt.axis([0,xmax,ymin-1,ymax])
        plt.title('Cross entropy')
        plt.xlabel('Epoch')              
#      classify the image           
        print 'classifying...'
        tile = np.zeros((cols,N))    
        for row in range(rows):
            for j in range(N):
                tile[:,j] = rasterBands[j].ReadAsArray(0,row,cols,1)
                tile[:,j] = 2*(tile[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0 
            cls, _ = affn.classify(tile)  
            outBand.WriteArray(np.reshape(cls,(1,cols)),0,row)
        outBand.FlushCache()
        outDataset = None
        inDataset = None  
        print 'thematic map written to: ' + outfile
        print 'please close the cross entropy plot to continue'
        plt.show()
    else:
        print 'an error occurred'
        return 
    
    print 'submitting cross-validation to multyvac'    
    start = time.time()
    jid = mv.submit(traintst,Gs,ls,L,_layer='ms_image_analysis')  
    print 'submission time: %s' %str(time.time()-start)
    start = time.time()    
    job = mv.get(jid)
    result = job.get_result(job) 
    
    
    print 'execution time: %s' %str(time.time()-start)      
    print 'misclassification rate: %f' %np.mean(result)
    print 'standard deviation:     %f' %np.std(result)         
    print '--------done---------------------'       
Code example #35
    def data_extract(self):

        list_img = os.listdir(self.input_slide_dir)
        list_obj_mask = os.listdir(self.obj_dir)
        list_obj_Tissue = os.listdir(self.tissue_mask_dir)
        list_img = sorted(list_img, key=natural_key)
        list_obj_mask = sorted(list_obj_mask, key=natural_key)
        list_obj_Tissue = sorted(list_obj_Tissue, key=natural_key)

        voronoi_dir = 'voronoi'

        if os.path.exists(os.path.join(self.output_dir, voronoi_dir)) is False:
            os.makedirs(os.path.join(self.output_dir, voronoi_dir))

        for p1 in range(len(list_img)):
            file_name = list_img[p1]
            img = cv2.imread(os.path.join(self.input_slide_dir, list_img[p1]))
            img_orig = img.copy()

            file1 = os.path.join(self.obj_dir, list_obj_mask[p1])
            file2 = os.path.join(self.tissue_mask_dir, list_obj_Tissue[p1])

            x1 = []
            y1 = []
            points11 = []
            x1, y1 = centroids1(file1)

            for i in range(0, len(x1)):
                points11.append([x1[i], y1[i]])

            points = np.array(points11)

            vor = Voronoi(points)

            regions, vertices = voronoi_finite_polygons_2d(vor)
            cnts_img_t = Contours(file2)
            point_t = []

            for c1 in cnts_img_t:

                point_t.append([c1[0][0], c1[0][1]])
            point_t = np.array(point_t)

            pts = MultiPoint([Point(i) for i in point_t])
            mask = pts.convex_hull
            print("mask=", mask.bounds)

            new_vertices = []
            a = 0
            for region in regions:
                print("a=", a)
                a = a + 1
                polygon = vertices[region]
                shape = list(polygon.shape)
                shape[0] += 1
                p = Polygon(np.append(
                    polygon, polygon[0]).reshape(*shape)).intersection(mask)
                print("p=", p.bounds)
                print("lk=", int(p.length))
                l1 = int(p.length)
                if (l1 > 0):
                    poly = np.array(
                        list(
                            zip(p.boundary.coords.xy[0][:-1],
                                p.boundary.coords.xy[1][:-1])))
                    new_vertices.append(poly)

            for p1 in new_vertices:

                pts = np.array(p1, np.int32)
                pts = pts.reshape((-1, 1, 2))

                cv2.polylines(img, [pts], True, (0, 0, 0), 3)
            for p1 in points11:

                cv2.circle(img, (p1[0], p1[1]), 13, (0, 255, 0), cv2.FILLED,
                           cv2.LINE_AA, 0)
                #
            cv2.imwrite(os.path.join(self.output_dir, voronoi_dir, file_name),
                        img)

            new_vert = []
            for i in range(len(new_vertices)):
                new_vert.append(new_vertices[i].tolist())

            write_voronoi_detail(self.output_dir, file_name, new_vert)
Code example #36
def main():
    import argparse

    parser=argparse.ArgumentParser(description="Build LINZ binary format reverse patch definition file")
    parser.add_argument('model_dir',help="Model directory")
    parser.add_argument('build_dir',help="Patch build directory")
    parser.add_argument('patch_name',nargs="?",help="Patch file name")
    parser.add_argument('--version',help="Deformation model for which patch applies, default is current version")
    parser.add_argument('--ordinates',choices=('3d','horizontal','vertical'),default='3d',help="Ordinates required")
    parser.add_argument('--extents-tolerance',type=float,default=0.0,help='Minimum change to include in extents')
    parser.add_argument('--extents-buffer',type=float,default=2000.0,help='Buffer for extents')
    parser.add_argument('--extents-simplification',type=float,default=1000.0,help='Simplification for extents')
    

    args=parser.parse_args()
    modeldir=args.model_dir
    builddir=args.build_dir
    format='linzdef'

    if not os.path.isdir(builddir):
        raise RuntimeError('Build directory {0} does not exist or is not a directory'
                           .format(builddir))

    extents_tolerance=args.extents_tolerance
    extents_buffer=args.extents_buffer
    extents_simplification=args.extents_simplification

    model=Model(modeldir)
    version=args.version
    if version is None:
        version=model.version()
    datumcode=model.metadata('datum_code')
    modelname=model.metadata('model_name')


    patchname=args.patch_name
    if patchname is None:
        patchname='{0}_patch{2}_{1}'.format(datumcode,version,
                        '' if args.ordinates=='3d' else '_'+args.ordinates[:1].upper())

    deffile=os.path.join(builddir,patchname+'.def')
    binfile=os.path.join(builddir,patchname+'.bin')
    affectedfile=os.path.join(builddir,patchname+'.extents.wkt')
    gdfname=os.path.join(builddir,patchname+'_g{0:03d}.gdf')

    ngrid=0
    ncomp=0

    with open(deffile,'w') as deff:
        deff.write(patch_header(
            patch_name=modelname,
            patch_version='1.0',
            patch_description=(
                'Model version: '+version + '\n' +
                '\nReverse patch calculated '+
                datetime.today().strftime("%Y-%m-%d"))
            ))

        revcomps=model.reversePatchComponents(args.version)
        extentsfiles=[]
        for factor,c in revcomps:
            ncomp += 1
            spatial=c.spatialModel
            ordinates=output_ordinates.get((args.ordinates,spatial.displacement_type))
            if ordinates is None:
                continue
            gridfiles=[]
            for gm in spatial.models():
                grid=gm.model()
                if type(grid).__name__ != 'Grid':
                    raise RuntimeError('Only grid models handled by build_patch.py')
                gridfiles.append(model.getFileName(grid.gridFile()))

            # Reverse to ensure highest priority is listed first
            for g in reversed(gridfiles):
                ngrid += 1
                gdf=gdfname.format(ngrid)
                wktf=gdf+'.wkt'
                commands=[
                    gridtool,
                    'read','csv',g,
                    'write_linzgrid',datumcode,
                    modelname,
                    'Reverse patch for '+c.name,
                    'Grid file '+os.path.basename(g),
                    'resolution','0.0001',
                    'columns','+'.join(ordinates),
                    gdf,
                    'affected_area',
                    'where','|'+'|'.join(ordinates)+'|','!=',str(extents_tolerance),
                    'noheader',wktf
                    ]
                subprocess.call(commands)
                extentsfiles.append(wktf)
                deff.write(patch_component(
                    grid_file=os.path.basename(gdf),
                    group_id=ncomp,
                    factor=factor,
                    comp_description="{0}: {1}".format(
                        c.name,os.path.basename(g))
                    ))

    subprocess.call([makeshiftpl,'-f','LINZSHIFT2B',deffile,binfile])

    extentspolys=[]
    for wktf in extentsfiles:
        poly=wkt.loads(open(wktf).read())
        extentspolys.append(poly)
    centroid=MultiPoint([p.centroid for p in extentspolys]).centroid
    yscale=100000.0
    xscale=yscale*math.cos(math.radians(centroid.y))
    union=None
    for p in extentspolys:
        g=affinity.scale(p,xfact=xscale,yfact=yscale,origin=centroid)
        g=g.buffer(extents_buffer,resolution=4)
        g=g.simplify(extents_simplification)
        if union is None:
            union=g
        else:
            union=union.union(g)
    union=union.simplify(extents_simplification)
    extents=affinity.scale(union,xfact=1.0/xscale,yfact=1.0/yscale,origin=centroid)
    with open(affectedfile,'w') as wktf:
        wktf.write(extents.wkt)
Code example #38
File: seismic_network.py Project: nimanshr/toolbox
 def __init__(self, net_lats, net_lons):
     poly_x, poly_y = pyproj.transform(wgs84, pj_laea, net_lons, net_lats)
     # list() materializes the zip iterator, which MultiPoint needs under Python 3
     self.polygon = MultiPoint(list(zip(poly_x, poly_y))).convex_hull
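
A hedged usage sketch; the enclosing class name and the module-level wgs84/pj_laea projections are not shown in the excerpt, so the names below are assumptions:

import pyproj
from shapely.geometry import MultiPoint, Point

wgs84 = pyproj.Proj(init='epsg:4326')                 # assumed module-level definitions
pj_laea = pyproj.Proj(proj='laea', lat_0=0, lon_0=0)

net = SeismicNetwork(net_lats=[34.0, 35.2, 33.8],     # 'SeismicNetwork' is a hypothetical class name
                     net_lons=[25.0, 26.1, 24.7])
x, y = pyproj.transform(wgs84, pj_laea, 25.3, 34.3)
print(net.polygon.contains(Point(x, y)))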
Code example #39
def generate_waypoints_3(
    site: str,
    floor: str,
    known_waypoints: np.ndarray,
    min_distance_to_known: float = 3.0,
    corner_min_distance_to_known: float = 1.05,
    max_distance_to_known: float = 30.0,
    dist_maybe_wall_pt: float = 2.0,
    dist_definitely_wall_pt: float = 0.4,
    corner_radians_slack_upper: float = (pi / 2) * 0.34,
    corner_radians_slack_lower: float = (pi / 2) * 0.34,
    angle_support_dist: float = 1.5,
    max_inner_points: int = 999,
    generate_inner_waypoints: bool = True,
    generate_corner_waypoints: bool = True,
    generate_edge_waypoints: bool = True,
    wall_point_distance_multiplier: float = 0.35,
    inner_point_distance_multiplier: float = 0.7,
) -> Tuple[np.ndarray, np.ndarray]:
  del generate_inner_waypoints
  del generate_corner_waypoints
  del generate_edge_waypoints
  del max_inner_points

  known_waypoints = fuse_neighbors(known_waypoints, threshold=0.8)
  known_waypoints = asMultiPoint(known_waypoints)
  known_waypoints = unary_union(known_waypoints)

  inner_floor, inner_clean, floor_poly, _ = create_floor_polygon(
      site=site,
      floor=floor,
  )
  bndry = floor_poly.boundary
  if isinstance(bndry, MultiLineString):
    outer_poly = MultiPolygon([Polygon(b) for b in bndry])
  else:
    outer_poly = Polygon(floor_poly.boundary)
  outer_poly = unary_union(outer_poly)

  assert outer_poly.is_valid
  assert floor_poly.is_valid
  outer_walls = floor_poly.envelope.difference(outer_poly)
  inner_walls = floor_poly.difference(inner_clean)
  if isinstance(inner_walls, Polygon):
    inner_walls = [inner_walls]
  walls = unary_union([item.buffer(0) for item in inner_walls] +
                      [outer_walls.buffer(0)])
  walls = walls.buffer(0.05)
  assert walls.is_valid
  assert not walls.is_empty

  # plot_polygon(inner_walls, title="inner_walls")
  # plot_polygon(outer_walls, title="outer_walls")
  # plot_polygon(walls, title="walls")

  distance_to_wall = np.array([pt.distance(walls) for pt in known_waypoints])

  maybe_wall_mask = distance_to_wall < dist_maybe_wall_pt
  wall_mask = maybe_wall_mask

  if len(maybe_wall_mask) > 10:
    assert len(maybe_wall_mask[maybe_wall_mask]) / len(maybe_wall_mask) > 0.2

  consider_wall_dist = 0.0
  known_wall_points = np.empty(shape=(0, 2))
  if maybe_wall_mask.any():
    typical_distance_to_wall_global = np.quantile(
        distance_to_wall[maybe_wall_mask], q=0.7)
    consider_wall_dist = max((typical_distance_to_wall_global * 1.5),
                             dist_definitely_wall_pt)
    wall_mask = distance_to_wall < max(
        (typical_distance_to_wall_global * 1.5), dist_definitely_wall_pt)
    known_wall_points = np.array(known_waypoints)[wall_mask]

  dist_mean, dist_std, global_median_distance = local_stats(known_waypoints)

  not_wall_points = (
      np.empty(shape=(0, 2))
      if wall_mask.all() else np.array(known_waypoints)[~wall_mask])

  # plot_polygon(
  #     walls.buffer(typical_distance_to_wall_global),
  #     bounds=floor_poly.bounds,
  #     title=f"inner site: {site} floor: {floor} walls",
  # )
  # plot_polygon(
  #     inner_clean.buffer(-typical_distance_to_wall_global),
  #     bounds=floor_poly.bounds,
  #     title=f"inner site: {site} floor: {floor} inner",
  # )

  (
      to_wall_distances,
      along_wall_distances,
      near_wall_to_rest_distances,
  ) = near_wall_stats(
      polygons=inner_clean,
      known_wall_points=known_wall_points,
      known_waypoints=known_waypoints,
      min_len=2.0,
      maybe_wall_dist=dist_maybe_wall_pt,
  )
  median_to_wall_dist = maybe_median(
      to_wall_distances[to_wall_distances < consider_wall_dist])
  median_between_wall_pts_dist = maybe_median(
      along_wall_distances[(along_wall_distances > 1.1)
                           & (along_wall_distances < 20)])
  median_near_wall_to_rest = maybe_median(
      near_wall_to_rest_distances[(near_wall_to_rest_distances > 1.1)
                                  & (near_wall_to_rest_distances < 10)])

  gen_wall_pts_dist = median_between_wall_pts_dist * wall_point_distance_multiplier
  gen_inner_pts_dist = global_median_distance[
      0] * inner_point_distance_multiplier
  corner_waypoints, wall_waypoints, inner_waypoints = along_wall(
      inner_clean,
      walls=walls,
      known_wall_points=known_wall_points,
      known_waypoints=known_waypoints,
      wall_pts_dist=gen_wall_pts_dist,
      inner_pts_dist=gen_inner_pts_dist,
      to_wall_dist=median_to_wall_dist,
      min_len=gen_wall_pts_dist,
      angle_support_dist=angle_support_dist,
      slack_lower=corner_radians_slack_lower,
      slack_upper=corner_radians_slack_upper,
      consider_wall_dist=consider_wall_dist,
      wall_point_distance_multiplier=wall_point_distance_multiplier,
      inner_point_distance_multiplier=inner_point_distance_multiplier,
  )

  corner_waypoints = unary_union(MultiPoint(corner_waypoints))
  corner_waypoints = filter_inside(corner_waypoints, inner_clean.buffer(0.05))
  corner_waypoints = fuse_neighbors(
      corner_waypoints, threshold=gen_wall_pts_dist * 0.3)

  wall_waypoints = unary_union(MultiPoint(wall_waypoints))
  wall_waypoints = filter_inside(wall_waypoints, inner_clean.buffer(0.05))
  wall_waypoints = fuse_neighbors(
      wall_waypoints, threshold=gen_wall_pts_dist * 0.6)

  inner_waypoints = unary_union(MultiPoint(inner_waypoints))
  inner_waypoints = filter_inside(inner_waypoints, inner_clean.buffer(0.05))

  generated_inner_ratio = len(inner_waypoints) / (len(wall_waypoints) + 1e-9)
  known_inner_ratio = len(not_wall_points) / len(known_wall_points)
  if generated_inner_ratio > (known_inner_ratio * 6) or known_inner_ratio < 0.1:
    inner_waypoints = np.empty(shape=(0, 2))

  inner_waypoints = fuse_neighbors(
      inner_waypoints,
      threshold=min(gen_inner_pts_dist * 0.95, gen_wall_pts_dist * 0.95),
  )

  corner_waypoints = filter_dist_waypoints(
      points=corner_waypoints,
      known_waypoints=known_waypoints,
      min_distance_to_known=corner_min_distance_to_known,
      max_distance_to_known=max_distance_to_known,
  )
  wall_waypoints = filter_dist_waypoints(
      points=wall_waypoints,
      known_waypoints=corner_waypoints,
      min_distance_to_known=gen_wall_pts_dist,
      max_distance_to_known=math.inf,
  )

  wall_waypoints = filter_dist_waypoints(
      points=wall_waypoints,
      known_waypoints=known_waypoints,
      min_distance_to_known=min_distance_to_known,
      max_distance_to_known=max_distance_to_known,
  )

  corner_waypoints = maybe_to_array(corner_waypoints)
  wall_waypoints = np.concatenate((corner_waypoints, wall_waypoints))

  inner_waypoints = filter_dist_waypoints(
      points=inner_waypoints,
      known_waypoints=wall_waypoints,
      min_distance_to_known=gen_inner_pts_dist * 0.50,
      max_distance_to_known=math.inf,
  )
  inner_waypoints = filter_dist_waypoints(
      points=inner_waypoints,
      known_waypoints=known_waypoints,
      min_distance_to_known=max(min_distance_to_known,
                                median_near_wall_to_rest * 0.9),
      max_distance_to_known=max_distance_to_known,
  )

  wall_waypoints = maybe_to_array(wall_waypoints)
  inner_waypoints = maybe_to_array(inner_waypoints)

  if wall_waypoints.ndim != 2:
    raise ValueError(
        f"Unexpected shape at output. Wall waypoints shape: {wall_waypoints.shape}"
    )

  if inner_waypoints.ndim != 2:
    raise ValueError(
        f"Unexpected shape at output. Inner waypoints shape: {inner_waypoints.shape}"
    )

  # assert len(wall_waypoints) > (len(known_waypoints) * 0.1), f"{site} {floor}"

  return wall_waypoints, inner_waypoints
Code example #40
File: synthetic.py Project: yiyange/peartree
def generate_meter_projected_chunks(
        route_shape: LineString,
        custom_stops: List[List[float]] = None,
        stop_distance_distribution: int = None) -> List[LineString]:

    # Reproject 4326 lat/lon coordinates to equal area
    project = partial(
        pyproj.transform,
        pyproj.Proj(init='epsg:4326'),  # source coordinate system
        pyproj.Proj(init='epsg:2163'))  # destination coordinate system

    rs2 = transform(project, route_shape)  # apply projection

    # Two ways to break apart this route into chunks:
    #   1. Using custom stops as break points (this one takes precedence)
    #   2. Using a custom distance to segment out the route

    # In either case, we need to generate mp_array such that we have
    # target stops or "break points" for the route line shape

    # Path 1 if available
    if custom_stops is not None:
        mp_array = []
        for custom_stop in custom_stops:
            # Now reproject with cast point geometry
            custom_stop_proj = transform(project, Point(custom_stop))
            interp_stop = rs2.interpolate(rs2.project(custom_stop_proj))
            mp_array.append(interp_stop)

    # Otherwise we go with path 2
    else:
        stop_count = round(rs2.length / stop_distance_distribution)

        # Create the array of break points/joints
        mp_array = []
        for i in range(1, stop_count):
            fr = (i / stop_count)
            mp_array.append(rs2.interpolate(fr, normalized=True))

    # Cast array as a Shapely object
    splitter = MultiPoint(mp_array)

    # 1 meter buffer to address floating point discrepancies
    chunks = split(rs2, splitter.buffer(1))

    # TODO: Potential for length errors with this 1 meter
    #       threshold check

    # Take chunks and merge in the small lines
    # from intersection inside of the buffered circles
    # and attach to nearest larger line
    clean_chunks = [chunks[0]]
    r = len(chunks)
    for c in range(1, r):
        latest = clean_chunks[-1]
        current = chunks[c]
        # Again, this is a weak point of the
        # 1 meter buffer method
        if latest.length <= 2:
            # Merge in the small chunks with the larger chunks
            clean_chunks[-1] = linemerge([latest, current])
        else:
            clean_chunks.append(current)

    return clean_chunks
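
A hedged usage sketch; the route below is a hypothetical lon/lat LineString, and per the projection above the resulting chunk lengths come out in meters (EPSG:2163):

from shapely.geometry import LineString

route = LineString([(-122.45, 37.77), (-122.43, 37.78), (-122.41, 37.79)])
chunks = generate_meter_projected_chunks(route, stop_distance_distribution=400)
print(len(chunks), [round(c.length) for c in chunks])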
Code example #41
                color='none',
                zorder=2)

# set up a map dataframe
df_map = pd.DataFrame({
    'poly': [Polygon(xy) for xy in m.boston],
    'hoodname': [names['Neighborho'] for names in m.boston_info]
})
df_map['SqMiles'] = df_map['poly'].map(lambda x: x.area)

# Convert our latitude and longitude into Basemap cartesian map coordinates
mapped_points = [
    Point(m(mapped_x, mapped_y))
    for mapped_x, mapped_y in zip(df['longitude'], df['latitude'])
]
all_points = MultiPoint(mapped_points)
# Use prep to optimize polygons for faster computation


def num_of_contained_points(apolygon, all_points):
    # filter() is lazy under Python 3, so materialize it before taking len();
    # .geoms iterates the MultiPoint's members explicitly
    return len(list(filter(prep(apolygon).contains, all_points.geoms)))


df_map['hood_count'] = df_map['poly'].apply(num_of_contained_points,
                                            args=(all_points, ))
#print df_map['hood_count']

# # # #
# We'll only use a handful of distinct colors for our choropleth. So pick where
# you want your cutoffs to occur. Leave zero and ~infinity alone.
breaks = [0.] + [qt10, qt20, qt30, qt40, qt50, qt60, qt70, qt80, qt90] + [1e20]
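
For the same counting task, a version that avoids both Python 3's lazy filter and direct MultiPoint iteration (removed in Shapely 2.0) might look like this sketch with toy geometry:

from shapely.geometry import MultiPoint, Polygon
from shapely.prepared import prep

poly = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
pts = MultiPoint([(1, 1), (2, 3), (9, 9)])
prepared = prep(poly)
print(sum(1 for p in pts.geoms if prepared.contains(p)))  # -> 2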
Code example #42
File: classify.py Project: GarfieldEr007/CRCPython
def main():      
    gdal.AllRegister()
    path = auxil.select_directory('Input directory')
    if path:
        os.chdir(path)        
#  input image    
    infile = auxil.select_infile(title='Image file') 
    if infile:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
        projection = inDataset.GetProjection()
        geotransform = inDataset.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform) 
        else:
            print 'No geotransform available'
            return       
        imsr = osr.SpatialReference()  
        imsr.ImportFromWkt(projection)      
    else:
        return  
    pos =  auxil.select_pos(bands)   
    if not pos:
        return
    N = len(pos) 
    rasterBands = [] 
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b)) 
#  training algorithm
    trainalg = auxil.select_integer(1,msg='1:Maxlike,2:Backprop,3:Congrad,4:SVM') 
    if not trainalg:
        return           
#  training data (shapefile)      
    trnfile = auxil.select_infile(filt='.shp',title='Train shapefile')
    if trnfile:
        trnDriver = ogr.GetDriverByName('ESRI Shapefile')
        trnDatasource = trnDriver.Open(trnfile,0)
        trnLayer = trnDatasource.GetLayer() 
        trnsr = trnLayer.GetSpatialRef()             
    else:
        return     
    tstfile = auxil.select_outfile(filt='.tst', title='Test results file') 
    if not tstfile:
        print 'No test output'      
#  outfile
    outfile, outfmt = auxil.select_outfilefmt(title='Classification file')   
    if not outfile:
        return                   
    if trainalg in (2,3,4):
#      class probabilities file, hidden neurons
        probfile, probfmt = auxil.select_outfilefmt(title='Probabilities file')
    else:
        probfile = None     
    if trainalg in (2,3):    
        L = auxil.select_integer(8,'Number of hidden neurons')    
        if not L:
            return                  
#  coordinate transformation from training to image projection   
    ct= osr.CoordinateTransformation(trnsr,imsr) 
#  number of classes    
    K = 1
    feature = trnLayer.GetNextFeature() 
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid)>K:
            K = int(classid)
        feature = trnLayer.GetNextFeature() 
    trnLayer.ResetReading()    
    K += 1       
    print '========================='
    print 'supervised classification'
    print '========================='
    print time.asctime()    
    print 'image:    '+infile
    print 'training: '+trnfile  
    if trainalg == 1:
        print 'Maximum Likelihood'
    elif trainalg == 2:
        print 'Neural Net (Backprop)'
    elif trainalg ==3:
        print 'Neural Net (Congrad)'
    else:
        print 'Support Vector Machine'               
#  loop through the polygons    
    Gs = [] # train observations
    ls = [] # class labels
    classnames = '{unclassified'
    classids = set()
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = str(feature.GetField('CLASS_ID'))
        classname  = feature.GetField('CLASS_NAME')
        if classid not in classids:
            classnames += ',   '+ classname
        classids |= {classid}  # note: set(classid) would split a multi-digit id into characters
        l = [0 for i in range(K)]
        l[int(classid)] = 1.0
        polygon = feature.GetGeometryRef()
#      transform to same projection as image        
        polygon.Transform(ct)  
#      convert to a Shapely object            
        poly = shapely.wkt.loads(polygon.ExportToWkt())
#      transform the boundary to pixel coords in numpy        
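#      (a GDAL geotransform maps pixel (col,row) to map coordinates:
#       X = gt[0] + col*gt[1] + row*gt[2], Y = gt[3] + col*gt[4] + row*gt[5];
#       subtracting the origin and applying the inverse of the 2x2 part
#       recovers pixel coordinates)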
        bdry = np.array(poly.boundary) 
        bdry[:,0] = bdry[:,0]-gt[0]
        bdry[:,1] = bdry[:,1]-gt[3]
        GT = np.mat([[gt[1],gt[2]],[gt[4],gt[5]]])
        bdry = bdry*np.linalg.inv(GT) 
#      polygon in pixel coords        
        polygon1 = asPolygon(bdry)
#      raster over the bounding rectangle        
        minx,miny,maxx,maxy = map(int,list(polygon1.bounds))  
        pts = [] 
        for i in range(minx,maxx+1):
            for j in range(miny,maxy+1): 
                pts.append((i,j))             
        multipt =  MultiPoint(pts)   
#      intersection as list              
        intersection = np.array(multipt.intersection(polygon1),dtype=int).tolist()
#      cut out the bounded image cube               
        cube = np.zeros((maxy-miny+1,maxx-minx+1,len(rasterBands)))
        k=0
        for band in rasterBands:
            cube[:,:,k] = band.ReadAsArray(minx,miny,maxx-minx+1,maxy-miny+1)
            k += 1
#      get the training vectors
        for (x,y) in intersection:         
            Gs.append(cube[y-miny,x-minx,:])
            ls.append(l)   
        polygon = None
        polygon1 = None            
        feature.Destroy()  
    trnDatasource.Destroy() 
    classnames += '}'
    m = len(ls)       
    print str(m) + ' training pixel vectors were read in' 
    Gs = np.array(Gs) 
    ls = np.array(ls)
#  stretch the pixel vectors to [-1,1] for ffn
    maxx = np.max(Gs,0)
    minx = np.min(Gs,0)
    for j in range(N):
        Gs[:,j] = 2*(Gs[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0 
#  random permutation of training data
    idx = np.random.permutation(m)
    Gs = Gs[idx,:] 
    ls = ls[idx,:]     
#  setup output datasets 
    driver = gdal.GetDriverByName(outfmt)    
    outDataset = driver.Create(outfile,cols,rows,1,GDT_Byte) 
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection) 
    outBand = outDataset.GetRasterBand(1) 
    if probfile:
        driver = gdal.GetDriverByName(probfmt)    
        probDataset = driver.Create(probfile,cols,rows,K,GDT_Byte) 
        if geotransform is not None:
            probDataset.SetGeoTransform(tuple(gt))
        if projection is not None:
            probDataset.SetProjection(projection)  
        probBands = [] 
        for k in range(K):
            probBands.append(probDataset.GetRasterBand(k+1))         
    if tstfile:
#  train on 2/3 training examples         
        Gstrn = Gs[0:2*m//3,:]
        lstrn = ls[0:2*m//3,:] 
        Gstst = Gs[2*m//3:,:]  
        lstst = ls[2*m//3:,:]    
    else:
        Gstrn = Gs
        lstrn = ls         
    if   trainalg == 1:
        classifier = sc.Maxlike(Gstrn,lstrn)
    elif trainalg == 2:
        classifier = sc.Ffnbp(Gstrn,lstrn,L)
    elif trainalg == 3:
        classifier = sc.Ffncg(Gstrn,lstrn,L)
    elif trainalg == 4:
        classifier = sc.Svm(Gstrn,lstrn)         
            
    print 'training on %i pixel vectors...' % np.shape(Gstrn)[0]
    start = time.time()
    result = classifier.train()
    print 'elapsed time %s' %str(time.time()-start) 
    if result:
        if trainalg in [2,3]:
            cost = np.log10(result)  
            ymax = np.max(cost)
            ymin = np.min(cost) 
            xmax = len(cost)      
            plt.plot(range(xmax),cost,'k')
            plt.axis([0,xmax,ymin-1,ymax])
            plt.title('Log(Cross entropy)')
            plt.xlabel('Epoch')              
#      classify the image           
        print 'classifying...'
        start = time.time()
        tile = np.zeros((cols,N))    
        for row in range(rows):
            for j in range(N):
                tile[:,j] = rasterBands[j].ReadAsArray(0,row,cols,1)
                tile[:,j] = 2*(tile[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0               
            cls, Ms = classifier.classify(tile)  
            outBand.WriteArray(np.reshape(cls,(1,cols)),0,row)
            if probfile:
                Ms = np.byte(Ms*255)
                for k in range(K):
                    probBands[k].WriteArray(np.reshape(Ms[k,:],(1,cols)),0,row)
        outBand.FlushCache()
        print 'elapsed time %s' %str(time.time()-start)
        outDataset = None
        inDataset = None      
        if probfile:
            for probBand in probBands:
                probBand.FlushCache() 
            probDataset = None
            print 'class probabilities written to: %s'%probfile   
        K =  lstrn.shape[1]+1                     
        if (outfmt == 'ENVI') and (K<19):
#          try to make an ENVI classification header file            
            hdr = header.Header() 
            headerfile = outfile+'.hdr'
            f = open(headerfile)
            line = f.readline()
            envihdr = ''
            while line:
                envihdr += line
                line = f.readline()
            f.close()         
            hdr.read(envihdr)
            hdr['file type'] ='ENVI Classification'
            hdr['classes'] = str(K)
            classlookup = '{0'
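#          ctable is assumed to be a flat RGB colour table defined at module
#          level in the original source; it is not shown in this excerpt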
            for i in range(1,3*K):
                classlookup += ', '+str(ctable[i])
            classlookup +='}'    
            hdr['class lookup'] = classlookup
            hdr['class names'] = classnames
            f = open(headerfile,'w')
            f.write(str(hdr))
            f.close()             
        print 'thematic map written to: %s'%outfile
        if trainalg in [2,3]:
            print 'please close the cross entropy plot to continue'
            plt.show()
        if tstfile:
            with open(tstfile,'w') as f:
                print >>f, 'FFN test results for %s'%infile
                print >>f, time.asctime()
                print >>f, 'Classification image: %s'%outfile
                print >>f, 'Class probabilities image: %s'%probfile
                print >>f, lstst.shape[0],lstst.shape[1]
                classes, _ = classifier.classify(Gstst)
                labels = np.argmax(lstst,axis=1)+1
                for i in range(len(classes)):
                    print >>f, classes[i], labels[i]              
                print 'test results written to: %s'%tstfile
        print 'done'
    else:
        print 'an error occurred' 
        return 
Code example #43
def centroid_coords2(list_coords):
    global lc  # side effect: keeps the last input list in a module-level global
    lc = list_coords
    from shapely.geometry import MultiPoint
    points = MultiPoint([(m[0], m[1]) for m in list_coords])
    return (points.centroid.x, points.centroid.y)
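
For example, since the centroid of a MultiPoint is the coordinate mean of its
points:

centroid_coords2([(0, 0), (4, 0), (2, 6)])   # -> (2.0, 2.0)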
Code example #44
"""
 based on shapely docs:
 https://shapely.readthedocs.io/en/stable/manual.html
"""

from shapely.geometry import MultiPoint, Polygon, LineString
import matplotlib.pyplot as plt
from descartes.patch import PolygonPatch

fig = plt.figure(1, dpi=90)
fig.set_frameon(True)

# 1
ax = fig.add_subplot(121)

mp = MultiPoint([(0, 0), (0.5, 1.5), (1, 0.5), (0.5, 0.5)])
rect = mp.minimum_rotated_rectangle  # smallest-area (possibly rotated) bounding rectangle, Shapely >= 1.6

for p in mp.geoms:  # iterate via .geoms (direct iteration is removed in Shapely 2.0)
    ax.plot(p.x, p.y, 'o', color='#999999')
patch = PolygonPatch(rect,
                     facecolor='#6699cc',
                     edgecolor='#6699cc',
                     alpha=0.5,
                     zorder=2)
ax.add_patch(patch)
ax.set_title('a) MultiPoint')

xr = [-1, 2]
yr = [-1, 2]
ax.set_xlim(*xr)
ax.set_ylim(*yr)
Code example #45
def get_centroid(cluster):
    # build the MultiPoint once instead of twice; its centroid is the
    # coordinate mean of the member points
    c = MultiPoint(cluster).centroid
    return [c.x, c.y]
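
The centroid of a MultiPoint is the arithmetic mean of its points, so a quick
check (assuming numpy is available) is:

import numpy as np
cluster = [(0, 0), (2, 0), (1, 3)]
print(get_centroid(cluster), np.mean(cluster, axis=0))  # both give (1.0, 1.0)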
Code example #46
File: classify_cv.py  Project: citterio/CRCDocker
def main():    
    usage = '''
Usage: 
---------------------------------------------------------
python %s  [-p bandPositions] [- a algorithm] [-L number of hidden neurons]   
[-P generate class probabilities image] filename trainShapefile

bandPositions is a list, e.g., -p [1,2,4]  

algorithm  1=MaxLike
           2=NNet(backprop)
           3=NNet(congrad)
           4=SVM

If the input file is named 

         path/filebasename.ext then

the output classification file is named 

         path/filebasename_class.ext

the class probabilities output file is named

         path/filebasename_classprobs.ext
         
and the test results file is named

         path/filebasename_<classifier>.tst
--------------------------------------------------------''' %sys.argv[0]
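#  example invocation (hypothetical file names):
#    python classify_cv.py -p [1,2,3] -a 2 -L 8 -P image.tif train.shp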
    options, args = getopt.getopt(sys.argv[1:],'hnPp:a:L:')
    pos = None
    probs = False   
    L = 8
    graphics = True
    trainalg = 1
    for option, value in options:
        if option == '-h':
            print usage
            return
        elif option == '-p':
            pos = eval(value)
        elif option == '-n':
            graphics = False            
        elif option == '-a':
            trainalg = eval(value)
        elif option == '-L':
            L = eval(value)    
        elif option == '-P':
            probs = True                              
    if len(args) != 2: 
        print 'Incorrect number of arguments'
        print usage
        sys.exit(1)      
    if trainalg == 1:
        algorithm = 'MaxLike'
    elif trainalg == 2:
        algorithm = 'NNet(Backprop)'
    elif trainalg == 3:
        algorithm =  'NNet(Congrad)'
    elif trainalg == 4:
        algorithm = 'SVM'              
    infile = args[0]  
    trnfile = args[1]      
    gdal.AllRegister() 
    if infile:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
        projection = inDataset.GetProjection()
        geotransform = inDataset.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform) 
        else:
            print 'No geotransform available'
            return       
        imsr = osr.SpatialReference()  
        imsr.ImportFromWkt(projection)    
    else:
        return  
    if pos is None: 
        pos = range(1,bands+1)
    N = len(pos)    
    rasterBands = [] 
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b))     
#  output files
    path = os.path.dirname(infile)
    basename = os.path.basename(infile)
    root, ext = os.path.splitext(basename)
    outfile = '%s/%s_class%s'%(path,root,ext)  
    tstfile = '%s/%s_%s.tst'%(path,root,algorithm)            
    if (trainalg in (2,3,4)) and probs:
#      class probabilities file
        probfile = '%s/%s_classprobs%s'%(path,root,ext) 
    else:
        probfile = None        
#  training data        
    trnDriver = ogr.GetDriverByName('ESRI Shapefile')
    trnDatasource = trnDriver.Open(trnfile,0)
    trnLayer = trnDatasource.GetLayer() 
    trnsr = trnLayer.GetSpatialRef()             
#  coordinate transformation from training to image projection   
    ct = osr.CoordinateTransformation(trnsr,imsr) 
#  number of classes    
    K = 1
    feature = trnLayer.GetNextFeature() 
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid)>K:
            K = int(classid)
        feature = trnLayer.GetNextFeature() 
    trnLayer.ResetReading()    
    K += 1       
#  here we go    
    print '========================='
    print 'supervised classification'
    print '========================='
    print time.asctime()    
    print 'image:     '+infile
    print 'training:  '+trnfile  
    print 'algorithm: '+algorithm             
#  loop through the polygons    
    Gs = [] # train observations
    ls = [] # class labels
    classnames = '{unclassified'
    classids = set()
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = str(feature.GetField('CLASS_ID'))
        classname  = feature.GetField('CLASS_NAME')
        if classid not in classids:
            classnames += ',   '+ classname
        classids |= {classid}  # note: set(classid) would split a multi-digit id into characters
#      label for this ROI           
        l = [0 for i in range(K)]
        l[int(classid)] = 1.0
        polygon = feature.GetGeometryRef()
#      transform to same projection as image        
        polygon.Transform(ct)  
#      convert to a Shapely object            
        poly = shapely.wkt.loads(polygon.ExportToWkt())
#      transform the boundary to pixel coords in numpy        
        bdry = np.array(poly.boundary) 
        bdry[:,0] = bdry[:,0]-gt[0]
        bdry[:,1] = bdry[:,1]-gt[3]
        GT = np.mat([[gt[1],gt[2]],[gt[4],gt[5]]])
        bdry = bdry*np.linalg.inv(GT) 
#      polygon in pixel coords        
        polygon1 = asPolygon(bdry)
#      raster over the bounding rectangle        
        minx,miny,maxx,maxy = map(int,list(polygon1.bounds))  
        pts = [] 
        for i in range(minx,maxx+1):
            for j in range(miny,maxy+1): 
                pts.append((i,j))             
        multipt =  MultiPoint(pts)   
#      intersection as list              
        intersection = np.array(multipt.intersection(polygon1),dtype=int).tolist()
#      cut out the bounded image cube               
        cube = np.zeros((maxy-miny+1,maxx-minx+1,len(rasterBands)))
        k=0
        for band in rasterBands:
            cube[:,:,k] = band.ReadAsArray(minx,miny,maxx-minx+1,maxy-miny+1)
            k += 1
#      get the training vectors
        for (x,y) in intersection:         
            Gs.append(cube[y-miny,x-minx,:])
            ls.append(l)   
        polygon = None
        polygon1 = None            
        feature.Destroy()  
    trnDatasource.Destroy() 
    classnames += '}'
    m = len(ls)       
    print str(m) + ' training pixel vectors were read in' 
    Gs = np.array(Gs) 
    ls = np.array(ls)
#  stretch the pixel vectors to [-1,1] (for ffn)
    maxx = np.max(Gs,0)
    minx = np.min(Gs,0)
    for j in range(N):
        Gs[:,j] = 2*(Gs[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0   
#  random permutation of training data
    idx = np.random.permutation(m)
    Gs = Gs[idx,:] 
    ls = ls[idx,:]             
#  setup output datasets 
    driver = inDataset.GetDriver() 
    outDataset = driver.Create(outfile,cols,rows,1,GDT_Byte) 
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection) 
    outBand = outDataset.GetRasterBand(1) 
    if probfile:   
        probDataset = driver.Create(probfile,cols,rows,K,GDT_Byte) 
        if geotransform is not None:
            probDataset.SetGeoTransform(tuple(gt))
        if projection is not None:
            probDataset.SetProjection(projection)  
        probBands = [] 
        for k in range(K):
            probBands.append(probDataset.GetRasterBand(k+1))         
#  initialize classifier  
    if   trainalg == 1:
        classifier = sc.Maxlike(Gs,ls)
    elif trainalg == 2:
        classifier = sc.Ffnbp(Gs,ls,L)
    elif trainalg == 3:
        classifier = sc.Ffncg(Gs,ls,L)
    elif trainalg == 4:
        classifier = sc.Svm(Gs,ls)         
#  train it            
    print 'training on %i pixel vectors...' % np.shape(Gs)[0]
    start = time.time()
    result = classifier.train()
    print 'elapsed time %s' %str(time.time()-start) 
    if result:
        if (trainalg in [2,3]) and graphics:
            cost = np.log10(result)  
            ymax = np.max(cost)
            ymin = np.min(cost) 
            xmax = len(cost)      
            plt.plot(range(xmax),cost,'k')
            plt.axis([0,xmax,ymin-1,ymax])
            plt.title('Log(Cross entropy)')
            plt.xlabel('Epoch')   
            plt.show()
#      classify the image           
        print 'classifying...'
        start = time.time()
        tile = np.zeros((cols,N),dtype=np.float32)    
        for row in range(rows):
            for j in range(N):
                tile[:,j] = rasterBands[j].ReadAsArray(0,row,cols,1)
                tile[:,j] = 2*(tile[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0               
            cls, Ms = classifier.classify(tile)  
            outBand.WriteArray(np.reshape(cls,(1,cols)),0,row)
            if probfile:
                Ms = np.byte(Ms*255)
                for k in range(K):
                    probBands[k].WriteArray(np.reshape(Ms[k,:],(1,cols)),0,row)
        outBand.FlushCache()
        print 'elapsed time %s' %str(time.time()-start)
        outDataset = None
        inDataset = None      
        if probfile:
            for probBand in probBands:
                probBand.FlushCache() 
            probDataset = None
            print 'class probabilities written to: %s'%probfile   
        K =  ls.shape[1]+1                     
        print 'thematic map written to: %s'%outfile
    else:
        print 'an error occurred' 
        return 
#  cross-validation
    start = time.time()
    rc = Client()   
    print 'submitting cross-validation to %i IPython engines'%len(rc)  
    m = np.shape(Gs)[0]
    traintest = []
    for i in range(10):
        sl = slice(i*m//10,(i+1)*m//10)
        traintest.append( (np.delete(Gs,sl,0),np.delete(ls,sl,0), \
                                     Gs[sl,:],ls[sl,:],L,trainalg) )
    v = rc[:]   
    v.execute('import auxil.supervisedclass as sc') 
    result = v.map(crossvalidate,traintest).get()   
    print 'parallel execution time: %s' %str(time.time()-start)      
    print 'misclassification rate: %f' %np.mean(result)
    print 'standard deviation:     %f' %np.std(result)         
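
The crossvalidate helper mapped onto the engines is defined elsewhere in the
original module and is not part of this excerpt. A minimal sketch of what it
might look like, assuming the tuple layout built above and the classifier API
used in main():

def crossvalidate(traintest):
    # hypothetical reconstruction, not the original implementation
    import numpy as np
    import auxil.supervisedclass as sc
    Gstrn, lstrn, Gstst, lstst, L, trainalg = traintest
    if trainalg == 1:
        classifier = sc.Maxlike(Gstrn, lstrn)
    elif trainalg == 2:
        classifier = sc.Ffnbp(Gstrn, lstrn, L)
    elif trainalg == 3:
        classifier = sc.Ffncg(Gstrn, lstrn, L)
    else:
        classifier = sc.Svm(Gstrn, lstrn)
    if classifier.train() is not None:
        # misclassification rate on the held-out fold, with 1-based labels
        # as in the test-results block of classify.py above
        classes, _ = classifier.classify(Gstst)
        classes = np.array(classes).ravel()
        labels = np.argmax(lstst, axis=1) + 1
        return np.sum(classes != labels) / float(len(classes))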
Code example #47
_expected_exceptions = {}

# ------------------
# gdf with Points
gdf = GeoDataFrame({'a': [1, 2]},
                   crs={'init': 'epsg:4326'},
                   geometry=[city_hall_entrance, city_hall_balcony])
_geodataframes_to_write.append(gdf)

# ------------------
# gdf with MultiPoints
gdf = GeoDataFrame(
    {'a': [1, 2]},
    crs={'init': 'epsg:4326'},
    geometry=[
        MultiPoint([city_hall_balcony, city_hall_council_chamber]),
        MultiPoint(
            [city_hall_entrance, city_hall_balcony, city_hall_council_chamber])
    ])
_geodataframes_to_write.append(gdf)

# ------------------
# gdf with Points and MultiPoints
gdf = GeoDataFrame({'a': [1, 2]},
                   crs={'init': 'epsg:4326'},
                   geometry=[
                       MultiPoint([city_hall_entrance, city_hall_balcony]),
                       city_hall_balcony
                   ])
_geodataframes_to_write.append(gdf)
# 'ESRI Shapefile' driver supports writing LineString/MultiLineString and