Example no. 1
    def setup_method(self):

        test_file_path = mm.datasets.get_path("bubenec")
        self.df_buildings = gpd.read_file(test_file_path, layer="buildings")
        self.df_tessellation = gpd.read_file(test_file_path,
                                             layer="tessellation")
        self.df_tessellation["area"] = mm.Area(self.df_tessellation).series
Example no. 2
    def setup(self, *args):

        test_file_path = mm.datasets.get_path("bubenec")
        self.df_buildings = gpd.read_file(test_file_path, layer="buildings")
        self.df_streets = gpd.read_file(test_file_path, layer="streets")
        self.df_tessellation = gpd.read_file(test_file_path, layer="tessellation")
        self.df_buildings["height"] = np.linspace(10.0, 30.0, 144)
        self.df_tessellation["area"] = mm.Area(self.df_tessellation).series
        self.sw = mm.sw_high(k=3, gdf=self.df_tessellation, ids="uID")
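A hypothetical benchmark body using the fixture above (not part of the original snippet); it assumes momepy's NeighborDistance class, which measures the mean distance to neighbouring buildings within the given spatial weights:

    def time_neighbor_distance(self):
        # Exercise the k=3 weights built in setup (sketch only).
        mm.NeighborDistance(self.df_buildings, self.sw, "uID")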
Example no. 3
# Imports needed by this snippet (pyshp is imported as `shapefile`); the helpers
# fuc_1, detected and compute_ori are defined elsewhere in the source.
import json

import geopandas
import momepy
import shapefile


def compute_similar(data_path, input, information_data):

    sf_data = shapefile.Reader(data_path, encoding='gbk')
    data = geopandas.read_file(data_path)
    data['area'] = momepy.Area(data).series

    data_ori = compute_ori(sf_data.shape(0))

    s_i = [
        'school', 'mall', 'restaurant', 'hospital', 'subway', 'bank', 'park',
        'water'
    ]

    data_text = json.loads(input)
    text = json.loads(information_data)
    result = {}

    for ID in list(text.keys()):  #['234', '345']
        temp = json.loads(text[ID])
        area = fuc_1(data['area'][0], temp['地块面积'])  # parcel area
        ori = fuc_1(data_ori, temp['地块朝向'])  # parcel orientation
        FAR = fuc_1(data_text['FAR'], temp['容积率'])  # floor area ratio
        price = fuc_1(data_text['price'], temp['价格'])  # price
        greeningrate = fuc_1(data_text['greeningrate'], temp['绿化率'])  # greening rate

        tmp = 0
        for s in s_i:
            if data_text[s] != temp[s]:
                tmp += 1
        surrounding = (1 - tmp / 8) * 100

        tmp_result = {
            '面积': detected(area),  # area
            '朝向': detected(ori),  # orientation
            '容积率': detected(FAR),  # floor area ratio
            '价格': detected(price),  # price
            '绿化率': detected(greeningrate),  # greening rate
            '周边环境': detected(surrounding)  # surroundings
        }
        result[ID] = json.dumps(tmp_result,
                                sort_keys=True,
                                indent=4,
                                separators=(',', ': '),
                                ensure_ascii=False)

    final_data = json.dumps(result,
                            sort_keys=True,
                            indent=4,
                            separators=(',', ': '),
                            ensure_ascii=False)

    sf_data.close()
    del data
    return final_data
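fuc_1 and detected are not defined in this snippet. A minimal sketch of plausible stand-ins, purely as an assumption (the real helpers live elsewhere in the source): fuc_1 as a percentage similarity between two attribute values, detected as rounding for the JSON report.

def fuc_1(value_a, value_b):
    # Hypothetical stand-in: percentage similarity between two attribute values.
    value_a, value_b = float(value_a), float(value_b)
    denom = max(abs(value_a), abs(value_b))
    return 100.0 if denom == 0 else (1 - abs(value_a - value_b) / denom) * 100


def detected(score):
    # Hypothetical stand-in: round a 0-100 score for the JSON report.
    return round(float(score), 2)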
Example no. 4
    def setup_method(self):

        test_file_path = mm.datasets.get_path("bubenec")
        self.df_buildings = gpd.read_file(test_file_path, layer="buildings")
        self.df_streets = gpd.read_file(test_file_path, layer="streets")
        self.df_tessellation = gpd.read_file(test_file_path,
                                             layer="tessellation")
        self.df_buildings["height"] = np.linspace(10.0, 30.0, 144)
        self.df_tessellation["area"] = mm.Area(self.df_tessellation).series
        self.sw = sw_high(k=3, gdf=self.df_tessellation, ids="uID")
        self.sw.neighbors[100] = []
        self.sw_drop = sw_high(k=3, gdf=self.df_tessellation[2:], ids="uID")
Example no. 5
    def test_Area(self):
        self.df_buildings["area"] = mm.Area(self.df_buildings).series
        check = self.df_buildings.geometry[0].area
        assert self.df_buildings["area"][0] == check
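Exact float equality holds here because mm.Area runs the same geometry.area computation on both sides; a tolerant variant of the check, assuming pytest is imported, could look like this:

    def test_Area_approx(self):
        self.df_buildings["area"] = mm.Area(self.df_buildings).series
        check = self.df_buildings.geometry[0].area
        # Same check, tolerant to floating-point rounding.
        assert self.df_buildings["area"][0] == pytest.approx(check)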
Example no. 6
# Imports needed by this snippet (pyshp as `shapefile`, OpenCV as `cv2`); the
# helpers compute_ori, compute_DFT and minus are defined elsewhere in the source.
import json
import math
import os
import pickle
import shutil

import cv2
import geopandas
import momepy
import numpy as np
import shapefile
from shapely.geometry import Polygon


def retrieval_similar_cases(data_path, cases_path, threshold):

    threshold = json.loads(threshold)['threshold']
    data = geopandas.read_file(data_path)  # print(data.shape) #(1, 46)
    cases = geopandas.read_file(cases_path)  # print(len(cases)) #1055
    sf_cases = shapefile.Reader(cases_path, encoding='gbk')
    sf_data = shapefile.Reader(data_path, encoding='gbk')
    change_list = ['bank', 'hospital', 'mall', 'school', 'subway']
    for l in change_list:
        cases.loc[cases[l].notnull(), l] = 1
        cases.loc[cases[l].isnull(), l] = 0
    cases = cases.fillna(value='nan')

    data['area'] = momepy.Area(data).series
    data['length'] = momepy.Perimeter(data).series
    data_ccd = momepy.CentroidCorners(data)
    data['ccd_means'] = data_ccd.mean
    data['ccd_std_stdev'] = data_ccd.std
    data['circ_comp'] = momepy.CircularCompactness(data).series  # circular compactness
    data['cwa'] = momepy.CompactnessWeightedAxis(data).series  # compactness-weighted axis
    data['convexity'] = momepy.Convexity(data).series  # convexity
    data['corners'] = momepy.Corners(data).series  # number of corners
    data['elongation'] = momepy.Elongation(data).series  # elongation
    data['eri'] = momepy.EquivalentRectangularIndex(data).series  # equivalent rectangular index
    data['fractal'] = momepy.FractalDimension(data).series  # fractal dimension
    data['rectangularity'] = momepy.Rectangularity(data).series  # rectangularity
    data['squ_comp'] = momepy.SquareCompactness(data).series  # square compactness
    data['long_ax'] = momepy.LongestAxisLength(data).series  # longest axis length
    data['shape_index'] = momepy.ShapeIndex(
        data, longest_axis='long_ax').series  # shape index

    cases['area'] = momepy.Area(cases).series
    cases['length'] = momepy.Perimeter(cases).series
    cases_ccd = momepy.CentroidCorners(cases)
    cases['ccd_means'] = cases_ccd.mean
    cases['ccd_std_stdev'] = cases_ccd.std
    cases['circ_comp'] = momepy.CircularCompactness(cases).series  # circular compactness
    cases['cwa'] = momepy.CompactnessWeightedAxis(cases).series  # compactness-weighted axis
    cases['convexity'] = momepy.Convexity(cases).series  # convexity
    cases['corners'] = momepy.Corners(cases).series  # number of corners
    cases['elongation'] = momepy.Elongation(cases).series  # elongation
    cases['eri'] = momepy.EquivalentRectangularIndex(cases).series  # equivalent rectangular index
    cases['fractal'] = momepy.FractalDimension(cases).series  # fractal dimension
    cases['rectangularity'] = momepy.Rectangularity(cases).series  # rectangularity
    cases['squ_comp'] = momepy.SquareCompactness(cases).series  # square compactness
    cases['long_ax'] = momepy.LongestAxisLength(cases).series  # longest axis length
    cases['shape_index'] = momepy.ShapeIndex(
        cases, longest_axis='long_ax').series  # shape index

    test_x = cases.iloc[:, 18:].sub(data.iloc[0, 2:],
                                    axis=1).abs().astype('float')

    ori = []
    dft = []
    him = []
    for i in range(len(cases)):
        # Ori
        ori.append(
            abs(
                compute_ori(sf_cases.shape(i)) -
                compute_ori(sf_data.shape(0))))
        # DFT
        fd_c, final_x_c, final_y_c = compute_DFT(sf_cases.shape(i))
        fd_d, final_x_d, final_y_d = compute_DFT(sf_data.shape(0))
        tmp = 0
        for k in range(20):
            tmp += math.pow((fd_c[k] - fd_d[k]), 2)
        dft.append(math.sqrt(tmp))
        # shape match (cv2.matchShapes, Hu-moment based)
        inter = np.concatenate(
            (final_x_c[:, np.newaxis], final_y_c[:, np.newaxis]),
            1)  # nom_x[:, np.newaxis] adds a new axis
        inter = inter.reshape(
            len(final_x_c), 1,
            2)  # OpenCV contours are ndarrays of shape (n, 1, 2), not (n, 2)
        inter_d = np.concatenate(
            (final_x_d[:, np.newaxis], final_y_d[:, np.newaxis]),
            1)  # nom_x[:, np.newaxis] adds a new axis
        inter_d = inter_d.reshape(
            len(final_x_d), 1,
            2)  # OpenCV contours are ndarrays of shape (n, 1, 2), not (n, 2)
        him.append(cv2.matchShapes(inter, inter_d, 1, 0))

    test_x['area'] = test_x['area'] * 0.000001
    test_x['length'] = test_x['length'] * 0.001
    test_x['ori'] = ori
    test_x['dft'] = dft
    test_x['shape'] = him

    with open('xgb.pickle.dat', 'rb') as model_file:
        loaded_model = pickle.load(model_file)

    xgb_pred = loaded_model.predict_proba(test_x)

    shutil.rmtree('result', ignore_errors=True)  # clear previous output; ignore if absent
    os.mkdir('result')
    result = {}

    for i in range(len(xgb_pred)):
        if (test_x['area'][i] < data['area'][0] * 0.25
                and test_x['ori'][i] < 30
                and xgb_pred[i][1] > threshold):  # 0.99,26
            information = {
                '编号ID': float(cases['ID'][i]),  # ID
                '地块名字': cases['NAME'][i],  # parcel name
                '地块所在地': cases['city'][i],  # location (city)
                '地块面积': cases['area'][i],  # parcel area
                '地块朝向': compute_ori(sf_cases.shape(i)),  # parcel orientation
                '容积率': cases['plot_area'][i],  # floor area ratio
                '价格': cases['price'][i],  # price
                '绿化率': cases['greening_r'][i],  # greening rate
                '建成日期': cases['build_date'][i],  # completion date
                'school': float(cases['school'][i]),
                'mall': float(cases['mall'][i]),
                'restaurant': float(cases['restaurant'][i]),
                'hospital': float(cases['hospital'][i]),
                'subway': float(cases['subway'][i]),
                'bank': float(cases['bank'][i]),
                'park': float(cases['includ_g'][i]),
                'water': float(cases['includ_w'][i])
            }
            result[i] = json.dumps(information,
                                   sort_keys=True,
                                   indent=4,
                                   separators=(',', ': '),
                                   ensure_ascii=False)

            a = 1024
            b = 512
            img = np.zeros((a, a, 3))
            img.fill(255)
            landuse = sf_cases
            l_shape = landuse.shape(i)
            l_convex = Polygon(l_shape.points).convex_hull
            x_c = l_convex.centroid.xy[0][
                0]  # l_convex.centroid.xy:(array('d', [12945692.760656377]), array('d', [4861576.219346005]))
            y_c = l_convex.centroid.xy[1][0]
            l_dot = np.array(l_shape.points)
            l_nom_x = np.array(list(map(int, l_dot[:, 0]))) - int(x_c)
            l_nom_y = np.array(list(map(int, l_dot[:, 1]))) - int(y_c)
            l_inter = np.concatenate((l_nom_x[:, np.newaxis] + b,
                                      minus(l_nom_y)[:, np.newaxis] + b),
                                     1)  # nom_x[:, np.newaxis] adds a new axis
            cv2.polylines(
                img, [np.asarray(l_inter)], True, (0, 0, 255),
                1)  # cv2.polylines(canvas, [points], closed, color, thickness); points must be integers, not floats
            cv2.imwrite('./result/' + str(cases['ID'][i]) + '.jpg', img)

    final_data = json.dumps(result,
                            sort_keys=True,
                            indent=4,
                            separators=(',', ': '),
                            ensure_ascii=False)
    print(final_data)
    sf_cases.close()
    sf_data.close()
    del cases
    del data
    return final_data
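A hypothetical invocation of the function above; the shapefile paths are placeholders, and the pretrained xgb.pickle.dat model is expected in the working directory:

if __name__ == '__main__':
    similar_cases = retrieval_similar_cases('data/parcel.shp', 'data/cases.shp',
                                            json.dumps({'threshold': 0.9}))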
Example no. 7
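This fragment is an excerpt from a longer notebook; the imports below are an assumption, and the names buffers, buildings, gini_fn, tess, blg, blocks, streets and str_q1 are taken to come from earlier cells not shown in the source:

import geopandas as gpd
import libpysal
import momepy as mm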
for buf in buffers:
    tessellation = gpd.read_file(
        'data/tessellation/{0}_tessellation.shp'.format(buf))
    tessellation['area'] = tessellation.area
    tessellation['lal'] = mm.LongestAxisLength(tessellation).series
    tessellation['circom'] = mm.CircularCompactness(tessellation).series
    tessellation['shapeix'] = mm.ShapeIndex(tessellation, 'lal', 'area').series
    tessellation['rectan'] = mm.Rectangularity(tessellation, 'area').series
    tessellation['fractal'] = mm.FractalDimension(tessellation, 'area').series
    tessellation['orient'] = mm.Orientation(tessellation).series
    distancesw = libpysal.weights.DistanceBand.from_dataframe(tessellation,
                                                              400,
                                                              ids='uID')
    tessellation['freq'] = mm.Neighbors(tessellation, distancesw, 'uID').series
    tessellation['car'] = mm.AreaRatio(tessellation, buildings, 'area',
                                       mm.Area(buildings).series,
                                       'uID').series  # coverage area ratio ('uID' assumed as the shared id)
    tessellation['gini_area'] = gini_fn(tessellation, 'area', distancesw,
                                        'uID')
    tessellation['gini_car'] = gini_fn(tessellation, 'car', distancesw, 'uID')
    tessellation.to_file('data/tessellation/{0}_tessellation.shp'.format(buf))

# In[ ]:

cadastre = gpd.read_file('data/cadastre/Zurich_cadastre.shp')

cadastre['area'] = cadastre.area
cadastre['lal'] = mm.LongestAxisLength(cadastre).series
cadastre['circom'] = mm.CircularCompactness(cadastre).series
cadastre['shapeix'] = mm.ShapeIndex(cadastre, 'lal', 'area').series
cadastre['rectan'] = mm.Rectangularity(cadastre, 'area').series
cadastre['fractal'] = mm.FractalDimension(cadastre, 'area').series
                               "nID",
                               spatial_weights=str_q1,
                               mode="count").series
streets["mdsAre"] = mm.Reached(streets,
                               tess,
                               "nID",
                               "nID",
                               spatial_weights=str_q1,
                               mode="sum").series

blg_q1 = libpysal.weights.contiguity.Queen.from_dataframe(blg)

blg["libNCo"] = mm.Courtyards(blg, "bID", blg_q1).series
blg["ldbPWL"] = mm.PerimeterWall(blg, blg_q1).series

blocks["ldkAre"] = mm.Area(blocks).series
blocks["ldkPer"] = mm.Perimeter(blocks).series
blocks["lskCCo"] = mm.CircularCompactness(blocks, "ldkAre").series
blocks["lskERI"] = mm.EquivalentRectangularIndex(blocks, "ldkAre",
                                                 "ldkPer").series
blocks["lskCWA"] = mm.CompactnessWeightedAxis(blocks, "ldkAre",
                                              "ldkPer").series
blocks["ltkOri"] = mm.Orientation(blocks).series

blo_q1 = libpysal.weights.contiguity.Queen.from_dataframe(blocks, ids="bID")

blocks["ltkWNB"] = mm.Neighbors(blocks, blo_q1, "bID", weighted=True).series
blocks["likWBB"] = mm.Count(blocks, blg, "bID", "bID", weighted=True).series

tess.to_file("files/elements.gpkg", layer="tessellation", driver="GPKG")
blg.to_file("files/elements.gpkg", layer="buildings", driver="GPKG")
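A quick read-back of the layers written above (same GeoPackage path as in the snippet):

tess_check = gpd.read_file("files/elements.gpkg", layer="tessellation")
blg_check = gpd.read_file("files/elements.gpkg", layer="buildings")
print(len(tess_check), len(blg_check))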