Example No. 1
def quadratCount(targetPts_epoch, locationsPts_epoch, nx, ny):
    # corners=[Point(p.x+30,p.y+30),Point(p.x+30,p.y-30),Point(p.x-30,p.y-30),Point(p.x-30,p.y+30)]
    corners_coordi = [(locationsPts_epoch.x + 30, locationsPts_epoch.y + 30),
                      (locationsPts_epoch.x + 30, locationsPts_epoch.y - 30),
                      (locationsPts_epoch.x - 30, locationsPts_epoch.y - 30),
                      (locationsPts_epoch.x - 30, locationsPts_epoch.y + 30)]
    target_pts = [coordinate.coords[:][0]
                  for coordinate in targetPts_epoch] + corners_coordi
    pp = PointPattern(target_pts)
    # print("^"*50)

    # Quadrat statistics using PySAL's Quadrat_statistics; R's spatstat library could be used instead for more functionality.
    # refs: https://pointpats.readthedocs.io/en/latest/  https://pysal.org/notebooks/explore/pointpats/Quadrat_statistics.html
    # Quadrat Analysis (QA), introduced by Greig-Smith in 1964, studies the variation of point counts within quadrats:
    # a set of quadrats is overlaid on the study area, the number of points falling in each quadrat is counted, and the
    # frequency of quadrats containing m points is compared with a completely random (Poisson) process to characterise
    # the spatial pattern; the result is usually judged by the Variance-Mean Ratio (VMR).
    # Choosing a reasonable quadrat size matters: a common "rule of thumb" is that the quadrat area should be about
    # twice the average area occupied by a point. ref: 《黄土丘陵沟壑区农村居民点分布模式空间统计分析——以甘谷县为例》
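    # Sizing sketch (added illustration, not part of the original code): with n landmarks inside the
    # 60 x 60 window built from the corners above, the rule of thumb suggests a quadrat area of about
    # 2 * (60 * 60) / n, i.e.
    #   quadrat_side = math.sqrt(2 * 60 * 60 / len(targetPts_epoch))
    #   nx = ny = max(1, round(60 / quadrat_side))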
    q_r = qs.QStatistic(pp, shape="rectangle", nx=nx, ny=ny)
    # q_r.plot()
    mr = q_r.mr
    quadratCount = mr.point_location_sta()
    # print(quadratCount)
    chi2 = q_r.chi2  # chi-squared test statistic for the observed point pattern
    chi2_pvalue = q_r.chi2_pvalue
    df = q_r.df

    comparisonValue = 1
    quadratNum = sum(
        np.array(list(quadratCount.values())) >= comparisonValue
    )  # number of quadrats occupied by at least comparisonValue points

    # print(sum(np.array(list(quadratCount.values()))>=1))
    numDivQuad = len(targetPts_epoch) / sum(
        np.array(list(quadratCount.values())) >= 1
    )  # number of landmarks / number of occupied quadrats
    return chi2, quadratNum, numDivQuad
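
A minimal usage sketch for the quadratCount function above (the shapely points, grid size, and imports are illustrative assumptions, not part of the original example):

import numpy as np
from shapely.geometry import Point
from pointpats import PointPattern
import pointpats.quadrat_statistics as qs

# hypothetical AV location and a few surrounding landmarks within the 25 m scan range
av_location = Point(10.0, 20.0)
landmarks = [Point(10.0 + dx, 20.0 + dy)
             for dx, dy in [(5, 3), (-8, 12), (15, -6), (-20, -4), (2, 24)]]

chi2, quadratNum, numDivQuad = quadratCount(landmarks, av_location, nx=5, ny=5)
print(chi2, quadratNum, numDivQuad)
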
from pointpats import PointPattern
import pointpats
points = [[66.22, 32.54], [22.52, 22.39], [31.01, 81.21],
          [9.47, 31.02], [30.78, 60.10], [75.21, 58.93],
          [79.26, 7.68], [8.23, 39.93], [98.73, 77.17],
          [89.78, 42.53], [65.19, 92.08], [54.46, 8.48]]

pp = PointPattern(points)

print(pp.n)

print(pointpats.mean_center(points))
Example No. 3
def analise7():
    #Determine the growth and/or decline of the consumption potential over the years
    command = '"C:\\Program Files\\QGIS 3.16\\bin\\ogr2ogr.exe" -f "ESRI Shapefile" fishnet_clip.shp MYSQL:"openbanking,host=localhost,user=root,password="******"fishnet_clip"'
    subprocess.check_call(command, shell=True)
    command = '"C:\\Program Files\\QGIS 3.16\\bin\\ogr2ogr.exe" -f "ESRI Shapefile" bairros.shp MYSQL:"openbanking,host=localhost,user=root,password="******"bairros"'
    subprocess.check_call(command, shell=True)
    command = '"C:\\Program Files\\QGIS 3.16\\bin\\ogr2ogr.exe" -f "ESRI Shapefile" demanda.shp MYSQL:"openbanking,host=localhost,user=root,password="******"demanda"'
    subprocess.check_call(command, shell=True)
    command = '"C:\\Program Files\\QGIS 3.16\\bin\\ogr2ogr.exe" -f "ESRI Shapefile" restaurante.shp MYSQL:"openbanking,host=localhost,user=root,password="******"restaurante"'
    subprocess.check_call(command, shell=True)
    limite = gpd.read_file("bairros.shp")
    restaurante = gpd.read_file("restaurante.shp")
    demanda = gpd.read_file("demanda.shp")

    abrangencia1 = restaurante.buffer(3000)
    polygon = abrangencia1.geometry[0]
    demanda_r2 = demanda[demanda.disjoint(polygon)]

    demanda_r2.reset_index(inplace=True, drop=True)

    demanda_r2_geo = gpd.GeoDataFrame(demanda_r2)

    demanda_r2_geo['longitude'] = demanda_r2.centroid.map(lambda p: p.x)
    demanda_r2_geo['latitude'] = demanda_r2.centroid.map(lambda p: p.y)

    demanda_r2_geo = demanda_r2_geo[['longitude', 'latitude']]

    kmeans = KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10)
    pred_y = kmeans.fit_predict(demanda_r2_geo)
    demanda_r2['pred'] = pred_y

    demanda_r2_up = demanda_r2.query('pred == [0, 1, 3]')
    demanda_r2_up.reset_index(inplace=True, drop=True)

    #Geographic mean center of the excess demand, weighted by total price
    colunas_selecionadas = ['longitude', 'latitude']
    points = pd.DataFrame(demanda_r2_up.filter(items=colunas_selecionadas))
    points[['longitude', 'latitude']].to_numpy()

    #centrography
    pp = PointPattern(points)

    #Mean
    media = mean_center(pp.points)

    #weighted mean
    pesos_a = ['preco_tot']
    pesos = pd.DataFrame(demanda_r2_up.filter(items=pesos_a))

    mediapond = weighted_mean_center(pp.points, pesos)

    #Standard Distance
    stdd = std_distance(pp.points)

    potencial_consumo = gpd.read_file("fishnet_clip.shp")

    potencial_consumo['cresc_1'] = potencial_consumo[
        'pcon_2005'] - potencial_consumo['pcon_2000']
    potencial_consumo['cresc_2'] = potencial_consumo[
        'pcon_2010'] - potencial_consumo['pcon_2005']
    potencial_consumo['cresc_3'] = potencial_consumo[
        'pcon_2015'] - potencial_consumo['pcon_2010']
    potencial_consumo['cresc_4'] = potencial_consumo[
        'pcon_2020'] - potencial_consumo['pcon_2015']

    potencial_consumo_crescimento = potencial_consumo[
        (potencial_consumo.cresc_1 >= 0) & (potencial_consumo.cresc_2 >= 0) &
        (potencial_consumo.cresc_3 >= 0) & (potencial_consumo.cresc_4 >= 0)]
    base1 = potencial_consumo.plot(color='None')
    base1.set_aspect(1)
    base = limite.plot(ax=base1, color='white', edgecolor='black')
    base.set_aspect(1)
    pc = potencial_consumo_crescimento.plot(ax=base1, color='lime')
    pc.set_aspect(1)
    restaurante2 = plt.plot(mediapond[0],
                            mediapond[1],
                            '*',
                            color='green',
                            label='Novo empreendimento',
                            markersize=15,
                            zorder=10)

    base1.set_axis_off()

    filepath = os.path.join('mapa1.jpg')
    chart = base.get_figure()
    chart.savefig(filepath, dpi=300)
Example No. 4
def analise5():

    command = '"C:\\Program Files\\QGIS 3.16\\bin\\ogr2ogr.exe" -f "ESRI Shapefile" bairros.shp MYSQL:"openbanking,host=localhost,user=root,password="******"bairros"'
    subprocess.check_call(command, shell=True)
    command = '"C:\\Program Files\\QGIS 3.16\\bin\\ogr2ogr.exe" -f "ESRI Shapefile" demanda.shp MYSQL:"openbanking,host=localhost,user=root,password="******"demanda"'
    subprocess.check_call(command, shell=True)
    command = '"C:\\Program Files\\QGIS 3.16\\bin\\ogr2ogr.exe" -f "ESRI Shapefile" restaurante.shp MYSQL:"openbanking,host=localhost,user=root,password="******"restaurante"'
    subprocess.check_call(command, shell=True)
    limite = gpd.read_file("bairros.shp")
    restaurante = gpd.read_file("restaurante.shp")
    demanda = gpd.read_file("demanda.shp")

    abrangencia1 = restaurante.buffer(3000)
    polygon = abrangencia1.geometry[0]
    demanda_r2 = demanda[demanda.disjoint(polygon)]

    demanda_r2.reset_index(inplace=True, drop=True)

    demanda_r2_geo = gpd.GeoDataFrame(demanda_r2)

    demanda_r2_geo['longitude'] = demanda_r2.centroid.map(lambda p: p.x)
    demanda_r2_geo['latitude'] = demanda_r2.centroid.map(lambda p: p.y)

    demanda_r2_geo = demanda_r2_geo[['longitude', 'latitude']]

    kmeans = KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10)
    pred_y = kmeans.fit_predict(demanda_r2_geo)
    demanda_r2['pred'] = pred_y

    demanda_r2_up = demanda_r2.query('pred == [0, 1, 3]')
    demanda_r2_up.reset_index(inplace=True, drop=True)

    #Geographic mean center of the excess demand, weighted by total price
    colunas_selecionadas = ['longitude', 'latitude']
    points = pd.DataFrame(demanda_r2_up.filter(items=colunas_selecionadas))
    points[['longitude', 'latitude']].to_numpy()

    #centrography
    pp = PointPattern(points)

    #Mean
    media = mean_center(pp.points)

    #weighted mean
    pesos_a = ['preco_tot']
    pesos = pd.DataFrame(demanda_r2_up.filter(items=pesos_a))

    mediapond = weighted_mean_center(pp.points, pesos)

    #Standard Distance
    stdd = std_distance(pp.points)

    #Plot

    base = limite.plot(color='white', edgecolor='black')
    base.set_aspect(1)
    restaurante1 = restaurante.plot(ax=base,
                                    marker='*',
                                    color='red',
                                    markersize=75,
                                    zorder=10)
    restaurante1.set_aspect(1)
    fig = demanda_r2.plot(ax=base,
                          marker='o',
                          color='grey',
                          markersize=10,
                          zorder=8)
    fig.set_aspect(1)

    circle1 = plt.Circle((media[0], media[1]),
                         stdd,
                         fill=False,
                         color='c',
                         label='1',
                         zorder=10)
    circle2 = plt.Circle((mediapond[0], mediapond[1]),
                         stdd,
                         fill=False,
                         color='b',
                         label='2',
                         zorder=10)

    plt.plot(media[0], media[1], 'c^', label='Médio', zorder=10)
    plt.plot(mediapond[0],
             mediapond[1],
             'b^',
             label='Médio Ponderado',
             zorder=10)

    plt.gcf().gca().add_artist(circle1)
    plt.gcf().gca().add_artist(circle2)

    plt.suptitle('Centrografia - Círculo', fontsize=16)

    plt.legend(loc='best',
               title='Centro',
               numpoints=1,
               facecolor='white',
               edgecolor='black')

    #Plot of the ideal location for the new establishment
    base = limite.plot(color='white', edgecolor='black')
    base.set_aspect(1)
    restaurante1 = restaurante.plot(ax=base,
                                    marker='*',
                                    color='red',
                                    markersize=180,
                                    label='Empreendimento existente',
                                    zorder=10)
    restaurante1.set_aspect(1)

    restaurante2 = plt.plot(mediapond[0],
                            mediapond[1],
                            '*',
                            color='green',
                            label='Novo empreendimento',
                            markersize=15,
                            zorder=10)

    fig = demanda.plot(color='grey',
                       ax=base,
                       marker='o',
                       markersize=2,
                       zorder=5)
    fig.set_aspect(1)

    fig.set_axis_off()

    filepath = os.path.join('mapa1.jpg')
    chart = base.get_figure()
    chart.savefig(filepath, dpi=300)
    # select just the current moraine and project as appropriate
    moraine = moraines[moraines.Landform == f].to_crs("epsg:" + proj)

    # make a copy for writing results to
    result = moraine.copy()

    # calculate weights using minimum nearest neighbour distance threshold with one neighbour
    # W = DistanceBand.from_dataframe(moraine, threshold=min_threshold_distance(
    #     [[x, y] for x, y in zip(moraine.geometry.x, moraine.geometry.y)], binary=False)

    # calculate weights using minimum nearest neighbour distance threshold with knn
    W = DistanceBand.from_dataframe(
        moraine,
        threshold=max(PointPattern([
            [x, y] for x, y in zip(moraine.geometry.x, moraine.geometry.y)
        ]).knn(2)[1],
                      key=itemgetter(1))[1],
        binary=False)

    # print(W.cardinalities)  # you can use this to see how many neighbours each observation has

    # perform row standardisation (so all weights in a row add up to 1)
    W.transform = 'r'

    # loop through the columns
    for s in [
            'General_Class_1sigma', 'General_Class_2sigma',
            'General_Class_3sigma'
    ]:
Example No. 6
def pysal_pp():
    tuple_points = np.array([[66.22, 32.54], [22.52, 22.39], [31.01, 81.21],
                             [9.47, 31.02], [30.78, 60.10], [75.21, 58.93],
                             [79.26, 7.68], [8.23, 39.93], [98.73, 77.17],
                             [89.78, 42.53], [65.19, 92.08], [54.46, 8.48]])
    return PointPattern(tuple_points)
def dataMerge(targetPts_idx, locations_pts):
    percentilePhmi = labelsPercentile_upgrade(Phmi)

    LA_numbers = list(
        zip([key for key in targetPts_idx.keys()],
            [len(vals) for vals in targetPts_idx.values()]))
    ScalePhmi = math.pow(10, 1)  #scale PHMI values
    distance_single = {}  # per location, distances to all landmarks
    distance_mean = []  # mean distances
    minDisList = []  # minimum distances
    maxDisList = []  # maximum distances
    containsResults = {}  # per AV location, booleans marking whether each view segment contains a landmark
    containsResults_num = {}  # per AV location, number of landmarks in each view segment
    containsResults_num_is = []  # number of view segments that contain landmarks
    containsResults_num_none = []  # number of view segments without landmarks
    evenDistribution = []  # change of landmark presence across view segments, measured with a 1-D convolution
    intensityPts = []  # intensity of the 2-D landmark point pattern, based on the minimum bounding box
    intensityPts_convexHull = []  # intensity of the 2-D landmark point pattern, based on the convex hull
    chi2_5 = []  #
    chi2_10 = []
    chi2_15 = []
    chi2_8 = []
    VMR = []
    F_km = []
    G_km = []
    i = 0

    def Average(lst):
        return sum(lst) / len(lst)

    for key in tqdm(targetPts_idx.keys()):
        distance_temp = [
            locations_pts[key].distance(pt) for pt in targetPts[key]
        ]
        # print(distance_temp)
        minDisList.append(min(distance_temp))
        maxDisList.append(max(distance_temp))
        distance_single[key] = distance_temp
        distance_mean.append(Average(distance_temp))

        p = locations_pts[key]
        # print("_"*50)
        # print(p)
        lidarScanDis = 25
        bufferCircle = p.buffer(lidarScanDis).boundary
        circleLen = bufferCircle.length

        num = 36  #36
        divisionRange = np.arange(0., circleLen, (circleLen - 0) / num)
        # point = bufferCircle.interpolate(0)
        interpolationPts = [bufferCircle.interpolate(i) for i in divisionRange]
        # points = MultiPoint(interpolationPts)
        interpolationPtsPairs = list(
            zip(interpolationPts, interpolationPts[1:] + interpolationPts[:1]))
        segments = [Polygon([p, i[0], i[1]]) for i in interpolationPtsPairs]
        # multiSegs=MultiPolygon(segments)

        # print(targetPts[key])
        containsResults[key] = [([seg.contains(pt) for pt in targetPts[key]])
                                for seg in segments]
        containsResults_num[key] = [
            val.count(True) for val in containsResults[key]
        ]
        containsResults_num_is.append(
            sum(i > 0 for i in containsResults_num[key]))
        containsResults_num_none.append(
            sum(i == 0 for i in containsResults_num[key]))

        # print(containsResults[key])
        kernel_conv_even = [-1, 2, -1]
        result_conv_even = npConv([int(i) for i in containsResults[key][0]],
                                  kernel_conv_even, 'same')
        # print(result_conv_even)
        evenDistribution.append(sum([abs(v) for v in result_conv_even]))

        p1 = PointPattern(
            [coordinate.coords[:][0] for coordinate in targetPts[key]])
        #Intensity based on minimum bounding box:
        intensityPts.append(p1.lambda_mbb)
        #Intensity based on convex hull:
        intensityPts_convexHull.append(p1.lambda_hull)

        # Quadrat statistics using PySAL's Quadrat_statistics; R's spatstat library could be used instead for more functionality.
        # refs: https://pointpats.readthedocs.io/en/latest/  https://pysal.org/notebooks/explore/pointpats/Quadrat_statistics.html
        # Quadrat Analysis (QA), introduced by Greig-Smith in 1964, studies the variation of point counts within quadrats:
        # a set of quadrats is overlaid on the study area, the counts per quadrat are compared with a completely random
        # (Poisson) process, and the result is usually judged by the Variance-Mean Ratio (VMR).
        # A common "rule of thumb" is a quadrat area of about twice the average area per point.
        # ref: 《黄土丘陵沟壑区农村居民点分布模式空间统计分析——以甘谷县为例》
        q_r_10 = qs.QStatistic(p1, shape="rectangle", nx=10, ny=10)
        chi2_10.append(
            q_r_10.chi2
        )  # chi-squared test statistic for the observed point pattern
        # By comparing the observed point counts against the expected counts and calculating a χ2 test statistic,
        # we can decide whether to reject the null based on where the statistic falls in the sampling distribution.
        # ref: https://nbviewer.jupyter.org/github/pysal/pointpats/blob/master/notebooks/Quadrat_statistics.ipynb#Quadrat-Statistic
        #Complete Spatial Randomness (CSR)
        q_r_5 = qs.QStatistic(p1, shape="rectangle", nx=5, ny=5)
        chi2_5.append(q_r_5.chi2)

        q_r_15 = qs.QStatistic(p1, shape="rectangle", nx=15, ny=15)
        chi2_15.append(q_r_15.chi2)

        q_r_8 = qs.QStatistic(p1, shape="rectangle", nx=8, ny=8)
        chi2_8.append(q_r_8.chi2)

        # print(targetPts[key][0].coords[:])
        # print(targetPts[key])
        # build a dataframe of landmark coordinates for the computation in R
        pts_df = pd.DataFrame(
            zip([pt.coords[:][0][0] for pt in targetPts[key]],
                [pt.coords[:][0][1] for pt in targetPts[key]]),
            columns=["x", "y"])
        # print("+"*50)
        # print(pts_df)
        # print(min(pts_df.x),max(pts_df.x))
        # print(min(pts_df.y),max(pts_df.y))
        # use R's spatstat to compute point-pattern statistics, here the Variance/Mean Ratio (VMR)
        # ref: https://rspatial.org/raster/analysis/8-pointpat.html
        r_vals = r_cal_b(pts_df)
        r2p = pandas2ri.ri2py(r_vals)  # convert the R data structure (a list) into Python objects
        F_km.append(r2p[1][0])
        G_km.append(r2p[2][0])
        VMR.append(r2p[0][0])

        # VMR.append(vmr_single[0])

        # if i==0:break
        # i+=1
    # build a dataframe gathering all the results
    num_meanDis_DF = pd.DataFrame(zip([num[1]
                                       for num in LA_numbers], distance_mean,
                                      Phmi * ScalePhmi, PHMI_dic[0][0],
                                      PHMI_dic[0][1], percentilePhmi),
                                  columns=[
                                      "number", "mean distance", "PHMI", "X",
                                      "y", "percentilePhmi"
                                  ])
    num_meanDis_DF["minDistance"] = minDisList
    num_meanDis_DF["maxDistance"] = maxDisList
    num_meanDis_DF["direction_is"] = containsResults_num_is
    num_meanDis_DF["direction_none"] = containsResults_num_none
    num_meanDis_DF["evenDistribution"] = evenDistribution
    num_meanDis_DF["intensityPts"] = intensityPts
    num_meanDis_DF["intensityPts_hull"] = intensityPts_convexHull
    num_meanDis_DF["chi2_10"] = chi2_10
    num_meanDis_DF["chi2_5"] = chi2_5
    num_meanDis_DF["chi2_15"] = chi2_15
    num_meanDis_DF["chi2_8"] = chi2_8
    num_meanDis_DF["VMR"] = VMR
    num_meanDis_DF["F_km"] = F_km
    num_meanDis_DF["G_km"] = G_km

    return num_meanDis_DF
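
The Variance/Mean Ratio that dataMerge obtains through R's spatstat (via r_cal_b) can also be approximated directly from PySAL's quadrat counts; a minimal sketch, reusing the same point_location_sta call as above (grid size and sample coordinates are assumptions):

import numpy as np
from pointpats import PointPattern
import pointpats.quadrat_statistics as qs

def vmr_from_quadrats(points, nx=10, ny=10):
    # count points per quadrat cell, then compare variance against mean
    pp = PointPattern(points)
    q = qs.QStatistic(pp, shape="rectangle", nx=nx, ny=ny)
    counts = np.array(list(q.mr.point_location_sta().values()))
    # VMR > 1 suggests clustering, VMR near 1 suggests CSR, VMR < 1 suggests regularity
    return counts.var() / counts.mean()

print(vmr_from_quadrats([(1, 2), (3, 4), (5, 1), (2, 6), (7, 7), (8, 2)]))
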
Example No. 8
    return offsets.mean(), offsets.std(), offsets


outfile = sys.argv[1]
region_file = sys.argv[2]
r_xcen = sys.argv[3]
r_ycen = sys.argv[4]
bin_size = get_bin_size(region_file)

x_xcens, y_ycens, count_sums = get_centroids(outfile, region_file)
nan_indices = np.logical_and(np.isnan(x_xcens), np.isnan(y_ycens))
x_xcens = x_xcens[~nan_indices]
y_ycens = y_ycens[~nan_indices]
# calculate the standard distance deviation (spatial dispersion)
pp = PointPattern(list(zip(x_xcens, y_ycens)))

print(
    f"Standard distance deviation: {std_distance(pp.points) * bin_size * 0.492:.3f} arcsec"
)

# calculate the standard deviational ellipse
sx, sy, theta = ellipse(pp.points)

print(
    f"Standard deviational ellipse: X->{sx*bin_size*0.492:.3f}, Y->{sy*bin_size*0.492:.3f}, Theta->{np.degrees(theta):.2f}"
)

offset_mean, offset_std, offsets = calculate_offsets(
    x_xcens, y_ycens, r_xcen, r_ycen, region_file, bin_size
)
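
A sketch of how the standard deviational ellipse returned above could be drawn with matplotlib (centring it on the mean center and the plotting details are assumptions, not part of the original script):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from pointpats import mean_center

pts = np.asarray(pp.points)
cx, cy = mean_center(pts)
fig, ax = plt.subplots()
ax.scatter(pts[:, 0], pts[:, 1], s=5)
# matplotlib's Ellipse takes full axis lengths and a rotation angle in degrees
ax.add_patch(Ellipse((cx, cy), width=2 * sx, height=2 * sy,
                     angle=np.degrees(theta), fill=False, color="red"))
ax.set_aspect(1)
fig.savefig("ellipse_check.png", dpi=150)
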
Example No. 9
def indicatorAssociation(targetPts_idx, locations_pts, Phmi,
                         distance_domain_quadrat):
    indicator_df = pd.DataFrame()

    #01-PHMI
    indicator_df["PHMI"] = Phmi
    '''A-basic related content'''
    #02-the amount of landmarks at each epoch
    LM_numbers = list(
        zip([key for key in targetPts_idx.keys()],
            [len(vals) for vals in targetPts_idx.values()]))
    # print(LM_numbers)
    indicator_df["LM_amount"] = [num[1] for num in LM_numbers]
    #03-(x,y) of AV location
    indicator_df["loc_x"] = [p.x for p in locations_pts]
    indicator_df["loc_y"] = [p.y for p in locations_pts]

    # distance_mean=[]
    # distance_min=[]
    indicator_dic = {
        "distance_mean": [],
        "distance_min": [],
        "distance_max": [],
        "direction_is": [],
        "direction_none": [],
        "direction_edge": [],
        "intensity_mbb": [],
        "intensity_hull": [],
        "nnd_max": [],
        "nnd_min": [],
        "nnd_mean": [],
        "nnd2_mean": [],
        "G": [],
        "chi2_10": [],
        "chi2_10_pval": [],
        # "F":[],
    }
    containResults = {}  # per epoch, booleans marking whether each view segment around the AV contains a landmark
    containsResults_num = {}  # per epoch, the number of landmarks in each view segment
    trueIdx = {}
    distanceContain = {}
    distanceContainAdj = {}
    i = 0
    genv_list = []
    quadratCount_dic = {}
    for key in tqdm(targetPts_idx.keys()):
        #distance between landmarks and each location
        distance_temp = [
            locations_pts[key].distance(pt) for pt in targetPts[key]
        ]
        #00-mean distance between the landmarks and each AV location
        indicator_dic["distance_mean"].append(Average(distance_temp))
        #00-min distance
        indicator_dic["distance_min"].append(min(distance_temp))
        #00-max distance
        indicator_dic["distance_max"].append(max(distance_temp))
        '''B-direction-related content'''
        p = locations_pts[key]
        lidarScanDis = 25
        bufferCircle = p.buffer(lidarScanDis).boundary
        circleLen = bufferCircle.length

        divisionRange = np.arange(0., circleLen, (circleLen - 0) / num)
        interpolationPts = [bufferCircle.interpolate(i) for i in divisionRange]
        interpolationPtsPairs = list(
            zip(interpolationPts, interpolationPts[1:] + interpolationPts[:1]))
        segments = [Polygon([p, i[0], i[1]]) for i in interpolationPtsPairs]
        multiSegs = MultiPolygon(segments)

        containResults[key] = [([seg.contains(pt) for pt in targetPts[key]])
                               for seg in segments]
        containsResults_num[key] = [
            val.count(True) for val in containResults[key]
        ]  # caveat: count(True) also matches the integer 1, since True == 1
        indicator_dic["direction_is"].append(
            sum(i > 0 for i in containsResults_num[key]))
        indicator_dic["direction_none"].append(
            sum(i == 0 for i in containsResults_num[key]))

        #00-the nearest distance between the landmark and the location of AV in each direction
        trueIdx[key] = [
            getIndexPositions_2(lst, True) for lst in containResults[key]
        ]
        distanceContain[key] = [[distance_temp[idx] for idx in lst]
                                for lst in trueIdx[key]]
        distanceContainAdj[key] = [
            min(lst) if lst != [] else 9999 for lst in distanceContain[key]
        ]

        #00-edge detection-extract the jump point as [-1,2,-1] and others are 0
        kernel_conv_even = [-1, 2, -1]
        edge_detection = npConv([int(i) for i in containResults[key][0]],
                                kernel_conv_even, 'same')
        indicator_dic["direction_edge"].append(
            sum([abs(v) for v in edge_detection]))
        '''C-Intensity of landmarks'''
        pp = PointPattern(
            [coordinate.coords[:][0] for coordinate in targetPts[key]])
        #based on minimum bounding box
        indicator_dic["intensity_mbb"].append(pp.lambda_mbb)
        #based on convex hull
        indicator_dic["intensity_hull"].append(pp.lambda_hull)
        '''D-distance statistics'''
        #distance based statistical method ref http://pysal.org/notebooks/explore/pointpats/distance_statistics.html
        indicator_dic["nnd_max"].append(pp.max_nnd)
        indicator_dic["nnd_min"].append(pp.min_nnd)
        indicator_dic["nnd_mean"].append(pp.mean_nnd)
        indicator_dic["nnd2_mean"].append(pp.knn(2)[1])

        #Nearest Neighbor Distance Functions/simulation envelopes---G  function - event-to-event / F  function - "point-event"
        #simulation envelopes
        # realizations = PoissonPointProcess(pp.window, pp.n, 100, asPP=True) # simulate CSR 100 times
        # genv = Genv(pp, intervals=20, realizations=realizations) # call Genv to generate simulation envelope
        # genv_list.append(genv)
        # plt.figure()
        # genv.plot()

        #G
        gp1 = G(
            pp, intervals=20
        )  # cumulative nearest neighbor distance distribution over d (corresponding to the y-axis)
        # plt.figure()
        # gp1.plot()
        G_mean = np.mean(gp1.G)
        # print(G_mean)
        indicator_dic["G"].append(G_mean)
        #F
        # fp1 = F(pp, intervals=20) # The default is to randomly generate 100 points.
        # print(help(fp1))
        # F_mean=np.mean(np.diff(fp1.G)) #error-AttributeError: 'F' object has no attribute 'G'
        # indicator_dic["F"].append(F_mean)

        #quadrat statistics based on continuous distance
        nx_m = ny_m = list(distance_domain_quadrat)
        temp_quadratCount = {}
        for nx, ny in zip(nx_m, ny_m):
            chi2, quadratNum, numDivQuad = quadratCount(
                targetPts[key], p, nx, ny)
            temp_quadratCount[str(nx) + "dis"] = {
                "chi2": chi2,
                "quadratNum": quadratNum,
                "numDivQuad": numDivQuad
            }
        quadratCount_dic[key] = temp_quadratCount
        quadratCount_df = pd.concat(
            {
                k: pd.DataFrame.from_dict(v, 'index')
                for k, v in quadratCount_dic.items()
            },
            axis=0)

        chi2_df = quadratCount_df.chi2.unstack(level=1)
        chi2_df.set_axis(["qdt_chi2_" + i for i in chi2_df.columns],
                         axis=1,
                         inplace=True)

        quadratNum_df = quadratCount_df.quadratNum.unstack(level=1)
        quadratNum_df.set_axis(["qdt_num_" + i for i in quadratNum_df.columns],
                               axis=1,
                               inplace=True)

        numDivQuad_df = quadratCount_df.numDivQuad.unstack(level=1)
        numDivQuad_df.set_axis(["qdt_n/Q_" + i for i in numDivQuad_df.columns],
                               axis=1,
                               inplace=True)

        #00-chi2-10
        csr_process = csr(pp.window, pp.n, 999, asPP=True)
        q_r_e = qs.QStatistic(
            pp, shape="rectangle", nx=6, ny=6,
            realizations=csr_process)  # a division into 6 cells is meant to give roughly 10-meter quadrats
        chi2_10 = q_r_e.chi2
        chi2_10_pval = q_r_e.chi2_r_pvalue
        indicator_dic["chi2_10"].append(chi2_10)
        indicator_dic["chi2_10_pval"].append(chi2_10_pval)

        #VMR(Variance/Mean Ratio)

        # if i==5:break
        # i+=1
    print("\n")
    print("+" * 50)
    print(indicator_dic["chi2_10"], indicator_dic["chi2_10_pval"])
    #direction and distance
    distance_eachDirection = pd.DataFrame.from_dict(distanceContainAdj,
                                                    orient='index',
                                                    columns=list(range(num)))
    print(distance_eachDirection)
    distance_eachDirection["PHMI"] = Phmi  #[:11]
    distance_eachDirection["loc_x"] = [p.x for p in locations_pts]

    # print(indicator_dic)
    indicator_dic_df = pd.DataFrame.from_dict(indicator_dic)
    indicator_df_m = pd.concat([
        indicator_df, indicator_dic_df, chi2_df, quadratNum_df, numDivQuad_df
    ],
                               axis=1,
                               sort=False)

    return indicator_df_m, distance_eachDirection, multiSegs, genv_list
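
The commented-out simulation-envelope lines inside the loop above can be exercised on their own; a minimal sketch with random points (the PoissonPointProcess/Genv calls follow the commented code, everything else is an assumption):

import numpy as np
from pointpats import PointPattern, PoissonPointProcess, Genv

pp_demo = PointPattern(np.random.random((50, 2)) * 100)
# simulate CSR inside the same window, then build a simulation envelope for the G function
realizations = PoissonPointProcess(pp_demo.window, pp_demo.n, 99, asPP=True)
genv = Genv(pp_demo, intervals=20, realizations=realizations)
genv.plot()  # observed G curve against the lower/upper CSR envelopes
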
Example No. 10
import scipy.spatial
import libpysal as ps
import numpy as np
import pandas as pd 
from pointpats import PointPattern, PoissonClusterPointProcess, as_window
import math
df = pd.read_csv("/Users/Gym/Desktop/research/Benchmark_LTW_20/lrc106.csv")
x = list(df['XCOORD.'])
y = list(df['YCOORD.'])
point = list(zip(x, y))
# print(list(point))

pp = PointPattern(point)
print(pp.summary())
# print(pp.mean_nnd)

a = math.sqrt(7790/106)
print(pp.mean_nnd/(0.5*a))
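
The last two lines form a Clark-Evans-style nearest-neighbour ratio: the observed mean nearest-neighbour distance divided by its expectation under CSR, 0.5*sqrt(A/n), where 7790 is apparently taken as the study-area size and 106 as the number of points. A generalized sketch (the function name and the fallback to the minimum bounding box area are assumptions):

import math
from pointpats import PointPattern

def nn_ratio(points, area=None):
    pp = PointPattern(points)
    if area is None:
        # fall back to the minimum bounding box as the study area (an assumption)
        xmin, ymin, xmax, ymax = pp.mbb
        area = (xmax - xmin) * (ymax - ymin)
    expected_nnd = 0.5 * math.sqrt(area / pp.n)
    # a ratio above 1 points toward dispersion, below 1 toward clustering
    return pp.mean_nnd / expected_nnd
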
Example No. 11
plt.grid(axis='x', alpha=0.75)
plt.xlabel('Index')
plt.ylabel('Interval')

# 4.1.3 Time Series

df_meg = pd.read_csv("meg_dw.csv", sep=';')
ks_vil = pd.DataFrame({'x': x_d, 'y': np.exp(logprob)})
ks_vil.head()

# 4.2 Two-Dimensional Data ===============================

# https://nbviewer.jupyter.org/github/pysal/pointpats/blob/master/notebooks/Quadrat_statistics.ipynb

meg_points = df_meg[df_meg.columns[1:]].to_numpy()
pp_meg = PointPattern(meg_points)  # Window
pp_meg.plot(window=True, title="Point pattern Meg")
qr_meg = qs.QStatistic(pp_meg, shape="rectangle", nx=9, ny=6)
qr_meg.plot()

x = df_meg['x']
y = np.array(df_meg['y'])

# test
h = plt.hist2d(x, y)  # 9x6 partition
plt.colorbar(h[3])

# 4.2.1 Kernel-Based Density ===============================

# Fitting Gaussian Kernel
x = np.array(df_meg['x'])
Example No. 12
# -*- coding: utf-8 -*-
"""
Created on Wed Apr  8 11:44:55 2020
@author: Globe-Eater

Big thanks to the team at Pysal, here is the reference to this library:
https://pysal.org/notebooks/explore/pointpats/Quadrat_statistics.html
"""
import geopandas as gpd
import numpy as np
from pointpats import PointPattern, as_window
import pointpats.quadrat_statistics as qs

df = gpd.read_file(
    '/Users/kellenbullock/Desktop/Geographic Analysis II/Ex7/Data.shp')

points = df[['longitude', 'latitude']]
points = np.array(points)

pp_quakes = PointPattern(points)
pp_quakes.summary()

pp_quakes.plot(window=True, title="Point pattern")

q_r = qs.QStatistic(pp_quakes, shape="rectangle", nx=4, ny=4)
q_r.plot()

print("\n",
      q_r.chi2)  #chi-squared test statistic for the observed point pattern
print("\n", q_r.df)
print("\n", q_r.chi2_pvalue)  # analytical pvalue
Example No. 13
'''
Created on Sep 4, 2019

@author: mark
'''

import numpy as np
from pointpats import PointPattern


points = [[66.22, 32.54], [22.52, 22.39], [31.01, 81.21],
          [9.47, 31.02],  [30.78, 60.10], [75.21, 58.93],
          [79.26,  7.68], [8.23, 39.93],  [98.73, 77.17],
          [89.78, 42.53], [65.19, 92.08], [54.46, 8.48]]
p1 = PointPattern(points)

p1.mbb

p1.summary()

type(p1.points)
np.asarray(p1.points)

p1.mbb

points = np.asarray(points)
points

p1_np = PointPattern(points)
p1_np.summary()