Ejemplo n.º 1
0
def localGDB():
    """Load the whole weather table into a DataFrame, drop duplicated
    forecast timestamps, publish the result through the module-level
    ``local`` variable and return it.
    """
    global local
    # Fields to read from the weather table.
    fields = ['OBJECTID', 'lon', 'lat', 'city_id', 'temp',
              'forecast_date2', 'temp_min', 'temp_max', 'pressure',
              'pressure_s_lvl', 'pressure_g_lvl', 'wind_speed',
              'wind_degree', 'clouds', 'weather_description', 'humidity',
              'request_date', 'forecast_date', 'rain', 'snow', 'name']
    # Pull the table rows into a NumPy structured array
    # (nulls replaced with 0), then wrap it in a DataFrame.
    records = da.FeatureClassToNumPyArray(
        weatherinput, fields, skip_nulls=False, null_value=0
    )
    frame = DataFrame(records)
    # Keep only the most recent row for each forecast timestamp.
    frame.drop_duplicates(subset='forecast_date2', keep='last', inplace=True)
    print(frame.shape)
    local = frame
    print(local)

    return local
Ejemplo n.º 2
0
def unique_values(table, field):
    """Return the list of unique values found in one column of a table.

    The table is read in chunks through a ``da.SearchCursor`` so that
    arbitrarily large tables can be scanned without loading everything
    at once.  Two code paths exist: one that accumulates uniques with
    pandas (when ``has_pandas`` is true) and a pure-NumPy fallback.

    table: path to a table
    field: string of a field name

    output:
       list

    raises: FunctionError wrapping the underlying error plus the arcpy
        message stack.
    """
    try:
        if has_pandas:
            # Pandas path: stream the cursor in chunks (grouper_it is a
            # file-level helper -- presumably yields fixed-size groups;
            # confirm against its definition) and union the per-chunk
            # unique values.
            uvalues = None
            chunk_size = calc_chunk_size()
            with da.SearchCursor(table, [field]) as cursor:
                for group in grouper_it(chunk_size, cursor):
                    df = pd.DataFrame.from_records(group,
                                                   columns=cursor.fields)
                    column = df[field].unique()
                    if uvalues is None:
                        uvalues = column
                    else:
                        # Duplicates across chunks are tolerated here;
                        # the final set() removes them.
                        uvalues = np.concatenate([column, uvalues])
                    # Release chunk-sized intermediates promptly.
                    del group
                    del df
                    del column
                del cursor
            if uvalues is None:
                # Empty table: nothing was accumulated.
                return []
            return list(set(uvalues.tolist()))
        else:
            # NumPy-only path: np.fromiter needs a dtype, so first run a
            # zero-row query (OID < 1) purely to obtain the field dtype.
            desc = arcpy.Describe(table)
            if desc.hasOID:
                oidFieldname = desc.OIDFieldName
            else:
                raise Exception("Table must have an object id table")
            template = da.FeatureClassToNumPyArray(
                table, [field],
                where_clause="{ofield} < 1".format(ofield=oidFieldname))
            uvalues = None
            chunk_size = calc_chunk_size()
            with da.SearchCursor(table, [field]) as cursor:
                for group in grouper_it(chunk_size, cursor):
                    # -1 count: consume the whole chunk iterator.
                    df = np.fromiter(group, template.dtype, -1)
                    column = np.unique(df[field])
                    if uvalues is None:
                        uvalues = column
                    else:
                        uvalues = np.unique(np.concatenate([column, uvalues]))
            if uvalues is None:
                # Empty table: nothing was accumulated.
                return []
            return list(set(uvalues.tolist()))
    except:
        # File convention: wrap any failure in FunctionError with the
        # traceback info from trace() and the arcpy error messages.
        line, filename, synerror = trace()
        raise FunctionError({
            "function": "unique_values",
            "line": line,
            "filename": __file__,
            "synerror": synerror,
            "arc": str(arcpy.GetMessages(2))
        })
Ejemplo n.º 3
0
def featureToCSV(inFeature, inColumns, outCSV):
    """Export selected attribute columns of a feature class to a CSV file.

    Args:
        inFeature: path to the input feature class.
        inColumns: list of field names to export.
        outCSV: path of the CSV file to write (UTF-8, comma-separated,
            with a header row and no index column).
    """
    arcpy.AddMessage("Converting feature class to csv...")
    # Fetch only the requested attribute fields.  The original code also
    # fetched "SHAPE@XY", but the geometry was never written to the CSV
    # (columns=inColumns discarded it), so reading it was wasted work.
    feature_array = da.FeatureClassToNumPyArray(inFeature, inColumns)
    feature_df = pd.DataFrame(feature_array, columns=inColumns)
    feature_df.to_csv(outCSV,
                      header=True,
                      index=False,
                      sep=',',
                      encoding='utf-8')
def las_tile_to_numpy_pandas(lidar_tile, sr, returns, class_codes,
                             format_for_library):
    """Convert one LAS tile to multipoint features and hand the points to
    a numpy- or pandas-based processing routine.

    Args:
        lidar_tile: path to the input .las file.
        sr: spatial reference applied to the LAS data.
        returns: value forwarded to LASToMultipoint's ``_return`` argument.
        class_codes: LAS class codes to include.
        format_for_library: "numpy" or "pandas"; selects the downstream
            processing path.  Any other value does nothing.

    Returns:
        None
    """
    # Build a temporary LAS dataset solely to read the point spacing,
    # then remove it again.
    temp_lasd = "{0}_temp.lasd".format(splitext(lidar_tile)[0])
    if Exists(temp_lasd):
        Delete(temp_lasd)
    arcpy.CreateLasDataset_management(lidar_tile,
                                      temp_lasd,
                                      spatial_reference=sr)
    point_spacing = arcpy.Describe(temp_lasd).pointSpacing
    Delete(temp_lasd)

    # Convert the LAS points to an in-memory multipoint feature class.
    temp_pts_multi = join("in_memory", "temp_pts_multi")
    if Exists(temp_pts_multi):
        Delete(temp_pts_multi)
    LASToMultipoint(input=lidar_tile,
                    out_feature_class=temp_pts_multi,
                    average_point_spacing=point_spacing,
                    class_code=class_codes,
                    _return=returns,
                    input_coordinate_system=sr)

    # Pre-bind so the final cleanup cannot raise NameError when an
    # unrecognized format_for_library value skips both branches
    # (the original ``del lidar_points`` failed on an unbound name).
    lidar_points = None

    if format_for_library == "numpy":
        # Explode multipoints into individual XYZ records.
        lidar_points = da.FeatureClassToNumPyArray(
            in_table=temp_pts_multi,
            field_names=["SHAPE@X", "SHAPE@Y", "SHAPE@Z"],
            spatial_reference=sr,
            explode_to_points=True)
        Delete(temp_pts_multi)
        numpy_operation_here(lidar_points)

    elif format_for_library == "pandas":
        # Spatially-enabled DataFrame path.
        lidar_points = pd.DataFrame.spatial.from_featureclass(
            location=temp_pts_multi)
        Delete(temp_pts_multi)
        pandas_operation_here(lidar_points)

    del lidar_points
Ejemplo n.º 5
0
    def get_change_detection_statistics(self, fc):
        """Calculate the change-detection SUM statistics for a feature class.

        Reads every field named in ``self.year_list`` from ``fc``, sums
        them per row, and stores the percentile breakpoints
        (``self.minus2Sigma`` .. ``self.plus2Sigma``) and ``self.median``
        of the row sums on the instance.

        Args:
            fc: feature class (or table) containing one numeric field per
                year in ``self.year_list``.

        Raises:
            FunctionError: wrapping any underlying error plus the arcpy
                message stack (file convention).
        """
        try:
            sum_array = da.FeatureClassToNumPyArray(fc,
                                                    self.year_list,
                                                    skip_nulls=True)

            arcpy.AddMessage("Creating CD SUM array")
            # Sum the yearly fields row-wise in one vectorized pass.
            # Replaces the original O(rows * years) Python double loop,
            # which indexed the structured array element-by-element.
            change_detection_sum = np.sum(
                [sum_array[year] for year in self.year_list], axis=0)

            arcpy.AddMessage('Using default SUM statistics')
            # Percentile levels (MINUS_2_SIGMA etc.) are class constants.
            self.minus2Sigma = np.percentile(change_detection_sum,
                                             self.MINUS_2_SIGMA)
            self.minus1Sigma = np.percentile(change_detection_sum,
                                             self.MINUS_1_SIGMA)
            self.median = np.median(change_detection_sum)
            self.plus1Sigma = np.percentile(change_detection_sum,
                                            self.PLUS_1_SIGMA)
            self.plus2Sigma = np.percentile(change_detection_sum,
                                            self.PLUS_2_SIGMA)

            arcpy.AddMessage('Minus 2 Sigma = ' + str(self.minus2Sigma))
            arcpy.AddMessage('Minus 1 Sigma = ' + str(self.minus1Sigma))
            arcpy.AddMessage('Median = ' + str(self.median))
            arcpy.AddMessage('Plus 1 Sigma = ' + str(self.plus1Sigma))
            arcpy.AddMessage('Plus 2 Sigma = ' + str(self.plus2Sigma))
        except:
            line, filename, synerror = trace()
            raise FunctionError({
                "function": "get_change_detection_statistics",
                "line": line,
                "filename": __file__,
                "synerror": synerror,
                "arc" : str(arcpy.GetMessages(2))
            })
# Dataset names in the current workspace.
inputFC = r'USA_Train'
globalFC = r'EMU_Global_90m_FillMissingVa'

# Group the variables: oceanographic predictors and the class (target)
# variable.
predictVars = [
    'DISS02', 'NITRATE', 'PHOSPHATE', 'SALINITY', 'SILICATE', 'SRTM30', 'TEMP'
]
classVar = ['PRESENT']

# Concatenate predictors and target into the full field list.
allVars = predictVars + classVar

# Read the training features (geometry plus attributes) into a NumPy
# structured array and capture the input's spatial reference.
trainFC = DA.FeatureClassToNumPyArray(inputFC, ["SHAPE@XY"] + allVars)
spatRef = ARCPY.Describe(inputFC).spatialReference

# Build a DataFrame of the attribute columns and compute their
# correlation matrix (cast to float64 so .corr() treats all as numeric).
data = PD.DataFrame(trainFC, columns=allVars)
corr = data.astype('float64').corr()

# Render the correlation matrix as an annotated seaborn heatmap.
ax = SEA.heatmap(corr,
                 cmap=SEA.diverging_palette(220, 10, as_cmap=True),
                 square=True,
                 annot=True,
                 linecolor='k',
                 linewidths=1)
PLOT.show()

# Take the data sample size (section continues past this excerpt).
Ejemplo n.º 7
0
from arcpy import da, GetParameterAsText, AddMessage, SelectLayerByAttribute_management
import pandas as pd

EventTable = GetParameterAsText(0)
RouteId = GetParameterAsText(1)
FromMeas = GetParameterAsText(2)
ToMeas = GetParameterAsText(3)
OverlapOID = GetParameterAsText(4)

OID_field = 'OID@'
np_array = da.FeatureClassToNumPyArray(EventTable,
                                       [RouteId, FromMeas, ToMeas, OID_field])
df = pd.DataFrame(np_array)
AddMessage(list(df))
route_list = df[RouteId].unique().tolist()

for route in route_list:
    df_route = df.loc[df[RouteId] == route]

    flipped_i = df_route.loc[df_route[FromMeas] > df_route[ToMeas]].index
    df_route.loc[flipped_i,
                 [FromMeas, ToMeas]] = df_route.loc[flipped_i,
                                                    [ToMeas, FromMeas]].values
    df_route.sort_values(by=[FromMeas, ToMeas], inplace=True)

    for index, row in df_route.iterrows():
        complete_overlap = ((df_route[FromMeas] > row[FromMeas]) &
                            (df_route[ToMeas] < row[ToMeas]))
        partial_front_overlap = ((df_route[FromMeas] < row[FromMeas]) &
                                 (df_route[ToMeas] > row[FromMeas]))
        overlap_i = df_route.loc[partial_front_overlap
Ejemplo n.º 8
0
def anal():
    """Aggregate stored forecast rows into daily per-city statistics and
    append one row per city/day to the analysis table.

    Reads module-level names not visible here -- ``analysinput`` (output
    table), ``weatherinput`` (forecast table) and ``s`` (sequence of
    city ids) -- TODO confirm against the rest of the file.
    """
    # Last date already present in the output table; processing resumes
    # the day after it.
    timeDF = (pd.DataFrame(da.FeatureClassToNumPyArray(analysinput, 'date', skip_nulls=False, null_value=0)).tail(1))
    starttime = pd.to_datetime(timeDF['date'].values[0])
    dtable = date((starttime.year), (starttime.month), (starttime.day))
    #d1 = date(2018, 5, 15)
    # Date range to process: (last stored date + 1) .. yesterday.
    d1 = dtable+timedelta(days = 1)
    d2 = (date.today())
    delta=d2-d1

    fieldstoread = ['lat','lon','city_id',  'temp', 'clouds', 'weather_description',  'forecast_date2', 'forecast_date', 'rain',
             'snow', 'name']
    fieldstowrite = ['SHAPE@XY','name','id','rainsumm','snowsum','rainday','acttemp','avgtemp','clearday','date','t1','t2','t3','t4','t5']#
    for i in range(len(s)):
        print(s[i])
        # Pull all stored forecast rows for this city id.
        exp = "city_id = {}".format(s[i])
        fc_np = da.FeatureClassToNumPyArray(weatherinput, fieldstoread, where_clause=exp, skip_nulls=False, null_value=0)
        local = pd.DataFrame(fc_np)
        # Keep only the most recent forecast per timestamp.
        local.drop_duplicates(subset='forecast_date2', keep='last', inplace=True)
        latlon = (float(local['lat'].values[0]), float(local['lon'].values[0]))
        print(type(latlon))
        print(latlon)
        for z in range(delta.days ):
            # x is the ISO date string used as a substring match against
            # forecast_date2.
            x = str(d1 + timedelta(days=z))
            rainsum = float(local.loc[local['forecast_date2'].str.contains('{}'.format(x))]['rain'].sum())
            snowsum = float(local.loc[local['forecast_date2'].str.contains('{}'.format(x))]['snow'].sum())
            # NOTE(review): chained indexing with a boolean mask built on
            # the *unfiltered* frame (local['temp']>10); pandas aligns it
            # by index -- confirm this matches the intended per-day filter.
            acttempdf = (local.loc[local['forecast_date2'].str.contains('{}'.format(x))][local['temp']>10])
            avgtemp = float(local.loc[local['forecast_date2'].str.contains('{}'.format(x))]['temp'].mean())
            avgsun = float(local.loc[local['forecast_date2'].str.contains('{}'.format(x))]['clouds'].mean())
            name = (local['name'].values[0])
            # Same-day frames at decreasing temperature thresholds
            # (>10, >1, >2, >5, >8); used below only for their row counts.
            t1df =  (local.loc[local['forecast_date2'].str.contains('{}'.format(x))][local['temp']>10])
            t2df =  (local.loc[local['forecast_date2'].str.contains('{}'.format(x))][local['temp']> 1])
            t3df = (local.loc[local['forecast_date2'].str.contains('{}'.format(x))][local['temp']> 2])
            t4df = (local.loc[local['forecast_date2'].str.contains('{}'.format(x))][local['temp']> 5])
            t5df = (local.loc[local['forecast_date2'].str.contains('{}'.format(x))][local['temp']> 8])
            # NOTE: shadows the builtin ``id``.
            id = int(s[i])
            if rainsum > 0:
                rainday = 1
            else:
                rainday = 0
            # A "clear day" is mean cloud cover below 16 percent.
            if avgsun < 16:
                clearday = 1
            else:
                clearday = 0
            # NOTE(review): t1-t5 gate on each tXdf's row count (< 8 slots
            # of the day => 0) but ALL take their mean from acttempdf
            # (temp > 10) -- looks like a copy/paste slip; confirm whether
            # tXdf['temp'].mean() was intended for t2-t5.
            if t1df.shape[0]<8:
                t1 = 0
            else:
                t1 = float(acttempdf['temp'].mean())
            if t2df.shape[0]<8:
                t2 = 0
            else:
                t2 = float(acttempdf['temp'].mean())
            if t3df.shape[0]<8:
                t3 = 0
            else:
                t3 = float(acttempdf['temp'].mean())
            if t4df.shape[0]<8:
                t4 = 0
            else:
                t4 = float(acttempdf['temp'].mean())
            if t5df.shape[0]<8:
                t5 = 0
            else:
                t5 = float(acttempdf['temp'].mean())
            if acttempdf.shape[0]<8:
                acttemp = 0
            else:

                acttemp=float(acttempdf['temp'].mean())
                pass

            # NOTE(review): an InsertCursor is created and destroyed for
            # every row written; hoisting it outside the loops would be
            # cheaper.
            cursor = da.InsertCursor(analysinput, fieldstowrite)
            print("creating row {} {} {} {} {} {} {}".format(latlon,name,id,rainsum,avgtemp,clearday,x))
            cursor.insertRow((latlon,name,id,rainsum,snowsum,rainday,acttemp,avgtemp,clearday,x,t1,t2,t3,t4,t5))#
            del cursor