Example 1
from pathlib import Path
from typing import Iterable, Optional, Union

import arcpy

# DatasetView is a context-manager helper from this example's own library
# (it wraps a temporary table/feature view); it is not part of arcpy.


def as_feature_set(
    dataset_path: Union[Path, str],
    *,
    field_names: Optional[Iterable[str]] = None,
    dataset_where_sql: Optional[str] = None,
    force_record_set: bool = False,
) -> arcpy.FeatureSet:
    """Return dataset as feature set.

    Args:
        dataset_path: Path to dataset.
        field_names: Collection of field names to include in output. If set to None, all
            fields will be included.
        dataset_where_sql: SQL where-clause property for dataset subselection.
        force_record_set: If True, always return a record set. If False, return a
            feature set for spatial datasets and a record set for non-spatial ones.
    """
    dataset_path = Path(dataset_path)
    if field_names is not None:
        field_names = list(field_names)
    view = DatasetView(dataset_path,
                       field_names=field_names,
                       dataset_where_sql=dataset_where_sql)
    with view:
        if force_record_set or not view.is_spatial:
            return arcpy.RecordSet(table=view.name)

        return arcpy.FeatureSet(table=view.name)
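A minimal usage sketch (the dataset path, field names, and where-clause below are hypothetical; the returned FeatureSet/RecordSet exposes Esri JSON via its JSON property):

feature_set = as_feature_set(
    "C:/data/example.gdb/roads",
    field_names=["road_name", "speed_limit"],
    dataset_where_sql="speed_limit > 40",
)
print(feature_set.JSON[:200])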
Example 2
import json

import arcpy


def rowsToJson(dataset):
    """Convert a feature class/table to a JSON dictionary representation."""
    try:
        # Spatial datasets load into a FeatureSet...
        rows = arcpy.FeatureSet(dataset)
    except Exception:
        # ...non-spatial tables fall back to a RecordSet.
        rows = arcpy.RecordSet(dataset)

    # Describe exposes the set's Esri JSON serialization via its json property.
    desc = arcpy.Describe(rows)
    return json.loads(desc.json)
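The returned dictionary follows the Esri JSON featureset layout; for example (hypothetical path):

fc_json = rowsToJson("C:/data/example.gdb/parcels")
print(fc_json.get("geometryType"))  # e.g. "esriGeometryPolygon" for spatial data
print(len(fc_json["features"]))     # number of records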
Example 3
def download_data(base_url: str, token: Optional[str],
                  oids: List[Union[int, str]], is_layer_table: bool,
                  chunk: int):
    """Download records from the service's query endpoint in chunks of object IDs."""
    params: Dict[str, Any] = init_params(token)
    params['outFields'] = '*'

    total_downloaded: int = 0
    featuresets = []
    total: int = len(oids)
    chunk_size: int = min(chunk, total)
    describe_recs: str = records_desc(is_layer_table)
    arcpy.ResetProgressor()
    arcpy.SetProgressor('step',
                        f'{total} {describe_recs.lower()} to be downloaded', 0,
                        total, chunk)
    url: str = add_url_path(base_url, 'query')
    for current_chunk in chunklist(oids, chunk_size):
        oids_query = ",".join(map(str, current_chunk))
        if not oids_query:
            continue
        # RecordSet for standalone tables, FeatureSet for spatial layers.
        featureset = arcpy.RecordSet() if is_layer_table else arcpy.FeatureSet()
        params['objectIds'] = oids_query
        try:
            featureset.load(f'{url}?{urlencode(params)}')
        except Exception:
            arcpy.AddError('Try to set a lower value for variable CHUNK')
            raise

        featuresets.append(featureset)
        # Count the records actually in this chunk (the last one may be short).
        total_downloaded += len(current_chunk)
        arcpy.SetProgressorLabel(
            f'{total_downloaded} {describe_recs.lower()} appended')
        arcpy.SetProgressorPosition()
    return featuresets
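This example leans on four helpers (init_params, records_desc, add_url_path, chunklist) that are not shown; the sketch below is an assumption about what they plausibly do, not the original implementations:

from typing import Any, Dict, Iterator, Optional, Sequence
from urllib.parse import urlencode


def init_params(token: Optional[str]) -> Dict[str, Any]:
    # Base query parameters; only send a token when the service needs one.
    params: Dict[str, Any] = {'f': 'json'}
    if token:
        params['token'] = token
    return params


def records_desc(is_layer_table: bool) -> str:
    # Human-readable noun for progressor messages.
    return 'Records' if is_layer_table else 'Features'


def add_url_path(base_url: str, *parts: str) -> str:
    # Join URL segments without doubling slashes.
    return '/'.join([base_url.rstrip('/')] + [p.strip('/') for p in parts])


def chunklist(items: Sequence, size: int) -> Iterator[Sequence]:
    # Yield consecutive slices of at most `size` items.
    for start in range(0, len(items), size):
        yield items[start:start + size]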
Example 4
def table_to_json(table):
    """ returns a table as JSON """
    if not arcpyFound:
        raise Exception("ArcPy is required to use this function")
    return arcpy.RecordSet(table).JSON
Example 5
def recordset_to_json(table):
    """ converts the table to JSON """
    if not arcpyFound:
        raise Exception("ArcPy is required to use this function")
    return arcpy.RecordSet(table).JSON
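Either helper returns the Esri JSON as a string; a quick hypothetical use (placeholder path):

import json

json_text = table_to_json(r"C:\data\example.gdb\inspections")
attributes = [feat["attributes"] for feat in json.loads(json_text)["features"]]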
def APIdownload(baseURL, workspace, basename, itersize, IDlist, geometry):
    # Validate up front so a bad argument is not swallowed by the retry logic.
    if not isinstance(geometry, bool):
        raise ValueError(
            'Invalid geometry argument: only boolean values are accepted')
    IDrange = range(IDlist[0], IDlist[1], itersize)
    arcpy.env.workspace = workspace

    for i, j in zip(IDrange, IDrange[1:]):
        itertry = itersize
        downlist = []
        # The REST API server also limits the size of each download, so a
        # request can fail even within the nominal record limit. If a download
        # fails, retry with smaller increments; once the increment can no
        # longer be halved meaningfully, report the error and stop.
        while True:
            try:
                # Append j so the sub-range always reaches the chunk maximum.
                IDrangetry = sorted(set(list(range(i, j + 1, itertry)) + [j]))
                # Loop with the smaller increment within the chunk range.
                for k, l in zip(IDrangetry, IDrangetry[1:]):
                    print('From {0} to {1}'.format(k, l))
                    where = "OBJECTID>={0} AND OBJECTID<{1}".format(k, l)
                    # &geometryType=esriGeometryPoint
                    query = "?where={0}&returnGeometry={1}&f=json&outFields=*".format(
                        where, str(geometry).lower())
                    fsURL = baseURL + query
                    # FeatureSet for spatial downloads, RecordSet for tables.
                    fs = arcpy.FeatureSet() if geometry else arcpy.RecordSet()
                    fs.load(fsURL)
                    if int(arcpy.GetCount_management(fs)[0]) > 0:
                        outname = '{0}_{1}_{2}'.format(basename, k, l)
                        downlist.append(outname)
                        if geometry:
                            arcpy.CopyFeatures_management(fs, outname)
                        else:
                            arcpy.CopyRows_management(fs, '{}.csv'.format(outname))
                        print(outname)
                    else:
                        print('No data from OBJECTID {0} to OBJECTID {1}'.format(k, l))
                break
            except Exception as err:
                if itertry > 5:
                    print('Could not download; deleting the previous {0} '
                          'datasets and retrying with smaller increments'.format(
                              len(downlist)))
                    for fc in downlist:
                        arcpy.Delete_management(fc)
                    downlist = []
                    itertry = itertry // 2
                else:
                    print('Exit with error: ' + str(err))
                    break
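A hypothetical call for illustration (placeholder service URL, workspace, and OBJECTID bounds; note that baseURL must already end in /query because the function appends only the query string):

APIdownload(
    baseURL='https://services.example.com/arcgis/rest/services/Parcels/MapServer/0/query',
    workspace=r'C:\data\downloads.gdb',
    basename='parcels',
    itersize=1000,
    IDlist=[1, 50000],
    geometry=True)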

# Code for downloading tabular data
if agsTable == 'true':
    if count < 1000:
        x = iteration
        y = minOID
        where = OID + '>' + str(y) + ' AND ' + OID + '<=' + str(x)
        fields = '*'

        query = "?where={}&outFields={}&returnGeometry=true&f=json&token={}".format(where, fields, token)
        fsURL = baseURL + query

        fs = arcpy.RecordSet()
        try:
            fs.load(fsURL)
        except Exception:
            PrintException("Error Loading Features")

        arcpy.AddMessage('Copying features with ObjectIDs from ' + str(y) + ' to ' + str(x))
        outputFC = arcpy.GetParameterAsText(7)
        desc = arcpy.Describe(os.path.dirname(outputFC))
        if desc.workspaceFactoryProgID == 'esriDataSourcesGDB.SdeWorkspaceFactory.1':
            outputFC2 = outputFC.split(".")[-1]
            try:
                arcpy.TableToTable_conversion(fs, os.path.dirname(outputFC), outputFC2)
            except Exception:
                PrintException("Error Copying Features")
        else:
            try:
                # Non-enterprise workspace: keep the output table name as given.
                arcpy.TableToTable_conversion(fs, os.path.dirname(outputFC), os.path.basename(outputFC))
            except Exception:
                PrintException("Error Copying Features")
    def process(self):
        #: Load table from web service using a RecordSet
        self.log.info('Loading UDOT data...')
        record_set = arcpy.RecordSet()
        record_set.load(secrets.TABLE_URL)
        traffic_dict = json.loads(record_set.JSON)

        #: traffic_dict['features'] is the actual table, but is list of nested dicts, all with the single outer key 'attributes'
        cleaned_traffic_dict = [
            t['attributes'] for t in traffic_dict['features']
        ]
        traffic_frame = pd.DataFrame.from_dict(cleaned_traffic_dict)

        #: Convert dates for .last() operation later
        traffic_frame['Date'] = pd.to_datetime(traffic_frame['Date'])

        multi_index_df = traffic_frame.set_index(['Station', 'Date'])

        station_ids = traffic_frame['Station'].unique()
        trend_columns = [f'D{i}' for i in range(1, 15)]
        avgs_df = pd.DataFrame(index=station_ids,
                               columns=['AvgChange7D'],
                               dtype=np.float64)

        for i in station_ids:
            working_df = multi_index_df.loc[i, :].last('7D').copy()
            avgs_df.loc[i, 'AvgChange7D'] = working_df['PercentChange'].mean()
            avgs_df.loc[i, 'StartDate'] = str(working_df.index[0])
            avgs_df.loc[i, 'EndDate'] = str(working_df.index[-1])

            fourteen_day_df = multi_index_df.loc[i, 'PercentChange'].last(
                '14D').copy()
            for d in range(14):
                day_column = f'D{d+1}'
                avgs_df.loc[i, day_column] = fourteen_day_df.iloc[d]

        #: Transpose so that the index becomes the keys and the rows are the values
        avgs_dict = avgs_df.T.to_dict()

        #: Load features into a feature set
        feature_set = arcpy.FeatureSet()
        feature_set.load(secrets.FEATURE_URL)

        feature_name = 'TrafficChanges'

        temp_json_path = os.path.join(arcpy.env.scratchFolder, 'features.json')
        temp_fc_path = os.path.join(arcpy.env.scratchGDB, 'features')
        sddraft_path = os.path.join(arcpy.env.scratchFolder,
                                    f'{feature_name}.sddraft')
        sd_path = sddraft_path[:-5]  #: trim 'draft' suffix: name.sddraft -> name.sd

        #: Make sure none of our files already exist
        paths = [sddraft_path, sd_path, temp_json_path, temp_fc_path]
        for item in paths:
            if arcpy.Exists(item):
                self.log.info(f'Deleting {item} prior to use...')
                arcpy.Delete_management(item)

        #: Save features to .json, load .json as a feature class
        self.log.info(f'Saving JSON to {temp_json_path}...')
        with open(temp_json_path, 'w') as json_file:
            json_file.write(feature_set.JSON)

        self.log.info(f'Creating temp feature class {temp_fc_path}...')
        arcpy.JSONToFeatures_conversion(temp_json_path, temp_fc_path)

        #: Add our new columns.
        self.log.info('Adding columns...')
        columns = [('DetectorStation', 'TEXT'), ('AvgChange7D', 'DOUBLE'),
                   ('StartDate', 'TEXT'), ('EndDate', 'TEXT')]
        columns.extend([(d, 'DOUBLE') for d in trend_columns])
        for col in columns:
            name, dtype = col
            arcpy.AddField_management(temp_fc_path, name, dtype)

        #: Update the temp feature class with new averages
        self.log.info('Updating feature class with new averages...')
        fields = ['DetectorStation', 'AvgChange7D', 'StartDate', 'EndDate']
        fields.extend(trend_columns)
        with arcpy.da.UpdateCursor(temp_fc_path, fields) as ucursor:
            for row in ucursor:
                station = row[0]
                if station in avgs_dict:
                    row[1] = avgs_dict[station]['AvgChange7D']
                    row[2] = avgs_dict[station]['StartDate'].split()[0]
                    row[3] = avgs_dict[station]['EndDate'].split()[0]
                    for column, index in zip(trend_columns, range(4, 18)):
                        row[index] = avgs_dict[station][column]
                    ucursor.updateRow(row)

        #: Add anchor points for the symbology
        self.log.info('Adding anchor points...')
        anchor_fields = ['DetectorStation', 'AvgChange7D', 'SHAPE@XY']
        with arcpy.da.InsertCursor(temp_fc_path, anchor_fields) as icursor:
            null_island = (0, 0)
            icursor.insertRow(['AnchorLow', 25, null_island])
            icursor.insertRow(['AnchorHigh', 100, null_island])

        #: Overwrite existing AGOL service
        self.log.info(f'Connecting to AGOL as {secrets.USERNAME}...')
        gis = arcgis.gis.GIS('https://www.arcgis.com', secrets.USERNAME,
                             secrets.PASSWORD)
        sd_item = gis.content.get(secrets.SD_ITEM_ID)

        #: Get project references
        #: Assume there's only one map in the project, remove all layers for clean map
        self.log.info(f'Getting map from {secrets.PROJECT_PATH}...')
        project = arcpy.mp.ArcGISProject(secrets.PROJECT_PATH)
        covid_map = project.listMaps()[0]
        for layer in covid_map.listLayers():
            self.log.info(f'Removing {layer} from {covid_map.name}...')
            covid_map.removeLayer(layer)

        layer = covid_map.addDataFromPath(temp_fc_path)
        project.save()

        #: draft, stage, update, publish
        self.log.info('Staging and updating...')
        sharing_draft = covid_map.getWebLayerSharingDraft(
            'HOSTING_SERVER', 'FEATURE', feature_name, [layer])
        sharing_draft.exportToSDDraft(sddraft_path)
        arcpy.server.StageService(sddraft_path, sd_path)
        sd_item.update(data=sd_path)
        sd_item.publish(overwrite=True)

        #: Update item description
        self.log.info('Updating item description...')
        feature_item = gis.content.get(secrets.FEATURES_ITEM_ID)
        #: Reuse the date range from the last station seen in the update loop above.
        start_date = avgs_dict[station]['StartDate'].split()[0]
        end_date = avgs_dict[station]['EndDate'].split()[0]
        description = f'Traffic data obtained from UDOT; updates occur every morning. Data currently reflects traffic from {start_date} to {end_date}.'
        feature_item.update(item_properties={'description': description})
def tableToRecordSet(table_path):
    """ converts a table to a recordset object """
    rs = arcpy.RecordSet()
    rs.load(table_path)
    return rs
Example 10
def table_to_json(table):
    """ returns a table as JSON """
    return arcpy.RecordSet(table).JSON
Example 11
def recordset_to_json(table):
    """ converts the table to JSON """
    return arcpy.RecordSet(table).JSON
    # Gather features
    print("Gathering records...")
    fs = dict()
    for i in range(0, numrec, maxrc):
      torec = i + (maxrc - 1)
      # Clamp to the last valid index of idlist (indices run 0..numrec-1).
      if torec > numrec - 1:
        torec = numrec - 1
      fromid = idlist[i]
      toid = idlist[torec]
      where = "{} >= {} and {} <= {}".format(idfield, fromid, idfield, toid)
      print("  {}".format(where))
      urlstring = baseURL + "/query?where={}&returnGeometry=true&outFields={}&f=json".format(where,fields)
      if table_type == 'feature':
        fs[i] = arcpy.FeatureSet()
      else:
        fs[i] = arcpy.RecordSet()  
      fs[i].load(urlstring)
    
    # Save features
    print("Saving features...")
    fslist = list(fs.values())
    arcpy.Merge_management(fslist, out_data)
    print("Done!")


######################################################################
## CREATE PARKS AND ACCESS POINTS FEATURE CLASSES FOR EACH SCENARIO ##
###################################################################### 
queries = {
Example 13
        ownerName = row.getValue("MERCATOR2.DBO.TITLED_OWNERS.Owner_" + str(x))
        postalCode = row.getValue("MERCATOR2.DBO.TITLED_OWNERS.Owner_" + str(x) + "_Postal_Code")
        province = row.getValue("MERCATOR2.DBO.TITLED_OWNERS.Owner_" + str(x) + "_Province")

        # Owner 1's address field has a double underscore in the source schema.
        if x == 1:
            ownerAddress = row.getValue("MERCATOR2.DBO.TITLED_OWNERS.Owner_" + str(x) + "__Address")
        else:
            ownerAddress = row.getValue("MERCATOR2.DBO.TITLED_OWNERS.Owner_" + str(x) + "_Address")
            
        irow = icursor.newRow()
        irow.setValue("PID", pid)
        irow.setValue("owner_name", ownerName)
        irow.setValue("owner_address", ownerAddress)
        irow.setValue("postal_code", postalCode)
        irow.setValue("province", province)
        icursor.insertRow(irow)
        
recordSet = arcpy.RecordSet(owners_table)
print(arcpy.GetCount_management(recordSet))
arcpy.SetParameter(2, recordSet)

desc = arcpy.Describe(recordSet)
# arcpy.AddMessage(desc.pjson)
print(desc.pjson)
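When this runs as a script tool, SetParameter(2, recordSet) hands the record set back as a derived output; a caller could then retrieve it from the tool's Result object (the toolbox path and tool name below are hypothetical):

import arcpy

arcpy.ImportToolbox(r"C:\tools\Owners.tbx")  # hypothetical toolbox
result = arcpy.TitledOwners_owners()         # hypothetical tool name and alias
record_set = result.getOutput(0)             # index among the tool's output parameters
print(record_set.JSON[:200])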