Example #1
def createComplainRasterFeature(SelectSQL,InputComplainFeatures,POIFeatures,FinalResultFeature):
    logging.info("Process: 创建"+FinalResultFeature)
    if(arcpy.Exists(FinalResultFeature)):
        arcpy.Delete_management(FinalResultFeature, "FeatureClass")
    rmNo = random.randint(100000000,999999999)
    print rmNo
    # Process: Select
    print "Process: Select"
    logging.info("Process: Select")
    FeatureSelect=arcpy.Select_analysis(InputComplainFeatures, "in_memory/FeatureSelect"+repr(rmNo), SelectSQL)
    # Process: Point to Raster
    print FeatureSelect
    rowSear =  arcpy.SearchCursor(FeatureSelect)
    row = rowSear.next()
    if(row):
        print "Process: 点转栅格"
        logging.info("Process: 点转栅格")
        tempEnvironment0 = arcpy.env.extent
        arcpy.env.extent = "115 23 122 29"
        ResultRaster=arcpy.PointToRaster_conversion(FeatureSelect, "OBJECTID", "in_memory/ResultRaster"+repr(rmNo), "COUNT", "NONE", ".0018")
        arcpy.env.extent = tempEnvironment0
        # Process: Raster to Point
        print "Process: Raster to Point"
        logging.info("Process: Raster to Point")
        COMPLAIN_RASTER_POINTS=arcpy.RasterToPoint_conversion(ResultRaster, "in_memory/COMPLAIN_RASTER_POINTS"+repr(rmNo), "VALUE")
        print "Process: 空间连接"
        # Process: 空间连接
        COMPLAIN_POI_UNION=arcpy.SpatialJoin_analysis(COMPLAIN_RASTER_POINTS, POI, "in_memory/COMPLAIN_POI_UNION"+repr(rmNo), "JOIN_ONE_TO_ONE", "KEEP_ALL", "","CLOSEST", ".1 DecimalDegrees", "DISTANCE")
        print "Process: 点转栅格 (2)"
        logging.info("Process: 点转栅格 (2)")
        # Process: 点转栅格 (2)
        tempEnvironment0 = arcpy.env.extent
        arcpy.env.extent = "115 23 122 29"
        ResultRaster2=arcpy.PointToRaster_conversion(COMPLAIN_POI_UNION, "OBJECTID", "in_memory/ResultRaster2"+repr(rmNo), "MOST_FREQUENT", "NONE", ".0018")
        arcpy.env.extent = tempEnvironment0
        print "Process: 栅格转面"
        logging.info("Process: 栅格转面")
        # Process: 栅格转面
        ResultFeature=arcpy.RasterToPolygon_conversion(ResultRaster2, "in_memory/ResultFeature"+repr(rmNo), "NO_SIMPLIFY", "VALUE")
        print "Process: 空间连接 (2)"
        logging.info("Process: 空间连接 (2)")
        # Process: 空间连接 (2)
        ResultFeatureZj=arcpy.SpatialJoin_analysis(ResultFeature, COMPLAIN_POI_UNION, "in_memory/ResultFeatureZj"+repr(rmNo), "JOIN_ONE_TO_ONE", "KEEP_ALL", "", "INTERSECT", "", "")
        # Process: Spatial Join (3)
        arcpy.SpatialJoin_analysis(FeatureSelect, ResultFeatureZj, FinalResultFeature, "JOIN_ONE_TO_ONE", "KEEP_ALL", "", "INTERSECT", "", "")
        #arcpy.SpatialJoin_analysis(FeatureSelect, ResultFeatureZj, FinalResultFeature, "JOIN_ONE_TO_ONE", "KEEP_ALL", "TIME \"TIME\" true true false 8 Date 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\GIS_OBJECT_COMPLAIN_Select1,TIME,-1,-1;WORK_ORDER_ID \"WORK_ORDER_ID\" true true false 100 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\GIS_OBJECT_COMPLAIN_Select1,WORK_ORDER_ID,-1,-1;DISTANCE \"DISTANCE\" true true false 8 Double 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,DISTANCE,-1,-1;POINTID \"POINTID\" true true false 4 Long 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,POINTID,-1,-1;GRID_CODE \"聚合数\" true true false 4 Long 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,GRID_CODE,-1,-1;Name \"聚合地址\" true true false 160 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,Name,-1,-1;Ctype \"聚合地址类型(原始)\" true true false 64 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,Ctype,-1,-1;CnType \"聚合地址类型\" true true false 50 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,CnType,-1,-1;CITY \"地市\" true true false 32 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,CITY,-1,-1;COUNTY \"区县\" true true false 32 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,COUNTY,-1,-1;GRID \"GRID\" true true false 32 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,GRID,-1,-1;SGLON \"栅格POI经度\" true true false 8 Double 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,SGLON,-1,-1;SGLAT \"栅格POI纬度\" true true false 8 Double 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,SGLAT,-1,-1;CQ_REGION \"城区网格所属区域\" true true false 60 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,CQ_REGION,-1,-1;CQ_REGION_TYPE \"城区网格区域属性\" true true false 60 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,CQ_REGION_TYPE,-1,-1;TEST_ID \"测试网格ID\" true true false 10 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,TEST_ID,-1,-1;TEST_GRIDID \"测试网格编号\" true true false 20 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,TEST_GRIDID,-1,-1;TEST_CLASS \"测试网格类型\" true true false 10 Text 0 0 ,First,#,D:\\HasmbyGis\\Cache.gdb\\Complain20140509_SpatialJoin,TEST_CLASS,-1,-1", "INTERSECT", "", "")

        
        arcpy.Delete_management(COMPLAIN_POI_UNION)
        arcpy.Delete_management(COMPLAIN_RASTER_POINTS)
        arcpy.Delete_management(ResultRaster)
        arcpy.Delete_management(ResultRaster2)
        arcpy.Delete_management(ResultFeature)
        arcpy.Delete_management(ResultFeatureZj)
        del COMPLAIN_POI_UNION,COMPLAIN_RASTER_POINTS,ResultRaster,ResultRaster2,ResultFeature,ResultFeatureZj
    arcpy.Delete_management(FeatureSelect)
    del FeatureSelect,rowSear
    logging.info("清理内存~~")
    gc.collect()
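
A companion sketch of the extent save/restore pattern used twice above, with hypothetical names points_fc and density_ras; the extent string and the 0.0018-degree cell size are the ones from the example:

import arcpy

def points_to_density_raster(points_fc, density_ras,
                             extent="115 23 122 29", cell_size=0.0018):
    previous_extent = arcpy.env.extent  # remember the current extent
    arcpy.env.extent = extent           # force a fixed analysis window
    try:
        # COUNT assigns each cell the number of points that fall inside it
        arcpy.PointToRaster_conversion(points_fc, "OBJECTID", density_ras,
                                       "COUNT", "NONE", cell_size)
    finally:
        arcpy.env.extent = previous_extent  # restore even if the tool fails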
Example #2
def createTraceFiles(in_layer,name):
    # Select the AOT points inside Changshu enterprise zone 1 and export them
    arcpy.AddMessage("------- Creating trace files...")
    # Location of the enterprise zone files
    zoneLocation='F:\\20190905\\data\\zone\\polygon'
    # Create the output folder
    # AOT files
    aot_Content='F:\\changshutraceresult\\aot'
    try:
        os.makedirs(aot_Content)
    except :
        arcpy.AddMessage('-------'+aot_Content+' already exists')
    path2Dir =  os.listdir(zoneLocation)
    for zonefile in path2Dir:
        if zonefile[-4:].lower() == '.shp':
            filename=getTime()+'_'+zonefile[:-4]
            arcpy.AddMessage("------"+filename)
            zone_Layer=os.path.join(zoneLocation,zonefile)
            ilayer='InLayer'+getTime()
            try:
                copy_layer="F:\\20190905\\data\\out\\point\\"+str(int(time.time()))+'.shp'
                arcpy.CopyFeatures_management(in_layer,copy_layer)
                flayer=arcpy.MakeFeatureLayer_management(copy_layer,ilayer)
                # 选择图层
                zone_feature=arcpy.SelectLayerByLocation_management(flayer, 'COMPLETELY_WITHIN',zone_Layer,0,'NEW_SELECTION')
                layer="F:\\20190905\\data\\out\\point\\"+str(int(time.time()))+'.shp'
                arcpy.CopyFeatures_management(zone_feature,layer)
                # cnt=arcpy.GetCount_management(flayer)
                # arcpy.AddMessage("已选择{}行".format(str(cnt)))
                # 合并后的要素输出位置
                out_feature_class='F:\\20190905\\data\\zone\\out\\'+filename+'.shp'
                # 合并要素
                arcpy.Merge_management([layer, "F:\\20190905\\data\\zone\\point\\"+zonefile[:-4]+"_company.shp"], out_feature_class)
                # Convert the merged points to a raster at 0.0005 resolution, with the parameters below
                out_rasterdataset=os.path.join(aot_Content,zonefile[:-4]+'企业坐标'+name[3:]+'.tif')
                arcpy.PointToRaster_conversion (out_feature_class, 'enterprise', out_rasterdataset, 'MAXIMUM', 'NONE', '0.0005')
                # Likewise, convert the zone-1 AOT points to a 0.0005-resolution raster, yielding the zone-1 AOT file
                out_rasterdataset2=os.path.join(aot_Content,zonefile[:-4]+name+'.tif')
                arcpy.PointToRaster_conversion (layer, 'value', out_rasterdataset2, 'MAXIMUM', 'NONE', '0.0005')
                try:
                    arcpy.Delete_management(flayer)
                    arcpy.Delete_management(layer)
                    arcpy.Delete_management(copy_layer)
                    arcpy.Delete_management(out_feature_class)
                except Exception as e:
                    arcpy.AddMessage(str(e))
            except Exception as e:
                print str(e)
    arcpy.AddMessage("------制作溯源文件完成")
Example #3
def execute(in_netcdf, out_feat, levels=(20, 25, 30, 35, 40, 45), mask=None):
    # This is a quick-and-dirty fix for multiprocessing,
    # but I am not sure why arcpy.CheckOutExtension works
    arcpy.env.overwriteOutput = True
    arcpy.env.workspace = "in_memory"
    workspace = "in_memory"

    # Set all filenames
    temp_dir = os.path.dirname(os.path.abspath(in_netcdf))  # csv and img are saved next to the .nc
    layer1 = relocate(in_netcdf, temp_dir, ".img")
    if not os.path.exists(layer1):
        fn_csv = relocate(in_netcdf, temp_dir, ".csv")
        cnt_dir = os.path.dirname(in_netcdf) + "\\cnt"

        # Skip results that already exist
        if os.path.exists(relocate(fn_csv, cnt_dir, ".shp")):
            print("%s existed. Skip!" % relocate(fn_csv, cnt_dir, ".shp"))
            return
        ds = netCDF4.Dataset(in_netcdf)
        # Level 7 == 3.5km
        refl_l = numpy.ravel(ds.variables["WNDSPD_850MB"])
        lon_l = numpy.ravel(ds.variables["lon"])
        lat_l = numpy.ravel(ds.variables["lat"])

        lon_l, lat_l = utils.projFunc(lon_l, lat_l)

        print(fn_csv)
        if not os.path.exists(fn_csv):
            f_csv = open(fn_csv, "w")
            f_csv.write("Id,X,Y,Reflect\n")

            for i in range(refl_l.shape[0]):
                if not refl_l[i] >= 10:
                    continue
                refl = refl_l[i]
                lat = lat_l[i]
                lon = lon_l[i]
                f_csv.write("%d,%f,%f,%f\n" % (i, lon, lat, refl))

            f_csv.close()
            print("NC to CSV:", fn_csv)
        else:
            print("Have CSV:", fn_csv)

        reflect = arcpy.CreateUniqueName(arcpy.ValidateTableName("reflect.shp"), workspace)
        arcpy.MakeXYEventLayer_management(fn_csv, 'X', 'Y', reflect, utils.spatialRef, "Reflect")
        arcpy.PointToRaster_conversion(reflect, "Reflect", layer1, cellsize=utils.resolution)
        arcpy.DefineProjection_management(layer1, utils.spatialRef)
        print("CSV to Rsater:", layer1)

    # Apply mask on if provided
    #if mask is not None:
    #    l2 = arcpy.sa.ExtractByMask(in_netcdf, mask)
    #    l21 = arcpy.sa.Con(l2, l2, 0, "VALUE >= 10")
    #else:
    # layer1 = in_netcdf
    # l21 = arcpy.sa.Con(layer1, layer1, 0, "VALUE >= 10")
    l22 = arcpy.sa.Con(arcpy.sa.IsNull(layer1), 0, layer1)
    arcpy.sa.ContourList(l22, out_feat, levels)
    print("Raster to Contour:", out_feat)
Example #4
    def _transformCSVToRaster(self, csv_file, load_dir):

        arcpy.env.overwriteOutput = True  # set True since the xy event layer created resides in memory

        raster_name = os.path.basename(csv_file).split(".")[0]
        raster_name_with_prefix = self.raster_name_prefix + raster_name
        output_raster = os.path.join(load_dir, raster_name_with_prefix)
        trmm_xy_event_layer = "trmm_xy_layer"

        # create xy event layer
        xyc = self.make_xy_event_layer_config
        xy_result = arcpy.MakeXYEventLayer_management(
            csv_file, xyc['in_x_field'], xyc['in_y_field'],
            trmm_xy_event_layer, xyc.get('spatial_reference', ''),
            xyc.get('in_z_field', ''))
        self.debug_logger("MakeXYEventLayer_management status",
                          xy_result.status)

        # create raster from xy event layer
        prc = self.point_to_raster_config
        point_to_raster_result = arcpy.PointToRaster_conversion(
            trmm_xy_event_layer, prc['value_field'], output_raster,
            prc.get('cell_assignment', ''), prc.get('priority_field', ''),
            prc.get('cellsize', ''))
        self.debug_logger("PointToRaster_conversion status",
                          point_to_raster_result.status)

        return output_raster
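
The method reads its tool parameters from two configuration dicts on the instance; only the keys accessed with ['...'] are required, the rest fall back to '' through dict.get. A plausible shape for them, with purely hypothetical values:

make_xy_event_layer_config = {
    "in_x_field": "lon",
    "in_y_field": "lat",
    "spatial_reference": 4326,  # optional; passed straight through to the tool
}
point_to_raster_config = {
    "value_field": "precip_mm",
    "cell_assignment": "MEAN",  # optional
    "cellsize": 0.25,           # optional; hypothetical TRMM-like cell size
}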
Example #5
def finddist(clatoshad_14, dir, path):
    path2 = path
    # Local variables:
    points_shp = "{}\\points.shp".format(path2)
    dist1 = "{}\\dist1".format(path2)
    # Process: Raster to Point
    arcpy.RasterToPoint_conversion(clatoshad_14, points_shp, "VALUE")
    # Process: Add XY Coordinates
    arcpy.AddXY_management(points_shp)
    # Process: Add Field
    arcpy.AddField_management(points_shp, "distfield", "FLOAT", "", "", "", "",
                              "NULLABLE", "NON_REQUIRED", "")
    xlist = []
    ylist = []
    finames = ['POINT_X', 'POINT_Y', 'distfield']
    rows = arcpy.da.UpdateCursor(points_shp, finames)
    for row in rows:
        xlist.append(row[0])
        ylist.append(row[1])
    rows.reset()
    for row in rows:
        if dir == 'e':
            changex = row[0] - min(xlist)
            changey = row[1] - min(ylist)
            row[2] = math.sqrt(changex * changex + changey * changey)
        if dir == 'w':
            changex = row[0] - max(xlist)
            changey = row[1] - min(ylist)
            row[2] = math.sqrt(changex * changex + changey * changey)
        rows.updateRow(row)
    del row
    del rows
    arcpy.PointToRaster_conversion(points_shp, "distfield", dist1,
                                   "MOST_FREQUENT", "NONE", clatoshad_14)
    return dist1
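
A usage sketch with hypothetical paths; the returned dist1 grid holds, per cell, the distance from the reference corner selected by the dir argument ('e' or 'w'):

# Hypothetical call: writes points.shp and the dist1 grid into C:\temp
dist_raster = finddist("C:\\temp\\clatoshad_14", "w", "C:\\temp")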
Example #6
    def save_mu(self, *args):
        # args[0] can be an optional output directory
        try:
            self.out_dir = args[0]
        except:
            pass
        self.logger.info("")
        self.logger.info(" * SAVING ... ")
        arcpy.CheckOutExtension('Spatial')  # check out license
        arcpy.gp.overwriteOutput = True
        arcpy.env.workspace = self.cache
        arcpy.env.extent = "MAXOF"
        arcpy.CheckInExtension('Spatial')
        try:
            self.logger.info(" * Converting MU IDs to strings:")

            self.logger.info("   >> Converting raster to points ...")
            pts = arcpy.RasterToPoint_conversion(self.ras_mu, self.cache + "pts_del.shp")

            self.logger.info("   >> Converting numbers to strings ...")
            arcpy.AddField_management(pts, "MU", "TEXT")
            expression = "inverse_dict = " + fGl.dict2str(self.mu_dict, inverse_dict=True)
            arcpy.CalculateField_management(pts, "MU", "inverse_dict[!grid_code!]", "PYTHON", expression)

            self.logger.info("   >> OK")
            self.logger.info(" * Saving MU string raster as:")
            self.logger.info(str(self.out_dir) + "\\mu_str.tif")
            arcpy.PointToRaster_conversion(in_features=pts, value_field="MU",
                                           out_rasterdataset=self.out_dir + "\\mu_str.tif",
                                           cell_assignment="MOST_FREQUENT", cellsize=5)
            self.logger.info(" * OK")
        except arcpy.ExecuteError:
            self.logger.info("ExecuteERROR: (arcpy).")
            self.logger.info(arcpy.GetMessages(2))
        except Exception as e:
            self.logger.info("ExceptionERROR: (arcpy).")
            self.logger.info(e.args[0])
        except:
            self.logger.info("ERROR: Field assignment failed.")
            return True

        try:
            self.logger.info(" * Saving mu numeric raster as:")
            self.logger.info(str(self.out_dir) + "\\mu.tif")
            self.ras_mu.save(self.out_dir + "\\mu.tif")
            self.logger.info(" * OK")
        except arcpy.ExecuteError:
            self.logger.info(arcpy.AddError(arcpy.GetMessages(2)))
        except Exception as e:
            self.logger.info(arcpy.GetMessages(2))
        except:
            self.logger.info("ERROR: Saving failed.")
            return True

        try:
            self.clean_up()
        except:
            pass
        return False
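
The CalculateField call depends on fGl.dict2str, which is not shown; it evidently serializes mu_dict with keys and values swapped so the field expression can map each grid_code back to its MU name. A hypothetical stand-in:

def dict2str(d, inverse_dict=False):
    # Hypothetical equivalent of fGl.dict2str: a dict literal as a string,
    # optionally with keys and values swapped
    if inverse_dict:
        d = {v: k for k, v in d.items()}
    return repr(d)

# With mu_dict = {"riffle": 1, "pool": 2} the generated code block is
# "inverse_dict = {1: 'riffle', 2: 'pool'}", and the expression
# "inverse_dict[!grid_code!]" looks each cell value up in it.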
Example #7
def createTraceFiles(in_layer, name):
    # Select the AOT points inside Changshu enterprise zone 1 and export them
    arcpy.AddMessage("------- Creating trace files...")
    # Location of the enterprise zone files
    zoneLocation = u'E:\\常熟溯源\\矢量\\常熟市溯源范围'
    # Create the output folder
    # AOT files
    aot_Content = 'F:\\changshutraceresult\\aot'
    try:
        os.makedirs(aot_Content)
    except:
        arcpy.AddMessage('-------' + aot_Content + ' already exists')
    path2Dir = os.listdir(zoneLocation)
    for zonefile in path2Dir:
        if zonefile[-4:].lower() == '.shp':
            zone_Layer = os.path.join(zoneLocation, zonefile)
            arcpy.AddMessage('-------' + name + ' zone ' + zonefile[-6:-4] +
                             ' -------------------')
            try:
                # Clip
                # Execute ExtractByMask
                outClipRaster = os.path.join(
                    u'F:\\常熟溯源\\AOT\\裁剪',
                    'AOT裁剪分区' + zonefile[-6:-4] + '_' + name + '.tif')
                arcpy.Clip_management(in_layer, "#", outClipRaster, zone_Layer,
                                      "#", "ClippingGeometry",
                                      "NO_MAINTAIN_EXTENT")
                aotFeatures = os.path.join(
                    u'F:\\常熟溯源\\AOT\\shp',
                    'aot' + zonefile[-6:-4] + '_' + name + '.shp')
                # Raster to Point
                arcpy.RasterToPoint_conversion(outClipRaster, aotFeatures,
                                               'VALUE')
                arcpy.AddMessage(
                    '-------' + name + ' zone ' + zonefile[-6:-4] +
                    ' RasterToPoint_conversion Success-------------------')
                # Convert the AOT points to a raster at 0.00055 resolution, with the parameters below
                out_rasterdataset = os.path.join(
                    aot_Content,
                    'AOT分区' + zonefile[-6:-4] + '_' + name + '.tif')
                arcpy.PointToRaster_conversion(aotFeatures, 'GRID_CODE',
                                               out_rasterdataset, 'MAXIMUM',
                                               'NONE', '0.00055')
            except:
                arcpy.AddMessage('-------' + name + ' zone ' + zonefile[-6:-4] +
                                 ' Clip_management error')
                traceback.print_exc()
    arcpy.AddMessage("------制作溯源文件完成")
Example #8
    def point_to_raster(self,
                        in_features,
                        out_raster,
                        val_field,
                        workspace=None):
        self.input_fc_name = in_features
        self.out_features = out_raster
        self.raster_field = val_field
        if workspace is None:
            workspace = join(os.getcwd(), 'Precip.gdb')
        arcpy.env.workspace = workspace
        arcpy.env.overwriteOutput = True
        assignment_type = 'MAXIMUM'
        priority_field = ''
        cell_size = 0.045
        arcpy.PointToRaster_conversion(self.input_fc_name, self.raster_field,
                                       self.out_features, assignment_type,
                                       priority_field, cell_size)
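
A usage sketch, assuming converter is an instance of the (unshown) class this method belongs to; the cell size is fixed at 0.045 inside the method, so only the features, output name and value field vary per call:

# Hypothetical call: rasterize gauge points on the field 'rain_mm'
# into the default Precip.gdb workspace under the current directory
converter.point_to_raster("gauges", "rain_raster", "rain_mm")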
Example #9
def generate_level_raster(new_selection_fc, depth_lvl):
    """
    Pass in the selected points from the current depth level
    and generate a new EMU raster from them.

    Args:
        new_selection_fc: Feature class comprised only of points from the defined depth level.
        depth_lvl: Depth level defined within the current loop.
    Returns:
        emu_raster: Raster whose cell values reflect the EMU cluster.
    """
    emu_raster = "EMURaster" + str(depth_lvl)
    arcpy.PointToRaster_conversion(
        in_features=new_selection_fc,
        value_field="Cluster37",
        out_rasterdataset=emu_raster,
        cellsize=0.25)
    arcpy.Delete_management(new_selection_fc)
    return emu_raster
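
A sketch of the calling loop the docstring implies, selecting the points for each depth level before handing them to generate_level_raster; the dataset and field names (EMUPoints, Depth_Lvl) are assumptions:

import arcpy

for depth_lvl in range(1, 5):
    lyr = arcpy.SelectLayerByAttribute_management(
        "EMUPoints", "NEW_SELECTION", "Depth_Lvl = {}".format(depth_lvl))
    level_fc = "level_points" + str(depth_lvl)
    arcpy.CopyFeatures_management(lyr, level_fc)  # consumed (and deleted) below
    emu_raster = generate_level_raster(level_fc, depth_lvl)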
def points2Raster(fileInfo):  # Input data; adjust the reading code inside the function to match how the data is stored
    rootPath = list(fileInfo.keys())  # root directory of the data files to read
    #    print(rootPath)
    dataName = flatten_lst(list(fileInfo.values()))  # list of file names to read
    #    print(dataName)
    coodiDic = []
    pattern1 = re.compile(r'(.*?)[_]', re.S)
    pattern2 = re.compile(r'(.*?)[.]', re.S)

    for fName in dataName:
        # cell_size=int(re.findall(pattern1, fName)[0]) # use the number in the file name as the cell size, e.g. 120_POI.shp -> cell size 120
        cell_size = 2  # fixed cell size
        in_features = os.path.join(rootPath[0], fName)
        #        value_field="entropy" #指定读取值的字段,作为栅格单元值
        value_field = "cluster"  #指定读取值的字段,作为栅格单元值
        out_raster = re.findall(pattern2, fName)[0] + ".tif"  #定义输出文件名

        print(cell_size, in_features, value_field, out_raster)
        arcpy.PointToRaster_conversion(in_features, value_field, out_raster,
                                       "MOST_FREQUENT", "",
                                       cell_size)  # convert the points to raster
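
points2Raster expects fileInfo to map a single root directory to the list of shapefile names beneath it, and leans on a flatten_lst helper that is not shown. Hypothetical equivalents of both:

from itertools import chain

def flatten_lst(lst):
    # Hypothetical helper: flatten one level of nesting
    return list(chain.from_iterable(lst))

fileInfo = {r"E:\data\poi": ["120_POI.shp", "240_POI.shp"]}
points2Raster(fileInfo)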
Example #11
    def spatial_join_analysis(self, raster, curve_data):
        # uses curve radius data to mark all points within this radius of the input raster

        self.logger.info("   -> Converting raster to points ...")
        try:
            cov_points = self.cache + "cov_points.shp"
            arcpy.RasterToPoint_conversion(raster, cov_points)
            zero_raster = Con((IsNull(raster) == 1), (IsNull(raster) * 1), 1)
            all_points = self.cache + "all_points.shp"
            arcpy.RasterToPoint_conversion(zero_raster, all_points)
        except:
            self.error = True
            self.logger.info("ERROR: Could not perform spatial radius operations (RasterToPoint_conversion).")
        self.logger.info("   -> Delineating " + self.cover_type + " effect radius (spatial join radius: " + str(curve_data[0][0]) + ") ...")
        try:
            out_points = self.cache + "spatjoin.shp"
            rad = float(curve_data[0][0])
            arcpy.SpatialJoin_analysis(target_features=all_points, join_features=cov_points,
                                       out_feature_class=out_points, join_operation="JOIN_ONE_TO_MANY",
                                       join_type="KEEP_COMMON", field_mapping="", match_option="CLOSEST",
                                       search_radius=rad, distance_field_name="")
        except:
            self.error = True
            self.logger.info("ERROR: Could not perform spatial radius operations (SpatialJoin_analysis).")
        self.logger.info("   -> Converting points back to raster ...")
        try:
            arcpy.PointToRaster_conversion(in_features=out_points, value_field="grid_code",
                                           out_rasterdataset=self.cache + "cov_points.tif",
                                           cell_assignment="MEAN", cellsize=self.cell_size)
            __temp_ras__ = arcpy.Raster(self.cache + "cov_points.tif")
            self.logger.info("   -> Assigning spatial HSI value (" + str(curve_data[1][0]) + ") where applies (raster calculator) ...")
            __ras__ = Con(__temp_ras__ > 0, curve_data[1][0])  # assign HSI value
        except:
            self.error = True
            self.logger.info("ERROR: Could not perform spatial radius operations (back conversion).")
        if not self.error:
            return Float(CellStatistics([__ras__], "SUM", "DATA"))
        else:
            return -1
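
The zero_raster line above is easy to misread: both Con branches evaluate to 1, so it is simply a constant raster spanning the whole analysis extent, which makes all_points produce a point for every cell. A clearer equivalent, as a sketch:

from arcpy.sa import Con, IsNull

# 1 where `raster` is NoData and 1 where it has data: constant coverage,
# used only so RasterToPoint_conversion emits a point for every cell
zero_raster = Con(IsNull(raster), 1, 1)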
def Model():  # Model

    # To allow overwriting outputs change overwriteOutput option to True.
    arcpy.env.overwriteOutput = False

    arcpy.ImportToolbox(r"c:\program files\arcgis\pro\Resources\ArcToolbox\toolboxes\Data Management Tools.tbx")
    Historical_Airtraffic_Data_may = "May\\airtraff_may2020_a"
    aircraft_db_csv = "aircraft_db.csv"
    Mean_NoiseMay2020_hexagon_2_ = "May\\Mean_NoiseMay2020_hexagon"

    # Process: Add Join (Add Join) 
    Joined_airtraffic_data = arcpy.AddJoin_management(in_layer_or_view=Historical_Airtraffic_Data_may, in_field="icao24", join_table=aircraft_db_csv, join_field="icao", join_type="KEEP_ALL")[0]

    # Process: Select Layer By Attribute (Select Layer By Attribute) 
    Eliminated_null_rows, Count = arcpy.SelectLayerByAttribute_management(in_layer_or_view=Joined_airtraffic_data, selection_type="NEW_SELECTION", where_clause="mdl IS NOT NULL", invert_where_clause="")

    # Process: Generate Tessellation (Generate Tessellation) 
    Hexagonal_grid_per_1_km2 = "C:\\Users\\Dell\\Documents\\ArcGIS\\Projects\\mar apr may\\mar apr may.gdb\\GenerateTessellation"
    arcpy.management.GenerateTessellation(Output_Feature_Class=Hexagonal_grid_per_1_km2, Extent="12.200035679703 47.3000064103676 13.700035679703 48.2000064103675", Shape_Type="HEXAGON", Size="0.0225 Unknown", Spatial_Reference="GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]];-400 -400 1000000000;-100000 10000;-100000 10000;8.98315284119521E-09;0.001;0.001;IsHighPrecision")

    # Process: Add Fields (multiple) (Add Fields (multiple)) 
    Added_new_fields = arcpy.AddFields_management(in_table=Eliminated_null_rows, field_description=[["size_class", "TEXT", "", "255", "", ""], ["noiselevel", "LONG", "", "", "", ""]])[0]

    # Process: Classify size based on aircraft model (Calculate Field) 
    Classified_size_class = arcpy.CalculateField_management(in_table=Added_new_fields, field="size_class", expression="Reclass(!mdl!)", expression_type="PYTHON3", code_block="""# Reclassify values to another value
# More calculator examples at esriurl.com/CalculatorExamples
def Reclass(mdl):
    if mdl == "a400":
        return "large"
    elif mdl == "b757":
        return "large"
    elif mdl == "a319":
        return "medium"
    elif mdl == "a320":
        return "medium"
    elif mdl == "a321":
        return "medium"
    elif mdl == "b733":
        return "medium"
    elif mdl == "b737":
        return "medium"
    elif mdl == "b738":
        return "medium"
    elif mdl == "b739":
        return "medium"
    elif mdl == "b752":
        return "medium"
    elif mdl == "bcs3":
        return "medium"
    elif mdl == "crj2":
        return "medium"
    elif mdl == "rj1h":
        return "medium"
    elif mdl == "dh8d":
        return "medium"
    elif mdl == "fa8x":
        return "medium"
    else:
        return "small"
""", field_type="TEXT")[0]

    # Process: Classify noise based on size (Calculate Field) 
    Classified_noiselevel = arcpy.CalculateField_management(in_table=Classified_size_class, field="noiselevel", expression="Reclass(!size_class!,!altitude!)", expression_type="PYTHON3", code_block="""# Reclassify values to another value
# More calculator examples at esriurl.com/CalculatorExamples
def Reclass(size_class, altitude):
    if size_class == "large" and altitude <= 11000:
        return 60
    elif size_class == "large" and (altitude > 11000 and altitude <= 15000):
        return 52
    elif size_class == "large" and (altitude > 15000 and altitude <= 16000):
        return 43
    elif size_class == "large" and (altitude > 16000):
        return 42
    elif size_class == "medium" and altitude <= 4000:
        return 71
    elif size_class == "medium" and (altitude > 4000 and altitude <= 6000):
        return 70
    elif size_class == "medium" and (altitude > 6000 and altitude <= 9000):
        return 60
    elif size_class == "medium" and (altitude > 9000 and altitude <= 15000):
        return 59
    elif size_class == "medium" and (altitude > 15000 and altitude <= 16000):
        return 46
    elif size_class == "medium" and (altitude > 16000):
        return 36
    elif size_class == "small" and altitude <= 4000:
        return 67
    elif size_class == "small" and (altitude > 4000 and altitude <= 5000):
        return 54
    elif size_class == "small" and (altitude > 5000):
        return 43
""", field_type="LONG")[0]

    # Process: Point to Raster (Mean aggregated) (Point to Raster) 
    Mean_Noise_may_2020 = "C:\\Users\\Dell\\Documents\\ArcGIS\\Projects\\mar apr may\\mar apr may.gdb\\airtraff_may2020_a_PointToRaster"
    arcpy.PointToRaster_conversion(in_features=Classified_noiselevel, value_field="noiselevel", out_rasterdataset=Mean_Noise_may_2020, cell_assignment="MEAN", priority_field="NONE", cellsize="0.001")

    # Process: extract cell value (Raster to Point) 
    Points_represent_noiselevel = "C:\\Users\\Dell\\Documents\\ArcGIS\\Projects\\mar apr may\\mar apr may.gdb\\RasterT_airtraf1"
    arcpy.RasterToPoint_conversion(in_raster=Mean_Noise_may_2020, out_point_features=Points_represent_noiselevel, raster_field="VALUE")

    # Process: Feature To Polygon (Feature To Polygon) 
    Mean_NoiseMay2020_hexagon = "C:\\Users\\Dell\\Documents\\ArcGIS\\Projects\\mar apr may\\mar apr may.gdb\\GenerateTessellation_Feature"
    arcpy.FeatureToPolygon_management(in_features=[Hexagonal_grid_per_1_km2], out_feature_class=Mean_NoiseMay2020_hexagon, cluster_tolerance="", attributes="ATTRIBUTES", label_features=Points_represent_noiselevel)

    # Process: Polygon to Raster (Polygon to Raster) 
    Mean_NoiseZone_may2020 = "C:\\Users\\Dell\\Documents\\ArcGIS\\Projects\\mar apr may\\mar apr may.gdb\\GenerateTessellation_Feature_PolygonToRaster"
    if Mean_NoiseMay2020_hexagon:
        arcpy.PolygonToRaster_conversion(in_features=Mean_NoiseMay2020_hexagon_2_, value_field="grid_code", out_rasterdataset=Mean_NoiseZone_may2020, cell_assignment="CELL_CENTER", priority_field="NONE", cellsize="0.02")
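
The if/elif chains inside the two code blocks are faithful to ModelBuilder's export; when editing by hand, the model-to-size mapping collapses naturally into a table lookup. A sketch of the first Reclass with the same behavior:

SIZE_BY_MODEL = {
    "a400": "large", "b757": "large",
    "a319": "medium", "a320": "medium", "a321": "medium",
    "b733": "medium", "b737": "medium", "b738": "medium", "b739": "medium",
    "b752": "medium", "bcs3": "medium", "crj2": "medium",
    "rj1h": "medium", "dh8d": "medium", "fa8x": "medium",
}

def Reclass(mdl):
    return SIZE_BY_MODEL.get(mdl, "small")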
Example #13

# Obtain extents of the river network
XMin, YMin, XMax, YMax = extents(river_network)

# Set the extent environment
arcpy.AddMessage("The catchment extent is " + "%s %s %s %s" %
                 (XMin, YMin, XMax, YMax))
catch_extent = "%s %s %s %s" % (XMin, YMin, XMax, YMax)
arcpy.AddMessage("-------------------------")

# Set the environment to the calculated extent
arcpy.env.extent = catch_extent

# Convert the point to a raster
point_raster = arcpy.PointToRaster_conversion(pour_point, '#', "point_raster",
                                              '#', '#', cell_size)

# Execute EucDistance
outEucDistance = EucDistance(point_raster, '#', cell_size)

# Save the output
#outEucDistance.save("eucdist")
arcpy.AddMessage("Calculated eucludian distance")
arcpy.AddMessage("-------------------------")

# Create update cursor for feature class
rows = arcpy.UpdateCursor(river_network)

for row in rows:
    row.river_depth = 1
    row.river_cell_size = cell_size
    rows.updateRow(row)  # old-style cursors need an explicit updateRow to persist edits

# (snippet truncated in the source: a lost preceding call creates the point
#  layer "lay5" with spatial_reference=arcpy.SpatialReference("WGS 1984 UTM Zone 36S"))

#Save PO shapefile
arcpy.FeatureClassToFeatureClass_conversion("lay5", out, "PO_BBJ.shp")

#Create PO raster for each month and year
for yr in year:
    for mn in month[yr - year[0]]:
        out3 = "Shapefiles\\BBJ\\" + str(mn) + str(yr) + "PO.tif"
        fc = out + "PO_BBJ.shp"
        expression = "Month = " + str(mn) + " AND Year = " + str(yr)
        arcpy.MakeFeatureLayer_management(fc, "lay6")
        arcpy.SelectLayerByAttribute_management("lay6", "NEW_SELECTION",
                                                expression)
        arcpy.PointToRaster_conversion("lay6", "Count", out3, "MAXIMUM", "",
                                       1000)
        arcpy.Delete_management("lay6")

#---------#
#-DS data-#
#---------#

#Convert to raster
arcpy.PolygonToRaster_conversion("Shapefiles\\TransectBuffer.shp", "FID",
                                 "Shapefiles\\Transect.tif", "", "", 50)

#MANUALLY CLIP RASTER EXTENT DOWN TO PIXELS INCLUDED IN REGION B: VIA ARCSCAN (DSExtent.tif)

#Convert to 50m pixels
arcpy.RasterToPolygon_conversion("Shapefiles\\DSExtent.tif",
                                 "Shapefiles\\DSpixel.shp", "NO_SIMPLIFY")
Example #15
def mergetables_rechunk(rootdir, in_pointstoprocess, analysis_years, in_statsdir, out_chunkdir, out_formatdir,
                        linearindexing=False, in_llhood=None):
    """
    Merge access tables, create access raster, and prepare next processing batch.

    To be run in parallel, accesscalc_parallel.py needs as inputs a directory containing all the locations around which
    to compute access separated into separate workspaces, each containing a subset of the locations and all the required
    input data and scripts to run the access analysis on an individual processing core. This allows the analysis to be
    parallelized with each core having its own input data and output locations.

    This function is not very modular but runs through these steps:
        - Re-define/create paths to access data and results from full analysis
        - For each livelihood:
            - Compile all access statistics tables generated by running accesscalc_parallel.py
            - Create a raster of access for each llhood and year by aggregating all tables, and while doing that, assess
                which locations still need to be processed (for those livelihoods whose calculations have been started)
            - Run access analysis on 10 groups of locations to assess the number of chunks to divide all locations into
            for each livelihood so that each chunk takes the same amount of time to process.
            - Divide all locations to process into separate chunks. For each chunk; create a directory with input data
                and scripts to run accesscalc_parallel.py (see requirements in documentation).

    Args:
        rootdir (str): project directory
        in_pointstoprocess (feature class): locations for which to compute mean access index in surrounding area
        analysis_years (list; int, or str): years to run analysis for
        in_statsdir (str): directory which contains table outputs from accesscalc_parallel.py
                           (can contain multiple livelihoods and years, must respect file naming convention)
        out_chunkdir (str): root directory to output chunk directories which will be used to run accesscalc_parallel.py
        out_formatdir (str): name of directory within which to group chunk directories. out_formatdir will be written within out_chunkdir
        linearindexing (bool): Whether to try to process groups in order
                               (if some groups have already been processed; can lead to bugs - keep the default).
                               Default: False
        in_llhood (list or str): livelihoods to run the analysis for. If None,
                                 will process all livelihoods in os.path.join(datadir, 'Weighting_scheme.xlsx')

    Returns:
        None
        Side effect:
            - Access rasters in rootdir/results/Analysis_Chp1_W1/W1_3030/Access_W1_3030 for each livelihood and year
            - Chunked and formatted directory to run accesscalc_parallel.py on. Written in out_chunkdir and named based
              on out_formatdir argument.
    """

    #Make sure analysis_years is list of strings
    if isinstance(analysis_years, str) or isinstance(analysis_years, int):
        analysis_years = [analysis_years]
    analysis_years = [str(yr) for yr in analysis_years]

    #Re-create paths
    datadir = os.path.join(rootdir, 'data')
    resdir = os.path.join(rootdir, 'results')

    #Make sure that output directory exists. If not, create it
    pathcheckcreate(out_chunkdir)

    #Get table with livelihoods, buffer radii, etc.
    weighting_table = os.path.join(datadir, 'Weighting_scheme.xlsx')
    weightingpd = pd.read_excel(weighting_table, sheet_name='Weighting_1')

    #Format livelihoods to process
    if in_llhood is None:
        livelihoods = weightingpd['Livelihood'].tolist()
        livelihoods.remove('Combined_livelihood')
    else:
        livelihoods = in_llhood
        if isinstance(livelihoods, str):
            livelihoods = [livelihoods]

    ### ------- Re-define/create paths ---------------------------------------------------------------------------------
    basemapgdb = os.path.join(resdir, "Base_layers_Pellegrini", "Basemaps_UTM20S.gdb")
    pelleras = os.path.join(basemapgdb, "Pellegri_department_UTM20S")
    forestoutdir = os.path.join(resdir, 'Base_layers_Pellegrini/Forest_Hansen.gdb')
    barrierweight_outras = {}; forestyearly = {}; bufferad = {}; costtab_outgdb = {}
    access_outgdb = {}; access_outras = {}

    for llhood in livelihoods:
        print(llhood)
        bufferad[llhood] = float(weightingpd.loc[weightingpd['Livelihood'] == llhood, 'Buffer_max_rad'])
        outllhood_gdb = os.path.join(resdir,'Analysis_Chp1_W1','W1_3030','Barrier_weighting_1_3030','{}_bw1_3030.gdb'.format(llhood))
        for year in analysis_years:
            barrierweight_outras[llhood+year] = os.path.join(outllhood_gdb, '{0}_bw1_{1}'.format(llhood, year))
            forestyearly[year] = os.path.join(forestoutdir, 'Hansen_GFC_v16_treecover{}'.format(year))
            costtab_outgdb[llhood+year] = os.path.join(resdir, 'Analysis_Chp1_W1',
                                                       'W1_3030', 'Cost_distance_W1_3030',
                                                       'Cost_distance_{}_w1'.format(llhood),
                                                       'CD_{0}_{1}_w1.gdb'.format(llhood, year))

            access_outgdb[llhood] = os.path.join(resdir, 'Analysis_Chp1_W1', 'W1_3030', 'Access_W1_3030',
                                                 'Access_W1_{0}'.format(llhood), 'Access_W1_{0}.gdb'.format(llhood))
            pathcheckcreate(access_outgdb[llhood])
            # Path of output access raster
            access_outras[llhood + year] = os.path.join(access_outgdb[llhood],
                                                        'accessras_W1_{0}{1}'.format(llhood, year))
    
    # LOOP: for each livelihood, for each group, create separate folder-points-buffers to run cost-distance in parallel on HPC
    ### ------- Get all processed tables -----------------------------------------------------------------------------------
    tablist = getfilelist(dir=in_statsdir, repattern=".*[.]dbf$", gdbf = False, nongdbf = True)
    tablist.extend(getfilelist(dir=in_statsdir, gdbf = True, nongdbf = False))
    
    tables_pd = pd.concat([pd.Series(tablist),
                           pd.Series(tablist).apply(lambda x: os.path.splitext(os.path.split(x)[1])[0]).
                          str.split('_', expand=True)],
                          axis=1)
    tables_pd.columns = ['path', 'dataset', 'llhood1', 'llhood2', 'year', 'weighting', 'group']
    tables_pd['llhood'] = tables_pd['llhood1'] + '_' + tables_pd['llhood2']
    tables_pd = tables_pd.drop(labels=['llhood1', 'llhood2'], axis=1)
    
    # processed_pd = tables_pd.groupby(['llhood', 'group']).filter(lambda x: x['year'].nunique() == 3).\
    #     drop_duplicates(subset=['llhood', 'group'])
    
    ### ------ Create a raster of access for each llhood and year by aggregating all tables (yielding an access value for each pixel-point) ---------
    refraster = pelleras
    fishgroupstoprocess = defaultdict(set)
    
    # Iterate over each livelihood
    for llhood in tables_pd['llhood'].unique():
        #if processing that llhood
        if llhood in livelihoods:
            #Create a field that records whether all analysis years have been processed.
            # If all analysis years have been processed it stays None; if not, it is set to 1
            accessfield_toprocess = 'access{0}_toprocess'.format(llhood)
            print('Create {} field'.format(accessfield_toprocess))
            if accessfield_toprocess in [f.name for f in arcpy.ListFields(in_pointstoprocess)]:
                arcpy.DeleteField_management(in_pointstoprocess, accessfield_toprocess)
            arcpy.AddField_management(in_table=in_pointstoprocess, field_name= accessfield_toprocess, field_type='SHORT')

            # Iterate over each year
            for year in tables_pd['year'].unique():
                #if processing that year
                if year in analysis_years:
                    # Perform analysis only if output raster doesn't exist
                    if not arcpy.Exists(access_outras[llhood+year]):
                        print("Processing {}...".format(access_outras[llhood+year]))

                        # Aggregate values across all pixels-points for that livelihood-year
                        print('Aggregating zonal statistics tables...')
                        merged_dict = tabmerge_dict(tables_pd.loc[(tables_pd['llhood'] == llhood) &
                                                                  (tables_pd['year'] == year), 'path'])

                        if len(merged_dict) > 0:
                            # Join all statistics tables of access to in_pointstoprocess (a point for each 30x30 m pixel in Pellegrini department)
                            print('Joining tables to points...')

                            accessfield = 'access{0}{1}'.format(llhood, year)
                            if not accessfield in [f.name for f in arcpy.ListFields(in_pointstoprocess)]:
                                print('Create {} field'.format(accessfield))
                                arcpy.AddField_management(in_table=in_pointstoprocess, field_name=accessfield, field_type='FLOAT')

                            with arcpy.da.UpdateCursor(in_pointstoprocess, ['pointid', accessfield,  accessfield_toprocess,
                                                                            'group{}'.format(llhood)]) as cursor:
                                x = 0
                                for row in cursor:
                                    if x % 100000 == 0:
                                        print(x)
                                    if row[0] in merged_dict:
                                        row[1] = merged_dict[row[0]]
                                    else:
                                        row[2] = 1
                                        fishgroupstoprocess[llhood].add(row[3])
                                        print('pointid {} was not found in dictionary'.format(row[0]))
                                    cursor.updateRow(row)
                                    x += 1

                            # Convert points back to raster
                            #if len(merged_dict) == x:
                            print('Converting points to raster...')
                            arcpy.env.snapRaster = arcpy.env.extent = refraster
                            arcpy.PointToRaster_conversion(in_features=in_pointstoprocess,
                                                           value_field=accessfield,
                                                           cellsize=refraster,
                                                           out_rasterdataset=access_outras[llhood+year])
                            arcpy.ClearEnvironment("extent")
                            arcpy.ClearEnvironment("snapRaster")
                        else:
                            print('No zonal statistics available for that livelihood for that year...')

                    else:
                        print('{} already exists...'.format(access_outras[llhood+year]))

    ### ------ Run analysis on 10 groups to check speed ----- ####
    testdir = os.path.join(out_chunkdir, 'testdir')
    if os.path.isdir(testdir):
        arcpy.Delete_management(testdir)
    pathcheckcreate(testdir)
    grp_process_time = defaultdict(float)
    
    for llhood in livelihoods:
        print('Assessing access calculation run time for {}...'.format(llhood))
    
        if 'group{}'.format(llhood) not in [i.name for i in arcpy.Describe(in_pointstoprocess).indexes]:
            print('Adding index to in_pointstoprocess...')
            arcpy.AddIndex_management(in_pointstoprocess, fields='group{}'.format(llhood),
                                      index_name='group{}'.format(llhood))  # Add index to speed up querying
    
        if ((llhood not in fishgroupstoprocess) and
                (not all(arcpy.Exists(access_outras[llhood+y]) for y in analysis_years))):
            print('Getting groups...')
            fishgroupstoprocess[llhood] = {row[0] for row in arcpy.da.SearchCursor(in_pointstoprocess,
                                                                                   'group{}'.format(llhood))}
    
        # Output points for 10 groups for each livelihood
        for group in list(fishgroupstoprocess[llhood])[0:10]:
            print(group)
    
            outpoints = os.path.join(testdir, 'testpoints_{0}_{1}_{2}.shp').format(llhood, int(bufferad[llhood]), group)
            if not arcpy.Exists(outpoints):
                # Subset points based on group (so that their buffers don't overlap) and only keep points that overlap study area
                arcpy.MakeFeatureLayer_management(in_features=in_pointstoprocess, out_layer='subpoints{}'.format(group),
                                                  where_clause='({0} = {1}) AND ({2} = 1)'.format(
                                                      'group{}'.format(llhood), group, accessfield_toprocess))
                arcpy.CopyFeatures_management('subpoints{}'.format(group), outpoints)
    
            # Test time that each group takes to process for each livelihood
            inbw = {yr: barrierweight_outras['{0}{1}'.format(llhood, yr)] for yr in analysis_years}
    
            tic = time.time()
            # Get subpoints
            accesscalc(inllhood=llhood,
                        ingroup=group,
                        inpoints=outpoints,
                        inbuffer_radius=bufferad[llhood],
                        inyears=analysis_years,
                        inbarrierweight_outras=inbw,
                        inforestyearly=forestyearly,
                        costtab_outdir=testdir)
            toc = time.time()
            print(toc - tic)
            grp_process_time[llhood] = grp_process_time[llhood] + (toc - tic) / 10.0
    
    ### ------ Compute number of chunks to divide each livelihood in to process each chunk with equal time ------###
    numcores = 14  # Number of chunks to divide processing into
    maxdays = 1 #Max number of days that processes can be run at a time

    #Assess the amount of time required and the number of chunks
    totaltime = sum([grp_process_time[llhood] * len(fishgroupstoprocess[llhood]) for llhood in livelihoods])
    print('Total processing times among {0} cores: {1} days...'.format(
        numcores, totaltime/float(3600.0*24*numcores))) #Total time if process is divided into numcores chunks at same speed
    numchunks = math.ceil(totaltime / float(3600.0 * 24 * maxdays))
    print('Total number of chunks for each to be processed within {0} days among {1} cores: {2}...'.format(
        maxdays, numcores, numchunks))
    
    ### ------ Assign groups to chunks ------###
    llhood_chunks = {}
    formatdir_data = os.path.join(out_formatdir, 'data')
    formatdir_results = os.path.join(out_formatdir, 'results')
    formatdir_src = os.path.join(out_formatdir, 'src')
    pathcheckcreate(formatdir_data, verbose=True)
    pathcheckcreate(formatdir_results, verbose=True)
    pathcheckcreate(formatdir_src, verbose=True)
    
    #Copy processing file to directory
    in_processingscript = os.path.join(rootdir, 'src', 'Chap1_Analysis1', 'accesscalc_parallel.py')
    out_processingscript = os.path.join(formatdir_src, 'accesscalc_parallel.py')
    copyfile(in_processingscript, out_processingscript)
    
    for llhood in livelihoods:
        if len(fishgroupstoprocess[llhood]) > 0:
            print(llhood)
            llhood_chunks[llhood] = math.ceil(numchunks * grp_process_time[llhood] * len(fishgroupstoprocess[llhood]) / totaltime)
            print('    Number of chunks to divide {0} groups into: {1}...'.format(
                llhood, llhood_chunks[llhood]))

            if linearindexing == True:
                groupchunklist = groupindexing(grouplist=list(fishgroupstoprocess[llhood]), chunknum=llhood_chunks[llhood])
            else:
                interval = int(math.ceil(len(fishgroupstoprocess[llhood])/ float(numchunks)))
                groupchunklist = [list(fishgroupstoprocess[llhood])[i:(i + interval)] for i
                                  in range(0, len(fishgroupstoprocess[llhood]), interval)]
    
            #Output points and ancillary data to chunk-specific gdb
            for chunk in range(0, len(groupchunklist)):
                print(chunk)
                outchunkgdb = os.path.join(formatdir_data, '{0}{1}_{2}.gdb'.format(llhood, int(bufferad[llhood]), chunk))
                if not (arcpy.Exists(outchunkgdb)):
                    pathcheckcreate(outchunkgdb, verbose=True)
                    outchunkpoints= os.path.join(outchunkgdb, 'subpoints{0}{1}_{2}'.format(llhood, int(bufferad[llhood]), chunk))

                    print('Copying points...')
                    if len(groupchunklist[chunk])>0:
                        print(len(groupchunklist[chunk]))
                        arcpy.CopyFeatures_management(
                            arcpy.MakeFeatureLayer_management(
                                in_features=in_pointstoprocess, out_layer='pointslyr',
                                where_clause='(group{0} IN {1}) AND ({2} = 1)'.format(
                                    llhood, tuple(groupchunklist[chunk]), accessfield_toprocess)
                            ), #[i for i in groupchunklist[chunk] if i is not None]
                            outchunkpoints)

                        print('Copying ancillary data...')
                        for yr in analysis_years:
                            #Copy barrier raster
                            arcpy.CopyRaster_management(barrierweight_outras[llhood + yr],
                                                        os.path.join(outchunkgdb, os.path.split(barrierweight_outras[llhood + yr])[1]))
                            #Copy forest cover
                            if llhood == 'Charcoal_production':
                                arcpy.CopyRaster_management(forestyearly[yr],
                                                            os.path.join(outchunkgdb, os.path.split(forestyearly[yr])[1]))
                else:
                    print('{} already exists...'.format(outchunkgdb))
        else:
            print('All groups for {} have already been processed...'.format(llhood))
        row.setValue("Date", date_list[y])
        row.setValue(Param_Name, New_stat_list[x][y])
        row.setValue(X_header, New_X_long[x])
        row.setValue(Y_header, New_Y_long[x])

        rows.insertRow(row)

del rows

# Deleting Test Table
arcpy.AddMessage("Deleting Joined Table")
arcpy.Delete_management(New_Table)

# Process: Make XY Event Layer
arcpy.AddMessage("Creating Point Feature Class From Table")

Layer_Name = Interp_Table_Name + "_Points"
Layer_Path = pre_Work_Space + "\\" + Layer_Name

arcpy.MakeXYEventLayer_management(Interp_Table, X_header, Y_header, Layer_Path, Spat_Ref, Param_Name)
arcpy.FeatureClassToFeatureClass_conversion(Layer_Path, pre_Work_Space, Layer_Name)

# Process: Point to Raster
arcpy.AddMessage("Creating the Raster")
arcpy.PointToRaster_conversion(Layer_Path, Param_Name, RASTER_Name, Cell_assignment_type, "NONE", Cellsize)

# Deleting Stuff
arcpy.AddMessage("Deleting Unnecessary Feature Classes")
arcpy.Delete_management(Interp_Table)
arcpy.Delete_management(Layer_Path)
Example #17
def calculateCFactor(downloadBool, localCdlList, watershedFile, rasterTemplateFile, yrStart, yrEnd, \
 outRotation, outHigh, outLow, legendFile, cFactorXwalkFile):
    import arcpy, os, random, urllib, sys, subprocess
    import xml.etree.ElementTree
    sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)))
    import setupTemp as tmp
    import numpy as np
    from arcpy import env
    arcpy.CheckOutExtension("Spatial")
    from arcpy.sa import *
    env.overwriteOutput = True
    env.pyramid = 'NONE'
    env.rasterStatistics = 'NONE'

    import subprocess
    from subprocess import Popen
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW

    tempDir = tmp.tempDir
    tempGdb = tmp.tempGdb
    os.environ['ARCTMPDIR'] = tmp.tempDir

    env.scratchWorkspace = tempDir
    env.workspace = tempDir

    rid = str(random.randint(10000, 99999))
    watershedCdlPrj = tempGdb + '/watershedCdlPrj_' + rid
    samplePts = tempGdb + '/samplePts_' + rid
    outRotation1 = tempGdb + '/outRotation1_' + rid
    outHigh1 = tempGdb + '/outHigh1_' + rid
    outLow1 = tempGdb + '/outLow1_' + rid
    cdlUrl = r'http://nassgeodata.gmu.edu:8080/axis2/services/CDLService/GetCDLFile?'

    arcpy.AddMessage(
        "Projecting Area Of Interest to Cropland Data Layer projection...")
    sr = arcpy.SpatialReference(102039)
    arcpy.Project_management(watershedFile, watershedCdlPrj, sr)

    ext = arcpy.Describe(watershedCdlPrj).extent

    if downloadBool == 'true':
        ping = Popen(['ping', '-n', '1', 'nassgeodata.gmu.edu'],
                     startupinfo=startupinfo)
        ping.wait()
        if ping.returncode != 0:
            arcpy.AddError(
                'The CropScape server is down. Please try again later, or download local Cropland Data Layers at http://www.nass.usda.gov/research/Cropland/Release/index.htm'
            )
        arcpy.AddMessage("Downloading Cropland Data Layers...")
        years = range(int(yrStart), int(yrEnd) + 1)
        cdlTiffs = []
        for year in years:
            year = str(year)
            clipUrl = cdlUrl\
             + r'year='\
             + year + r'&'\
             + r'bbox='\
             + str(ext.XMin) + '%2C'\
             + str(ext.YMin) + '%2C'\
             + str(ext.XMax) + '%2C'\
             + str(ext.YMax)
            try:
                downloadLocXml = tempDir + '/download_' + year + '_' + rid + '.xml'
                urllib.urlretrieve(clipUrl, downloadLocXml)
                tiffUrl = xml.etree.ElementTree.parse(
                    downloadLocXml).getroot()[0].text
                downloadTiff = tempDir + '/cdl_' + year + '_' + rid + '.tif'
                urllib.urlretrieve(tiffUrl, downloadTiff)
            except:
                arcpy.AddError(
                    "The CropScape server failed. Please download the layers to your hard drive at http://www.nass.usda.gov/research/Cropland/Release/index.htm"
                )
            cdlTiffs.append(downloadTiff)
    else:
        arcpy.AddMessage(
            "Clipping Cropland Data Layers to watershed extent...")
        localCdlList = localCdlList.split(';')
        cdlTiffs = []
        years = []
        for i, localCdl in enumerate(localCdlList):
            clipCdl = tempDir + '/cdl_' + str(i) + '_' + rid + '.tif'
            arcpy.Clip_management(localCdl, '', clipCdl, watershedCdlPrj)
            cdlTiffs.append(clipCdl)
            years.append(i)

    resolutions = []
    for cdlTiff in cdlTiffs:
        res = float(
            arcpy.GetRasterProperties_management(cdlTiff,
                                                 'CELLSIZEX').getOutput(0))
        resolutions.append(res)

    resolutions = np.array(resolutions)
    minResCdlTiff = np.array(cdlTiffs)[resolutions == resolutions.min()][0]

    arcpy.AddMessage(
        "Converting Cropland Data Layer grid to points. If your watershed is larger than a HUC12, this may take a while..."
    )
    arcpy.RasterToPoint_conversion(minResCdlTiff, samplePts)

    cdlList = []
    yrCols = []
    for i, year in enumerate(years):
        yrCol = 'lc_' + str(year)
        yrCols.append(yrCol)
        cdlList.append([cdlTiffs[i], yrCol])

    arcpy.AddMessage("Pulling crop sequence from Cropland Data Layers...")
    ExtractMultiValuesToPoints(samplePts, cdlList, 'NONE')

    nonRotCropVals = [0] + range(63, 181) + range(182, 204)
    corn = np.array([1])
    alfalfa = np.array([28, 36, 37, 58])
    pasture = np.array([62, 181])
    soyAndGrain = np.array([4, 5, 21, 22, 23, 24, 25, 27, 28, 29, 30, 39, 205])
    potatoes = np.array([43])
    veggies = np.array([12, 42, 47, 49, 50, 53, 206, 216])

    # Read in C-factor crosswalk table and CDL legend file
    cFactorXwalk = np.loadtxt(cFactorXwalkFile \
     , dtype=[('LAND_COVER', 'S40'), ('COVER_LEVEL', 'S10'), ('C_FACTOR', 'f4')] \
     , delimiter=',', skiprows=1)

    cdlLegend = np.loadtxt(legendFile \
     , dtype=[('VALUE', 'u1'), ('CLASS_NAME', 'S30')] \
     , delimiter=',', skiprows=1)

    arcpy.AddField_management(samplePts, 'rotation', 'TEXT')
    arcpy.AddField_management(samplePts, 'cFactorLow', 'FLOAT')
    arcpy.AddField_management(samplePts, 'cFactorHigh', 'FLOAT')

    ptCount = int(arcpy.GetCount_management(samplePts).getOutput(0))
    msg = "Generalizing rotation from crop sequence, and applying a C-factor..."
    arcpy.SetProgressor("step", msg, 0, ptCount, 1)
    rows = arcpy.UpdateCursor(samplePts)
    for i, row in enumerate(rows):
        lcs = []
        for yrCol in yrCols:
            if row.getValue(yrCol) is None:
                lcs.append(0)
            else:
                lcs.append(row.getValue(yrCol))
        lcs = np.array(lcs)
        nYr = float(len(lcs))
        # Crop proportions
        pNas = float(len(np.where(lcs == 0)[0])) / nYr
        pCorn = float(len(np.where(np.in1d(lcs, corn))[0])) / nYr
        pAlfalfa = float(len(np.where(np.in1d(lcs, alfalfa))[0])) / nYr
        pPasture = float(len(np.where(np.in1d(lcs, pasture))[0])) / nYr
        pSoyAndGrain = float(len(np.where(np.in1d(lcs, soyAndGrain))[0])) / nYr
        pPotato = float(len(np.where(np.in1d(lcs, potatoes))[0])) / nYr
        pVeggies = float(len(np.where(np.in1d(lcs, veggies))[0])) / nYr

        noDataBool = pNas == 1.
        contCornBool = pCorn >= 3./5 and \
         (pSoyAndGrain + pPotato + pVeggies + pAlfalfa + pPasture) == 0.
        cashGrainBool = (pCorn + pSoyAndGrain) >= 2./5 and \
         (pPotato + pVeggies + pAlfalfa + pPasture) == 0.
        dairyBool1 = pAlfalfa >= 1./5 and \
         (pCorn + pSoyAndGrain) >= 1./5
        dairyPotatoBool = pPotato >= 1./5 and \
         pAlfalfa >= 1./5 and \
         pVeggies == 0.
        potGrnVegBool = (pPotato + pVeggies) >= 1. / 5  # and \
        # (pSoyAndGrain + pCorn) >= 2./5
        pastureBool = pPasture >= 1./5 and \
         (pCorn + pSoyAndGrain + pPotato + pVeggies + pAlfalfa) == 0.
        dairyBool2 = (pAlfalfa + pPasture) >= 1. / 5  # and \
        # (pCorn + pSoyAndGrain) >= 1./5
        if noDataBool:
            rot = "No Data"
        elif cashGrainBool:
            rot = "Cash Grain"
        elif dairyBool1:
            rot = "Dairy Rotation"
        elif dairyPotatoBool:
            rot = "Dairy Potato Year"
        elif potGrnVegBool:
            rot = "Potato/Grain/Veggie Rotation"
        elif contCornBool:
            rot = "Continuous Corn"
        elif pastureBool:
            rot = "Pasture/Hay/Grassland"
        elif dairyBool2:
            rot = "Dairy Rotation"
        else:
            rot = "No agriculture"
            c_s = np.empty(len(lcs))
            for j, lc in enumerate(lcs):
                c = np.extract(cFactorXwalk['LAND_COVER'] == str(lc) \
                 , cFactorXwalk['C_FACTOR'])
                if len(c) > 0:
                    c_s[j] = c
                else:
                    c_s[j] = np.nan
            c_ave = np.nansum(c_s) / np.sum(np.isfinite(c_s))
            if np.isnan(c_ave):
                c_high = None
                c_low = None
            else:
                c_high = float(c_ave)
                c_low = float(c_ave)
        if rot != "No agriculture":
            rotBool = cFactorXwalk['LAND_COVER'] == rot
            highBool = np.in1d(cFactorXwalk['COVER_LEVEL'],
                               np.array(['High', '']))
            lowBool = np.in1d(cFactorXwalk['COVER_LEVEL'],
                              np.array(['Low', '']))
            c_high = np.extract(np.logical_and(rotBool, highBool),
                                cFactorXwalk['C_FACTOR'])
            c_low = np.extract(np.logical_and(rotBool, lowBool),
                               cFactorXwalk['C_FACTOR'])
            c_high = float(c_high)
            c_low = float(c_low)
        row.cFactorHigh = c_high
        row.cFactorLow = c_low
        row.rotation = rot
        rows.updateRow(row)
        arcpy.SetProgressorPosition()
    arcpy.ResetProgressor()
    del row, rows

    arcpy.AddMessage("Converting points to raster...")
    arcpy.PointToRaster_conversion(samplePts, "rotation", outRotation1, 'MOST_FREQUENT', \
     '', minResCdlTiff)
    # arcpy.PointToRaster_conversion(samplePts, "cFactorHigh", outHigh1, 'MEAN', \
    # '', minResCdlTiff)
    # arcpy.PointToRaster_conversion(samplePts, "cFactorLow", outLow1, 'MEAN', \
    # '', minResCdlTiff)

    wtm = arcpy.Describe(rasterTemplateFile).spatialReference
    outRes = int(
        arcpy.GetRasterProperties_management(rasterTemplateFile,
                                             'CELLSIZEX').getOutput(0))
    arcpy.ProjectRaster_management(outRotation1, outRotation, wtm, 'NEAREST',
                                   outRes)
            upCur.updateRow(row)
        del upCur, row
        arcpy.Delete_management("tempLay", 'GPFeatureLayer')
        """----CREATE RASTERS FROM SHAPEFILE----"""

        #Size of output raster cells
        cellSize = ""

        #Value parameter for cell assignment
        cellAssign = "MAXIMUM"
        #Name of the flood extent raster to create
        outRasfldext = csvName + "_fldext.tif"
        print "Creating %s flood extent raster" % csvName
        #Convert the newly created shapefile to a flood extent raster
        arcpy.PointToRaster_conversion(outPathshp + outFC, "fldext",
                                       outPathras + outRasfldext, cellAssign,
                                       "", cellSize)

        #Name of the .5 foot water depth raster to create
        outRaswd = csvName + "_wd_hlfft.tif"
        print "Creating %s 1/2 foot water depth exceedance raster" % csvName
        #Convert the newly created shapefile to a water depth raster
        arcpy.PointToRaster_conversion(outPathshp + outFC, "wd_hlfft",
                                       outPathras + outRaswd, cellAssign, "",
                                       cellSize)

        #Name of the 1 foot water depth raster to create
        outRaswd = csvName + "_wd_1ft.tif"
        print "Creating %s 1 foot water depth exceedance raster" % csvName
        #Convert the newly created shapefile to a water depth raster
        arcpy.PointToRaster_conversion(outPathshp + outFC, "wd_1ft",
                                       outPathras + outRaswd, cellAssign, "",
                                       cellSize)
def sbdd_ProcessAddress (myFD, myFL):
   arcpy.AddMessage("     Begining Address Processing")
   theFields = ["FRN","PROVNAME","DBANAME","TRANSTECH","MAXADDOWN","MAXADUP",
                "TYPICDOWN","TYPICUP","Provider_Type","ENDUSERCAT"]
   chkFC = ["Address_frq","Address"]
   for cFC in chkFC:
       if arcpy.Exists(cFC):
           arcpy.Delete_management(cFC)
   if int(arcpy.GetCount_management(myFD + "/" + myFL).getOutput(0)) > 1:
       arcpy.Frequency_analysis(myFD + "/" + myFL, "Address" + "_frq", theFields, "")    
       #open a cursor loop to get all the distinct values
       myCnt = 1
       theQ = "(MAXADDOWN = '3' OR MAXADDOWN = '4' OR MAXADDOWN = '5' OR MAXADDOWN = '6' OR " + \
              " MAXADDOWN = '7' OR MAXADDOWN = '8' OR MAXADDOWN = '9' OR MAXADDOWN = '10' OR MAXADDOWN = '11') AND " + \
              "(MAXADUP = '2' OR MAXADUP = '3' OR MAXADUP = '4' OR MAXADUP = '5' OR MAXADUP = '6' OR " + \
              " MAXADUP = '7' OR MAXADUP = '8' OR MAXADUP = '9' OR MAXADUP = '10' OR MAXADUP = '11' )"
       for row in arcpy.SearchCursor("Address" + "_frq", theQ):
           theProviderType=row.getValue("Provider_Type")
           theEndUserCat=row.getValue("ENDUSERCAT")

           theProvName = row.getValue("PROVNAME").replace("'","")
           theDBA = row.getValue("DBANAME").replace("'","")
           theFRN = row.getValue("FRN")
           theTransTech = row.getValue("TRANSTECH")
           theAdUp = row.getValue("MAXADUP")
           theAdDown = row.getValue("MAXADDOWN")
           theTyUp = row.getValue("TYPICUP")
           theTyDown = row.getValue("TYPICDOWN")
           theTyUpQ = ""
           theTyDownQ = ""
           if theTyUp == "ZZ":
               theTyUp = "ZZ"  #used for naming / logic on calculating
               theTyUpQ = "TYPICUP = 'ZZ'"  #used as a selection set
           elif theTyUp is None:
               theTyUp = "IsNull"  #used for naming / logic on calculating
               theTyUpQ = "TYPICUP Is Null"  #used as a selection set
           elif theTyUp == " ":
               theTyUp = "IsNull"
               theTyUpQ = "TYPICUP = ' '"
           else:
               theTyUp = str(abs(int(theTyUp)))
               theTyUpQ = "TYPICUP = '" + theTyUp + "'"
           if theTyDown == "ZZ":
               theTyDown = "ZZ"  #used for naming / logic on calculating
               theTyDownQ = "TYPICDOWN = 'ZZ'"  #used as a selection set                
           elif theTyDown is None:
               theTyDown = "IsNull"
               theTyDownQ = "TYPICDOWN Is Null"
           elif theTyDown == " ":
               theTyDown = "IsNull"
               theTyDownQ = "TYPICDOWN = ' '"
           else:
               theTyDown = str(abs(int(theTyDown)))
               theTyDownQ = "TYPICDOWN = '" + theTyDown + "'"
           theQry = "FRN = '" + theFRN + "'"
           theQry = theQry + " AND TRANSTECH = " + str(theTransTech)
           theQry = theQry + " AND MAXADDOWN = '" + theAdDown + "' AND MAXADUP = '" 
           theQry = theQry + theAdUp + "' AND " + theTyUpQ + " AND " + theTyDownQ
           myFLName = theFRN + str(theTransTech) + theAdUp + theAdDown + theTyUp + theTyDown
           arcpy.MakeFeatureLayer_management(myFD + "/" + myFL, myFLName, theQry)
           if int(arcpy.GetCount_management(myFLName).getOutput(0)) > 0 :  #originally 1 for the raster case
               outPT = theST + theFRN + "_" + str(theTransTech) + "_" + theAdDown + "_" + \
                       theAdUp + "_" + theTyDown + "_" + theTyUp + "_x" #the selection of points
               outRT = theST + theFRN + "_" + str(theTransTech) + "_" + theAdDown + "_" + \
                       theAdUp + "_" + theTyDown + "_" + theTyUp + "_g" #the raster grid
               inPly = theST + theFRN + "_" + str(theTransTech) + "_" + theAdDown + "_" + \
                       theAdUp + "_" + theTyDown + "_" + theTyUp + "_p" #the output of grid poly
               bfPly = theST + theFRN + "_" + str(theTransTech) + "_" + theAdDown + "_" + \
                       theAdUp + "_" + theTyDown + "_" + theTyUp + "_pb" #the output of buffer
               chkFC = [outPT, outRT, inPly, bfPly]
               for cFC in chkFC:
                   if arcpy.Exists(cFC):
                       arcpy.Delete_management(cFC)
               del cFC, chkFC
               #first create a feature class of the selected points
               arcpy.FeatureClassToFeatureClass_conversion(myFLName, thePGDB, outPT) 
               arcpy.RepairGeometry_management(outPT)                 
               arcpy.Delete_management(myFLName)                
               if int(arcpy.GetCount_management(outPT).getOutput(0)) > 50:
                   #arcpy.AddMessage("          processing by raster point: " + outPT)
                   #second covert the selection to a grid data set (e.g. raster)
                   arcpy.PointToRaster_conversion(outPT, "FRN", outRT, "", "", 0.0028) 
                   theH = arcpy.Describe(outRT).Height
                   theW = arcpy.Describe(outRT).Width

                   if int(theH) > 2 and int(theW) > 2:
                       #third convert the rasters back to a polygon
                       arcpy.RasterToPolygon_conversion(outRT, inPly, "NO_SIMPLIFY", "") 
                       arcpy.AddField_management (inPly, "FRN", "TEXT", "", "", 10)
                       arcpy.AddField_management (inPly, "PROVNAME", "TEXT", "", "", 200)
                       arcpy.AddField_management (inPly, "DBANAME", "TEXT", "", "", 200)
                       arcpy.AddField_management (inPly, "TRANSTECH", "SHORT", "", "", "")
                       arcpy.AddField_management (inPly, "MAXADDOWN", "TEXT", "", "", 2)
                       arcpy.AddField_management (inPly, "MAXADUP", "TEXT", "", "", 2)
                       arcpy.AddField_management (inPly, "TYPICDOWN", "TEXT", "", "", 2)
                       arcpy.AddField_management (inPly, "TYPICUP", "TEXT", "", "", 2)
                       arcpy.AddField_management (inPly, "State", "TEXT", "", "", 2)
                       arcpy.AddField_management (inPly, "Provider_Type", "SHORT", "", "", "")
                       arcpy.AddField_management (inPly, "ENDUSERCAT", "TEXT", "", "", 2)



                       arcpy.CalculateField_management(inPly, "FRN", "'" + theFRN + "'" ,"PYTHON")
                       arcpy.CalculateField_management(inPly, "PROVNAME", r"'" + theProvName + "'" ,"PYTHON")
                       arcpy.CalculateField_management(inPly, "DBANAME", r"'" + theDBA + "'" ,"PYTHON")
                       arcpy.CalculateField_management(inPly, "TRANSTECH", theTransTech, "PYTHON")
                       arcpy.CalculateField_management(inPly, "MAXADDOWN", "'" + theAdDown + "'" ,"PYTHON")
                       arcpy.CalculateField_management(inPly, "MAXADUP", "'" + theAdUp + "'" ,"PYTHON")
                       #arcpy.AddMessage("theProvider_type: " + str(theProviderType))
                        if theTyDown != "IsNull":
                           arcpy.CalculateField_management(inPly, "TYPICDOWN", "'" + theTyDown + "'" ,"PYTHON")
                        if theTyUp != "IsNull":
                           arcpy.CalculateField_management(inPly, "TYPICUP", "'" + theTyUp + "'" ,"PYTHON")
                       arcpy.CalculateField_management(inPly, "State", "'" + theST + "'" ,"PYTHON")
                       arcpy.CalculateField_management(inPly, "Provider_Type", theProviderType,"PYTHON")
                       arcpy.CalculateField_management(inPly, "ENDUSERCAT", "'" + theEndUserCat + "'" ,"PYTHON")
                       #arcpy.AddMessage("theProvider_type: " + str(theProviderType))
                       arcpy.Buffer_analysis(inPly, bfPly, "100 Feet", "FULL", "ROUND", "LIST", theFields)
                       if myCnt == 1:  #this is the first time through, rename the bfPly to Address
                           arcpy.Rename_management(bfPly,"Address")
                       else: #otherwise append it to the first one through
                           arcpy.Append_management([bfPly], "Address")  
                   del theH, theW
               #then buffer them
               else:  
                   arcpy.AddMessage("          processing by buffering: " + outPT)
                   arcpy.Buffer_analysis(outPT, bfPly, "500 Feet", "FULL", "ROUND", "LIST", theFields)
                   if myCnt == 1:  #this is the first time through, rename the bfPly to Address
                       arcpy.Rename_management(bfPly,"Address")
                   else: #otherwise append it to the first one through
                       arcpy.Append_management([bfPly], "Address")  
               chkFC = [outPT, outRT, inPly, bfPly]
               for cFC in chkFC:
                   if arcpy.Exists(cFC):
                       arcpy.Delete_management(cFC)
               del outPT, outRT, inPly, bfPly, cFC, chkFC
               myCnt = myCnt + 1
           del theProvName, theDBA, theFRN, theTransTech, theAdUp, theAdDown, theTyUp, \
               theTyUpQ, theTyDown, theTyDownQ, theQry, myFLName, theProviderType,theEndUserCat
       sbdd_ExportToShape("Address")
       arcpy.Delete_management("Address_frq")
       del row, myCnt, theFields, theQ, myFL, myFD
   return ()
env.scratchWorkspace = env.workspace
infc = "D:\\projects\\Fire_AK_reburn\\data\\viirs\\viirs_pts.gdb\\VNP14IMGTDL_NRT_Alaska_7d"
outfc = "D:\\projects\\Fire_AK_reburn\\data\\viirs\\viirs_pts.gdb\\VNP14IMGTDL_NRT_Alaska_7d_proj"

# Project to meter based coordinate system (NAD 83 Alaska Albers) - USER DEFINED BY CHANGING THE INPUT AND OUTPUT FEATURE CLASSES
arcpy.Project_management(infc, outfc, "PROJCS['NAD_1983_Alaska_Albers',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Albers'],PARAMETER['False_Easting',0.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-154.0],PARAMETER['Standard_Parallel_1',55.0],PARAMETER['Standard_Parallel_2',65.0],PARAMETER['Latitude_Of_Origin',50.0],UNIT['Meter',1.0]]", "WGS_1984_(ITRF00)_To_NAD_1983", "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]", "NO_PRESERVE_SHAPE", "")

# Loop the select by attributes and convert point to raster
with arcpy.da.SearchCursor(outfc, ["JULIAN"]) as cursor:
    julian_days = sorted({row[0] for row in cursor})

print(julian_days)

# Set the extent environment using a keyword.
arcpy.env.extent = "-600000 2233344 700000 781582"

valField = "JULIAN"
assignmentType = "MOST_FREQUENT"
priorityField = "NONE"
cellSize = 375
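
# checkAndDelete is not defined in this excerpt; a minimal sketch of what it
# presumably does (an assumption, not the original helper):
def checkAndDelete(dataset):
    # remove any leftover dataset from a previous run
    if arcpy.Exists(dataset):
        arcpy.Delete_management(dataset)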

for item in julian_days:
    selected = env.workspace + r"\selected5" + "_" + str(item)
    outRaster = env.workspace + r"\raster5" + "_" + str(item)
    print item
    checkAndDelete(selected)
    checkAndDelete(outRaster)
    arcpy.Select_analysis(outfc, selected, "JULIAN = " + str(item))

    arcpy.PointToRaster_conversion(selected, valField, outRaster, assignmentType, priorityField, cellSize)
Example No. 21
def point_to_raster():
    inRaster = r'{}/Buurten/{}/bk_ahn.tif'.format(path,buurt)
    fc=r'{}/RasterT_tif12'.format(outPoint_path)
    arcpy.PointToRaster_conversion(fc, "grid_code", inRaster, "MAXIMUM", "", inRaster)
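    # Note: inRaster is reused both as the output path (third argument) and as
    # the cell-size template (last argument); Point to Raster accepts an
    # existing raster there and copies its cell size.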
Example No. 22
    def execute(self, inRefGrid, in3DFC, out3DGrid, pScratchWorkspace = None):
    #def Convert3DLinetoRasterPy(self, inRefGrid, in3DFC, out3DGrid):
        sOK = apwrutils.C_OK
        try:
            # use scratchworkspace to hold intermediate datasets. ..ye, @1/2/2016 9:35:45 AM on ZYE1
            if((flooddsconfig.debugLevel & 2)==2): arcpy.AddMessage("in convert3dlinetoraster: os.environ['TMP']={}, os.environ['TEMP']={}".format(os.environ['TMP'], os.environ['TEMP']))
            if(pScratchWorkspace is not None):
                pScratchWKS = pScratchWorkspace
                arcpy.env.scratchWorkspace = pScratchWorkspace
            else:
                pScratchWKS =  flooddsconfig.pScratchWorkspace   # "%scratchworkspace%"   # "in_memory"   # 
            # Set current environment state
            envInitSnapRaster = arcpy.env.snapRaster    # snap raster
            envInitCellSize = arcpy.env.cellSize        # cell size
            envInitEnvExtent = arcpy.env.extent         # analysis environment

            # Set raster processing environment to input DEM grid
            arcpy.env.snapRaster = inRefGrid
            outCellSize = float(str(arcpy.GetRasterProperties_management(inRefGrid, "CELLSIZEX")))
            arcpy.env.cellSize = outCellSize
            arcpy.env.extent = inRefGrid

            # Setting workspace to input fc for the temporary FC copy
            fullPath = arcpy.Describe(in3DFC).path
            arcpy.env.workspace = fullPath
            tmpLineFC = os.path.join(pScratchWKS, "xTmpLine")
            tmpPntFC = os.path.join(pScratchWKS, "xTmpPnt")
            if(arcpy.Exists(tmpLineFC)): arcpy.Delete_management(tmpLineFC)
            if(arcpy.Exists(tmpPntFC)): arcpy.Delete_management(tmpPntFC)
            #tmpLineFC = fullPath + "\\xxxTmpLine"                                                           # temporary 3D line FC
            #tmpPntFC = fullPath + "\\xxxTmpPnt"                                                             # temporary 3D point FC
            # Start processing
            # ----------------
            dt = time.clock()
            # Create temporary 3D line FC and densify it (densify).
            if((self.DebugLevel & 1)==1):  arcpy.AddMessage("  Densifying input 3D line feature class...")
            arcpy.CopyFeatures_management(in3DFC, tmpLineFC)
            denDistance = outCellSize * 0.1                              # set densification distance to 1/10 of the cell size
#            arcpy.Densify_edit(tmpLineFC, "DISTANCE", "10 Feet")       # fixed distance replaced by one scaled to the cell size
            arcpy.Densify_edit(tmpLineFC, "DISTANCE", denDistance)       # densification distance scales with the cell size

            dt2 = time.clock()
            if((self.DebugLevel & 1)==1):  arcpy.AddMessage("      Densifying input 3D line feature class completed in " + str("%.2f" % (dt2 - dt)) + " seconds.")

            # Create temporary point FC (feature vertices to points).
            if((self.DebugLevel & 1)==1):  arcpy.AddMessage("  Converting densified 3D line into points...")
            arcpy.FeatureVerticesToPoints_management(tmpLineFC, tmpPntFC, "ALL")

            dt3 = time.clock()
            if((self.DebugLevel & 1)==1):  arcpy.AddMessage("      Converting densified 3D line into points completed in " + str("%.2f" % (dt3 - dt2)) + " seconds.")

            # Create 3D stream grid from points.
            if((self.DebugLevel & 1)==1):   arcpy.AddMessage("  Generating 3D line raster...")
            arcpy.PointToRaster_conversion(tmpPntFC,  "Shape.Z", out3DGrid)
            #arcpy.PointToRaster_conversion(tmpPntFC,  apwrutils.FN_ShapeAtZ, out3DGrid)

            dt4 = time.clock()
            if((self.DebugLevel & 1)==1):  arcpy.AddMessage("      Generating 3D line raster completed in " + str("%.2f" % (dt4 - dt3)) + " seconds.")
            # Clean up - delete temporary grids and FCs
            if((self.DebugLevel & 1)==1): arcpy.AddMessage("  Cleaning up...")
            try:
                arcpy.Delete_management(tmpLineFC, "")
                arcpy.Delete_management(tmpPntFC, "")
            except arcpy.ExecuteError:
                arcpy.AddWarning(str(arcpy.GetMessages(2)))
            except:
                arcpy.AddWarning(str(trace()))

                        
        except arcpy.ExecuteError:
            sMsg = str(arcpy.GetMessages(2))
            arcpy.AddError(sMsg)
        except:
            sMsg = str(trace())
            arcpy.AddWarning(sMsg)
            #arcpy.AddError(str(arcpy.GetMessages(2)))
        finally:
            # Setting output variables - needed for outputs for proper chaining
            arcpy.SetParameterAsText(2,out3DGrid)      # output = 3D line grid
            print ('Function Convert3DLinetoRasterPy finished')

        if(sOK==apwrutils.C_OK):
            tResults = (apwrutils.C_OK, out3DGrid)
        else:
            tResults = (sOK,)   # one-element tuple; (sOK) alone would be a bare scalar

        return tResults
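
# A hypothetical invocation sketch -- the enclosing class name is not shown in
# this excerpt, so ConvertTool stands in for it:
# tool = ConvertTool()
# sOK, out3DGrid = tool.execute(inRefGrid=r"C:\data\dem",
#                               in3DFC=r"C:\data\streams3d.shp",
#                               out3DGrid=r"C:\data\stream3d_grid")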
Example No. 23
dislocation = float(arcpy.GetParameterAsText(6))

# Set the study area extent using a shapefile
study_area = arcpy.GetParameterAsText(7)
study_area_limits = extents(study_area)
arcpy.env.extent = ' '.join([str(value) for value in study_area_limits])

raster_list = list()
pix_to_point_list = list()
delta = int(max_box)
while i >= 0:
    # The name of the raster generated in each step is its 'delta' value. It is saved in the output directory
    output_raster = os.path.join(output_dir, str(delta) + 'm.tif')
    raster_list.append(output_raster)
    if desc_input.shapeType == 'Point':
        arcpy.PointToRaster_conversion(input, fld, output_raster, 'MOST_FREQUENT', '', delta)
    if desc_input.shapeType == 'Polyline':
        arcpy.PolylineToRaster_conversion(input, fld, output_raster, 'MAXIMUM_LENGTH', '', delta)
    pixel_to_point = arcpy.RasterToPoint_conversion(output_raster, os.path.join(output_dir, str(delta) + 'm'))
    pix_to_point_list.append(pixel_to_point)
    delta = delta / 2
    i = i - 1

# Duplicates the input for second largest box in order to compute the box count for the largest box size
pix_to_point_list[0] = pix_to_point_list[1]

# Creates the output point shapefile. The automatically created field ('Id') stores a number that identifies the box where the counting was done
arcpy.CreateFeatureclass_management(os.path.dirname(output_shape), os.path.basename(output_shape), 'POINT', spatial_reference=study_area)

# Stores the box size for a given count, in a given box and box size
arcpy.AddField_management(output_shape, 'box_size', 'DOUBLE')
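
# The rasters and point shapefiles generated above feed a box-counting
# regression; a minimal sketch of the final estimate, assuming box_sizes and
# box_counts lists were accumulated from those outputs (not shown here):
#
#   import numpy as np
#   slope, intercept = np.polyfit(np.log(box_sizes), np.log(box_counts), 1)
#   fractal_dimension = -slope
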
# Set coordinate system
arcpy.env.outputCoordinateSystem = Input_Points

# Determine type of input feature
file_extension = os.path.splitext(Input_Points)[1]

# Select search points based on minimum search distance
arcpy.AddMessage(
    "Determining search points based on minimum search distance...")
arcpy.RepairGeometry_management(Input_Points, "DELETE_NULL")
if file_extension == ".shp":
    valField = "FID"
    assignmentType = "MAXIMUM"
    priorityField = ""
    arcpy.PointToRaster_conversion(Input_Points, valField, Search_Raster,
                                   assignmentType, priorityField,
                                   Min_Search_Distance)
else:
    valField = "OBJECTID"
    assignmentType = "MAXIMUM"
    priorityField = ""
    arcpy.PointToRaster_conversion(Input_Points, valField, Search_Raster,
                                   assignmentType, priorityField,
                                   Min_Search_Distance)
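# Collapse every occupied cell to 1, mask the result back to the input points,
# and convert the surviving cells to search points.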
reclassField = "VALUE"
remap = RemapRange([[0, 1000000, 1]])
Search_Reclassify = Reclassify(Search_Raster, reclassField, remap, "NODATA")
Search_Extract = ExtractByMask(Search_Reclassify, Input_Points)
outputField = "VALUE"
arcpy.RasterToPoint_conversion(Search_Extract, Search_Points, outputField)
Example No. 25
    fieldName = str(field.name)

    print("Starting the processing " + fieldName)

    if fieldName != "FID" and fieldName != "Shape":

        raster = str("C:/Users/Shan Ye/Desktop/paleoclimate/raster/" +
                     fieldName + "Stat")

        table = str(
            "C:/Users/Shan Ye/Desktop/paleoclimate/spatialGrid/ZonalSt_" +
            fieldName)

        # point to raster

        arcpy.PointToRaster_conversion(points, fieldName, raster, "MEAN", "",
                                       "0.61")

        print("point to raster done")

        # zonal statistics

        arcpy.gp.ZonalStatisticsAsTable_sa(refGrid, "PageName", raster, table,
                                           "DATA", "MEAN")

        print("zonal stats done")

        # join table

        arcpy.JoinField_management(refGrid, "PageName", table, "PageName",
                                   "MEAN")
Example No. 26
tahmin = log_r.predict_proba(pre)
logr_cls = pd.DataFrame(data=tahmin,
                        index=range(s_analiz),
                        columns=["zeros", "ones"])
K = pd.concat([koor, logr_cls], axis=1)
#///////////////////////////////Saving Prediction Data as logr.csv/////////////
arcpy.AddMessage("Saving Prediction Data as logr.csv")
logr_result = os.path.join(sf, "logr.csv")
K.to_csv(logr_result, columns=["x", "y", "ones"])
#//////////////////////////Creating Susceptibility map/////////////////////////
arcpy.AddMessage("Analysis finished")
logr_sus_map = os.path.join(sf, "logr_sus")
arcpy.AddMessage("Creating SUSCEPTIBILITY Map and Calculating ROC ")
arcpy.MakeXYEventLayer_management(logr_result, "x", "y", "model", koordinat,
                                  "ones")
arcpy.PointToRaster_conversion("model", "ones", logr_sus_map, "MOST_FREQUENT",
                               "", cell_size)
arcpy.AddMessage(
    "Susceptibility Map was created in {} folder as logr_sus raster file".
    format(sf))

#////////////////////////////CALCULATING PERFORMANCE///////////////////////////

mx = float(
    arcpy.GetRasterProperties_management(logr_sus_map, "MAXIMUM").getOutput(0))
mn = float(
    arcpy.GetRasterProperties_management(logr_sus_map, "MINIMUM").getOutput(0))

e = (float(mx) - float(mn)) / 100

d = []
x = 0
# Kriging interpolation test
# Kriging(in_point_features, z_field, kriging_model, cell_size, search_radius)
outKrig = Kriging(r"D:/shp_data/0_10_t_p.shp", "meanrfl30", KrigingModelOrdinary("CIRCULAR", 2000, 2.6, 542, 0), 2000, RadiusFixed(20000, 12))
outKrig.save(r"D:/shp_data/t/meanrfl30_0_10")




# Interpolate point files into raster images
pool = ['meanrfl30', 'meanrf', 'cvrf', 'mtmean', 'mtrange', 'Fal_PET']
for t in range(4):
    for m in ["sp","su","au","wi"]:
        for name in pool:
            sp = "D:/lp_data/project_data/{}_{}_v_p.shp".format(t,m)
            dp = "D:/lp/topoint/{}_{}_{}".format(name,t,m)
            arcpy.PointToRaster_conversion(sp, name,dp, "MAXIMUM", "", 2000)



"""

3、图像掩模提取

"""

import arcpy
from arcpy import env

# Image mask extraction
from arcpy.sa import *
env.workspace = r"D:/lp_data/"
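
# The extraction step itself is cut off in this excerpt; a minimal sketch,
# assuming hypothetical input and mask paths:
# masked = ExtractByMask("D:/lp_data/input_raster.tif", "D:/lp_data/mask.shp")
# masked.save("D:/lp_data/masked_raster.tif")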
Example No. 28
    def calculate_det(self, path2h_ras, path2dem_ras):
        try:
            arcpy.CheckOutExtension('Spatial')  # check out license
            arcpy.gp.overwriteOutput = True
            arcpy.env.workspace = self.cache

            try:
                self.logger.info(" * Reading input rasters ...")
                ras_h = arcpy.Raster(path2h_ras)
                ras_dem = arcpy.Raster(path2dem_ras)
                arcpy.env.extent = ras_dem.extent
                self.logger.info(" * OK")
            except:
                self.logger.info(
                    "ERROR: Could not find / access input rasters.")
                return True

            try:
                self.logger.info(" * Making Thalweg elevation raster ...")
                ras_hmin = Con((ras_h > 0.0), Float(ras_dem))
                ras_h_with_null = Con((Float(ras_h) > 0.0), Float(ras_h))
                temp_dem = Con(((ras_dem > 0) & IsNull(ras_h_with_null)),
                               Float(ras_dem))
                ras_dem = temp_dem
                self.logger.info(" * OK")
            except:
                self.logger.info("ERROR: Input Rasters contain invalid data.")
                return True

            try:
                self.logger.info(" * Converting Thalweg raster to points ...")
                pts_hmin = arcpy.RasterToPoint_conversion(
                    ras_hmin, self.cache + "pts_hmin.shp")
                self.logger.info(" * OK")
                self.logger.info(" * Converting DEM raster to points ...")
                pts_dem = arcpy.RasterToPoint_conversion(
                    ras_dem, self.cache + "pts_dem.shp")
                self.logger.info(" * OK")
            except arcpy.ExecuteError:
                self.logger.info(arcpy.AddError(arcpy.GetMessages(2)))
                return True
            except Exception as e:
                self.logger.info(arcpy.GetMessages(2))
                return True

            base_join = self.out_dir + '\\spat_join_det.shp'
            try:
                self.logger.info(" * Spatial join analysis ...")
                base_join = arcpy.SpatialJoin_analysis(
                    target_features=pts_dem,
                    join_features=pts_hmin,
                    out_feature_class=base_join,
                    join_operation='JOIN_ONE_TO_MANY',
                    join_type='KEEP_ALL',
                    match_option='CLOSEST')
                self.logger.info(" * OK")
            except arcpy.ExecuteError:
                self.logger.info(arcpy.AddError(arcpy.GetMessages(2)))
                return True
            except Exception as e:
                self.logger.info(arcpy.GetMessages(2))
                return True

            try:
                self.logger.info(
                    " * Converting relative Thalweg dem to raster ...")
                arcpy.PointToRaster_conversion(in_features=base_join,
                                               value_field="grid_cod_1",
                                               out_rasterdataset=self.cache +
                                               "ras_hmin_dem",
                                               cell_assignment="MEAN",
                                               cellsize=5)
                self.logger.info(" * OK")
            except arcpy.ExecuteError:
                self.logger.info(arcpy.AddError(arcpy.GetMessages(2)))
                return True
            except Exception as e:
                self.logger.info(arcpy.GetMessages(2))
                return True

            try:
                self.logger.info(
                    " * Calculating depth to Thalweg raster (detrended DEM) ..."
                )
                ras_hmin_dem = arcpy.Raster(self.cache + "ras_hmin_dem")
                ras_det = Con((ras_hmin_dem > 0), (ras_dem - ras_hmin_dem))
                self.logger.info(" * OK")
            except arcpy.ExecuteError:
                self.logger.info(arcpy.AddError(arcpy.GetMessages(2)))
                return True
            except Exception as e:
                self.logger.info(arcpy.GetMessages(2))
                return True

            try:
                self.logger.info(
                    " * Saving depth to Thalweg raster (detrended DEM) to:\n%s"
                    % self.out_dir + "\\dem_detrend.tif")
                ras_det.save(self.out_dir + "\\dem_detrend.tif")
                self.logger.info(" * OK")
            except arcpy.ExecuteError:
                self.logger.info(arcpy.AddError(arcpy.GetMessages(2)))
                return True
            except Exception as e:
                self.logger.info(arcpy.GetMessages(2))
                return True

            arcpy.CheckInExtension('Spatial')
        except arcpy.ExecuteError:
            self.logger.info("ExecuteERROR: (arcpy).")
            self.logger.info(arcpy.GetMessages(2))
            return True
        except Exception as e:
            self.logger.info("ExceptionERROR: (arcpy).")
            self.logger.info(e.args[0])
            return True
        try:
            del ras_det, ras_hmin, base_join, pts_dem, pts_hmin, ras_hmin_dem
        except:
            pass

        try:
            self.clean_up()
        except:
            pass
        # return False if successful
        return False
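
# A hypothetical call sketch -- the enclosing class is not shown in this
# excerpt, so `tool` stands in for an instance of it:
# failed = tool.calculate_det(path2h_ras=r"D:/data/h.tif",
#                             path2dem_ras=r"D:/data/dem.tif")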
import arcpy
import os
import re
import sys

from arcpy import env
env.workspace = 'E:\\workplace\\CarbonProject\\temp'

re_shp = re.compile(r'shp')
files_path = 'E:\\workplace\\CarbonProject\\shapefiles'
raster_path = 'E:\\workplace\\CarbonProject\\raster'
files = os.listdir(files_path)
shapefiles = []

value_field = 'co2_con'
cell_assignment = 'MOST_FREQUENT'

for file in files:
    # join with the folder so the directory check works regardless of the
    # current working directory
    if not os.path.isdir(os.path.join(files_path, file)):
        if re_shp.search(file):
            shapefiles.append(file)

if not shapefiles:
    sys.exit()   # a bare 'exit' is a no-op; actually stop when nothing matched

for shapefile in shapefiles:
    in_features = files_path + '\\' + shapefile
    filename = raster_path + '\\' + shapefile[:-3] + 'tif'
    arcpy.PointToRaster_conversion(in_features, value_field, filename, cell_assignment, "", 1)
Example No. 30
    print arcpy.GetMessages()
except:
    # Get the traceback object
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # Concatenate error information into message string
    pymsg = 'PYTHON ERRORS:\nTraceback info:\n{0}\nError Info:\n{1}' \
        .format(tbinfo, str(sys.exc_info()[1]))
    msgs = 'ArcPy ERRORS:\n {0}\n'.format(arcpy.GetMessages(2))
    # Return python error messages for script tool or Python Window
    arcpy.AddError(pymsg)
    arcpy.AddError(msgs)

# CONVERTING POINT FEATURE TO RASTER
arcpy.env.outputCoordinateSystem = arcpy.SpatialReference('WGS 1984 TM 6 NE')
valField = "Intensity"
assignmentType = "MEAN"
priorityField = ""
cellSize = 1

# noinspection SpellCheckingInspection
# The original nested loop iterated shape_files twice and never used the outer
# variable, converting every file N times; a single loop does the same work once.
for inFeatures in shape_files:
    outRaster = "{}\{}.tif".format(output_raster_folder, inFeatures[:-4])
    print('Working on {}.....'.format(inFeatures))
    arcpy.PointToRaster_conversion(inFeatures, valField, outRaster,
                                   assignmentType, priorityField, cellSize)
    # 'inFeatures[:-4]' strips the '.shp' extension from the output name;
    # otherwise we would get 'IKPL01.shp.tif'
    print('Finished writing {}.tif to {}.'.format(inFeatures[:-4],
                                                  env.workspace))