Code example #1
def list_csvs_in_folder(path_to_folder, filetype, option='basename_only'):
    """List files with the given extension in a folder; by default return base names only."""
    import arcpy
    arcpy.env.workspace = path_to_folder
    
    if option == 'basename_only':
        return [i.split('.')[0] for i in arcpy.ListFiles('*.' + filetype)]
    else:
        return arcpy.ListFiles('*.' + filetype)
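A minimal call sketch for the helper above; the folder path and the example return values are hypothetical:

# Hypothetical folder of exported CSV tables
names = list_csvs_in_folder(r'C:\data\tables', 'csv')  # e.g. ['parcels', 'owners']
with_ext = list_csvs_in_folder(r'C:\data\tables', 'csv', option='full')  # e.g. ['parcels.csv', 'owners.csv']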
Code example #2
def checkandrepair1(path_result, path_new):
    # Relies on arcpy, sys and time being imported at module level, and on the
    # project's own wrapper helpers CreateFileGDB, Copy, AddField and AlterField (not shown here).
    arcpy.env.workspace = path_new
    GDBs_new = arcpy.ListFiles('*.gdb')

    #   print "Checking whether all the data are created"

    while 1:

        arcpy.env.workspace = path_result
        GDBs_result = arcpy.ListFiles('*.gdb')
        needrecreate = 0
        needrecopy = 0

        for GDB_new in GDBs_new:
            if GDB_new not in GDBs_result and GDB_new[-6:-4] != '00':
                needrecreate = 1

        if needrecreate == 1:
            for GDB_new in GDBs_new:
                if GDB_new not in GDBs_result and GDB_new[-6:-4] != '00':
                    print GDB_new + ': lack of GDB'
                    outname = GDB_new
                    CreateFileGDB(path_result, outname)
        #print "Checking whether all the layers are copied and modified"
        for GDB_result in GDBs_result:
            arcpy.env.workspace = path_result + '\\' + GDB_result
            layer = arcpy.ListFeatureClasses()
            if not layer:
                needrecopy = 1
        if needrecopy == 1:
            for GDB_result in GDBs_result:
                arcpy.env.workspace = path_result + '\\' + GDB_result
                layer = arcpy.ListFeatureClasses()
                if not layer:
                    f = open(sys.path[0] + '/log/' + GDB_result[0:6] + '.txt',
                             'a')
                    time_start = time.strftime("%Y-%m-%d#%H:%M:%S",
                                               time.localtime())
                    start = time.clock()
                    print GDB_result + ': Lack of LCA_new'
                    inFeatureClass = path_new + '/' + GDB_result + '/LCA'
                    outFeatureClass = 'LCA_new'
                    Copy(inFeatureClass, outFeatureClass)
                    AddField(outFeatureClass, 'PAC', '50', GDB_result)
                    AlterField(outFeatureClass, 'CC', 'CC_new', 'CC_new')
                    time_end = time.strftime("%Y-%m-%d#%H:%M:%S",
                                             time.localtime())
                    end = time.clock()
                    f.write('R0,' + time_start + ',' + time_end + ',' +
                            str(end - start) + '\n')
                    f.close()
        if needrecreate == 0 and needrecopy == 0:
            break
    print 'check1 finished'
Code example #3
def inspect_files(level, folder, found):
    item = "File"
    level += 1
    arcpy.env.workspace = folder
    for name in arcpy.ListFiles():
        if name not in found:
            print_item(level, item, None, name)
Code example #4
def merge_shapefiles(target_workspace, target_shapefile_name):
    import arcpy
    import time
    try:
        start_timeStampName = time.strftime('%Y_%m_%d %H:%M:%S',
                                            time.localtime(time.time()))
        start = time.time()
        print("合并shps任务开始,启动时间:" + start_timeStampName)

        arcpy.env.workspace = target_workspace
        arcpy.env.overwriteOutput = True

        shps = arcpy.ListFiles('*.shp')

        inputs = ""
        target_shp = target_shapefile_name
        for shp in shps:
            inputs += shp + ";"

        inputs = inputs[:-1]

        print("inputs shps:", inputs)
        print("target shp:", target_shp)

        arcpy.Merge_management(inputs, target_shp)

        end_timeStampName = time.strftime('%Y_%m_%d %H:%M:%S',
                                          time.localtime(time.time()))
        end = time.time()
        elapse_time = end - start
        print("合并shps任务结束,结束时间:" + end_timeStampName, "任务总耗时:", elapse_time,
              "秒")
    except Exception as e:
        print("Merge shapefiles failed!", e)
Code example #5
def listfolder(path):
    # Relies on module-level objects set up elsewhere in the script:
    # csvfile (a csv.writer) and file (the open handle behind it).
    arcpy.env.workspace = path

    featureclass = arcpy.ListFeatureClasses()
    raster = arcpy.ListRasters()
    workspace = arcpy.ListWorkspaces()
    mxd = arcpy.ListFiles("*.mxd")

    csvfile.writerow(["hello world"])

    for fc in featureclass:
        desc = arcpy.Describe(fc)
        print("there was a feature class")
        csvfile.writerow(["feature class goes here"])

    for ras in raster:
        desc = arcpy.Describe(ras)
        print("there was a raster")
        csvfile.writerow(["raster data goes here"])

    for maps in mxd:
        desc = arcpy.Describe(maps)
        csvfile.writerow(["maps data will go here"])
        #need to do some other stuff before printing

    for work in workspace:
        if work.endswith(".gdb"):
            print(work + " is a file geodatabase will run function")
            #call list file geodatabase function
        elif os.path.isdir(work):
            print(work + " Is a folder will call again to run recursively")
            #make recursive call to function to start again :)
    file.flush()
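The function above leans on module-level objects that are not shown: csvfile (a csv.writer) and file (its underlying file handle), plus arcpy and os. A minimal setup sketch, with a hypothetical output path:

import csv
import os
import arcpy

file = open(r'C:\temp\inventory.csv', 'wb')  # hypothetical log file; 'wb' suits the Python 2 csv module
csvfile = csv.writer(file)
csvfile.writerow(['type', 'name'])

listfolder(r'C:\data')  # walk one folder level; recursion is still marked TODO in the function
file.close()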
Code example #6
def main():
    def mod_date(filename):
        """Get the modification date of a file"""
        return datetime.date.fromtimestamp(os.path.getmtime(filename))

    arcpy.env.workspace = r"E:\Data Collection\UnProcessedSurvey"
    surveyDict = {}

    #Get modification date for each KMZ file and associate the base file name with a date
    rawKmz = arcpy.ListFiles('*.KM*')
    for kmz in rawKmz:
        kmzPath = os.path.join(arcpy.env.workspace, kmz)
        kmzBase = os.path.splitext(kmz)[0]
        surveyDate = mod_date(kmzPath)
        surveyDict[kmzBase] = surveyDate

    arcpy.env.workspace = r"E:\Data Collection\ProcessedSurvey\SurveyTrees.gdb"
    keep = [i for i in arcpy.ListFeatureClasses() if i[-3:] != '_AA']
    ftc = [f for f in arcpy.ListFeatureClasses() if f not in keep]
    for x in ftc:
        ftcPath = os.path.join(arcpy.env.workspace, x)
        base = x[:-3]
        getSurveyDate = surveyDict[
            base]  #Find the survey date associated with the base file name
        arcpy.CalculateField_management(ftcPath, "Survey_Date",
                                        "'" + str(getSurveyDate) + "'",
                                        "PYTHON_9.3", "#")
    return
Code example #7
def bad_listdir_method(folder_loc, blank_project):
    """
    Search a folder structure.
    What are the limitations of this method? Is this a recessive search?
    """

    # List the folders in folder.
    folder_list = os.listdir(folder_loc)
    # Iterate over the folders.
    for folder in folder_list:
        child_folder = os.path.join(folder_loc, folder)
        new_folder = os.listdir(child_folder)
        # Iterate one deep subfolders. This will not search the sub sub folders.
        for i in new_folder:
            folder1 = os.path.join(child_folder, i)
            arcpy.env.workspace = folder1
            # List the files in the folder.
            files_list = arcpy.ListFiles()
            for w in files_list:
                
                split = os.path.splitext(w)
                if split[1] == ".mxd":
                    out_path = os.path.join(folder1, split[0] + ".aprx")
                    # Update the mxd. Call this method only if the path is a file.
                    mxd_path = os.path.join(folder1, w)
                    create_project(mxd_path, out_path, blank_project)
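The docstring's question can be answered: the loop above only descends two folder levels, so it is not truly recursive. A sketch under the same assumptions (create_project is the project helper already used above; os.walk visits every level):

import os

def recursive_mxd_search(folder_loc, blank_project):
    """Visit every level below folder_loc and convert each .mxd to an .aprx."""
    for dirpath, dirnames, filenames in os.walk(folder_loc):
        for name in filenames:
            if os.path.splitext(name)[1].lower() == ".mxd":
                mxd_path = os.path.join(dirpath, name)
                out_path = os.path.splitext(mxd_path)[0] + ".aprx"
                create_project(mxd_path, out_path, blank_project)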
Code example #8
def allMXDtoPDFandJPG(ruta_MXD, calidad):

    arcpy.env.workspace = ruta_MXD
    ruta = arcpy.env.workspace

    mxd_list = arcpy.ListFiles("*.mxd")
    print(mxd_list)

    for mxd in mxd_list:
        current_mxd = arcpy.mapping.MapDocument(os.path.join(ruta, mxd))
        pdf = mxd[:-4] + ".pdf"
        jpg = mxd[:-4] + ".jpg"

        print(pdf)
        print(jpg)

        arcpy.mapping.ExportToPDF(current_mxd,
                                  pdf,
                                  resolution=calidad,
                                  image_quality="BEST",
                                  data_frame="PAGE_LAYOUT")

        arcpy.mapping.ExportToJPEG(current_mxd,
                                   jpg,
                                   resolution=calidad,
                                   data_frame="PAGE_LAYOUT")

    return mxd_list, pdf, jpg, calidad
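A minimal call sketch; the folder and DPI value are hypothetical:

# Export every MXD in the folder to PDF and JPG at 300 dpi
mxds, last_pdf, last_jpg, dpi = allMXDtoPDFandJPG(r'C:\proyectos\mapas', 300)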
Code example #9
def trans_pro_wgs84(in_path, out_path):
    """
    Transform shpfile project to wgs84
    :param in_path: input file path
    :param out_path: output file path
    :return:
    """
    print "*****************************************"
    print "Bat shapeFile transfer project to wgs84----------"

    assert isinstance(in_path, basestring)
    assert isinstance(out_path, basestring)
    if not os.path.exists(in_path):
        print 'Input path is incorrect ,please re-input.'
        sys.exit(1)
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    arcpy.env.workspace = in_path
    for shp in arcpy.ListFiles('*.shp'):
        wgs84 = "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]]," \
                "PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]"
        in_data_set = shp
        template_data_set = ''
        transformation = ''
        outfile = out_path
        try:
            arcpy.BatchProject_management(in_data_set, outfile, wgs84,
                                          template_data_set, transformation)
            print '%s transform project success !' % in_data_set
        except Exception as te:
            print 'transprowgs84 occur exception : %s' % te.message

    print "ALL Data transform Project Success ---------"
    print "*****************************************"
Code example #10
def define_pro_wgs84(in_path):
    """
    Define shpfile project to wgs84
    :param in_path: input file path
    :return:
    """
    print "*****************************************"
    print "Bat define project to shapeFile----------"

    assert isinstance(in_path, basestring)
    if not os.path.exists(in_path):
        print 'Input path is incorrect ,please re-input.'
        sys.exit(1)
    arcpy.env.workspace = in_path
    for shp in arcpy.ListFiles('*.shp'):
        wgs84 = "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]]," \
                "PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]"
        in_data_set = shp
        try:
            arcpy.DefineProjection_management(in_data_set, wgs84)
            print '%s Define Project wgs84 Success !' % in_data_set
        except Exception as e:
            print e.message

    print "ALL data define project success ---------"
    print "*****************************************"
Code example #11
def main():
    def mod_date(filename):
        """get modification date of a file"""
        return datetime.date.fromtimestamp(os.path.getmtime(filename))

    arcpy.env.workspace = r"E:\Data Collection\UnProcessedSurvey"
    surveyDict = {}

    rawKmz = arcpy.ListFiles('*.KM*')
    for kmz in rawKmz:
        kmzPath = os.path.join(arcpy.env.workspace, kmz)
        kmzBase = os.path.splitext(kmz)[0]
        surveyDate = mod_date(kmzPath).strftime('%m%d%Y')
        surveyDict[kmzBase] = surveyDate

    arcpy.env.workspace = r"E:\Data Collection\ProcessedSurvey\SurveyTrees.gdb"
    copyLoc = r"E:\Data Collection\Trees\Processed.gdb\SecondCycle"  #processed geodatabase
    out = r"E:\Data Collection\Trees\SecondCycle.gdb\Master"  #master geodatabase
    keep = [i for i in arcpy.ListFeatureClasses() if i[-3:] != '_AA']
    ftc = [f for f in arcpy.ListFeatureClasses() if f not in keep]
    for x in ftc:
        ftcPath = os.path.join(arcpy.env.workspace, x)
        dropFields = ['Join_Count', 'TARGET_FID']
        arcpy.DeleteField_management(ftcPath, dropFields)
        outBase = x[:-3]
        outName = outBase + '_' + surveyDict[outBase]
        outData = os.path.join(copyLoc, outName)
        arcpy.Copy_management(ftcPath, outData)
        arcpy.Append_management(ftcPath, out, "TEST", "")
    return
Code example #12
File: main.py  Project: jasonMatney/GIS-540-final
def make_map(dir):
    prev_workspace = arcpy.env.workspace
    arcpy.env.workspace = dir
    #Make a Map Document
    try:
        mapList = arcpy.ListFiles("*.mxd")
        mxd_path = os.path.join(dir, mapList[0])

        mxd = arcpy.mapping.MapDocument(mxd_path)
        data_frames = arcpy.mapping.ListDataFrames(mxd)
        data_frame = data_frames[0]
        fcs = arcpy.ListFeatureClasses()

        for f in fcs:
            out_layer = f[:-4]
            out_layer_file = out_layer + ".lyr"
            arcpy.MakeFeatureLayer_management(f, out_layer)
            arcpy.SaveToLayerFile_management(out_layer, out_layer_file)
            layer_object = arcpy.mapping.Layer(f)
            arcpy.mapping.AddLayer(data_frame, layer_object)

        arcpy.RefreshTOC()
        project_map = mapList[0][:-4] + "Presentation.mxd"
        mxd.saveACopy(os.path.join(prev_workspace, project_map))
        os.startfile(os.path.join(prev_workspace, project_map))
        arcpy.env.workspace = prev_workspace
        del mxd
    except Exception:
        arcpy.AddMessage("\tPlease check inputs")
        arcpy.AddMessage(arcpy.GetMessages())
Code example #13
def export_xml():
    #set the output path for export metadata function
    OUTPUT = os.path.join(base_path, record, "final_XMLs")

    #get a list of all the XMLs
    files = arcpy.ListFiles("*.xml")
    print files
    raw_input()

    #loop through XMLs and export the metadata for each to the final_XMLs directory
    for f in files:

        if f[len(f) - 7:len(f) - 4] == 'shp':
            filePath = os.path.join(OUTPUT, f[:-8] + '.xml')
        elif f[len(f) - 7:len(f) - 4] == 'txt':
            continue
        else:
            filePath = os.path.join(OUTPUT, f)

        print filePath

        statinfo = os.stat(os.path.join(base_path, record, record_path, f))

        print f, '=', statinfo.st_size

        if statinfo.st_size == 0:
            continue

        if os.path.exists(filePath):
            print f, 'already exists.  Deleting now.'
            os.remove(filePath)

        print 'Trying to export XML for: ', f
        arcpy.ExportMetadata_conversion(
            f, TRANSLATOR, os.path.join(OUTPUT, f[:f.find(".")] + ".xml"))
Code example #14
def map_create(_dataset, _sde_path, _mxd_folder):
    arcpy.AcceptConnections(_sde_path, True)
    _dataset_name = _sde_path.split('\\')[-1] + '.' + _dataset
    _dataset_path = _sde_path + '/' + _dataset_name
    print(_dataset_path)
    arcpy.env.workspace = _mxd_folder
    mxd_list = arcpy.ListFiles()
    mxd_temp = _mxd_folder + "/" + "Temp.mxd"
    mxd_new = _mxd_folder + "/" + _dataset + ".mxd"
    if _dataset + '.mxd' not in mxd_list:
        log_write(_dataset, "{}.mxd was created".format(_dataset))
        arcpy.Copy_management(mxd_temp, mxd_new)
        # Read MXD file
        _mxd = arcpy.mapping.MapDocument(mxd_new)
        # Read Layers sets
        _layers = arcpy.mapping.ListDataFrames(_mxd, "Layers")[0]
        _layers.name = _dataset  # Layers sets rename
        print(_layers.name)
        # Read feature lists
        arcpy.env.workspace = _dataset_path
        for ft in arcpy.ListFeatureClasses():
            print(ft)
            layer_add = arcpy.mapping.Layer(
                _dataset_path + '/' + ft
            )  # The full path is needed here so the layer keeps its full dataset name.
            print(layer_add)
            print(layer_add.datasetName)
            arcpy.mapping.AddLayer(_layers, layer_add,
                                   "AUTO_ARRANGE")  # Add layer
            log_write(_dataset,
                      "{1} was added in {0}.mxd".format(_dataset, ft))
        arcpy.RefreshActiveView()
        arcpy.RefreshTOC()
        _mxd.save()
    else:
        _mxd = arcpy.mapping.MapDocument(mxd_new)
        log_write(_dataset, "{}.mxd already exists".format(_dataset))
        print(arcpy.mapping.ListLayers(_mxd))
        layer_lists = list(map(ly_name, arcpy.mapping.ListLayers(_mxd)))
        print(layer_lists)
        # Read feature lists
        arcpy.env.workspace = _dataset_path
        print(arcpy.ListFeatureClasses())
        for ft in arcpy.ListFeatureClasses():
            if ft not in layer_lists:
                print(ft)
                layer_add = arcpy.mapping.Layer(
                    _dataset_path + '/' + ft
                )  # The full path is needed here so the layer keeps its full dataset name.
                print(layer_add)
                print(layer_add.datasetName)
                _layers = arcpy.mapping.ListDataFrames(_mxd, _dataset)[0]
                arcpy.mapping.AddLayer(_layers, layer_add,
                                       "AUTO_ARRANGE")  # Add layer
                log_write(_dataset,
                          "{1} was added in {0}.mxd".format(_dataset, ft))
        arcpy.RefreshActiveView()
        arcpy.RefreshTOC()
        _mxd.save()
Code example #15
File: main.py  Project: jasonMatney/GIS-540-final
def find_all_csv(workspace):
    prev_workspace = arcpy.env.workspace

    arcpy.env.workspace = workspace
    csv = arcpy.ListFiles("*csv")

    arcpy.env.workspace = prev_workspace
    return csv
Code example #16
def import_XML():

    #baseSHP = raw_input('Enter shapefile to use as template: ')
    #shpPath = os.path.join(base_path, record,record_path,baseSHP)
    templatePath = os.path.join(base_path, record, 'final_XMLs\\template.xml')
    #print shpPath

    # Used to export a shapefile as a default template
    '''arcpy.ExportMetadata_conversion (shpPath, ISOTRANSLATOR,
    templatePath)'''

    for root, dir, files in os.walk(os.path.join(base_path, record)):
        #set workspace
        arcpy.env.workspace = root

        #get a list of all the SHPs
        files = arcpy.ListFiles("*.shp")

        totalTimeTic = time.time()

        #loop through SHPs and import the metadata for each
        for f in files:

            #shapefilePath = os.path.join(base_path,record,record_path,f)

            tic = time.time()
            print 'Trying to import XML to: ', f
            arcpy.ImportMetadata_conversion(templatePath, "FROM_ESRIISO", f,
                                            "DISABLED")

            # Trying to get thumbnail, postponing for now.
            """# get the map document
            mxd = arcpy.mapping.MapDocument(blankMXD)
            # get the data frame
            df = arcpy.mapping.ListDataFrames(mxd,"*")[0]
            # create a new layer
            newlayer = arcpy.mapping.Layer(shapefilePath)
            # add the layer to the map at the bottom of the TOC in data frame 0
            arcpy.mapping.AddLayer(df, newlayer,"BOTTOM")
            print "creating thumbnail for " + f
            mxd.makeThumbnail()
            mxd.save()
            arcpy.mapping.RemoveLayer(df, newlayer)
            mxd.save()"""

            toc = time.time()
            s = toc - tic
            m, s = divmod(s, 60)
            h, m = divmod(m, 60)
            timeFormat = "%d:%02d:%02d" % (h, m, s)
            print 'Time elapsed: ', timeFormat

        totalTimeToc = time.time()
        s = totalTimeToc - totalTimeTic
        m, s = divmod(s, 60)
        h, m = divmod(m, 60)
        timeFormat = "%d:%02d:%02d" % (h, m, s)
        print 'Total time elapsed: ', timeFormat
Code example #17
def checkandrepair3(path_result):
    path_match = sys.path[0] + '/match.config'
    dictionary = read_match(path_match)
    arcpy.env.workspace = path_result
    GDBs_result = arcpy.ListFiles('*.gdb')
    while 1:
        needrepair = 0
        for GDB_result in GDBs_result:
            arcpy.env.workspace = path_result + '\\' + GDB_result
            layers = arcpy.ListFeatureClasses()
            if 'LCA_intersect' not in layers:
                needrepair = 1
        if needrepair == 1:
            for GDB_result in GDBs_result:
                arcpy.env.workspace = path_result + '\\' + GDB_result
                layers = arcpy.ListFeatureClasses()
                if 'LCA_intersect' not in layers:
                    print GDB_result + ': lack of LCA_intersect'
                    f = open(sys.path[0] + '/log/' + GDB_result[0:6] + '.txt',
                             'a')
                    start = time.clock()
                    time_start = time.strftime("%Y-%m-%d#%H:%M:%S",
                                               time.localtime())

                    AlterField('LCA_old', 'CC', 'CC_old', 'CC_old')

                    # create a fishnet covering the extent of LCA_new
                    extent = getMinXY('LCA_new')
                    fishnet_out = 'grid'
                    originCoordinate = str(extent[0] - 0.025) + " " + str(extent[1] - 0.025)
                    yAxisCoordinate = str(extent[0]) + " " + str(extent[1] + 10)
                    numRows = math.ceil((extent[3] - extent[1]) / 0.05) + 1
                    numColumns = math.ceil((extent[2] - extent[0]) / 0.05) + 1
                    arcpy.CreateFishnet_management(fishnet_out, originCoordinate,
                                                   yAxisCoordinate, '0.05', '0.05',
                                                   numRows, numColumns, '#',
                                                   'NO_LABELS', 'LCA_new', 'POLYGON')

                    # LCA_old intersect grid
                    intersectOutput = 'LCA_old_grid'
                    intersect(['LCA_old', 'grid'], intersectOutput)

                    # LCA_new intersect grid
                    intersectOutput = 'LCA_new_grid'
                    intersect(['LCA_new', 'grid'], intersectOutput)

                    # LCA_old_grid intersect LCA_new_grid
                    inFeatures = ['LCA_old_grid', 'LCA_new_grid']
                    intersectOutput = 'LCA_intersect'
                    intersect(inFeatures, intersectOutput)
                    print GDB_result + ': intersect finished'

                    end = time.clock()
                    time_end = time.strftime("%Y-%m-%d#%H:%M:%S",
                                             time.localtime())
                    f.write('R2,' + time_start + ',' + time_end + ',' +
                            str(end - start) + '\n')
                    f.close()
        else:
            break
    print "check3 finished"            
Code example #18
def dbf_to_csv():
    #print arcpy.ListFiles("*.dbf")
    
    #find dbf files
    for dbf_file in arcpy.ListFiles("*Zstats.dbf"):
        dbf_str = float(dbf_file.replace('_Zstats.dbf',''))
        if (dbf_str >31251) & (dbf_str<65001):
            outLocation = 'Z:/AKSeward/Data/GIS/Teller/2019_Snow/zonal_stats_onbuffers/'
            outName = dbf_file.replace('.dbf','.csv')
            arcpy.TableToTable_conversion(dbf_file, outLocation, outName)
            print outName
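dbf_to_csv reads arcpy.env.workspace rather than taking a folder argument, so the workspace must be set before calling it. A minimal sketch with a hypothetical input folder:

import arcpy

arcpy.env.workspace = r'C:\data\zonal_stats_dbf'  # hypothetical folder holding *_Zstats.dbf files
dbf_to_csv()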
Code example #19
File: walk.py  Project: Liyamu/Python_Scripts
def walk( key, directory, wildcard = None ):
    '''Walks through root DIRECTORY and lists all KEY files
KEY - is the type of data to list (ex: RASTER, TABLE, SHP, MXD)
DIRECTORY - is the root directory for the walk (i.e. the starting point)
WILDCARD - is a pattern to select against in the file name

EX:  walk( 'SHP', r'C:\\', '*BON*' )
*This will capture all SHPs with BON in the title that live on your C drive*
'''
    #Preserve the old workspace
    oldWKSP = arcpy.env.workspace

    #Set the workspace to the directory input
    arcpy.env.workspace = directory

    #Create return list object
    retList = []

    #Check to see if any sub workspaces exist
    lstWKSPs = arcpy.ListWorkspaces()
    if len(lstWKSPs) > 0:
        for wksp in lstWKSPs:
            retList = retList + walk( key , wksp , wildcard )

    if key.upper().strip()  == 'RASTER':
        for raster in arcpy.ListRasters( wildcard ):
            retList.append( os.path.join(directory , raster ) )

    elif key.upper().strip()  == 'TABLE':
        for table in arcpy.ListTables( wildcard ):
            retList.append( os.path.join(directory, table ) )

    elif key.upper().strip() == 'SHP':
        for shp in arcpy.ListFeatureClasses( wildcard ):
            retList.append( os.path.join(directory , shp ) )

    elif key.upper().strip()  == 'MXD':
        for fileName in arcpy.ListFiles( wildcard ):
            if fileName[-3:].lower() == 'mxd':
                retList.append(os.path.join(directory, fileName ) )

#
#    elif key.upper().strip() == 'LYR':
#        for shp in arcpy.ListFeatureClasses( wildcard ):
#            retList.append( os.path.join(directory , lyr ) )
#
#    elif key.upper().strip() == 'PRJ':
#        for shp in arcpy.ListFeatureClasses( wildcard ):
#            retList.append( os.path.join(directory , prj ) )

    arcpy.env.workspace = oldWKSP

    return retList
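Following the docstring's own example, a call sketch that collects the results (the drive and wildcard below are hypothetical):

# Full paths of every MXD under C:\projects whose file name contains "BON"
mxds = walk('MXD', r'C:\projects', '*BON*')
for path in mxds:
    print path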
Code example #20
def calc_area(path_result):

    arcpy.env.workspace = path_result
    GDBs_result = arcpy.ListFiles('*.gdb')
    list_txt = arcpy.ListFiles('*.txt')
    for pac in GDBs_result:
        if pac[0:6] + '.txt' not in list_txt:
            f = open(sys.path[0] + '/log/' + pac[0:6] + '.txt', 'a')
            start = time.clock()
            time_start = time.strftime("%Y-%m-%d#%H:%M:%S", time.localtime())
            print pac[0:6] + 'Lack of area'
            I_path = sys.path[0] + "\\x64\\spatialoperation.exe"
            file_txt = path_result + '/' + pac[0:6] + '.txt'
            os.system("{0} {1} {2} {3} {4} ".format(I_path, "TRANSITIONMATRIX",
                                                    path_result + '/' + pac,
                                                    'LCA_intersect', file_txt))
            time_end = time.strftime("%Y-%m-%d#%H:%M:%S", time.localtime())
            end = time.clock()
            f.write('R3,' + time_start + ',' + time_end + ',' +
                    str(end - start) + '\n')
            f.close()
Code example #21
def initialCheck():

    arcpy.env.workspace = working_directory
    wks = arcpy.ListWorkspaces('*', 'FileGDB')

    wks.remove(WorkingGDB)
    arcpy.AddMessage("Cleaning up working directory, one moment please")
    for fgdb in wks:
        if arcpy.Exists(fgdb):
            arcpy.AddMessage("deleting %s" % fgdb)
            arcpy.Delete_management(fgdb)

    files = arcpy.ListFiles("*.lyr")

    for lyr in files:
        if arcpy.Exists(lyr):
            arcpy.AddMessage("deleting %s" % lyr)
            arcpy.Delete_management(lyr)

    UID_List = []
    with arcpy.da.SearchCursor(final_fc, ['UniqueID']) as cursor:
        for row in cursor:
            UID_List.append(str(row[0]) + '.kml')

    arcpy.env.workspace = kmlDirectory
    arcpy.env.overwriteOutput = True

    for kml in arcpy.ListFiles("*.kml"):
        kmlFile = str(kml)
        if kmlFile not in UID_List:
            arcpy.AddMessage('%s is a unique name' % kml[:-4])
            GetKmlToFc(kml)

        else:
            arcpy.AddMessage(
                '%s needs a unique name, this kml file will not be processed!'
                % kml[:-4])

    getLayerToFC()
Code example #22
def do_clip(input_dir, output_dir, clip_feature, is_copy_mxd):
    """
    deal with shape and working to clip
    :param input_dir: input dir
    :param output_dir: output dir
    :param clip_feature: clip feature(path + name)
    :param is_copy_mxd: is or not copy mxd to output_dir
    :return:clip num and failure clip features name
    """
    assert isinstance(input_dir, basestring)
    assert isinstance(output_dir, basestring)
    assert isinstance(clip_feature, basestring)
    assert isinstance(is_copy_mxd, bool)
    if not os.path.exists(input_dir) or not os.path.exists(clip_feature):
        return 0, []
    input_cur_dir = os.path.basename(input_dir)
    output_dir = os.path.join(
        output_dir, (input_cur_dir + "_" +
                     os.path.splitext(os.path.split(clip_feature)[1])[0]))
    print output_dir
    if os.path.exists(output_dir):
        print "Output dir is exist, delete."
        shutil.rmtree(output_dir)
        time.sleep(1)
        os.makedirs(output_dir)
    else:
        os.makedirs(output_dir)

    clip_num = 1
    fail_features = []
    # set the workspace
    arcpy.env.workspace = input_dir
    for input_feature in arcpy.ListFiles("*.shp"):
        input_feature_name = os.path.splitext(input_feature)[0]
        out_feature = os.path.join(output_dir, input_feature_name)

        print "Execute clip num = %d, chip feature is: %s" % (
            clip_num, input_feature_name)
        try:
            arcpy.Clip_analysis(input_feature, clip_feature, out_feature)
            print "Finish."
            clip_num += 1
        except Exception as clip_e:
            fail_features.append(input_feature_name)
            print "%s occur exception is : %s" % (input_feature_name,
                                                  clip_e.message)

    if is_copy_mxd:
        copy_mxd(input_dir, output_dir)

    return clip_num - 1, fail_features
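A minimal call sketch with hypothetical paths; the two return values are the number of successful clips and the names of any shapefiles that failed:

count, failed = do_clip(r'C:\data\roads', r'C:\data\clipped',
                        r'C:\data\boundary\city_limit.shp', False)
print "clipped %d shapefiles, %d failures" % (count, len(failed))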
Code example #23
def dwg2gdb(cad_folder, gdb_path, muban_excel):
    # wb = openpyxl.load_workbook(muban_folder + '地形图提取模板.xlsx')
    wb = openpyxl.load_workbook(muban_excel)
    os.chdir(cad_folder)
    arcpy.env.workspace = cad_folder  # ListFiles reads arcpy.env.workspace, not the cwd
    cads = arcpy.ListFiles("*.dwg")
    num00 = len(cads)
    print('文件夹中存在CAD数据:' + str(cads) + '\n')

    # extract the data
    feature_kinds = ['Point', 'Polyline', 'Polygon', 'Annotation']
    for feature_kind in feature_kinds:
        num = num00
        sheet = wb[feature_kind]
        max_row = sheet.max_row
        for cad in cads:
            num = num -1
            print('\n' + "-------------------------" + feature_kind + "还剩:" + str(num) + ' 个数据需要提取------------------------' + '\n')
            print('【正在从"' + cad + '"中提取' + feature_kind + '数据】')
            dwg_feature = cad_folder + cad + "\\" + feature_kind
            arcpy.env.workspace = gdb_path
            arcpy.env.overwriteOutput = True
            for name in range(2, max_row+1):
                sheetvalue = sheet.cell(name, 1).value
                if sheetvalue is None:
                    continue
                # print(sheetvalue)
                cadname = cad.split('.')[0]
                # print(cadname)
                feature_name = sheetvalue + '_' + cadname
                selet_condition = sheet.cell(name, 2).value
                if feature_name is None:
                    pass
                else:
                    selection = arcpy.SelectLayerByAttribute_management(dwg_feature, "NEW_SELECTION", selet_condition)
                    rowcount1 = arcpy.GetCount_management(selection)
                    if int(str(rowcount1)) > 0:   # GetCount returns a Result object, hence the str/int cast
                        arcpy.FeatureClassToFeatureClass_conversion(dwg_feature, gdb_path, feature_name, selet_condition)
                        print("成功在数据库中添加要素:" + feature_name + '  数量:' + str(rowcount1))
        print('-----------------------' + '\n' + '【' + feature_kind + "数据已提取完成" + '】' + "\n")

    # delete empty feature classes
    print('【' + "正在删除空数据" + '】')
    arcpy.env.workspace = gdb_path
    for shpfile in arcpy.ListFeatureClasses():
        rowcount = arcpy.GetCount_management(shpfile)
        # print("数据名称: " + shpfile + "     数据量:" + str(rowcount))
        if int(str(rowcount)) == 0:
            fullpath = gdb_path + '\\' + shpfile
            arcpy.Delete_management(fullpath)  # remove the empty dataset from the GDB
            print(shpfile + ": 由于数据为空,已在GDB中删除")
    print("--------------------------CAD数据入库已完成------------------------", '\n')
Code example #24
def ShpGeoJsonToJson(shpField, shpPath):
    """
    将shp类型数据转为Json数据
    :param shpField: shp数据进行写入Json的标识字段
    :param shpPath: 存放shp数据的文件夹路径
    :return:
    """
    assert isinstance(shpPath, basestring)
    assert isinstance(shpField, list)
    arcpy.env.workspace = shpPath
    for shp in arcpy.ListFiles("*.shp"):
        shpData = os.path.join(shpPath, shp)
        jsonFile = os.path.join(shpPath,
                                (os.path.splitext(shp)[0] + '_EChart.js'))
        if os.path.exists(jsonFile):
            print '%s File Is Exists , Delete' % jsonFile
            os.unlink(jsonFile)
        f_w = codecs.open(jsonFile, 'w', 'utf-8')
        data = []
        num = 0
        rowNums = GetShpRowNums(shpData)
        try:
            with arcpy.da.SearchCursor(shpData, shpField) as cursor:
                data.append('{"type": "FeatureCollection" ,')
                data.append('"features": [')
                for row in cursor:
                    num += 1
                    if isinstance(row[0], unicode):
                        name = row[0].encode('utf-8')
                    else:
                        name = str(row[0]).decode('utf-8')
                    geoName = row[0]
                    info = row[1]
                    geoInfo = str(info).split(':')[1].split(',"')[0].split(
                        '}')[0]
                    data.append('{"type":"Feature",')
                    data.append('"properties":{"name":"%s"},' % geoName)
                    data.append('"geometry":{')
                    data.append('"type":"MultiPolygon",')
                    data.append('"coordinates":[%s' % geoInfo)
                    if num < rowNums:
                        data.append(']}},')
                    else:
                        data.append(']}}]}')
                f_w.writelines(data)
                f_w.close()
                print '%s is transfer to json success !' % str(jsonFile)
        except Exception as e:
            print 'Occur exception : %s' % e.message
            print 'Tool execute failure .'
            sys.exit(0)
Code example #25
def main():
    arcpy.env.overwriteOutput = True
    inspace = sys.argv[1]
    arcpy.env.workspace = inspace
    files = arcpy.ListFeatureClasses()

    for f in files:
        infile = os.path.join(arcpy.env.workspace, f)
        outf = os.path.splitext(f)[0] + '.csv'
        outfile = os.path.join(inspace, outf)
        arcpy.QuickExport_interop(infile, outfile)

    print('{} contains:'.format(inspace))
    print(arcpy.ListFiles('*csv'))
Code example #26
def CreateAtlas(folder):
    # Define workspace, and assign pdf_folder as a function of the master folder
    pdf_folder = folder + "\\zAtlas"
    arcpy.env.workspace = pdf_folder
    pdf_list = arcpy.ListFiles("*.pdf")
    arcpy.AddMessage(pdf_list)
    # Defines path for final Atlas
    pdfPath = folder + "\\City Atlas.pdf"
    if os.path.exists(pdfPath):
        os.remove(pdfPath)
    pdfDoc = arcpy.mapping.PDFDocumentCreate(pdfPath)

    # Append the first ten map pages (guarding against shorter lists)
    for pdf_file in pdf_list[:10]:
        pdfDoc.appendPages(pdf_folder + "\\" + pdf_file)
    # saveAndClose is required to actually write the atlas PDF to disk
    pdfDoc.saveAndClose()
Code example #27
def excelTOtable():
    env.workspace = excels

    excel_list = arcpy.ListFiles()
    for filename in excel_list:
        arcpy.ExcelToTable_conversion(filename, filename)
        print "File %s converted to DBASE file." % filename

    DBASEs = arcpy.ListTables()
    for filename in DBASEs:
        arcpy.TableToTable_conversion(filename, geodatabase,
                                      filename[:-4] + "_tempTable")
        print "File %s converted to Table." % filename
        arcpy.Delete_management(filename)
        print "Redundant DBASE files deleted."
Code example #28
def mpkExport(arguments):
    """
    Goes through the list of mxds in the given directory and tries to export them as mpks.
    Needs:
        arguments: the list of arguments from the parser in main()
    """
    workspace = arguments.inDir
    output = arguments.outDir
    logDir = arguments.logDir
    logName = arguments.logName
    error_count = 0
    # Set environment settings
    arcpy.env.overwriteOutput = True
    arcpy.env.workspace = workspace
    mxdlist = arcpy.ListFiles("*.mxd")
    # Create a logfile
    log = createLogfile(logDir, logName)

    if len(mxdlist) < 1:
        error_count -= 1
    else:
        # Loop through the workspace, find all the mxds and create a map package using the same name as the mxd
        for mxd in mxdlist:
            print("Packaging " + mxd)
            outPath = output + '\\' + os.path.splitext(mxd)[0]
            try:
                arcpy.PackageMap_management(mxd, outPath + '.mpk', "PRESERVE",
                                            "CONVERT_ARCSDE", "#", "ALL",
                                            "RUNTIME")
                print("SUCCESS: " + mxd + " processed successfully.")
                # if a packaging folder was created, it will be removed
                if os.path.exists(outPath):
                    try:
                        shutil.rmtree(outPath, onerror=remove_readonly)
                    except Exception as e:
                        error_count += 1
                        writeLog(log, e, error_count, mxd)
                        print(
                            "Removal of package directory " + outPath +
                            " failed. Consider running the script with administrator permissions."
                        )
            except Exception as e:
                error_count += 1
                writeLog(log, e, error_count, mxd)
                print("ERROR while processing " + mxd)

    # close the Logfile
    closeLog(log, error_count)
Code example #29
def dwg2gdb(input_folder, output_folder, template_excel):
    # wb = openpyxl.load_workbook(muban_folder + '地形图提取模板.xlsx')
    wb = openpyxl.load_workbook(template_excel)
    os.chdir(input_folder)
    arcpy.env.workspace = input_folder  # ListFiles reads arcpy.env.workspace, not the cwd
    cads = arcpy.ListFiles("*.dwg")
    print('文件夹中存在CAD数据:' + str(cads) + '\n')

    # extract the data
    feature_kinds = ['Point', 'Polyline', 'Polygon', 'Annotation']
    for feature_kind in feature_kinds:
        sheet = wb[feature_kind]
        max_row = sheet.max_row
        print("共有" + str(max_row - 1) + "条" + feature_kind + "数据将会被提取出来")
        for cad in cads:
            print('【正在从"' + cad + '"中提取' + feature_kind + '数据】')
            dwg_feature = input_folder + cad + "\\" + feature_kind
            arcpy.env.workspace = output_folder
            arcpy.env.overwriteOutput = True
            for name in range(2, max_row + 1):
                sheetvalue = sheet.cell(name, 1).value
                if sheetvalue is None:
                    continue  # skip empty template rows
                feature_name = sheetvalue + '_' + cad.split('.')[0]
                selet_condition = sheet.cell(name, 2).value
                if feature_name is None:
                    pass
                else:
                    print('[' + feature_name + ']')
                    print(dwg_feature)
                    print(output_folder)
                    print(feature_name)
                    print(selet_condition)
                    arcpy.FeatureClassToFeatureClass_conversion(
                        dwg_feature, output_folder, feature_name,
                        selet_condition)
                    print("成功在数据库中添加要素:" + feature_name + '\n')
            print('-----------------------' + '\n' + '【' + feature_kind +
                  "数据已提取完成" + '】' + "\n")

    # delete empty feature classes
    print('【' + "正在删除空数据" + '】')
    arcpy.env.workspace = output_folder
    for shpfile in arcpy.ListFeatureClasses():
        rowcount = arcpy.GetCount_management(shpfile)
        print("数据名称: " + shpfile + "     数据量:" + str(rowcount))
        if int(str(rowcount)) == 0:
            fullpath = output_folder + '\\' + shpfile
            arcpy.Delete_management(fullpath)  # remove the empty dataset from the GDB
            print(shpfile + ": 由于数据为空,已在GDB中删除")
    print("【" + "CAD数据入库已完成" '】' + "\n")
Code example #30
def samples(workspace, outputfolder, samplesize):
    """Take the first samplesize rows of every CSV in workspace and write them to outputfolder under the same names."""
    import os
    import pandas as pd
    import arcpy
    arcpy.env.workspace = workspace
    arcpy.env.overwriteOutput = True
    path1 = outputfolder
    path = workspace
    templist = arcpy.ListFiles("*.csv")
    print(templist)
    for i in templist:
        print(i)
        # os.path.join avoids relying on a trailing separator in the folder paths
        df = pd.read_csv(os.path.join(path, i), encoding='latin-1')
        df.sort_values('pointid', inplace=True)
        df = df.head(samplesize)
        df.to_csv(os.path.join(path1, i))
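A minimal call sketch with hypothetical folders, keeping the first 500 rows of each CSV:

samples(r'C:\data\point_tables', r'C:\data\point_samples', 500)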