Exemplo n.º 1
0
def loadShapefile(file_name):
    """Load all records from a shapefile and its companion .dbf table.

    Side effect: populates the module-level ``db`` list with the rows
    yielded by ``dbfUtils.dbfreader``.

    Parameters:
        file_name: path to the .shp file; the matching .dbf path is
            derived by replacing the 4-character extension.

    Returns:
        list: the records produced by ``createRecord``.
    """
    global db
    # Read the companion dBASE table into the module-global `db`.
    # `with` guarantees the handle is closed even if dbfreader raises.
    dbf_file = file_name[0:-4] + '.dbf'
    with open(dbf_file, 'rb') as dbf:
        db = list(dbfUtils.dbfreader(dbf))

    records = []
    # `with` fixes the original leak: fp was never closed.
    with open(file_name, 'rb') as fp:
        # Shapefile header: shape type is the int32 at byte offset 32,
        # followed by the bounding box. Read for parity with the original,
        # though neither value is returned.
        fp.seek(32)
        shp_type = readAndUnpack('i', fp.read(4))
        shp_bounding_box = readBoundingBox(fp)

        # Records start at byte offset 100 (end of the fixed-size header).
        fp.seek(100)
        while True:
            shp_record = createRecord(fp)
            if shp_record is False:  # sentinel for end-of-file
                break
            records.append(shp_record)

    return records
def loadShapefile(file_name):
    """Load all records from a shapefile and its companion .dbf table.

    Side effect: populates the module-level ``db`` list with the rows
    yielded by ``dbfUtils.dbfreader``.

    Parameters:
        file_name: path to the .shp file; the matching .dbf path is
            derived by replacing the 4-character extension.

    Returns:
        list: the records produced by ``createRecord``.
    """
    global db
    # Load the companion dBASE table into the module-global `db`;
    # the context manager closes the file even on error.
    with open(file_name[0:-4] + '.dbf', 'rb') as dbf:
        db = list(dbfUtils.dbfreader(dbf))

    records = []
    # `with` fixes the original leak: fp was opened but never closed.
    with open(file_name, 'rb') as fp:
        # Header: int32 shape type at byte 32, then the bounding box.
        # Read for parity with the original, though neither is returned.
        fp.seek(32)
        shp_type = readAndUnpack('i', fp.read(4))
        shp_bounding_box = readBoundingBox(fp)

        # Record section begins at byte offset 100.
        fp.seek(100)
        while True:
            shp_record = createRecord(fp)
            if shp_record is False:  # sentinel for end-of-file
                break
            records.append(shp_record)

    return records
Exemplo n.º 3
0
def loadShapefile(filename):
    """Load a shapefile's features plus its companion .dbf table.

    Side effect: populates the module-level ``db`` list with the rows
    yielded by ``dbfUtils.dbfreader``.

    Parameters:
        filename: path to the .shp file; the matching .dbf path is
            derived by replacing the 4-character extension.

    Returns:
        dict with keys 'type' (int shape type), 'bounds' (from
        ``readBounds``) and 'features' (list of records, each passed
        through ``getPolyInfo``).
    """
    global db
    # Read the companion dBASE table into the module-global `db`;
    # the context manager closes the handle even if dbfreader raises.
    with open(filename[0:-4] + '.dbf', 'rb') as dbfile:
        db = list(dbfUtils.dbfreader(dbfile))

    # `with` fixes the original leak: fp was opened but never closed.
    with open(filename, 'rb') as fp:
        # Header: int32 shape type at byte offset 32, then the bounds.
        fp.seek(32)
        filetype = readAndUnpack('i', fp.read(4))
        bounds = readBounds(fp)

        # Record section begins at byte offset 100.
        fp.seek(100)
        features = []
        while True:
            feature = createRecord(fp)
            if feature is False:  # sentinel for end-of-file
                break
            getPolyInfo(feature)
            features.append(feature)

    return {'type': filetype, 'bounds': bounds, 'features': features}
Exemplo n.º 4
0
def loadShapefile(filename):
    """Load a shapefile's features plus its companion .dbf table.

    Side effect: populates the module-level ``db`` list with the rows
    yielded by ``dbfUtils.dbfreader``.

    Parameters:
        filename: path to the .shp file; the matching .dbf path is
            derived by replacing the 4-character extension.

    Returns:
        dict with keys 'type' (int shape type), 'bounds' (from
        ``readBounds``) and 'features' (list of records, each passed
        through ``getPolyInfo``).
    """
    global db
    # Load the dBASE table into the module-global `db`; the context
    # manager guarantees the handle is closed on any exit path.
    with open(filename[0:-4] + '.dbf', 'rb') as dbfile:
        db = list(dbfUtils.dbfreader(dbfile))

    # `with` fixes the original leak: fp was never closed.
    with open(filename, 'rb') as fp:
        # Header: int32 shape type at byte offset 32, then the bounds.
        fp.seek(32)
        filetype = readAndUnpack('i', fp.read(4))
        bounds = readBounds(fp)

        # Record section begins at byte offset 100.
        fp.seek(100)
        features = []
        while True:
            feature = createRecord(fp)
            if feature is False:  # sentinel for end-of-file
                break
            getPolyInfo(feature)
            features.append(feature)

    return {'type': filetype, 'bounds': bounds, 'features': features}
Exemplo n.º 5
0
Arquivo: Shp.py Projeto: LFox/finches
 def _readDbfTable(self):
     """Read the companion .dbf table for this shapefile.

     The .dbf path is derived from ``self.filePath`` by replacing the
     4-character extension.

     Returns:
         list: rows as produced by ``dbfUtils.dbfreader``.
     """
     # `with` closes the handle even if dbfreader raises (the original
     # open/close pair leaked the file on error).
     with open(self.filePath[0:-4] + '.dbf', 'rb') as dbf:
         return list(dbfUtils.dbfreader(dbf))
Exemplo n.º 6
0
            })
            #'http://prj2epsg.cloudfoundry.com/search.json'
            url = 'http://prj2epsg.org/search.json'
            webres = urlopen(url, query)
            jres = json.loads(webres.read())
            if 'errors' in jres and 0 < length(jres['errors']):
                srid = None
            if jres['codes']:
                srid = int(jres['codes'][0]['code'])
        except:
            srid = None  # ensure set back to 4326 whatever happens

try:
    # Try to detect the encoding
    dbf = open(dbf_file.strip(), 'rb')
    db = dbfUtils.dbfreader(dbf)

    fnames = db.next()
    ftypes = db.next()

    # find string fields
    sfields = []
    for fno in range(len(fnames)):
        if (ftypes[fno][0] == 'C'): sfields.append(fno)

    detector = UniversalDetector()

    # 100 rows should be enough to figure encoding
    # TODO: more broader and automated testing, allow
    #       setting limit by command line param
    for row in itertools.islice(db, 100):
Exemplo n.º 7
0
      query = urlencode({
          'exact' : True,
          'error' : True,
          'mode' : 'wkt',
          'terms' : prj_string})
      webres = urlopen('http://prj2epsg.org/search.json', query)
      jres = json.loads(webres.read())
      if jres['codes']:
        srid = int(jres['codes'][0]['code'])
    except:
      srid=4326 # ensure set back to 4326 whatever happens    

try:
    #Try to detect the encoding
    dbf = open(dbf_file, 'rb')
    db = dbfUtils.dbfreader(dbf)

    fnames = db.next()
    ftypes = db.next()

    # find string fields
    sfields = []
    for fno in range(len(fnames)):
      if ( ftypes[fno][0] == 'C' ) : sfields.append(fno)
   
    detector = UniversalDetector()

    # 100 rows should be enough to figure encoding
    # TODO: more broader and automated testing, allow 
    #       setting limit by command line param
    for row in itertools.islice(db, 100):
Exemplo n.º 8
0
Arquivo: Shp.py Projeto: LFox/finches
 def _readDbfTable(self):
     """Read the companion .dbf table for this shapefile.

     The .dbf path is derived from ``self.filePath`` by replacing the
     4-character extension.

     Returns:
         list: rows as produced by ``dbfUtils.dbfreader``.
     """
     # `with` closes the handle even if dbfreader raises (the original
     # open/close pair leaked the file on error).
     with open(self.filePath[0:-4] + '.dbf', 'rb') as dbf:
         return list(dbfUtils.dbfreader(dbf))