Code example #1
    def pull(self,
             name=None,
             wsne=None,
             tstart=datetime.min,
             tend=datetime.max):
        """Pulls a subset of the imerg gridded dataset as netcdf from an motu enabled server
        This routine calls the internal routines of the motuclient python client
        :param name: Name of the  output datatset (file will be named 'name.nc')
        :param wsne: bounding box of the section of interest as [West,South,North,East]
        :param tstart: start date (as yyyy-mm-dd) for the extraction
        :param tend: end date (as yyyy-mm-dd) for the extraction
        """

        if name is not None:
            self.queueDownloadjob(name, wsne, tstart, tend)

        auth = self.conf.authCred("podaac")
        ddir = self.dataDir()
        for ky, val in self.pullqueue.items():
            url = self.rooturl + val
            fout = os.path.join(ddir, ky + ".nc")
            http(url,
                 cookiefile=self.cookiefile,
                 auth=auth,
                 lastmod=datetime(2022, 7, 1)).download(ddir,
                                                        outfile=fout,
                                                        check=True)
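
The docstring above spells out the pull parameters; a minimal usage sketch (the instance name ds, the bounding box, and the dates are hypothetical, not from the source):

    from datetime import datetime

    # request a subset named 'imerg_subset' over [West, South, North, East] = [5, 50, 54, 10]
    ds.pull(name="imerg_subset",
            wsne=[5.0, 50.0, 54.0, 10.0],
            tstart=datetime(2020, 1, 1),
            tend=datetime(2020, 12, 31))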
Code example #2
File: RGIDsets.py Project: danilecug/geoslurp
def pullRGI(downloaddir,comparewithversion):
        httpserv=http('http://www.glims.org/RGI/rgi60_files/00_rgi60.zip',lastmod=datetime(2018,1,1))
        #Newest version which is supported by this plugin
        newestver=(6,0)
        upd=False
        #now determine whether to retrieve the file
        if newestver > comparewithversion:
            uri,upd=httpserv.download(downloaddir,check=True)
            if not os.path.exists(os.path.join(downloaddir,'extract')):
                #unzip all the goodies
                zipd=os.path.join(downloaddir,'zipfiles')
                with ZipFile(uri.url,'r') as zp:
                    zp.extractall(zipd)

                #now recursively unzip the nested zip files
                for zf in glob(zipd+'/*zip'):
                    slurplogger().info("Unzipping %s"%(zf))
                    with ZipFile(zf,'r') as zp:
                        zp.extractall(os.path.join(downloaddir,'extract'))
                #remove zipfiles after extracting
                shutil.rmtree(zipd)

                #Patches one csv file which contains a . instead of a , at one point
                #download patch from github
                pf='04_rgi60_ArcticCanadaSouth_hypso.csv.patch'
                slurplogger().info("Patching csv file %s"%(pf) )
                httpget=http("https://raw.githubusercontent.com/strawpants/geoslurp/master/patches/"+pf)
                uri,pupd=httpget.download(os.path.join(downloaddir,'extract'),check=True)
                #apply the patch and wait for it to finish
                subprocess.run(['patch','-i',pf],cwd=os.path.dirname(uri.url),check=True)
        else:
            slurplogger().info("RGI 6.0 already downloaded")


        return newestver,upd
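
Across these examples, http(url, ...).download(dir, check=True) returns a (uri, upd) pair: uri.url points at the local copy and upd indicates whether a fresh download actually happened. A minimal sketch of that recurring pattern (URL and directory are hypothetical):

    from datetime import datetime

    httpserv = http("https://example.org/data.zip", lastmod=datetime(2022, 1, 1))
    uri, upd = httpserv.download("/tmp/downloads", check=True)
    if upd:
        # only reprocess when the server copy was newer than the cached one
        print("downloaded to %s" % uri.url)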
Code example #3
 def pull(self):
     """Download ascii file"""
     httpserv = http(os.path.join(
         'https://icesat4.gsfc.nasa.gov/cryo_data/drainage_divides/',
         self.fbase),
                     lastmod=datetime(2020, 1, 10))
     uri, upd = httpserv.download(self.cacheDir(), check=True, gzip=True)
Code example #4
def pullFoG(downloaddir):
    """Pulls a zip archive of the WGMS Fluctuations of Glaciers (FoG) data"""
    fogSource=http("http://www.wgms.ch/downloads/DOI-WGMS-FoG-2018-06.zip")
    urif,upd=fogSource.download(downloaddir)
    if upd:
        with ZipFile(urif.url,'r') as zp:
            zp.extractall(downloaddir)
Code example #5
File: deg1n2.py Project: danilecug/geoslurp
 def download(self):
     if self.uri.startswith('http'):
         uri=http(self.uri,lastmod=self.meta['lastupdate'])
     elif self.uri.startswith('ftp'):
         uri=ftp(self.uri)
         uri.updateModTime()
     else:
         #guard against referencing 'uri' while unbound below
         raise ValueError("Unsupported URI scheme: %s"%(self.uri))
     uri.download(self.direc,check=True,outfile=self.fout)
Code example #6
File: Hydrosheds.py Project: strawpants/geoslurp
def pullHydro(hytype, downloaddir):
    # see https://www.dropbox.com/sh/hmpwobbz9qixxpe/AAAI_jasMJPZl_6wX6d3vEOla for the 'root' of the hydrosheds data
    hysource = {
        "hybas_af":
        "https://www.dropbox.com/sh/hmpwobbz9qixxpe/AADoPLdVZNd2JG-KaJNY0zT1a/HydroBASINS/standard/af/hybas_af_lev01-06_v1c.zip",
        "hybas_eu":
        "https://www.dropbox.com/sh/hmpwobbz9qixxpe/AABz1Pym5esD6GUJcnzaaqpEa/HydroBASINS/standard/eu/hybas_eu_lev01-06_v1c.zip",
        "af_riv_30s":
        "https://www.dropbox.com/sh/hmpwobbz9qixxpe/AAC9imuUajl_1bS0tKWqPE8Ya/HydroSHEDS_RIV/RIV_30s/af_riv_30s.zip",
        "eu_riv_30s":
        "https://www.dropbox.com/sh/hmpwobbz9qixxpe/AAD68vqkhRNJd5qK3NVvM7TSa/HydroSHEDS_RIV/RIV_30s/eu_riv_30s.zip"
    }
    httpserv = http(hysource[hytype], lastmod=datetime(2021, 2, 8))
    #Newest version which is supported by this plugin
    uri, upd = httpserv.download(downloaddir, check=True)
    if upd:
        #unzip all the goodies
        zipd = os.path.join(downloaddir, 'extract')
        with ZipFile(uri.url, 'r') as zp:
            zp.extractall(zipd)
    else:
        slurplogger().info(
            "This component of hydrosheds is already downloaded")

    return upd
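
A usage sketch for pullHydro (the download directory is hypothetical; valid hytype values are the keys of the hysource dict above):

    upd = pullHydro("hybas_eu", "/path/to/hydrosheds")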
Code example #7
def pullFoG(downloaddir):
    """Pulls a zip archive of the WGMS data"""
    fogSource = http("http://www.wgms.ch/downloads/DOI-WGMS-FoG-2018-06.zip")
    urif, upd = fogSource.download(downloaddir)
    if upd:
        with ZipFile(urif.url, 'r') as zp:
            zp.extractall(downloaddir)
Code example #8
 def getSubTree(self, url):
     if self.token:
         #add the api token to the header
         headers = [f"Authorization: token {self.token}"]
     else:
         headers = None
     return json.loads(http(url, headers=headers).buffer().getvalue())
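
A hedged usage sketch from within the class, assuming self.token holds a GitHub personal access token and the URL is a Git trees API endpoint (the repository URL is hypothetical):

    tree = self.getSubTree("https://api.github.com/repos/strawpants/geoslurp/git/trees/master")
    for item in tree["tree"]:
        # 'tree' and 'path' follow the documented GitHub trees API response layout
        print(item["path"])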
Code example #9
File: ArcticDEM.py Project: strawpants/geoslurp
 def pull(self, intersect=None):
     # download the entire mosaic domain in one tif
     if self.res in ['1km', '500m', '100m']:
         rasteruri = http(
             "http://data.pgc.umn.edu/elev/dem/setsm/ArcticDEM/mosaic/v3.0/"
             + self.res + "/" + self.rasterfile,
             lastmod=datetime(2018, 9, 26))
         rasterfileuri, upd = rasteruri.download(self.srcdir, check=False)
Code example #10
    def pull(self):
        """Pulls the google kml files from the copernicus server"""
        rooturl = 'https://sentinel.esa.int/documents/247904/685098/Sentinel-3-Absolute-Ground-Tracks.zip'
        cache = self.cacheDir()
        httpserv = http(rooturl, lastmod=datetime(2021, 11, 29))
        uri, upd = httpserv.download(cache, check=True)

        if upd:
            with ZipFile(uri.url, 'r') as zp:
                zp.extractall(cache)
Code example #11
    def pull(self):
        """Pulls the dataset from github and unpacks it in the cache directory"""
        #download the inventory file
        lastchanged = datetime(2021, 11, 5)
        inventory = "https://github.com/strawpants/GRACE-filter/raw/master/inventory.xlsx"
        uri, upd = http(inventory,
                        lastmod=lastchanged).download(self.cacheDir(),
                                                      check=True)
        pdinvent = pd.read_excel(uri.url, engine="openpyxl")
        #download all the files
        ddir = self.dataDir()
        for idx, row in pdinvent.iterrows():
            ffile, upd = http(row.uri,
                              lastmod=lastchanged).download(ddir, check=True)
            #update file with newly downloaded file
            pdinvent.at[idx, 'uri'] = self.conf.generalize_path(ffile.url)

        #write the updated inventory as a csv file
        pdinvent.to_csv(self.pdfile)
Code example #12
    def loadCoordinates(self):
        """Download the time, lon and lat coordinates and load these coordinates from the netdf file"""
        if self.dscoords is not None:
            return

        auth = self.conf.authCred("podaac")
        cache = self.cacheDir()
        coordfile = os.path.join(cache, "Coordinates.nc")
        opendapcoordinates = self.rooturl + "?time[0:1:255],lat[0:1:1799],lon[0:1:3599]"

        http(opendapcoordinates,
             cookiefile=self.cookiefile,
             auth=auth,
             lastmod=datetime(2022, 7,
                              1)).download(cache,
                                           outfile=os.path.basename(coordfile),
                                           check=True)

        self.dscoords = xr.open_dataset(coordfile, decode_times=True)
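
Once loadCoordinates has run, the coordinates can be queried through the normal xarray API; a minimal sketch from within the class (the point of interest is hypothetical):

    self.loadCoordinates()
    # nearest-neighbour lookup of the grid node closest to the point of interest
    lon0 = self.dscoords.lon.sel(lon=4.9, method="nearest")
    lat0 = self.dscoords.lat.sel(lat=52.4, method="nearest")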
Code example #13
 def pull(self):
     """Download acsii files in zip and unpack ascii data"""
     httpserv = http(
         'https://github.com/AustralianAntarcticDivision/orsifronts/raw/master/data-raw/fronts.zip',
         lastmod=datetime(2018, 1, 1))
     uri, upd = httpserv.download(self.cacheDir(), check=True)
     if upd:
         #unpack zip
         with ZipFile(uri.url, 'r') as zp:
             zp.extractall(self.cacheDir())
Code example #14
    def pull(self):
        """Pulls the shapefile from the aviso server"""
        httpserv = http(os.path.join(
            'https://www.aviso.altimetry.fr/fileadmin/documents/missions/Swot/',
            self.zipf),
                        lastmod=datetime(2019, 9, 6))
        cache = self.cacheDir()
        uri, upd = httpserv.download(cache, check=True)

        if upd:
            with ZipFile(os.path.join(cache, self.zipf), 'r') as zp:
                zp.extractall(cache)
Code example #15
File: ArcticDEM.py Project: strawpants/geoslurp
    def pull(self):
        """Pulls the shapefile layers from the server"""
        zipf = http(
            "http://data.pgc.umn.edu/elev/dem/setsm/ArcticDEM/indexes/" +
            self.filebase + ".zip",
            lastmod=datetime(2018, 9, 26))

        #download the zip shapefiles
        downloaddir = self.cacheDir()
        uri, upd = zipf.download(downloaddir, check=True)
        zipd = os.path.join(downloaddir, 'extract')
        if not os.path.exists(zipd):
            #unzip the goodies
            with ZipFile(uri.url, 'r') as zp:
                slurplogger().info("Unzipping %s" % (uri.url))
                zp.extractall(zipd)
Code example #16
File: wribasin.py Project: whigg/geoslurp
    def pull(self):
        """Pulls the wribasin data from the internet and unpacks it in the cache directory"""
        #wrisource=http("http://www.fao.org/geonetwork/srv/en/resources.get")
        wrisource = http(
            "http://www.fao.org/geonetwork/srv/en/resources.get?id=30914&fname=wri_basins.zip&access=private"
        )

        # urif, upd=wrisource.download(self.cacheDir(),check=True,outfile=os.path.join(self.cacheDir(),"wri_basin.zip"),postdic={"id":"30914","fname":"wri_basins.zip","access":"private"})
        urif, upd = wrisource.download(self.cacheDir(),
                                       check=True,
                                       outfile=os.path.join(
                                           self.cacheDir(), "wri_basin.zip"))

        if upd:
            with ZipFile(urif.url, 'r') as zp:
                zp.extractall(self.cacheDir())
Code example #17
File: deg1n2.py Project: strawpants/geoslurp
 def pull(self):
     """Pulls the geocenter ascii files in the cache"""
     
     uri=http("https://wobbly.earth/data/"+self.fout,lastmod=datetime(2018,10,16)).download(self.cacheDir(),check=True)
Code example #18
File: geoshapes.py Project: strawpants/geoslurp
 def pull(self):
     """Pulls the geojson data from github and unpacks it in the cache directory"""
     uri = http(self.url)
     basename = os.path.basename(self.path)
     uri.download(direc=self.cacheDir(), outfile=basename)
Code example #19
File: deg1n2.py Project: strawpants/geoslurp
 def pull(self):
     """Pulls the geocenter ascii files in the cache"""
     uri=http("https://podaac-tools.jpl.nasa.gov/drive/files/allData/grace/docs/"+self.fout,lastmod=datetime(2019,12,1)).download(self.cacheDir(),check=True)
Code example #20
File: deg1n2.py Project: strawpants/geoslurp
 def pull(self):
     """Pulls the geocenter ascii files in the cache"""
     
     uri=http("http://download.csr.utexas.edu/pub/slr/geocenter/"+self.fout30).download(self.cacheDir())
     uri=http("http://download.csr.utexas.edu/pub/slr/geocenter/"+self.fout60).download(self.cacheDir())
Code example #21
 def getCatalog(url, auth=None):
     """Retrieve a catalogue"""
     slurplogger().info("getting Thredds catalog: %s" % (url))
     buf = http(url, auth=auth).buffer()
     return XMLTree.fromstring(buf.getvalue())
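
The returned element tree can be walked with the standard xml.etree API; a short sketch (the catalog URL is hypothetical; the namespace is the one conventionally used by THREDDS InvCatalog 1.0):

    xml = getCatalog("https://example.org/thredds/catalog.xml")
    ns = {"th": "http://www.unidata.ucar.edu/namespaces/thredds/InvCatalog/v1.0"}
    for ds in xml.findall(".//th:dataset", ns):
        print(ds.attrib.get("name"))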
Code example #22
File: AWIPIES.py Project: whigg/geoslurp
 def pull(self):
     """Pulls the OBP matlab file from the cloud"""
     cred=self.conf.authCred("awipies",['url','user','passw'])
     obpsource=http(cred.url,auth=cred)
     obpsource.download(self.dataDir(),outfile=self.obpfile)
Code example #23
File: icgem.py Project: danilecug/geoslurp
 def __init__(self):
     super().__init__(url="http://icgem.gfz-potsdam.de/tom_longtime")
     buf=http(self.rooturl).buffer()
     self._roothtml=HTMLtree(buf.getvalue())
Code example #24
File: github.py Project: whigg/geoslurp
 def getSubTree(self, url):
     if self.token:
         #append the api token to the url as a query parameter
         url += "?access_token=%s" % (self.token)
     return json.loads(http(url).buffer().getvalue())
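
Note that GitHub has since deprecated query-string token authentication (?access_token=...); the Authorization header used in code example #8 is the current approach.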
Code example #25
File: github.py Project: whigg/geoslurp
 def uris(self, depth=10):
     """Construct Uris from tree nodes"""
     for item in self.treeitems(depth=depth):
         yield http(item["url"])
Code example #26
File: PSMSL.py Project: strawpants/geoslurp
 def pull(self):
     http(self.url).download(self.cacheDir())
     zpf=os.path.join(self.cacheDir(),os.path.basename(self.url))
     #unzip file
     with ZipFile(zpf,'r') as zp:
         zp.extractall(self.cacheDir())
Code example #27
 def pull(self):
     upsrc = http(
         "https://confluence.ecmwf.int/download/attachments/143039724/upArea.nc",
         lastmod=datetime(2021, 11, 17))
     #download to cache only (the data will be registered in the db as a raster)
     urif, upd = upsrc.download(self.srcdir, check=True)