def SWARM_MLI_2D_0501(lmax=133):
    '''
    SWARM_MLI_2D_0501 is a degree 133 magnetic field model of the Earth's
    lithosphere, based largely on satellite data. Though the model draws
    mostly on data from the SWARM mission, data from the CHAMP mission and
    some ground-based measurements are also used.

    Parameters
    ----------
    lmax : int, optional
        The maximum spherical harmonic degree to return.

    Reference
    ---------
    Thébault, E., Vigneron, P., Maus, S., Chulliat, A., Sirol, O., Hulot, G.
    (2013). Swarm SCARF dedicated lithospheric field inversion chain. Earth,
    Planets and Space, 65, 7, doi:10.5047/eps.2013.07.008.
    '''
    fname = _retrieve(
        url="ftp://swarm-diss.eo.esa.int/Level2longterm/MLI/SW_OPER_MLI_SHA_2D_00000000T000000_99999999T999999_0501.ZIP",  # noqa: E501
        known_hash="sha256:53b92d229ff9416c4cd5663975bdcb23f193f41e7212f2956685dae34dbc6f7f",  # noqa: E501
        downloader=_FTPDownloader(progressbar=True),
        processor=_Unzip(),
        path=_os_cache('pyshtools'),
    )
    return _SHMagCoeffs.from_file(fname[0], format='dov', r0=6371.2e3,
                                  r0_index=None, lmax=lmax, header=True,
                                  header2=True, skip=3, file_units='nT',
                                  units='nT')
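
# Example usage (a minimal sketch, assuming the pyshtools SHMagCoeffs/SHMagGrid
# API and a network connection for the first download; the file is cached
# afterwards):
#
#     clm = SWARM_MLI_2D_0501(lmax=80)
#     grid = clm.expand()      # expand the coefficients onto a global grid
#     grid.plot_total()        # plot the total field intensity
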
def Carbonatites():
    '''
    Load carbonatite data (with age constraints) from
    Humphreys-Williams and Zahirovic (2021).
    '''
    fname = _retrieve(
        url="https://zenodo.org/record/5968095/files/1_CarbonatitesShapefile_WithAgeConstraints.zip?download=1",
        known_hash="md5:7f219044c7a1ea9d81fc3410b64b2876",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='Carbonatites'),
    )

    gdf = _gpd.read_file(
        '{:s}/Carbonatites/1_CarbonatitesShapefile_WithAgeConstraints/carbonatites_gplates.shp'.format(
            str(_os_cache('gprm'))))

    return gdf
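
# Example usage (a minimal sketch, assuming geopandas is installed; the
# shapefile is downloaded and cached on the first call):
#
#     carbonatites = Carbonatites()
#     print(carbonatites.columns)      # inspect the available attribute fields
#     carbonatites.plot()              # quick-look map of the carbonatite locations
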
def LargeIgneousProvinces(catalogue='Whittaker', load=True):
    '''
    (Large) Igneous Province polygons included in the GPlates sample data:

    - 'Whittaker' [default], from Whittaker et al. (2015)
    - 'Johansson', from Johansson et al. (2018)

    and also:

    - 'UTIG', from the 2011 version of the UTIG LIP compilation

    If load=True (default), the selected catalogue is returned as a
    GeoDataFrame; otherwise the path to the downloaded file is returned.
    '''
    if catalogue in ['Whittaker', 'Johansson']:
        fnames = _retrieve(
            url="https://www.earthbyte.org/webdav/ftp/earthbyte/GPlates/SampleData_GPlates2.2/Individual/FeatureCollections/LargeIgneousProvinces_VolcanicProvinces.zip",
            known_hash="sha256:8f86ab86a12761f5534beaaeaddbed5b4e3e6d3d9b52b0c87ee9b15af2a797cd",
            downloader=_HTTPDownloader(progressbar=True),
            path=_os_cache('gprm'),
            processor=_Unzip(extract_dir='LIPs'),
        )
        # Locate the directory of the unzipped archive via its License file
        for fname in fnames:
            if _os.path.split(fname)[1] == 'License.txt':
                dirname = _os.path.split(fname)[0]

        if catalogue == 'Whittaker':
            fname = '{:s}/LargeIgneousProvinces_VolcanicProvinces/Whittaker_etal_2015_LargeIgneousProvinces/SHP/Whittaker_etal_2015_LIPs.shp'.format(dirname)
        elif catalogue == 'Johansson':
            fname = '{:s}/LargeIgneousProvinces_VolcanicProvinces/Johansson_etal_2018_VolcanicProvinces/SHP/Johansson_etal_2018_VolcanicProvinces_v2.shp'.format(dirname)

    elif catalogue == 'UTIG':
        fname = _retrieve(
            url="http://www-udc.ig.utexas.edu/external/plates/data/LIPS/Data/LIPS.2011.gmt",
            known_hash="sha256:11cd037382c518ec0b54b93728fef5e476ec3d8d57e5c433a1ccf14420ee99dd",
            downloader=_HTTPDownloader(progressbar=True),
            path=_os_cache('gprm'),
        )

    else:
        raise ValueError('Unknown catalogue {:s}'.format(catalogue))

    if load:
        return _gpd.read_file(fname)
    else:
        return fname
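
# Example usage (a minimal sketch; the catalogue names follow those documented
# in the docstring above, and geopandas is assumed to be installed):
#
#     lips = LargeIgneousProvinces(catalogue='Johansson')
#     lips.plot()                                   # quick-look map of the polygons
#
#     # or return only the path to the shapefile without loading it
#     shp_path = LargeIgneousProvinces(load=False)
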
def Geochem(usecols=None, return_column_names=False, remove_invalid_coordinates=True):
    '''
    Load the geochemistry database of Gard et al. (2019),
    doi: https://doi.org/10.5194/essd-11-1553-2019

    Options:
    usecols: optionally define a list of columns to load (rather than the
        full table) [default=None]
    remove_invalid_coordinates: specify whether to remove rows from the table
        for which the latitude and/or longitude are invalid [default=True]
    return_column_names: instead of loading the table into memory, return a
        list of column names [default=False]
    '''
    fname = _retrieve(
        url="https://zenodo.org/record/3359791/files/complete.zip",
        known_hash="md5:9b97b54887ee7184c6650c845b4e92d4",
        downloader=_HTTPDownloader(progressbar=True),
        path=_os_cache('gprm'),
        processor=_Unzip(extract_dir='GeochemDB'),
    )[0]

    if usecols:
        # TODO if the Longitude and Latitude fields are not included, the
        # attempt to create a GeoDataFrame will throw an error - fix this
        # The source table uses lowercase column names, so map the capitalised
        # names back to lowercase before reading
        usecols = ['longitude' if x == 'Longitude' else x for x in usecols]
        usecols = ['latitude' if x == 'Latitude' else x for x in usecols]

    # TODO include some shorthands for the usecols, e.g. 'Major', 'REE', etc

    if return_column_names:
        return _pd.read_csv(fname, index_col=0, nrows=0,
                            engine='python').columns.tolist()

    else:
        df = _pd.read_csv(fname, usecols=usecols, engine='python',
                          encoding="ISO-8859-1")

        if remove_invalid_coordinates:
            df = df.dropna(subset=['longitude', 'latitude'])
            df.reset_index(inplace=True)

        df.rename(columns={'longitude': 'Longitude', 'latitude': 'Latitude'},
                  inplace=True)

        return _gpd.GeoDataFrame(
            df,
            geometry=_gpd.points_from_xy(df.Longitude, df.Latitude),
            crs=4326)
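
# Example usage (a minimal sketch; 'sio2' and 'mgo' are illustrative column
# names only - check the real names first with return_column_names):
#
#     columns = Geochem(return_column_names=True)
#     print(columns[:20])
#
#     gdf = Geochem(usecols=['Longitude', 'Latitude', 'sio2', 'mgo'])
#     print(gdf.head())
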
def fetch_Paleomap(resolution='01d'):
    """
    PaleoDEM rasters from Scotese and Wright (2018).

    resolution can be '01d' (default) or '06m'
    """
    if resolution == '01d':
        fnames = _retrieve(
            url="https://zenodo.org/record/5460860/files/Scotese_Wright_2018_Maps_1-88_1degX1deg_PaleoDEMS_nc.zip?download=1",
            known_hash="md5:77147998623ab039d86ff3e0b5e40344",
            downloader=_HTTPDownloader(progressbar=True),
            path=_os_cache('gprm'),
            processor=_Unzip(extract_dir='Paleomap_01d'),
        )
        dirname = '{:s}/Paleomap_01d/Scotese_Wright_2018_Maps_1-88_1degX1deg_PaleoDEMS_nc_v2'.format(
            fnames[0].split('Paleomap_01d')[0])
        #dirname = '{:s}/Scotese_Wright_2018_Maps_1-88_1degX1deg_PaleoDEMS_nc_v2'.format(_os.path.split(fnames[0])[0])

        # if downloading for the first time, remove the unwanted cache files
        for file in _os.listdir(dirname):
            if file.endswith(".cache"):
                _os.remove('{:s}/{:s}'.format(dirname, file))

        raster_dict = {}
        for file in _os.listdir(dirname):
            if file.endswith(".nc"):
                # Replace whitespace with underscores to help pygmt plotting
                if ' ' in file:
                    _os.rename(
                        '{:s}/{:s}'.format(dirname, file),
                        '{:s}/{:s}'.format(dirname, file.replace(' ', '_')))
                # key each raster by its reconstruction age, parsed from the filename
                raster_dict[float(file.split('_')[-1][:-5])] = '{:s}/{:s}'.format(
                    dirname, file.replace(' ', '_'))

        ordered_raster_dict = collections.OrderedDict(sorted(raster_dict.items()))

        return ordered_raster_dict

    elif resolution == '06m':
        fnames = _retrieve(
            url="https://zenodo.org/record/5460860/files/Scotese_Wright_2018_Maps_1-88_6minX6min_PaleoDEMS_nc.zip?download=1",
            known_hash="md5:89eb50d8645707ab221b023078535bda",
            downloader=_HTTPDownloader(progressbar=True),
            path=_os_cache('gprm'),
            processor=_Unzip(extract_dir='Paleomap_06m'),
        )
        dirname = '{:s}/Paleomap_06m/Scotese_Wright_2018_Maps_1-88_6minX6min_PaleoDEMS_nc'.format(
            fnames[0].split('Paleomap_06m')[0])
        #dirname = '{:s}/Scotese_Wright_2018_Maps_1-88_6minX6min_PaleoDEMS_nc'.format(_os.path.split(fnames[0])[0])

        raster_dict = {}
        for file in _os.listdir(dirname):
            if file.endswith(".nc"):
                # Replace whitespace with underscores to help pygmt plotting
                if ' ' in file:
                    _os.rename(
                        '{:s}/{:s}'.format(dirname, file),
                        '{:s}/{:s}'.format(dirname, file.replace(' ', '_')))
                # key each raster by its reconstruction age, parsed from the filename
                raster_dict[float(file.split('_')[-1][:-5])] = '{:s}/{:s}'.format(
                    dirname, file.replace(' ', '_'))

        ordered_raster_dict = collections.OrderedDict(sorted(raster_dict.items()))

        return ordered_raster_dict

    else:
        raise ValueError(
            'Spacing for source grids must be either 01d (for 1 degree version) or 06m (for 6 minute version)'
        )
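
# Example usage (a minimal sketch; the returned dictionary maps reconstruction
# ages in Ma, parsed from the netCDF filenames, to local file paths. xarray and
# the 100 Ma key are assumptions for illustration only - inspect the keys to
# see which ages are actually available):
#
#     paleodem_files = fetch_Paleomap(resolution='01d')
#     print(list(paleodem_files.keys())[:5])        # first few reconstruction ages
#
#     import xarray as xr
#     dem_100Ma = xr.open_dataset(paleodem_files[100.0])
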