def insertAMTCruiseTraj():
    server = 'Rainier'
    tableName = 'tblCruise_Trajectory'
    usecols = ['Cruise_name', 'time', 'lat', 'lon']
    rawFilePath = cfgv.rep_AMT_cruises_raw + 'amt/'
    rawFileName = 'master_AMT.csv'
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    os.chdir(rawFilePath)
    df = pd.read_csv(rawFilePath + rawFileName, sep=',', usecols=usecols)
    for Cruise_name in df['Cruise_name'].unique():
        export_path = '%s%s.csv' % (exportBase, Cruise_name)
        print(Cruise_name)
        # select only the rows belonging to this cruise
        cruise_df = df[df['Cruise_name'] == Cruise_name].copy()
        # build the lookup key from the first three and last two characters of the name
        Cruise_ID = iF.findID_CRUISE(Cruise_name[0:3] + Cruise_name[-2:])
        cruise_df['Cruise_ID'] = Cruise_ID
        cruise_df = ip.removeMissings(['time', 'lat', 'lon'], cruise_df)
        cruise_df = ip.convertYYYYMMDD(cruise_df)
        cruise_df = ip.NaNtoNone(cruise_df)
        cruise_df = ip.colDatatypes(cruise_df)
        cruise_df = ip.removeDuplicates(cruise_df)
        cruise_df = cruise_df[['Cruise_ID', 'time', 'lat', 'lon']]
        cruise_df.to_csv(export_path, index=False)
        ip.sortByTimeLatLon(cruise_df, export_path, 'time', 'lat', 'lon')
        print('export path: ', export_path)
        iF.toSQLbcp(export_path, tableName, server)
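
# A minimal standalone sketch of the per-cruise split-and-export pattern used
# above, on a toy frame. The cruise names, values, and temp directory here are
# hypothetical; the real loader depends on cfgv/iF/ip, which this demo avoids.
def _demo_per_cruise_export():
    import os
    import tempfile
    import pandas as pd
    df = pd.DataFrame({
        'Cruise_name': ['AMT_cruise_01', 'AMT_cruise_01', 'AMT_cruise_02'],
        'time': ['1995-09-21', '1995-09-22', '1996-04-30'],
        'lat': [49.1, 48.7, 36.5],
        'lon': [-8.2, -9.0, -14.1],
    })
    out_dir = tempfile.mkdtemp()
    # groupby yields one sub-frame per cruise, each written to its own CSV
    for name, cruise_df in df.groupby('Cruise_name'):
        export_path = os.path.join(out_dir, name + '.csv')
        cruise_df.to_csv(export_path, index=False)
        print('export path:', export_path)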
def insertSeaFlowCruiseTraj():
    server = 'Rainier'
    tableName = 'tblCruise_Trajectory'
    rawFilePath = cfgv.rep_allSeaFlowCruises_raw
    os.chdir(rawFilePath)
    sfl_cruise_list = glob.glob('*.sfl*')
    usecols_sfl = ['DATE', 'LAT', 'LON']
    for cruise in sfl_cruise_list:
        # strip the trailing eight characters (e.g. '.sfl.csv') to recover the cruise name
        prefix = cruise[:-8] + '_traj'
        exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
        export_path = '%s%s.csv' % (exportBase, prefix)
        print(cruise)
        Cruise_ID = iF.findID_CRUISE(cruise[:-8])
        df = pd.read_csv(cruise, sep='\t', usecols=usecols_sfl)
        df['DATE'] = pd.to_datetime(df['DATE'], format='%Y-%m-%dT%H:%M:%S')
        df['Cruise_ID'] = Cruise_ID
        df.rename(columns={'DATE': 'time', 'LAT': 'lat', 'LON': 'lon'}, inplace=True)
        df = df[['Cruise_ID', 'time', 'lat', 'lon']]
        df = ip.removeMissings(['time', 'lat', 'lon'], df)
        df = ip.NaNtoNone(df)
        df = ip.colDatatypes(df)
        df = ip.convertYYYYMMDD(df)
        df = ip.removeDuplicates(df)
        df.to_csv(export_path, index=False)
        ip.sortByTimeLatLon(df, export_path, 'time', 'lat', 'lon')
        print('export path: ', export_path)
        iF.toSQLbcp(export_path, tableName, server)
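
# A small sketch of the SFL timestamp parsing used above: SeaFlow .sfl files
# store DATE as ISO-8601 'YYYY-MM-DDTHH:MM:SS' strings. The sample values
# below are made up for illustration.
def _demo_sfl_date_parsing():
    import pandas as pd
    s = pd.Series(['2017-06-01T00:03:00', '2017-06-01T00:06:00'])
    # an explicit format makes the parse strict and fast
    parsed = pd.to_datetime(s, format='%Y-%m-%dT%H:%M:%S')
    print(parsed.dtype)  # datetime64[ns]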
def makeHL2A_diel_metagenomics(rawFilePath, rawFileName, tableName):
    path = rawFilePath + rawFileName
    prefix = tableName
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    export_path = '%s%s.csv' % (exportBase, prefix)
    df = pd.read_excel(path, sheet_name='data')
    df = ip.removeMissings(['time', 'lat', 'lon', 'depth'], df)
    df = ip.NaNtoNone(df)
    df = ip.colDatatypes(df)
    df = ip.addIDcol(df)
    df = ip.removeDuplicates(df)
    df.to_csv(export_path, index=False)
    ip.sortByTimeLatLonDepth(df, export_path, 'time', 'lat', 'lon', 'depth')
    print('export path: ', export_path)
    return export_path
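
# The ip.* helpers are project-internal; a plausible pandas equivalent of
# ip.removeMissings(['time', 'lat', 'lon', 'depth'], df) is a dropna on the
# index columns, as sketched here. This is an assumption for illustration,
# not the actual helper implementation.
def _demo_remove_missings(df):
    # drop any row that lacks one of the space-time index values
    return df.dropna(subset=['time', 'lat', 'lon', 'depth'])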
def makeMesoscope_km1709(rawFilePath, rawFileName, tableName):
    path = rawFilePath + rawFileName
    prefix = tableName
    df = pd.read_excel(path, 'data')
    df = ip.removeMissings(['time', 'lat', 'lon', 'depth'], df)
    df = ip.colDatatypes(df)
    df['time'] = pd.to_datetime(df['time'], format='%Y-%m-%d')
    df['ID'] = None
    df = ip.removeDuplicates(df)
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    export_path = '%s%s.csv' % (exportBase, prefix)
    df.to_csv(export_path, index=False)
    ip.sortByTimeLatLonDepth(df, export_path, 'time', 'lat', 'lon', 'depth')
    print('export path: ', export_path)
    return export_path
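
# Sketch of the strict date parse used above: with an explicit format,
# pd.to_datetime raises on any value that deviates from 'YYYY-MM-DD', which
# surfaces malformed source dates early. The sample values are made up.
def _demo_strict_date_parse():
    import pandas as pd
    s = pd.Series(['2017-06-26', '2017-07-02'])
    print(pd.to_datetime(s, format='%Y-%m-%d'))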
def makeSingleCellGenomes_Chisholm(rawFilePath, rawFileName, tableName):
    path = rawFilePath + rawFileName
    prefix = tableName
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    export_path = '%s%s.csv' % (exportBase, prefix)
    df = pd.read_excel(path, 'data')
    df = ip.removeMissings(['time', 'lat', 'lon', 'depth'], df)
    df = ip.NaNtoNone(df)
    df = ip.colDatatypes(df)
    df = ip.convertYYYYMMDD(df)
    df = ip.addIDcol(df)
    df = ip.removeDuplicates(df)
    df.to_csv(export_path, index=False)
    ip.sortByTimeLatLonDepth(df, export_path, 'time', 'lat', 'lon', 'depth')
    print('export path: ', export_path)
    return export_path
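
# ip.removeDuplicates is project-internal; a plausible pandas equivalent is
# drop_duplicates over all columns, as sketched here (an assumption for
# illustration, not the actual helper).
def _demo_remove_duplicates(df):
    # keep the first occurrence of each fully identical row
    return df.drop_duplicates().reset_index(drop=True)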
def makeFlombaum(rawFilePath, rawFileName, tableName):
    path = rawFilePath + rawFileName
    prefix = tableName
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    export_path = '%s%s.csv' % (exportBase, prefix)
    df = pd.read_excel(path, sheet_name='data')
    df = ip.removeMissings(['time', 'lat', 'lon', 'depth'], df)
    df = ip.NaNtoNone(df)
    df = ip.colDatatypes(df)
    df = ip.convertYYYYMMDD(df)
    df = ip.addIDcol(df)
    df = ip.removeDuplicates(df)
    # force longitudes positive, then remap the exported file to the [-180, 180] convention
    df['lon'] = df['lon'].abs()
    df.to_csv(export_path, index=False)
    ip.mapTo180180(export_path, 'lon')
    ip.sortByTimeLatLonDepth(df, export_path, 'time', 'lat', 'lon', 'depth')
    print('export path: ', export_path)
    return export_path
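
# Sketch of the longitude-convention fix: mapping values from [0, 360) to
# [-180, 180). ip.mapTo180180 operates on the exported CSV; this in-memory
# version is an assumed equivalent for illustration only.
def _demo_map_to_180180(lon):
    # e.g. _demo_map_to_180180(350.0) -> -10.0; 170.0 stays 170.0
    return ((lon + 180.0) % 360.0) - 180.0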
def insertSeaFlowCruiseSalinity():
    server = 'Rainier'
    tableName = 'tblCruise_Salinity'
    rawFilePath = cfgv.rep_allSeaFlowCruises_raw
    os.chdir(rawFilePath)
    sfl_cruise_list = glob.glob('*.sfl*')
    usecols_sfl = ['DATE', 'LAT', 'LON', 'SALINITY']
    for cruise in sfl_cruise_list:
        prefix = cruise[:-8] + '_salinity'
        exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
        export_path = '%s%s.csv' % (exportBase, prefix)
        print(cruise)
        Cruise_ID = iF.findID_CRUISE(cruise[:-8])
        df = pd.read_csv(cruise, sep='\t', usecols=usecols_sfl)
        df['DATE'] = pd.to_datetime(df['DATE'], format='%Y-%m-%dT%H:%M:%S')
        # underway measurements are recorded at a nominal 5 m intake depth
        df['DEPTH'] = 5.0
        df['Cruise_ID'] = Cruise_ID
        df.rename(columns={'DATE': 'time', 'LAT': 'lat', 'LON': 'lon',
                           'DEPTH': 'depth', 'SALINITY': 'salinity'}, inplace=True)
        df = df[['Cruise_ID', 'time', 'lat', 'lon', 'depth', 'salinity']]
        df = ip.removeMissings(['time', 'lat', 'lon', 'depth'], df)
        # drop rows whose salinity cannot be parsed as a number
        df = df[pd.to_numeric(df['salinity'], errors='coerce').notnull()]
        df = ip.NaNtoNone(df)
        df = ip.colDatatypes(df)
        df = ip.convertYYYYMMDD(df)
        df = ip.removeDuplicates(df)
        if df.empty:
            print(cruise + ' had no salinity values. Not inserted into database.')
        else:
            df.to_csv(export_path, index=False)
            ip.sortByTimeLatLon(df, export_path, 'time', 'lat', 'lon')
            print('export path: ', export_path)
            iF.toSQLbcp(export_path, tableName, server)
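
# Sketch of the numeric filter used above: pd.to_numeric(errors='coerce')
# turns unparseable entries into NaN, so .notnull() keeps only rows whose
# salinity is a real number. The sample values are made up.
def _demo_numeric_filter():
    import pandas as pd
    df = pd.DataFrame({'salinity': ['35.1', 'NA', '34.9', '']})
    # keeps the '35.1' and '34.9' rows; 'NA' and '' coerce to NaN and are dropped
    return df[pd.to_numeric(df['salinity'], errors='coerce').notnull()]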
def insertAMTCruiseTemperature():
    server = 'Rainier'
    tableName = 'tblCruise_Temperature'
    usecols = ['Cruise_name', 'time', 'lat', 'lon', 'temp', 'temp_flag']
    rawFilePath = cfgv.rep_AMT_cruises_raw + 'amt/'
    rawFileName = 'master_AMT.csv'
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    os.chdir(rawFilePath)
    df = pd.read_csv(rawFilePath + rawFileName, sep=',', usecols=usecols)
    for Cruise_name in df['Cruise_name'].unique():
        export_path = '%s%s%s.csv' % (exportBase, Cruise_name, tableName)
        print(Cruise_name)
        # select only the rows belonging to this cruise
        cruise_df = df[df['Cruise_name'] == Cruise_name].copy()
        Cruise_ID = iF.findID_CRUISE(Cruise_name[0:3] + Cruise_name[-2:])
        cruise_df['Cruise_ID'] = Cruise_ID
        # drop measurements whose QC flag marks them as rejected ('N', 'S', 'M', 'L')
        cruise_df = cruise_df[~cruise_df['temp_flag'].isin(['N', 'S', 'M', 'L'])]
        cruise_df = ip.removeMissings(['time', 'lat', 'lon'], cruise_df)
        cruise_df = ip.convertYYYYMMDD(cruise_df)
        cruise_df = ip.colDatatypes(cruise_df)
        cruise_df = ip.removeDuplicates(cruise_df)
        cruise_df = ip.renameCol(cruise_df, 'temp', 'temperature')
        cruise_df = cruise_df[['Cruise_ID', 'time', 'lat', 'lon', 'temperature']]
        cruise_df = cruise_df.dropna(subset=['temperature'])
        cruise_df = ip.NaNtoNone(cruise_df)
        if cruise_df.empty:
            print(Cruise_name + ' had no temperature values. Not inserted into database.')
        else:
            cruise_df.to_csv(export_path, index=False)
            ip.sortByTimeLatLon(cruise_df, export_path, 'time', 'lat', 'lon')
            print('export path: ', export_path)
            iF.toSQLbcp(export_path, tableName, server)
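
# Sketch of the QC-flag filter used above: ~Series.isin drops every row whose
# temp_flag matches one of the rejected codes in a single vectorized step,
# replacing a chain of != comparisons. The sample values are made up.
def _demo_flag_filter():
    import pandas as pd
    df = pd.DataFrame({'temp': [18.2, 18.3, 18.1],
                       'temp_flag': ['G', 'N', 'S']})
    # keeps only the 'G'-flagged row
    return df[~df['temp_flag'].isin(['N', 'S', 'M', 'L'])]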
def makeGlobal_PicoPhytoPlankton(rawFilePath, rawFileName, tableName):
    path = rawFilePath + rawFileName
    prefix = tableName
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    export_path = '%s%s.csv' % (exportBase, prefix)
    df = pd.read_excel(path, sheet_name='data')
    # normalize the date parts to strings; zero-pad month and day to two digits
    df['year'] = df['year'].astype('str')
    df['month'] = df['month'].astype('str').apply(lambda x: x.zfill(2))
    df['day'] = df['day'].astype('str').apply(lambda x: x.zfill(2))
    print('rows before date filtering: ', len(df))
    # drop rows with sentinel day values that mark missing dates
    df = df[(df['day'] != '-9') & (df['day'] != '-1')]
    # expand truncated years to four digits
    df['year'] = df['year'].replace({'10': '2010', '11': '2011', '6': '2006'})
    df['time'] = pd.to_datetime(df[['year', 'month', 'day']])
    df = ip.renameCol(df, 'Lat', 'lat')
    df = ip.renameCol(df, 'Long', 'lon')
    df = ip.renameCol(df, 'Depth', 'depth')
    df = ip.renameCol(df, 'PromL', 'prochlorococcus_abundance')
    df = ip.renameCol(df, 'SynmL', 'synechococcus_abundance')
    df = ip.renameCol(df, 'PEukmL', 'picoeukaryote_abundance')
    df = ip.renameCol(df, 'pico_abund', 'picophytoplankton_abundance')
    df = ip.renameCol(df, 'picophyto [ug C/L]', 'picophytoplankton_biomass')
    ip.removeColumn(['year', 'day', 'month'], df)
    df = ip.reorderCol(df, ['time', 'lat', 'lon', 'depth',
                            'prochlorococcus_abundance', 'synechococcus_abundance',
                            'picoeukaryote_abundance', 'picophytoplankton_abundance',
                            'picophytoplankton_biomass'])
    df = ip.removeMissings(['time', 'lat', 'lon', 'depth'], df)
    df = ip.NaNtoNone(df)
    df = ip.colDatatypes(df)
    df = ip.addIDcol(df)
    df = ip.removeDuplicates(df)
    df.to_csv(export_path, index=False)
    ip.sortByTimeLatLonDepth(df, export_path, 'time', 'lat', 'lon', 'depth')
    print('export path: ', export_path)
    return export_path
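
# Sketch of the date assembly used above: pd.to_datetime accepts a DataFrame
# with 'year', 'month', and 'day' columns and builds a datetime Series from
# them; no format string is needed for this input. The values are made up.
def _demo_assemble_time():
    import pandas as pd
    parts = pd.DataFrame({'year': [2010, 2011], 'month': [3, 11], 'day': [5, 21]})
    # returns a datetime64 Series: 2010-03-05, 2011-11-21
    return pd.to_datetime(parts)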