def makeKM1709_mesoscope(rawFilePath, rawFileName, tableName):
    path = rawFilePath + rawFileName
    prefix = tableName
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    export_path = '%s%s.csv' % (exportBase, prefix)
    df = pd.read_excel(path, sheet_name='data', usecols=usecols)  # read_excel has no sep argument; usecols is module-level
    ip.renameCol(df, 'Time', 'time')
    ip.renameCol(df, 'Latitude', 'lat')
    ip.renameCol(df, 'Longitude', 'lon')
    ip.renameCol(df, 'Depth', 'depth')
    df = ip.removeMissings(['time', 'lat', 'lon', 'depth'], df)
    df = ip.NaNtoNone(df)
    df = ip.colDatatypes(df)
    df = ip.addIDcol(df)
    df = ip.removeDuplicates(df)
    df.to_csv(export_path, index=False)
    ip.sortByTimeLatLonDepth(df, export_path, 'time', 'lat', 'lon', 'depth')
    print('export path: ', export_path)
    return export_path
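# The ip module referenced throughout these loaders is the project's shared
# cleaning library and is not shown in this section. As a hedged sketch, the
# two most-used helpers plausibly reduce to plain pandas like this (the names
# mirror the calls above; the bodies are assumptions, not the actual module):
def removeMissings_sketch(cols, df):
    # drop rows missing any of the key index columns
    return df.dropna(subset=cols)
def NaNtoNone_sketch(df):
    # SQL inserts want NULL rather than NaN, so cast to object and fill None
    return df.where(pd.notnull(df), None)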
def insertAMTCruiseTemperature():
    server = 'Rainier'
    tableName = 'tblCruise_Temperature'
    usecols = ['Cruise_name', 'time', 'lat', 'lon', 'temp', 'temp_flag']
    rawFilePath = cfgv.rep_AMT_cruises_raw + 'amt/'
    rawFileName = 'master_AMT.csv'
    path = rawFilePath + rawFileName
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    os.chdir(rawFilePath)

    df = pd.read_csv(path, sep=',', usecols=usecols)
    for Cruise_name in df['Cruise_name'].unique():
        export_path = '%s%s%s.csv' % (exportBase, Cruise_name, tableName)

        print(Cruise_name)

        cruise_df = df[df['Cruise_name'] == Cruise_name].copy()  # per-cruise slice; .copy() avoids SettingWithCopyWarning
        Cruise_ID = iF.findID_CRUISE(Cruise_name[0:3] + Cruise_name[-2:])
        cruise_df['Cruise_ID'] = Cruise_ID
        cruise_df = cruise_df[~cruise_df['temp_flag'].isin(['N', 'S', 'M', 'L'])]  # drop flagged temperature rows
        cruise_df = ip.removeMissings(['time', 'lat', 'lon'], cruise_df)
        cruise_df = ip.convertYYYYMMDD(cruise_df)
        cruise_df = ip.colDatatypes(cruise_df)
        cruise_df = ip.removeDuplicates(cruise_df)
        cruise_df = ip.renameCol(cruise_df, 'temp', 'temperature')
        cruise_df = cruise_df[[
            'Cruise_ID', 'time', 'lat', 'lon', 'temperature'
        ]]
        cruise_df = cruise_df.dropna(subset=['temperature'])
        cruise_df = ip.NaNtoNone(cruise_df)

        if cruise_df.empty:
            print(Cruise_name +
                  ' had no temperature values. Not inserted into database')
        else:
            cruise_df.to_csv(export_path, index=False)
            ip.sortByTimeLatLon(cruise_df, export_path, 'time', 'lat', 'lon')
            print('export path: ', export_path)
            iF.toSQLbcp(export_path, tableName, server)
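# iF.findID_CRUISE and iF.toSQLbcp live in the project's insert-helpers module
# and are not shown here. A hypothetical sketch of the ID lookup, assuming a
# tblCruise catalog table with ID and Name columns (the schema, the SQLAlchemy
# engine, and the body are assumptions, not the project's actual code):
def findID_CRUISE_sketch(cruise_name, engine):
    from sqlalchemy import text
    with engine.connect() as conn:
        row = conn.execute(
            text("SELECT ID FROM tblCruise WHERE Name = :name"),
            {"name": cruise_name},
        ).fetchone()
    return row[0] if row else None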
def makeGLODAP(rawFilePath, rawFileName, tableName):
    path = rawFilePath + rawFileName
    prefix = tableName
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    export_path = '%s%s.csv' % (exportBase, prefix)
    df = pd.read_csv(path, sep=',', usecols=usecols)

    # cast float components to int to strip the trailing '.0'; pd.to_datetime
    # can assemble timestamps directly from these component columns
    df['year'] = df['year'].astype('int')
    df['month'] = df['month'].astype('int')
    df['day'] = df['day'].astype('int')
    df['hour'] = df['hour'].astype('int')
    df['minute'] = df['minute'].astype('int')
    df['second'] = 0
    # construct datetime (no format string applies in column-assembly mode)
    df['time'] = pd.to_datetime(
        df[['year', 'month', 'day', 'hour', 'minute', 'second']])

    ip.renameCol(df, 'latitude', 'lat')
    ip.renameCol(df, 'longitude', 'lon')
    # renaming Variables
    ip.renameCol(df, 'theta', 'theta_potential_temperature')
    ip.renameCol(df, 'sigma0', 'sigma0_potential_density')
    ip.renameCol(df, 'sigma1', 'sigma1_potential_density_ref_1000_dbar')
    ip.renameCol(df, 'sigma2', 'sigma2_potential_density_ref_2000_dbar')
    ip.renameCol(df, 'sigma3', 'sigma3_potential_density_ref_3000_dbar')
    ip.renameCol(df, 'sigma4', 'sigma4_potential_density_ref_4000_dbar')

    ip.renameCol(df, 'gamma', 'gamma_neutral_density')
    ip.renameCol(df, 'TAlk', 'TAlk_total_alkalinity')
    ip.renameCol(df, 'phts25p0', 'phts25p0_pH_25C_0dbar')
    ip.renameCol(df, 'phtsinsitutp', 'phtsinsitutp_pH_insitu')

    # import the cruise-ID-to-expocode mapping and join it on
    # (rawFileName_expocodes, like usecols, is defined at module level)
    expocodes = pd.read_csv(rawFilePath + rawFileName_expocodes,
                            sep='\t',
                            names=['cruise_ID', 'expocode'])
    df = pd.merge(df, expocodes, left_on='cruise', right_on='cruise_ID')
    df = df.drop(columns='cruise_ID')  # positional axis argument was removed in pandas 2.0
    ip.renameCol(df, 'expocode', 'cruise_expocode')

    df = ip.arrangeColumns(usecols_rearange, df)
    df = ip.removeMissings(['time', 'lat', 'lon', 'depth'], df)
    df = ip.NaNtoNone(df)
    df = ip.colDatatypes(df)
    df = ip.convertYYYYMMDD(df)
    df = ip.addIDcol(df)
    df.to_csv(export_path, index=False)
    ip.sortByTimeLatLonDepth(df, export_path, 'time', 'lat', 'lon', 'depth')
    print('export path: ', export_path)
    return export_path, df
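# pd.to_datetime assembles timestamps directly from columns named
# year/month/day/hour/minute/second, which is why makeGLODAP only needs int
# casts to strip the trailing '.0' from float components. A self-contained
# illustration with made-up values:
def _datetime_assembly_demo():
    parts = pd.DataFrame({'year': [2002.0], 'month': [7.0], 'day': [15.0],
                          'hour': [6.0], 'minute': [30.0]})
    parts = parts.astype('int')   # 2002.0 -> 2002, etc.
    parts['second'] = 0
    return pd.to_datetime(parts)  # 0   2002-07-15 06:30:00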
rawFilePath = '/media/nrhagen/Drobo/OpediaVault/model/darwin_3day/'
netcdf_list = glob.glob(rawFilePath + '*.nc')
exportBase = cfgv.opedia_proj + 'db/dbInsert/export_temp/'
# per-file export paths are built from each NetCDF basename below
############################
############################
path = sys.argv[1]

if os.path.isfile(exportBase + os.path.basename(path)[:-3] + '_DONE.txt'):  # skip files already processed, per the '_DONE' marker catalog
    sys.exit(0)
else:
    xdf = xr.open_dataset(path)
    df = xdf.to_dataframe()
    df.reset_index(inplace=True) # converts netcdf dims to cols
    df = ip.renameCol(df, 'lat_c', 'lat')
    df = ip.renameCol(df, 'lon_c', 'lon')
    df = ip.renameCol(df, 'dep_c', 'depth')
    df = ip.convertcolDatatype(df,['FeT', 'PO4', 'DIN', 'SiO2', 'O2'])
    # df = ip.removeMissings(['time','lat', 'lon', 'depth'], df)
    df = ip.arrangeColumns(['time','lat', 'lon','depth', 'FeT', 'PO4', 'DIN', 'SiO2', 'O2'], df)
    df = ip.NaNtoNone(df)
    df = ip.addIDcol(df)
    df = ip.colDatatypes(df)
    df.sort_values(['time', 'lat', 'lon', 'depth'], ascending=[True, True, True, True], inplace=True)
    df.to_csv(exportBase + os.path.basename(path)[:-3] + '.csv', mode='a', chunksize=1000000, index=False)

    # write an empty '_DONE' marker file to catalog which files were processed
    open(exportBase + os.path.basename(path)[:-3] + '_DONE.txt', 'w').close()
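# netcdf_list is computed above but never used inside this single-file script
# (the target file arrives via sys.argv[1]), which suggests a driver that
# launches the script once per NetCDF. A minimal sketch of such a driver; the
# script filename is an assumption:
def run_all_darwin_3day(script='process_darwin_3day.py'):
    import subprocess
    for nc in sorted(netcdf_list):
        # the '_DONE' marker check at the top of the script makes reruns cheap
        subprocess.run([sys.executable, script, nc], check=True)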
def makeGlobal_PicoPhytoPlankton(rawFilePath, rawFileName, tableName):
    path = rawFilePath + rawFileName
    prefix = tableName
    exportBase = cfgv.opedia_proj + 'db/dbInsert/export/'
    export_path = '%s%s.csv' % (exportBase, prefix)
    df = pd.read_excel(path, sheet_name='data', usecols=usecols)  # read_excel has no sep argument
    df['year'] = df['year'].astype('str')
    df['month'] = ((df['month'].astype('str')).apply(lambda x: x.zfill(2)))
    df['day'] = ((df['day'].astype('str')).apply(lambda x: x.zfill(2)))
    print(len(df))
    df = df[(df['day'] != '-9') & (df['day'] != '-1')]

    # expand the two-digit years that appear in this sheet
    df['year'] = df['year'].replace('10', '2010')
    df['year'] = df['year'].replace('11', '2011')
    df['year'] = df['year'].replace('6', '2006')
    # df = df[(df['year'] != '10') & (df['year'] != '11')& (df['year'] != '6')]
    df['time'] = pd.to_datetime(df[['year', 'month', 'day']])  # column assembly; a format string is not used in this mode
    ip.renameCol(df, 'Lat', 'lat')
    ip.renameCol(df, 'Long', 'lon')
    ip.renameCol(df, 'Depth', 'depth')
    ip.renameCol(df, 'PromL', 'prochlorococcus_abundance')
    ip.renameCol(df, 'SynmL', 'synechococcus_abundance')
    ip.renameCol(df, 'PEukmL', 'picoeukaryote_abundance')
    ip.renameCol(df, 'pico_abund', 'picophytoplankton_abundance')
    ip.renameCol(df, 'picophyto [ug C/L]', 'picophytoplankton_biomass')
    ip.removeColumn(['year', 'day', 'month'], df)
    df = ip.reorderCol(df, [
        'time', 'lat', 'lon', 'depth', 'prochlorococcus_abundance',
        'synechococcus_abundance', 'picoeukaryote_abundance',
        'picophytoplankton_abundance', 'picophytoplankton_biomass'
    ])
    df = ip.removeMissings(['time', 'lat', 'lon', 'depth'], df)
    df = ip.NaNtoNone(df)
    df = ip.colDatatypes(df)
    df = ip.addIDcol(df)
    df = ip.removeDuplicates(df)
    df.to_csv(export_path, index=False)
    ip.sortByTimeLatLonDepth(df, export_path, 'time', 'lat', 'lon', 'depth')
    print('export path: ', export_path)
    return export_path
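# The hard-coded year replacements in makeGlobal_PicoPhytoPlankton only cover
# the two-digit years present in this sheet ('6', '10', '11'). A hedged, more
# general alternative (the pivot value is an assumption about the dataset's
# date range, not something the source specifies):
def expand_two_digit_years(year, pivot=69):
    # '6' -> 2006, '98' -> 1998; four-digit years pass through unchanged
    y = pd.to_numeric(year).astype('int')
    century = 2000 * (y <= pivot) + 1900 * ((y > pivot) & (y <= 99))
    return y.where(y > 99, y + century)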