        current_dir = all_dirs[i]
        dir = direction[current_dir]
        conv = (np.int64(deg) + np.int64(min) / 60.0 + np.int64(sec) / 3600.0) * dir
        conv = np.around(conv, 5)
        conv_array = np.append(conv_array, conv)
    return conv_array

met_lats = conversion(met_lats_degs, met_lats_mins, met_lats_secs, met_lats_dirs)
met_lons = conversion(met_lons_degs, met_lons_mins, met_lons_secs, met_lons_dirs)

#check site is not urban using anthrome map from 2000
anthfile = '/work/home/db876/plotting_tools/core_tools/anthro2_a2000.nc'
anthload = Dataset(anthfile)
class_result, class_name = modules.anthrome_classify(anthload, met_lats.astype('float64'), met_lons.astype('float64'))
del_list = np.where(class_result == 'invalid')
del_list = del_list[0]
alt_meta_refs = np.delete(met_refs, del_list)
valid_refs = [x for x in valid_refs if x in alt_meta_refs]
print 'n refs after class remove = ', len(valid_refs)

#read files site at a time
for ref_i in range(len(valid_refs)):
    site_ref = valid_refs[ref_i]
    print 'Current Ref is = ', valid_refs[ref_i]

    #find if sites have full valid range from start year and finishing in end year
    s_files = glob.glob('/work/home/db876/observations/surface/%s/EMEP/%s*' % (species, site_ref))
    year_files = [file.replace("/work/home/db876/observations/surface/%s/EMEP/" % (species), "") for file in s_files]
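# --- Illustrative sketch (not part of the original scripts): the conversion()
# helper above reduces degrees/minutes/seconds plus a hemisphere letter to
# signed decimal degrees. dms_to_decimal is a hypothetical standalone
# equivalent, assuming the direction dict maps 'N'/'E' to 1 and 'S'/'W' to -1.
def dms_to_decimal(deg, mins, secs, hemi):
    sign = {'N': 1, 'E': 1, 'S': -1, 'W': -1}[hemi]
    # e.g. dms_to_decimal(51, 30, 36, 'W') -> -51.51
    return round((deg + mins / 60.0 + secs / 3600.0) * sign, 5)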
def site_iter_process(valid_refs, c):
    # for each valid location, process and limit obs data for each site in valid_obs_site_names
    all_lat = []
    all_lon = []
    all_alt = []
    all_st = []
    all_mm = []
    site_ref = valid_refs[c]
    file_valid = True
    data_valid = True
    print site_ref
    file_res = data_resolutions[c]
    print file_res

    # read files for each valid site
    s_files = sorted(glob.glob("/work/home/db876/observations/surface/%s/GAW/%s**.%s**.dat" % (species, site_ref.lower(), file_res)))
    print s_files
    if file_res == "hr":
        site_files = sorted(s_files, key=lambda x: x.split(".hr")[1])
    else:
        site_files = sorted(s_files)

    delete_inds = []
    if file_res == "hr":
        # limit site files to those between the start and end year
        for i in range(len(site_files)):
            f = site_files[i]
            year = f.split(".hr")[1][:4]
            if int(year) < int(start_year):
                delete_inds.append(i)
            if int(year) > int(end_year):
                delete_inds.append(i)
    site_files = np.delete(site_files, delete_inds)
    print site_files

    site_file_len = len(site_files)
    s_count = 0
    start_ind = 0
    end_ind = 0

    for f in site_files:
        print f
        read = np.loadtxt(f, dtype="S10,S5,f8", comments="C", usecols=(0, 1, 4), unpack=True)
        read = np.array(read)
        dates = read[0, :]
        times = read[1, :]
        conc = read[2, :]
        conc = np.array(conc)
        conc = conc.astype(float)

        # change all vals < 0 to np.NaN
        inv_test = conc < 0
        conc[inv_test] = np.NaN

        start_ind = end_ind
        end_ind += len(conc)
        s_count += 1

        # parse metadata from the fixed header rows of the file
        units = []
        mycsv = csv.reader(open(f))
        row_count = 0
        for row in mycsv:
            # get lat
            if row_count == 11:
                val = " ".join(row)
                lat = val.replace(" ", "")
                lat = lat[12:]
                lat = float(lat)
                all_lat.append(lat)
            # get lon
            if row_count == 12:
                val = " ".join(row)
                lon = val.replace(" ", "")
                lon = lon[13:]
                lon = float(lon)
                all_lon.append(lon)
            # get altitude
            if row_count == 13:
                val = " ".join(row)
                alt = val.replace(" ", "")
                alt = alt[12:]
                alt = float(alt)
                all_alt.append(alt)
            # get units
            if row_count == 20:
                val = " ".join(row)
                unit = val.replace(" ", "")
                unit = unit[19:]
            # get measurement method
            if row_count == 21:
                val = " ".join(row)
                mm = val.replace(" ", "")
                mm = mm[21:]
                all_mm.append(mm)
            # get sampling type
            if row_count == 22:
                val = " ".join(row)
                st = val.replace(" ", "")
                st = st[16:]
                all_st.append(st)
            # get timezone
            if row_count == 23:
                val = " ".join(row)
                tz = val.replace(" ", "")
                tz = tz[12:]
            row_count += 1

        # test if units are in ppb for each file - if not, convert
        # conversion factor: conv_fact = R/MW * (273.15 + T(degC)) / (P(hPa)/10), with R = 8.3144
        if (unit != "ppb") & (unit != "ppbv"):
            if (unit == "ug/m3") or (unit == "ugN/m3"):
                # assume 20 degC and 1 atm - default for GAW site O3 instruments
                print "converting units, temp = 20degC"
                conv_fact = 8.3144 / mol_mass * (273.15 + 20) / (1013.25 / 10)
                conc = conv_fact * conc
            elif (unit == "ug/m3-20C") or (unit == "ugN/m3-20C"):
                print "converting units, temp = 20degC"
                conv_fact = 8.3144 / mol_mass * (273.15 + 20) / (1013.25 / 10)
                conc = conv_fact * conc
            elif (unit == "ug/m3-25C") or (unit == "ugN/m3-25C") or (unit == "ug/m3at25C"):
                print "converting units, temp = 25degC"
                conv_fact = 8.3144 / mol_mass * (273.15 + 25) / (1013.25 / 10)
                conc = conv_fact * conc
            elif (unit == "mg/m3-20C") or (unit == "mgN/m3-20C"):
                # mg/m3 rather than ug/m3, hence the extra factor of 1e3
                print "converting units, temp = 20degC"
                conv_fact = 8.3144 / mol_mass * (273.15 + 20) / (1013.25 / 10)
                conc = (conv_fact * conc) * 1e3
            elif (unit == "mg/m3-25C") or (unit == "mgN/m3-25C"):
                print "converting units, temp = 25degC"
                conv_fact = 8.3144 / mol_mass * (273.15 + 25) / (1013.25 / 10)
                conc = (conv_fact * conc) * 1e3
            elif (unit == "ppm") or (unit == "ppmv"):
                conc = conc * 1.0e3
            elif (unit == "ppt") or (unit == "pptv"):
                conc = conc / 1.0e3
            else:
                print "Unknown Unit"
                print unit
                1 + "a"  # deliberately raises a TypeError to halt on an unhandled unit
                break

        if tz != "UTC":
            if tz == "":
                # no timezone in metadata - fall back to hard-coded UTC offsets per site
                if site_ref.lower() in ["plm"]:
                    tz = -5
                if site_ref.lower() in ["kos", "edm", "vdl", "nwr"]:
                    tz = 0
                if site_ref.lower() in ["jfj", "kps", "rig", "pay", "glh", "cmn", "zep", "dig", "hhe", "ktb", "stp", "ivn", "jcz", "kam", "lzp", "snz", "zbl", "kmw", "don", "mhn", "nia", "roq", "spm"]:
                    tz = 1
                if site_ref.lower() in ["rcv", "aht", "oul", "uto", "vir", "fdt", "sem", "stn"]:
                    tz = 2
                if site_ref.lower() in ["dak"]:
                    tz = 3
                if site_ref.lower() in ["shp"]:
                    tz = 4
                if site_ref.lower() in ["isk"]:
                    tz = 5
                if site_ref.lower() in ["hkg"]:
                    tz = 8
                if site_ref.lower() in ["cgo"]:
                    tz = 10
            else:
                tz = tz.replace("LocaltimeUTC", "")
                tz = tz.replace("OtherUTC", "")
                tz = tz.replace("Localtime", "")
                tz = tz.replace(":", ".")
                try:
                    # convert the minutes part of the offset to a decimal fraction of an hour
                    before, sep, after = tz.rpartition(".")
                    after = int(after)
                    conv = (100.0 / 60) * after
                    tz = before + sep + str(conv)
                except:
                    1 + 1
            tz = float(tz)
        else:
            tz = 0

        # check tz is a whole number, else invalidate the data
        if (tz % 1) != 0:
            print "File Invalid, timezone is not a whole number."
            conc[:] = -99999

        # process dates from date, time to days since start year
        dates = [s.replace("-", "") for s in dates]
        times = [s.replace(":", "") for s in times]

        if file_res == "hr":
            # some times run from 0100 to 2400; assume these sites report the average
            # for the previous hour, so subtract an hour from all times on those days
            for i in range(len(times)):
                if times[i] == "2400":
                    current_date = dates[i]
                    test = np.array(dates) == current_date
                    indices = [i for i, x in enumerate(test) if x]
                    for x in indices:
                        current_time = times[x]
                        if current_time == "2400":
                            current_time = "0000"
                        date_datetime = datetime.datetime(int(current_date[0:4]), int(current_date[4:6]), int(current_date[6:]), int(current_time[:2]), int(current_time[2:]))
                        date_datetime = date_datetime - datetime.timedelta(hours=1)
                        times[x] = date_datetime.strftime("%H%M")

        # adjust dates and times if tz is not equal to 0
        if tz != 0:
            for i in range(len(dates)):
                # create datetime
                dt = datetime.datetime(int(dates[i][:4]), int(dates[i][4:6]), int(dates[i][6:]), int(times[i][:2]), int(times[i][2:]))
                if tz > 0:
                    # print 'Old dt', dt
                    dt = dt - datetime.timedelta(hours=int(tz))
                    # print 'New dt', dt
                elif tz < 0:
                    # print 'Old dt', dt
                    dt = dt + datetime.timedelta(hours=np.abs(int(tz)))
                    # print 'New dt', dt
                dates[i] = dt.strftime("%Y%m%d")
                times[i] = dt.strftime("%H%M")

        data = [dates, times, conc]
        try:
            big_list = np.hstack((big_list, data))
        except:
            big_list = np.array(data)

        if s_count == site_file_len:
            # make sure big list exists
            try:
                big_list
            except:
                data_valid = False

            if data_valid == True:
                # get dates and times
                date_con = big_list[0, :]
                time_con = big_list[1, :]
                # get vals
                vals = np.array(big_list[2, :]).astype(float)
                # delete big list
                del big_list

                # exclude dates outside the requested range
                first_date_val = int("%s0101" % (start_year))
                last_date_val = int("%s1231" % (end_year))
                test_valid = (np.array(date_con).astype(int) >= first_date_val) & (np.array(date_con).astype(int) <= last_date_val)
                date_con = date_con[test_valid]
                time_con = time_con[test_valid]
                vals = vals[test_valid]

                # check if any times are duplicated; if so, delete all but the first
                del_list = []
                for d in range(len(date_con) - 1):
                    if (date_con[d] == date_con[d + 1]) & (time_con[d] == time_con[d + 1]):
                        del_list.append(d + 1)
                if len(del_list) > 0:
                    print "Deleting duplicate timepoints"
                    print date_con[del_list], time_con[del_list]
                    date_con = np.delete(date_con, del_list)
                    time_con = np.delete(time_con, del_list)
                    vals = np.delete(vals, del_list)

                # if file resolution is daily or monthly, replicate times after each point to fill the hourly data array
                count = 0
                if file_res == "da":
                    file_hours = len(date_con)
                    for i in range(file_hours):
                        current_hh = int(time_con[count][:2])
                        current_mm = int(time_con[count][2:])
                        s = datetime.datetime(year=start_year, month=1, day=1, hour=current_hh, minute=current_mm)
                        e = datetime.datetime(year=start_year, month=1, day=2, hour=current_hh, minute=current_mm)
                        day_hours = [d.strftime("%H%M") for d in pd.date_range(s, e, freq="H")][1:-1]
                        date_con = np.insert(date_con, count + 1, [date_con[count]] * 23)
                        time_con = np.insert(time_con, count + 1, day_hours)
                        vals = np.insert(vals, count + 1, [vals[count]] * 23)
                        count += 24
                if file_res == "mo":
                    file_hours = len(date_con)
                    for i in range(file_hours):
                        current_year = int(date_con[count][:4])
                        current_month = int(date_con[count][4:6])
                        next_month = current_month + 1
                        if next_month > 12:
                            next_month = 1
                            next_year = current_year + 1
                        else:
                            next_year = current_year
                        s = datetime.datetime(year=current_year, month=current_month, day=1, hour=1, minute=0)
                        e = datetime.datetime(year=next_year, month=next_month, day=1, hour=0, minute=0)
                        day_date = [d.strftime("%Y%m%d") for d in pd.date_range(s, e, freq="H")][:-1]
                        day_hour = [d.strftime("%H%M") for d in pd.date_range(s, e, freq="H")][:-1]
                        date_con = np.insert(date_con, count + 1, day_date)
                        time_con = np.insert(time_con, count + 1, day_hour)
                        vals = np.insert(vals, count + 1, [vals[count]] * len(day_date))
                        count += len(day_date) + 1

                date_con = np.array(date_con).astype(int)
                time_con = np.array(time_con).astype(int)

                # create max possible o3 grid
                o3_data = np.empty(n_hours)
                o3_data[:] = -99999

                # delete dates, times and var outside date range
                val_test = (date_con >= int(output_res_dates_strings[0])) & (date_con <= int(output_res_dates_strings[-1]))
                date_con = date_con[val_test]
                time_con = time_con[val_test]
                vals = vals[val_test]
                print date_con

                # find matching times between actual times and grid of times, returning indices of matches in the grid
                converted_time = modules.date_process(date_con, time_con, start_year)
                converted_time = np.round(converted_time, decimals=5)
                syn_grid_time = np.arange(0, n_days, 1.0 / 24)
                syn_grid_time = np.round(syn_grid_time, decimals=5)
                indices = np.searchsorted(syn_grid_time, converted_time, side="left")
                o3_data[indices] = vals

                # convert all NaNs back to -99999
                test = np.isnan(o3_data)
                o3_data[test] = -99999

                # get mode of metadata
                lat = np.float64(stats.mode(all_lat)[0][0])
                lon = np.float64(stats.mode(all_lon)[0][0])
                alt = np.float64(stats.mode(all_alt)[0][0])
                st = stats.mode(all_st)[0][0]
                mm = stats.mode(all_mm)[0][0]

                # check site is not urban using anthrome map from 2000
                anthfile = "/work/home/db876/plotting_tools/core_tools/anthro2_a2000.nc"
                anthload = Dataset(anthfile)
                class_valid, anthrome_class_name = modules.anthrome_classify(anthload, [lat], [lon])
                if class_valid == "invalid":
                    data_valid = False
                    print "Site Invalid, site classed as urban by anthrome map."

                # get measurement type and sampling type (take mode from collected list)
                if (st == "continuous") or (st == "continuous(carbondioxide),remotespectroscopicmethod(methaneandsurfaceozone)"):
                    st = "average"
                elif st == "flask":
                    st = "flask"
                elif st == "filter":
                    st = "filter"
                else:
                    print st
                    1 + "a"  # deliberately raises a TypeError to halt on an unhandled sampling type

                if mm == "Lightabsorptionanalysis(UV)":
                    mm = "ultraviolet photometry"
                elif mm == "CavityRingdownSpectroscopy":
                    mm = "cavity ringdown spectroscopy"
                elif mm == "NDIR":
                    mm = "non-dispersive infrared spectroscopy"
                elif mm == "GasChromatography(FID)":
                    mm = "gas chromatography flame ionisation detection"
                elif mm == "Gas Chromatography (RGD)":
                    mm = "gas chromatography reduction gas detection"
                elif mm == "Chemiluminescence":
                    mm = "chemiluminescence"
                elif (mm == "Spectrophotometry") or (mm == "spectrophotometry,naphthyl-ethylenediaminedihydrochloridemethod"):
                    mm = "spectrophotometry"
                elif mm == "":
                    # default methods per species when metadata is blank
                    if species == "O3":
                        mm = "ultraviolet photometry"
                    if species == "CO":
                        mm = "non-dispersive infrared spectroscopy"
                    if species == "NO2":
                        mm = "chemiluminescence"
                    if species == "NO":
                        mm = "chemiluminescence"
                    if species == "ISOP":
                        mm = "gas chromatography flame ionisation detection"

                # do data quality checks
                full_data, data_valid = modules.quality_check(o3_data, data_valid, data_resolution, alt, grid_dates, start_year, end_year)

    # convert file res to standard format
    if file_res == "hr":
        file_res = "H"
    elif file_res == "da":
        file_res = "D"
    elif file_res == "mo":
        file_res = "M"

    # no raw class, so set as na
    raw_class_name = "na"

    return c, full_data, data_valid, lat, lon, alt, raw_class_name, anthrome_class_name, mm, st, file_res
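# --- Illustrative sketch (not part of the original scripts): the ideal-gas
# unit conversion used in the branches above. ugm3_to_ppb is a hypothetical
# helper; the scripts take mol_mass from the enclosing namespace.
# ppb = ug/m3 * R/MW * (273.15 + T(degC)) / (P(hPa)/10), with R = 8.3144 J/(mol K).
def ugm3_to_ppb(conc_ugm3, mol_mass, temp_c=20.0, press_hpa=1013.25):
    conv_fact = 8.3144 / mol_mass * (273.15 + temp_c) / (press_hpa / 10.0)
    return conv_fact * conc_ugm3
# For O3 (mol_mass = 48.0) at 20 degC the factor is ~0.501, so 100 ug/m3 is ~50.1 ppb.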
def site_iter_process(valid_refs, c):
    #for ref_i in range(len(valid_refs)):
    data_valid = True
    site_ref = valid_refs[c]
    print 'Current Ref is = ', site_ref

    s_files = glob.glob('/work/home/db876/observations/surface/%s/CAPMON/ozon_smpls_%s*' % (species, site_ref))
    site_files = []
    for y in year_array:
        for f in s_files:
            if str(y) in f:
                site_files.append(f)
    site_files = modules.natsorted(site_files)

    yymmdd = []
    hhmm = []
    vals = []

    #create max possible o3 grid
    full_data = np.empty(n_hours)
    full_data[:] = -99999

    for file_i in range(len(site_files)):
        count = 0
        meta_start = -99999
        start_read_1 = False
        start_read_2 = False

        with open(site_files[file_i], 'rb') as f:
            reader = csv.reader(f, delimiter=',')
            print site_files[file_i]
            for row in reader:
                #print count
                #break out of loop at bottom of file
                if (start_read_2 == True) & (row[0] == '*TABLE ENDS'):
                    break

                #get metadata
                try:
                    if (row[0] == '*TABLE NAME') & (row[1] == 'Site information'):
                        meta_start = count + 2
                except:
                    pass
                if count == meta_start:
                    lat_i = row.index('Latitude: decimal degrees')
                    lon_i = row.index('Longitude: decimal degrees')
                    try:
                        alt_i = row.index('Ground elevation: above mean sea level')
                    except:
                        alt_i = row.index('Ground altitude')
                    class_i = row.index('Site land use')
                if count == (meta_start + 6):
                    latitude = row[lat_i]
                    longitude = row[lon_i]
                    altitude = row[alt_i]
                    raw_class_name = row[class_i]

                #get data
                if start_read_2 == True:
                    #read dates, times, and vals
                    date = row[8]
                    time = row[9]
                    yymmdd.append(date[:4] + date[5:7] + date[8:])
                    hhmm.append(time[:2] + time[3:])
                    quality_code = row[13]
                    if quality_code == 'V0':
                        vals = np.append(vals, np.float64(row[12]))
                    else:
                        vals = np.append(vals, -99999)

                try:
                    if (row[0] == '*TABLE NAME') & (row[1] == 'OZONE_HOURLY'):
                        start_read_1 = True
                except:
                    pass
                if (start_read_1 == True) & (row[0] == '*TABLE COLUMN UNITS'):
                    unit = row[12]
                if (start_read_1 == True) & (row[0] == '*TABLE BEGINS'):
                    start_read_2 = True
                count += 1

    #convert all invalids to -99999
    test_inv = vals < 0
    vals[test_inv] = -99999

    #put o3 vals into full grid
    date_con = np.array(yymmdd).astype(int)
    time_con = np.array(hhmm).astype(int)

    #find matching times between actual times and grid of times, return big array of indices of matched indices in grid
    converted_time = modules.date_process(date_con, time_con, start_year)
    converted_time = np.round(converted_time, decimals=5)
    syn_grid_time = np.arange(0, n_days, 1. / 24)
    syn_grid_time = np.round(syn_grid_time, decimals=5)
    indices = np.searchsorted(syn_grid_time, converted_time, side='left')
    vals = np.array(vals)
    #make sure no data is past end year
    index_test = indices < len(full_data)
    indices = indices[index_test]
    vals = vals[index_test]
    full_data[indices] = vals

    #get metadata
    lat = np.float64(latitude)
    lon = np.float64(longitude)
    alt = np.float64(altitude)

    #check site is valid by class
    if ('Urban' in raw_class_name) or ('urban' in raw_class_name):
        data_valid = False
        print 'Data is invalid. Raw Class is Urban.'

    #check site is not urban using anthrome map from 2000
    anthfile = '/work/home/db876/plotting_tools/core_tools/anthro2_a2000.nc'
    anthload = Dataset(anthfile)
    class_result, anthrome_class_name = modules.anthrome_classify(anthload, [lat], [lon])
    if class_result == 'invalid':
        data_valid = False
        print 'Site Invalid, site classed as urban by anthrome map.'

    #do data quality checks
    full_data, data_valid, data_complete = modules.quality_check_periodic(full_data, data_valid, data_resolution, np.float64(altitude), grid_dates, start_year, end_year)

    #set measurement method
    mm = 'ultraviolet photometry'
    #set site file resolution
    file_res = 'H'
    #set sampling as average
    st = 'average'

    return c, full_data, data_valid, lat, lon, alt, raw_class_name, anthrome_class_name, mm, st, file_res, data_complete
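# --- Illustrative sketch (not part of the original scripts): the grid-matching
# step used above. Observation times (fractional days since the start year) are
# matched onto a synthetic hourly grid with np.searchsorted and the values are
# written at the matched indices. The observation values here are made up.
demo_grid = np.round(np.arange(0, 2, 1. / 24), decimals=5)      # two days, hourly
demo_t = np.round(np.array([0.0, 1. / 24, 1.5]), decimals=5)    # 00:00, 01:00, day 2 12:00
demo_full = np.empty(len(demo_grid))
demo_full[:] = -99999
demo_full[np.searchsorted(demo_grid, demo_t, side='left')] = np.array([30.1, 31.6, 42.0])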
def site_iter_process(valid_refs, c):
    #read files site at a time
    #for ref_i in range(len(valid_refs)):
    site_ref = valid_refs[c]
    all_latitudes = []
    all_longitudes = []
    all_altitudes = []
    all_mm = []
    print 'Current Ref is = ', site_ref

    #find if sites have full valid range from start year and finishing in end year
    s_files = glob.glob('/work/home/db876/observations/surface/%s/EMEP/%s*' % (species, site_ref))
    year_files = [file.replace("/work/home/db876/observations/surface/%s/EMEP/" % (species), "") for file in s_files]
    cut_year_files = [file[8:12] for file in year_files]
    site_files = []
    for y in year_array:
        for i in range(len(s_files)):
            if str(y) in cut_year_files[i]:
                site_files.append(s_files[i])
    site_files = modules.natsorted(site_files)
    year_files = modules.natsorted(year_files)

    file_startdate = []
    file_height = []
    instr_names = []
    file_lasttime = []
    data_valid = True

    yyyymmdd = []
    hhmm = []
    vals = []
    flags = []

    #create max possible o3 grid
    full_data = np.empty(n_hours)
    full_data[:] = -99999

    if site_files == []:
        print 'No valid files for site\n'
        return

    for y in year_array:
        print 'Processing Year %s' % y
        got_year = False
        for file in site_files:
            last_file_split = file.split('/')[-1]
            if str(y) in last_file_split[8:12]:
                got_year = True
                break
        if got_year == False:
            #fill in data for missing year
            timedelta_diff = datetime.date(y + 1, 1, 1) - datetime.date(y, 1, 1)
            ndays_missing = timedelta_diff.days
            print 'ndays missing = ', ndays_missing
            continue

        if data_valid == True:
            data_start = 9999999
            count = 0
            start_read = False

            with open(file, 'rb') as f:
                read_count = 0
                reader = csv.reader(f, delimiter=' ')
                print file
                for row in reader:
                    #strip empty fields and stray commas
                    try:
                        row = filter(lambda a: a != '', row)
                    except:
                        pass
                    try:
                        row = filter(lambda a: a != ',', row)
                    except:
                        pass

                    #get start date of file (anchored to 1 January of the file's start year)
                    if row[0] == 'Startdate:':
                        data = row[1]
                        s_yyyy = data[:4]
                        s_mm = data[4:6]
                        s_dd = data[6:8]
                        s_hh = data[8:10]
                        s_min = data[10:12]
                        start_datetime = datetime.datetime(int(s_yyyy), 1, 1, 0, 0)

                    #get unit
                    if row[0] == 'Unit:':
                        try:
                            unit_part1 = row[1]
                            unit_part2 = row[2]
                            unit = unit_part1 + '_' + unit_part2
                        except:
                            unit = row[1]

                    #get resolution
                    if row[0] == 'Resolution':
                        if row[1] == 'code:':
                            file_res = row[2]
                            print 'Resolution = %s' % file_res
                            if (output_res == 'H'):
                                if (file_res == '1d') or (file_res == '1mo'):
                                    print 'File resolution has to be Minimum Hourly. Skipping'
                                    data_valid = False
                                    return c, full_data, data_valid, -999, -999, -999, 'na', 'na', 'na', 'na', 'na'
                            elif (output_res == 'D'):
                                if (file_res == '1mo'):
                                    print 'File resolution has to be Minimum Daily. Skipping'
                                    data_valid = False
                                    return c, full_data, data_valid, -999, -999, -999, 'na', 'na', 'na', 'na', 'na'

                    #get latitude
                    if row[0] == 'Station':
                        if row[1] == 'latitude:':
                            latitude = row[2]
                            all_latitudes.append(latitude)
                    #get longitude
                    if row[0] == 'Station':
                        if row[1] == 'longitude:':
                            longitude = row[2]
                            all_longitudes.append(longitude)
                    #get altitude
                    if row[0] == 'Station':
                        if row[1] == 'altitude:':
                            altitude = row[2][:-1]
                            all_altitudes.append(altitude)

                    #get period
                    if row[0] == 'Period':
                        period_code = row[2]

                    #get stats method
                    if row[0] == 'Statistics:':
                        try:
                            st = row[1] + row[2]
                            if st != 'arithmeticmean':
                                print 'Not Arithmetic Mean!'
                                print row[1]
                                print 1 + 'a'  # deliberately raises a TypeError to halt
                        except:
                            print 'Not Arithmetic Mean!'
                            print row[1]
                            print 1 + 'a'

                    #get instrument method
                    if row[0] == 'Instrument':
                        if row[1] == 'type:':
                            mm_list = row[2:]
                            if len(mm_list) > 1:
                                site_mm = ''
                                for x in range(len(mm_list)):
                                    site_mm = site_mm + mm_list[x] + ' '
                                site_mm = site_mm.strip()
                            else:
                                site_mm = mm_list[0]
                            all_mm.append(site_mm)

                    #get data
                    if start_read == True:
                        #calc dates, times, and take o3 vals
                        time_since_start = np.float64(row[0])
                        days_since_start = math.trunc(time_since_start)
                        remainder = time_since_start - days_since_start
                        unrounded_hour = remainder * 24
                        hour = np.round(unrounded_hour)
                        time_delta = datetime.timedelta(days=days_since_start, hours=hour)
                        calc_datetime = start_datetime + time_delta
                        calc_yyyymmdd = calc_datetime.strftime("%Y%m%d")
                        calc_hhmm = calc_datetime.strftime("%H%M")
                        line_val = np.float64(row[2])

                        #convert units by line (only if value is >= 0)
                        if line_val >= 0:
                            if (unit.lower() != 'ppb') & (unit.lower() != 'ppbv'):
                                if unit == 'ug/m3':
                                    #conversion factor from ug/m3 assuming 20 degC and 1 atm - default for O3 instruments
                                    #conv_fact = R/MW * (273.15 + T(degC)) / (P(hPa)/10)
                                    conv_fact = 8.3144 / mol_mass * (273.15 + 20) / (1013.25 / 10)
                                    line_val = conv_fact * line_val
                                    #print 'Converting Units from ug/m3 20degC to ppbv'
                                elif unit == 'ug_N/m3':
                                    conv_fact = 8.3144 / mol_mass * (273.15 + 20) / (1013.25 / 10)
                                    line_val = conv_fact * line_val
                                    #print 'Converting Units from ug/Nm3 20degC to ppbv'
                                elif (unit == 'ppm') or (unit == 'ppmv'):
                                    line_val = line_val * 1e3
                                    #print 'Converting Units from ppmv to ppbv'
                                elif (unit == 'ppt') or (unit == 'pptv'):
                                    line_val = line_val / 1e3
                                    #print 'Converting Units from pptv to ppbv'
                                else:
                                    print 'Unknown Unit'
                                    data_valid = False
                                    1 + 'a'  # deliberately raises a TypeError to halt

                        if file_res == '1h':
                            yyyymmdd = np.append(yyyymmdd, calc_yyyymmdd)
                            hhmm = np.append(hhmm, calc_hhmm)
                            vals = np.append(vals, line_val)
                            flags = np.append(flags, np.float64(row[3]))
                        elif file_res == '1d':
                            #replicate the daily value across all 24 hours
                            yyyymmdd = np.append(yyyymmdd, calc_yyyymmdd)
                            hhmm = np.append(hhmm, '0000')
                            vals = np.append(vals, line_val)
                            flags = np.append(flags, np.float64(row[3]))
                            for j in range(1, 24):
                                time_delta = datetime.timedelta(days=days_since_start, hours=j)
                                calc_datetime = start_datetime + time_delta
                                vals = np.append(vals, vals[-1])
                                flags = np.append(flags, flags[-1])
                                yyyymmdd = np.append(yyyymmdd, calc_datetime.strftime("%Y%m%d"))
                                hhmm = np.append(hhmm, calc_datetime.strftime("%H%M"))
                        elif file_res == '1mo':
                            #replicate the monthly value across all hours in the month
                            yyyymmdd = np.append(yyyymmdd, calc_yyyymmdd)
                            hhmm = np.append(hhmm, '0000')
                            vals = np.append(vals, line_val)
                            flags = np.append(flags, np.float64(row[3]))
                            month_days = monthrange(int(yyyymmdd[-1][:4]), int(yyyymmdd[-1][4:6]))[1]
                            for j in range(1, 24 * month_days):
                                time_delta = datetime.timedelta(days=days_since_start, hours=j)
                                calc_datetime = start_datetime + time_delta
                                vals = np.append(vals, vals[-1])
                                flags = np.append(flags, flags[-1])
                                yyyymmdd = np.append(yyyymmdd, calc_datetime.strftime("%Y%m%d"))
                                hhmm = np.append(hhmm, calc_datetime.strftime("%H%M"))

                    if row[0] == 'starttime':
                        start_read = True
                    count += 1

        if (y == year_array[-1]):
            #convert all invalids by flags to -99999
            test_inv = flags != 0
            if len(test_inv) != 0:
                vals[test_inv] = -99999
            #any values less than zero are -99999
            test_inv = vals < 0
            if len(test_inv) != 0:
                vals[test_inv] = -99999
            #additional invalid test possible, as flags are not always correct
            #test_inv_2 = vals > 300
            #vals[test_inv_2] = -99999

            #put o3 vals into full grid
            date_con = np.array(yyyymmdd).astype(int)
            time_con = np.array(hhmm).astype(int)

            #find matching times between actual times and grid of times, return big array of indices of matched indices in grid
            converted_time = modules.date_process(date_con, time_con, start_year)
            converted_time = np.round(converted_time, decimals=5)
            syn_grid_time = np.arange(0, n_days, 1. / 24)
            syn_grid_time = np.round(syn_grid_time, decimals=5)
            indices = np.searchsorted(syn_grid_time, converted_time, side='left')
            vals = np.array(vals)
            #make sure no data is past end year
            index_test = indices < len(full_data)
            indices = indices[index_test]
            vals = vals[index_test]
            full_data[indices] = vals

    #get mode of metadata
    lat = np.float64(stats.mode(all_latitudes)[0][0])
    lon = np.float64(stats.mode(all_longitudes)[0][0])
    alt = np.float64(stats.mode(all_altitudes)[0][0])
    mm = stats.mode(all_mm)[0][0]

    #check site is not urban using anthrome map from 2000
    anthfile = '/work/home/db876/plotting_tools/core_tools/anthro2_a2000.nc'
    anthload = Dataset(anthfile)
    class_valid, anthrome_class_name = modules.anthrome_classify(anthload, [lat], [lon])
    if class_valid == 'invalid':
        data_valid = False
        print 'Site Invalid, site classed as urban by anthrome map.'

    #get measurement method
    if (mm == 'uv_abs') or (mm == 'chemiluminesc') or (mm == 'uv_fluoresc'):
        if species == 'O3':
            mm = 'ultraviolet photometry'
        if (species == 'NO') or (species == 'NO2') or (species == 'CO'):
            mm = 'chemiluminescence'
    elif (mm == 'ndir') or (mm == 'infrared_absorption'):
        mm = 'non-dispersive infrared spectroscopy'
    elif (mm == 'GC-HgO'):
        mm = 'gas chromatography reduction gas detection'
    elif (mm == 'tracegas_monitor'):
        mm = 'cavity attenuated phase shift spectroscopy'
    elif (mm == 'filter_1pack') or (mm == 'filter_2pack') or (mm == 'filter_3pack'):
        if species == 'NO2':
            mm = 'griess saltzman colorimetric'
        elif species == 'CO':
            mm = 'ion chromatography'
    elif (mm == 'steel_canister'):
        mm = 'gas chromatography flame ionisation detection'
    elif (mm == 'online_gc'):
        mm = 'online gas chromatography'
    elif (mm == 'glass_sinter') or (mm == 'abs_solution') or (mm == 'filter_abs_solution') or (mm == 'abs_tube') or (mm == 'continuous_colorimetric'):
        mm = 'griess saltzman colorimetric'
    elif (mm == 'NaJ_solution'):
        mm = 'flame ionisation detection'
    elif (mm == 'doas'):
        mm = 'differential optical absorption spectroscopy'
    elif (mm == 'diffusion_tube'):
        mm = 'diffusive sampler'
    elif (mm == 'NA') or (mm == ''):
        #default methods per species when metadata is blank
        if species == 'O3':
            mm = 'ultraviolet photometry'
        if species == 'CO':
            mm = 'non-dispersive infrared spectroscopy'
        if species == 'NO2':
            mm = 'chemiluminescence'
        if species == 'NO':
            mm = 'chemiluminescence'
        if species == 'ISOP':
            mm = 'gas chromatography flame ionisation detection'
    else:
        print mm
        1 + 'a'  # deliberately raises a TypeError to halt on an unknown method

    #do data quality checks
    full_data, data_valid = modules.quality_check(full_data, data_valid, data_resolution, alt, grid_dates, start_year, end_year)

    #convert file res to standard format
    if file_res == '1h':
        file_res = 'H'
    elif file_res == '1d':
        file_res = 'D'
    elif file_res == '1mo':
        file_res = 'M'

    #no raw class, so set as na
    raw_class_name = 'na'
    #set sampling as average
    st = 'average'

    return c, full_data, data_valid, lat, lon, alt, raw_class_name, anthrome_class_name, mm, st, file_res
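# --- Illustrative sketch (not part of the original scripts): decoding the EMEP
# time column above. The first column is fractional days since the file start;
# it is split into whole days plus a rounded hour and added to the anchor
# datetime. The example offset is made up.
demo_start = datetime.datetime(2005, 1, 1, 0, 0)
demo_t = 31.5  # 31 days and 12 hours after the anchor
demo_days = math.trunc(demo_t)
demo_hour = np.round((demo_t - demo_days) * 24)
print demo_start + datetime.timedelta(days=demo_days, hours=demo_hour)  # 2005-02-01 12:00:00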
def site_iter_process(valid_refs, c):
    #for each valid location, process and limit obs data for each site in valid_obs_site_names
    all_lat = []
    all_lon = []
    all_alt = []
    all_st = []
    all_mm = []
    site_ref = valid_refs[c]
    file_valid = True
    data_valid = True
    print site_ref
    file_res = data_resolutions[c]
    print file_res

    #read files for each valid site
    s_files = sorted(glob.glob('/work/home/db876/observations/surface/%s/GAW/%s**.%s**.dat' % (species, site_ref.lower(), file_res)))
    print s_files
    if file_res == 'hr':
        site_files = sorted(s_files, key=lambda x: x.split(".hr")[1])
    else:
        site_files = sorted(s_files)

    delete_inds = []
    if file_res == 'hr':
        #limit site files to those between the start and end year
        for i in range(len(site_files)):
            f = site_files[i]
            year = f.split(".hr")[1][:4]
            if int(year) < int(start_year):
                delete_inds.append(i)
            if int(year) > int(end_year):
                delete_inds.append(i)
    site_files = np.delete(site_files, delete_inds)
    print site_files

    if len(site_files) == 0:
        print 'No valid files in date range. Skipping.'
        data_valid = False
        return c, [], data_valid, -999, -999, -999, 'na', 'na', 'na', 'na', 'na', -999

    site_file_len = len(site_files)
    s_count = 0
    start_ind = 0
    end_ind = 0

    for f in site_files:
        print f
        read = np.loadtxt(f, dtype="S10,S5,f8", comments='C', usecols=(0, 1, 4), unpack=True)
        read = np.array(read)
        dates = read[0, :]
        times = read[1, :]
        conc = read[2, :]
        conc = np.array(conc)
        conc = conc.astype(float)

        #change all vals < 0 to np.NaN
        inv_test = conc < 0
        conc[inv_test] = np.NaN

        start_ind = end_ind
        end_ind += len(conc)
        s_count += 1

        #parse metadata from the fixed header rows of the file
        units = []
        mycsv = csv.reader(open(f))
        row_count = 0
        for row in mycsv:
            #get lat
            if row_count == 11:
                val = " ".join(row)
                lat = val.replace(" ", "")
                lat = lat[12:]
                lat = float(lat)
                all_lat.append(lat)
            #get lon
            if row_count == 12:
                val = " ".join(row)
                lon = val.replace(" ", "")
                lon = lon[13:]
                lon = float(lon)
                all_lon.append(lon)
            #get altitude
            if row_count == 13:
                val = " ".join(row)
                alt = val.replace(" ", "")
                alt = alt[12:]
                alt = float(alt)
                all_alt.append(alt)
            #get units
            if row_count == 20:
                val = " ".join(row)
                unit = val.replace(" ", "")
                unit = unit[19:]
            #get measurement method
            if row_count == 21:
                val = " ".join(row)
                mm = val.replace(" ", "")
                mm = mm[21:]
                all_mm.append(mm)
            #get sampling type
            if row_count == 22:
                val = " ".join(row)
                st = val.replace(" ", "")
                st = st[16:]
                all_st.append(st)
            #get timezone
            if row_count == 23:
                val = " ".join(row)
                tz = val.replace(" ", "")
                tz = tz[12:]
            row_count += 1

        #test if units are in ppb for each file - if not, convert
        #conversion factor: conv_fact = R/MW * (273.15 + T(degC)) / (P(hPa)/10), with R = 8.3144
        if (unit != 'ppb') & (unit != 'ppbv'):
            if (unit == 'ug/m3') or (unit == 'ugN/m3'):
                #assume 20 degC and 1 atm - default for GAW site O3 instruments
                print 'converting units, temp = 20degC'
                conv_fact = 8.3144 / mol_mass * (273.15 + 20) / (1013.25 / 10)
                conc = conv_fact * conc
            elif (unit == 'ug/m3-20C') or (unit == 'ugN/m3-20C'):
                print 'converting units, temp = 20degC'
                conv_fact = 8.3144 / mol_mass * (273.15 + 20) / (1013.25 / 10)
                conc = conv_fact * conc
            elif (unit == 'ug/m3-25C') or (unit == 'ugN/m3-25C') or (unit == 'ug/m3at25C'):
                print 'converting units, temp = 25degC'
                conv_fact = 8.3144 / mol_mass * (273.15 + 25) / (1013.25 / 10)
                conc = conv_fact * conc
            elif (unit == 'mg/m3-20C') or (unit == 'mgN/m3-20C'):
                #mg/m3 rather than ug/m3, hence the extra factor of 1e3
                print 'converting units, temp = 20degC'
                conv_fact = 8.3144 / mol_mass * (273.15 + 20) / (1013.25 / 10)
                conc = (conv_fact * conc) * 1e3
            elif (unit == 'mg/m3-25C') or (unit == 'mgN/m3-25C'):
                print 'converting units, temp = 25degC'
                conv_fact = 8.3144 / mol_mass * (273.15 + 25) / (1013.25 / 10)
                conc = (conv_fact * conc) * 1e3
            elif (unit == 'ppm') or (unit == 'ppmv'):
                conc = conc * 1.e3
            elif (unit == 'ppt') or (unit == 'pptv'):
                conc = conc / 1.e3
            else:
                print 'Unknown Unit'
                print unit
                1 + 'a'  # deliberately raises a TypeError to halt on an unhandled unit
                break

        if tz != 'UTC':
            if tz == '':
                #no timezone in metadata - fall back to hard-coded UTC offsets per site
                if site_ref.lower() in ['plm']:
                    tz = -5
                if site_ref.lower() in ['kos', 'edm', 'vdl', 'nwr']:
                    tz = 0
                if site_ref.lower() in ['jfj', 'kps', 'rig', 'pay', 'glh', 'cmn', 'zep', 'dig', 'hhe', 'ktb', 'stp', 'ivn', 'jcz', 'kam', 'lzp', 'snz', 'zbl', 'kmw', 'don', 'mhn', 'nia', 'roq', 'spm']:
                    tz = 1
                if site_ref.lower() in ['rcv', 'aht', 'oul', 'uto', 'vir', 'fdt', 'sem', 'stn']:
                    tz = 2
                if site_ref.lower() in ['dak']:
                    tz = 3
                if site_ref.lower() in ['shp']:
                    tz = 4
                if site_ref.lower() in ['isk']:
                    tz = 5
                if site_ref.lower() in ['hkg']:
                    tz = 8
                if site_ref.lower() in ['cgo']:
                    tz = 10
            else:
                tz = tz.replace('LocaltimeUTC', '')
                tz = tz.replace('OtherUTC', '')
                tz = tz.replace('Localtime', '')
                tz = tz.replace(':', '.')
                try:
                    #convert the minutes part of the offset to a decimal fraction of an hour
                    before, sep, after = tz.rpartition('.')
                    after = int(after)
                    conv = (100. / 60) * after
                    tz = before + sep + str(conv)
                except:
                    1 + 1
            tz = float(tz)
        else:
            tz = 0

        #check tz is a whole number, else invalidate the data
        if (tz % 1) != 0:
            print 'File Invalid, timezone is not a whole number.'
            conc[:] = -99999

        #process dates from date, time to days since start year
        dates = [s.replace('-', '') for s in dates]
        times = [s.replace(':', '') for s in times]

        if file_res == 'hr':
            #some times run from 0100 to 2400; assume these sites report the average
            #for the previous hour, so subtract an hour from all times on those days
            for i in range(len(times)):
                if times[i] == '2400':
                    current_date = dates[i]
                    test = np.array(dates) == current_date
                    indices = [i for i, x in enumerate(test) if x]
                    for x in indices:
                        current_time = times[x]
                        if current_time == '2400':
                            current_time = '0000'
                        date_datetime = datetime.datetime(int(current_date[0:4]), int(current_date[4:6]), int(current_date[6:]), int(current_time[:2]), int(current_time[2:]))
                        date_datetime = date_datetime - datetime.timedelta(hours=1)
                        times[x] = date_datetime.strftime("%H%M")

        #adjust dates and times if tz is not equal to 0
        if tz != 0:
            for i in range(len(dates)):
                #create datetime
                dt = datetime.datetime(int(dates[i][:4]), int(dates[i][4:6]), int(dates[i][6:]), int(times[i][:2]), int(times[i][2:]))
                if tz > 0:
                    #print 'Old dt', dt
                    dt = dt - datetime.timedelta(hours=int(tz))
                    #print 'New dt', dt
                elif tz < 0:
                    #print 'Old dt', dt
                    dt = dt + datetime.timedelta(hours=np.abs(int(tz)))
                    #print 'New dt', dt
                dates[i] = dt.strftime("%Y%m%d")
                times[i] = dt.strftime("%H%M")

        data = [dates, times, conc]
        try:
            big_list = np.hstack((big_list, data))
        except:
            big_list = np.array(data)

        if (s_count == site_file_len):
            #make sure big list exists
            try:
                big_list
            except:
                data_valid = False

            if data_valid == True:
                #get dates and times
                date_con = big_list[0, :]
                time_con = big_list[1, :]
                #get vals
                vals = np.array(big_list[2, :]).astype(float)
                #delete big list
                del big_list

                #exclude dates outside the requested range
                first_date_val = int('%s0101' % (start_year))
                last_date_val = int('%s1231' % (end_year))
                test_valid = (np.array(date_con).astype(int) >= first_date_val) & (np.array(date_con).astype(int) <= last_date_val)
                date_con = date_con[test_valid]
                time_con = time_con[test_valid]
                vals = vals[test_valid]

                #check if any times are duplicated; if so, delete all but the first
                del_list = []
                for d in range(len(date_con) - 1):
                    if (date_con[d] == date_con[d + 1]) & (time_con[d] == time_con[d + 1]):
                        del_list.append(d + 1)
                if len(del_list) > 0:
                    print 'Deleting duplicate timepoints'
                    print date_con[del_list], time_con[del_list]
                    date_con = np.delete(date_con, del_list)
                    time_con = np.delete(time_con, del_list)
                    vals = np.delete(vals, del_list)

                #if file resolution is daily or monthly, replicate times after each point to fill the hourly data array
                count = 0
                if file_res == 'da':
                    file_hours = len(date_con)
                    for i in range(file_hours):
                        current_hh = int(time_con[count][:2])
                        current_mm = int(time_con[count][2:])
                        s = datetime.datetime(year=start_year, month=1, day=1, hour=current_hh, minute=current_mm)
                        e = datetime.datetime(year=start_year, month=1, day=2, hour=current_hh, minute=current_mm)
                        day_hours = [d.strftime('%H%M') for d in pd.date_range(s, e, freq='H')][1:-1]
                        date_con = np.insert(date_con, count + 1, [date_con[count]] * 23)
                        time_con = np.insert(time_con, count + 1, day_hours)
                        vals = np.insert(vals, count + 1, [vals[count]] * 23)
                        count += 24
                if file_res == 'mo':
                    file_hours = len(date_con)
                    for i in range(file_hours):
                        current_year = int(date_con[count][:4])
                        current_month = int(date_con[count][4:6])
                        next_month = current_month + 1
                        if next_month > 12:
                            next_month = 1
                            next_year = current_year + 1
                        else:
                            next_year = current_year
                        s = datetime.datetime(year=current_year, month=current_month, day=1, hour=1, minute=0)
                        e = datetime.datetime(year=next_year, month=next_month, day=1, hour=0, minute=0)
                        day_date = [d.strftime('%Y%m%d') for d in pd.date_range(s, e, freq='H')][:-1]
                        day_hour = [d.strftime('%H%M') for d in pd.date_range(s, e, freq='H')][:-1]
                        date_con = np.insert(date_con, count + 1, day_date)
                        time_con = np.insert(time_con, count + 1, day_hour)
                        vals = np.insert(vals, count + 1, [vals[count]] * len(day_date))
                        count += (len(day_date) + 1)

                date_con = np.array(date_con).astype(int)
                time_con = np.array(time_con).astype(int)

                #create max possible o3 grid
                o3_data = np.empty(n_hours)
                o3_data[:] = -99999

                #delete dates, times and var outside date range
                val_test = (date_con >= int(output_res_dates_strings[0])) & (date_con <= int(output_res_dates_strings[-1]))
                date_con = date_con[val_test]
                time_con = time_con[val_test]
                vals = vals[val_test]
                print date_con

                #find matching times between actual times and grid of times, returning indices of matches in the grid
                converted_time = modules.date_process(date_con, time_con, start_year)
                converted_time = np.round(converted_time, decimals=5)
                syn_grid_time = np.arange(0, n_days, 1. / 24)
                syn_grid_time = np.round(syn_grid_time, decimals=5)
                indices = np.searchsorted(syn_grid_time, converted_time, side='left')
                o3_data[indices] = vals

                #convert all NaNs back to -99999
                test = np.isnan(o3_data)
                o3_data[test] = -99999

                #get mode of metadata
                lat = np.float64(stats.mode(all_lat)[0][0])
                lon = np.float64(stats.mode(all_lon)[0][0])
                alt = np.float64(stats.mode(all_alt)[0][0])
                st = stats.mode(all_st)[0][0]
                mm = stats.mode(all_mm)[0][0]

                #check site is not urban using anthrome map from 2000
                anthfile = '/work/home/db876/plotting_tools/core_tools/anthro2_a2000.nc'
                anthload = Dataset(anthfile)
                class_valid, anthrome_class_name = modules.anthrome_classify(anthload, [lat], [lon])
                if class_valid == 'invalid':
                    data_valid = False
                    print 'Site Invalid, site classed as urban by anthrome map.'

                #get measurement type and sampling type (take mode from collected list)
                if (st == 'continuous') or (st == 'continuous(carbondioxide),remotespectroscopicmethod(methaneandsurfaceozone)') or (st == 'continuous(carbondioxide)remotespectroscopicmethod(methaneandsurfaceozone)'):
                    st = 'average'
                elif st == 'flask':
                    st = 'flask'
                elif st == 'filter':
                    st = 'filter'
                else:
                    print st
                    1 + 'a'  # deliberately raises a TypeError to halt on an unhandled sampling type

                if mm == 'Lightabsorptionanalysis(UV)':
                    mm = 'ultraviolet photometry'
                elif mm == 'CavityRingdownSpectroscopy':
                    mm = 'cavity ringdown spectroscopy'
                elif mm == 'NDIR':
                    mm = 'non-dispersive infrared spectroscopy'
                elif (mm == 'GasChromatography(FID)'):
                    mm = 'gas chromatography flame ionisation detection'
                elif (mm == 'Gas Chromatography (RGD)'):
                    mm = 'gas chromatography reduction gas detection'
                elif mm == 'Chemiluminescence':
                    mm = 'chemiluminescence'
                elif (mm == 'Spectrophotometry') or (mm == 'spectrophotometry,naphthyl-ethylenediaminedihydrochloridemethod'):
                    mm = 'spectrophotometry'
                elif mm == 'continuous(carbondioxide)remotespectroscopicmethod(methaneandsurfaceozone)':
                    mm = 'near infrared spectroscopy'
                elif mm == '':
                    #default methods per species when metadata is blank
                    if species == 'O3':
                        mm = 'ultraviolet photometry'
                    if species == 'CO':
                        mm = 'non-dispersive infrared spectroscopy'
                    if species == 'NO2':
                        mm = 'chemiluminescence'
                    if species == 'NO':
                        mm = 'chemiluminescence'
                    if species == 'ISOP':
                        mm = 'gas chromatography flame ionisation detection'

                #do data quality checks
                full_data, data_valid, data_complete = modules.quality_check_periodic(o3_data, data_valid, data_resolution, alt, grid_dates, start_year, end_year)

    #convert file res to standard format
    if file_res == 'hr':
        file_res = 'H'
    elif file_res == 'da':
        file_res = 'D'
    elif file_res == 'mo':
        file_res = 'M'

    #no raw class, so set as na
    raw_class_name = 'na'

    return c, full_data, data_valid, lat, lon, alt, raw_class_name, anthrome_class_name, mm, st, file_res, data_complete
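# --- Illustrative sketch (not part of the original scripts): the timezone
# string handling above. Offsets such as 'Localtime+05:30' are stripped to a
# number and the minutes converted to a decimal fraction of an hour; sites are
# only kept when the offset is a whole number of hours. The input is made up,
# and the int() wrapper is an illustrative simplification.
demo_tz = 'Localtime+05:30'.replace('Localtime', '').replace(':', '.')
demo_before, demo_sep, demo_after = demo_tz.rpartition('.')
demo_tz = float(demo_before + demo_sep + str(int((100. / 60) * int(demo_after))))  # '+05.30' -> 5.5
print (demo_tz % 1) != 0  # True: half-hour offset, so the site would be skipped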
giss = np.ravel(data.variables['giss'][:])
mirocchem = np.ravel(data.variables['mirocchem'][:])

if typ == 'abs':
    z = abs_std
elif typ == 'pc':
    z = frac_std

all_lat_c = [[i] * len(lon_c) for i in lat_c]
all_lat_c = [item for sublist in all_lat_c for item in sublist]
all_lon_c = [lon_c] * len(lat_c)
all_lon_c = [item for sublist in all_lon_c for item in sublist]

anthfile = '/work/home/db876/plotting_tools/core_tools/anthro2_a2000.nc'
anthload = Dataset(anthfile)
class_result, class_name = modules.anthrome_classify(anthload, all_lat_c, all_lon_c)

areas = ['ANT', 'OC', 'S_O', 'AF', 'SE_US', 'S_US', 'W_US', 'N_US', 'NE_US', 'W_CAN', 'E_CAN', 'S_EU', 'C_EU', 'NW_EU', 'N_EU', 'E_EU', 'AS', 'N_O', 'ARC']

plot_type = raw_input('\nd, s or full?\n')

if plot_type == 'd':
    obs_datetimes = obs_datetimes[:24]
    model_datetimes = model_datetimes[:24]
if plot_type == 's':
    obs_datetimes = obs_datetimes[:8766]
    model_datetimes = model_datetimes[:8766]

obs_time_pd = pd.date_range(start=obs_datetimes[0], end=obs_datetimes[-1], freq='H')
model_time_pd = pd.date_range(start=model_datetimes[0], end=model_datetimes[-1], freq='H')
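# --- Illustrative sketch (not part of the original scripts): the lat/lon grid
# flattening above pairs every grid-cell centre so anthrome_classify can be
# called once over the whole model grid. Example coordinates are made up.
demo_lat_c, demo_lon_c = [10, 20], [100, 110, 120]
demo_lats = [la for la in demo_lat_c for _ in demo_lon_c]  # [10, 10, 10, 20, 20, 20]
demo_lons = demo_lon_c * len(demo_lat_c)                   # [100, 110, 120, 100, 110, 120]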