def _concat_rules(arm_data_objs):
    """Concatenate a list of ARM data objects into a single ``ArmDatasetSub``.

    Parameters
    ----------
    arm_data_objs : list
        Objects each carrying ``relative_humidity``, ``temperature`` and
        ``vapor_pressure`` timeseries attributes.

    Returns
    -------
    ArmDatasetSub
        New object with the concatenated timeseries and a ``time_stamps``
        index taken from one of the variables.
    """
    # Create the output container (False presumably skips file loading in
    # __init__ — TODO confirm against the ArmDatasetSub definition).
    out = ArmDatasetSub(False)

    # Concatenate each timeseries attribute; restore _data_period afterwards
    # because concat does not carry it over.
    for attr in ('relative_humidity', 'temperature', 'vapor_pressure'):
        ts = _timeseries.concat([obj_i.__getattribute__(attr) if False else getattr(obj_i, attr)
                                 for obj_i in arm_data_objs])
        ts._data_period = out._data_period
        setattr(out, attr, ts)

    # Use time stamps from one of the variables.
    out.time_stamps = out.relative_humidity.data.index
    return out
def load_netCDF(folder, prod_name, time_window, site='sgp', verbose=False):
    """Load, concatenate and gap-fill all matching ARM netCDF files in *folder*.

    A file is used when it has a ``.cdf`` extension, falls inside
    *time_window*, belongs to *site*, and its product name (characters of the
    first dot-separated token after the 3-character site prefix) equals
    *prod_name*.

    Parameters
    ----------
    folder : str
        Directory containing the ``.cdf`` files.
    prod_name : str
        ARM product name to select.
    time_window : tuple
        Passed to ``_atm_arm._is_in_time_window`` to filter by date.
    site : str, optional
        Three-letter site code prefix (default ``'sgp'``).
    verbose : bool, optional
        Print progress / skip messages.

    Returns
    -------
    TimeSeries
        Concatenated, time-sorted, gap-closed timeseries.

    Raises
    ------
    ValueError
        If no file meets all criteria.
    """
    all_ts = []
    for fname in _os.listdir(folder):
        if _os.path.splitext(fname)[-1] != '.cdf':
            if verbose:
                print('\t %s is not a netCDF file ... skipping' % fname)
            continue
        if not _atm_arm._is_in_time_window(fname, time_window, verbose):
            continue
        if not _atm_arm._is_site(fname, site, verbose):
            continue
        # Test for the correct product: drop the 3-char site prefix from the
        # first filename token.
        if fname.split('.')[0][3:] != prod_name:
            continue
        # os.path.join tolerates a trailing separator on *folder*, unlike the
        # plain string concatenation it replaces.
        all_ts.append(_timeseries.load_netCDF(_os.path.join(folder, fname)))

    if not all_ts:
        raise ValueError('no file meets criteria')

    ts_concat = _timeseries.concat(all_ts)
    ts_concat.data.sort_index(inplace=True)
    return ts_concat.close_gaps()
def _calculate_all(self, overwrite, time_window=False, verbose=False):
    """Compute the f(RH) product for every matching ``tdmaapssize`` file.

    For each ``tdmaapssize`` file in ``self.folder`` that passes the test /
    time-window filters and has companion ``aosacsm`` and ``tdmahyg`` files,
    compute one f(RH) result, save it to ``self.folder_out`` and collect it.
    The concatenated, gap-closed result is stored in ``self.result``.

    Parameters
    ----------
    overwrite : bool
        Recompute even when the output product file already exists.
    time_window : tuple or False, optional
        When truthy, only process files inside this window.
    verbose : bool, optional
        Print progress messages.
    """
    fofrh_list = []
    all_files = _np.array(_os.listdir(self.folder))
    all_files_tdmaapssize = all_files[_np.char.find(all_files, 'tdmaapssize') > -1]

    test_done = False
    for fname_tdmaapssize in all_files_tdmaapssize:
        # Test mode: process only self.test_file, then stop at the next file.
        if self.test:
            if fname_tdmaapssize == self.test_file:
                test_done = True
            elif test_done:
                break
            else:
                continue

        if time_window:
            # Fix: pass time_window through; previously verbose was handed in
            # as the window argument (see the 3-arg call in load_netCDF).
            if not _atm_arm._is_in_time_window(fname_tdmaapssize, time_window, verbose):
                continue

        fname_others = _get_other_filenames(fname_tdmaapssize,
                                            ['aosacsm', 'tdmahyg'],
                                            all_files)
        if not fname_others:
            continue

        # e.g. '1um_hyg400_rh85v40'
        name_addon = '%s_hyg%s_rh%sv%s' % (self.diameter_cutoff,
                                           self.hygroscopicity_diameter,
                                           self.RH_wet, self.RH_dry)
        my_prod_name = (self.folder_out + '/' + fname_others['site'] +
                        '_HT_tdmaapshyg_' + name_addon + '.' +
                        fname_others['date'] + '.000000.cdf')

        if not overwrite and _os.path.isfile(my_prod_name):
            if verbose:
                print('product %s already exists' % my_prod_name)
            continue

        # Fix: previously verbose was hard-set to False here, silently
        # ignoring the caller's argument for the read_cdf calls below.
        tdmaapssize = _atm_arm.read_cdf(self.folder + fname_tdmaapssize,
                                        data_quality=self.data_quality,
                                        verbose=verbose)
        aosacsm = _atm_arm.read_cdf(self.folder + fname_others['aosacsm']['fname'],
                                    data_quality=self.data_quality,
                                    verbose=verbose)
        tdmahyg = _atm_arm.read_cdf(self.folder + fname_others['tdmahyg']['fname'],
                                    data_quality=self.data_quality,
                                    verbose=verbose)

        fofrh = self._calculate_one(tdmaapssize, tdmahyg, aosacsm,
                                    diameter_cutoff=self.diameter_cutoff,
                                    hygroscopicity_diameter=self.hygroscopicity_diameter,
                                    RH_dry=self.RH_dry,
                                    RH_wet=self.RH_wet)
        fofrh_list.append(fofrh)
        fofrh.save_netCDF(my_prod_name)
        print(my_prod_name)

    if not fofrh_list:
        raise ValueError('no file meets criteria')

    fofrh_cat = _timeseries.concat(fofrh_list)
    self.result = fofrh_cat.close_gaps(verbose=False)
    return
def _calculate_all(self, overwrite, time_window=False, verbose=False):
    """Compute kappa from every matching ``tdmahyg`` file in ``self.folder``.

    Each result is saved to ``self.folder_out`` (unless in test mode) and the
    concatenated, gap-closed series is stored in ``self.result``.

    Parameters
    ----------
    overwrite : bool
        Recompute even when the output product file already exists.
    time_window : tuple or False, optional
        When truthy, only process files inside this window.
    verbose : bool, optional
        Print progress messages.
    """
    kappa_list = []
    all_files = _np.array(_os.listdir(self.folder))
    all_files_tdmahyg = all_files[_np.char.find(all_files, 'tdmahyg') > -1]

    test_done = False
    for fname_tdmahyg in all_files_tdmahyg:
        # Test mode: process only self.test_file, then stop at the next file.
        if self.test:
            if fname_tdmahyg == self.test_file:
                test_done = True
            elif test_done:
                break
            else:
                continue

        if time_window:
            # Fix: pass time_window through; previously verbose was handed in
            # as the window argument (see the 3-arg call in load_netCDF).
            if not _atm_arm._is_in_time_window(fname_tdmahyg, time_window, verbose):
                continue

        splitname = _splitup_filename(fname_tdmahyg)
        site = splitname['site']
        date = splitname['date']

        # '.' would collide with the filename's field separators, so dots in
        # the settings are replaced with 'o'.
        name_addon = ('%s_d%s_%s' % (self.method, self.diameter,
                                     self.data_quality)).replace('.', 'o')
        my_prod_name = (self.folder_out + site + 'tdmahyg2kappa_' +
                        name_addon + '.' + date + '.000000.cdf')

        if not overwrite and _os.path.isfile(my_prod_name):
            if verbose:
                print('product %s already exists' % my_prod_name)
            continue

        tdmahyg = _atm_arm.read_cdf(self.folder + fname_tdmahyg,
                                    data_quality=self.data_quality,
                                    verbose=verbose)
        kappa = self._calculate_one(tdmahyg)
        kappa_list.append(kappa)
        if not self.test:
            kappa.save_netCDF(my_prod_name)
        print(my_prod_name)

    # Fix: previously an empty list fell through to kappa_list[0] and raised
    # a bare IndexError; fail with the same clear message as load_netCDF.
    if not kappa_list:
        raise ValueError('no file meets criteria')

    if len(kappa_list) > 1:
        kappa_cat = _timeseries.concat(kappa_list)
        self.result = kappa_cat.close_gaps(verbose=False)
    else:
        self.result = kappa_list[0]
def _calculate_all(self, overwrite, time_window=False, verbose=False):
    """Compute kappa from every matching ``aipfitrh1ogrenC1`` file.

    The size distribution and refractive index inputs are resolved from
    ``self.sizedistribution`` and ``self.refractive_index`` (either a
    companion ARM product name or, for the refractive index, a plain float).
    Each result is saved to ``self.folder_out`` (unless in test mode) and the
    concatenated, gap-closed series is stored in ``self.result``.

    Parameters
    ----------
    overwrite : bool
        Recompute even when the output product file already exists.
    time_window : tuple or False, optional
        When truthy, only process files inside this window.
    verbose : bool, optional
        Print progress messages.

    Raises
    ------
    ValueError
        On an unknown f(RH) product, size distribution or refractive index
        setting, or when no file meets the criteria.
    """
    # Map the f(RH) product to the tag used in the output filename. Fix:
    # previously an unknown product left RH_for_name undefined, producing a
    # confusing NameError further down.
    if self.f_of_rh_product == 'f_RH_scatt_2p_85_40':
        RH_for_name = 'RH_85_40'
    else:
        raise ValueError('Unknown f_of_rh_product (%s).' % self.f_of_rh_product)

    kappa_list = []
    all_files = _np.array(_os.listdir(self.folder))
    all_files_aipfitrh = all_files[_np.char.find(all_files, 'aipfitrh1ogrenC1') > -1]

    test_done = False
    for fname_aipfitrh in all_files_aipfitrh:
        # Test mode: process only self.test_file, then stop at the next file.
        if self.test:
            if fname_aipfitrh == self.test_file:
                test_done = True
            elif test_done:
                break
            else:
                continue

        if time_window:
            # Fix: pass time_window through; previously verbose was handed in
            # as the window argument (see the 3-arg call in load_netCDF).
            if not _atm_arm._is_in_time_window(fname_aipfitrh, time_window, verbose):
                continue

        splitname = _splitup_filename(fname_aipfitrh)
        site = splitname['site']
        date = splitname['date']

        # '.' would collide with the filename's field separators, so dots in
        # the settings are replaced with 'o'.
        name_addon = ('%s_%s_RI%s_%s_%snm_%s' % (RH_for_name,
                                                 self.sizedistribution,
                                                 self.refractive_index,
                                                 self.diameter_cutoff,
                                                 self.wavelength,
                                                 self.data_quality)).replace('.', 'o')
        my_prod_name = (self.folder_out + site + 'aipfitrh2kappa_' +
                        name_addon + '.' + date + '.000000.cdf')

        if not overwrite and _os.path.isfile(my_prod_name):
            if verbose:
                print('product %s already exists' % my_prod_name)
            continue

        # Resolve the size distribution from its companion product file.
        if self.sizedistribution == 'tdmaapssize':
            fname_others = _get_other_filenames(fname_aipfitrh,
                                                ['tdmaapssize'], all_files)
            if not fname_others:
                continue
            tdmaaps = _atm_arm.read_cdf(self.folder + fname_others['tdmaapssize']['fname'],
                                        data_quality=self.data_quality,
                                        verbose=verbose)
            sizedist = tdmaaps.size_distribution
        else:
            txt = ("Unknown sizedistribution type (%s). \n"
                   "Try one of these: 'tdmaapssize'") % (self.sizedistribution)
            raise ValueError(txt)

        # Resolve the refractive index: companion product or fixed float.
        if self.refractive_index == 'aosacsm':
            fname_others = _get_other_filenames(fname_aipfitrh,
                                                ['aosacsm'], all_files)
            if not fname_others:
                continue
            aosacsm = _atm_arm.read_cdf(self.folder + fname_others['aosacsm']['fname'],
                                        data_quality=self.data_quality,
                                        verbose=verbose)
            refractive_index = aosacsm.refractive_index
        elif isinstance(self.refractive_index, float):
            refractive_index = self.refractive_index
        else:
            # Fix: previously an unknown setting left refractive_index
            # undefined and crashed with a NameError below.
            raise ValueError('Unknown refractive_index setting (%s).'
                             % self.refractive_index)

        aipfitrh = _atm_arm.read_cdf(self.folder + fname_aipfitrh,
                                     data_quality=self.data_quality,
                                     verbose=verbose)
        kappa = self._calculate_one(aipfitrh, sizedist, refractive_index)
        kappa_list.append(kappa)
        if not self.test:
            kappa.save_netCDF(my_prod_name)
        print(my_prod_name)

    # Fix: previously an empty list fell through to kappa_list[0] and raised
    # a bare IndexError; fail with the same clear message as load_netCDF.
    if not kappa_list:
        raise ValueError('no file meets criteria')

    if len(kappa_list) > 1:
        kappa_cat = _timeseries.concat(kappa_list)
        self.result = kappa_cat.close_gaps(verbose=False)
    else:
        self.result = kappa_list[0]
def _calculate_all(self, overwrite, time_window=False, verbose=False):
    """Compute the scattering coefficient from every matching ``tdmaapssize`` file.

    The refractive index is resolved from ``self.refractive_index`` (either
    the companion ``aosacsm`` product or a plain float). Each result is saved
    to ``self.folder_out`` (unless in test mode) and the concatenated,
    gap-closed series is stored in ``self.result``.

    Parameters
    ----------
    overwrite : bool
        Recompute even when the output product file already exists.
    time_window : tuple or False, optional
        When truthy, only process files inside this window.
    verbose : bool, optional
        Print progress messages.

    Raises
    ------
    ValueError
        On an unknown refractive index setting, or when no file meets the
        criteria.
    """
    extcoeff_list = []
    all_files = _np.array(_os.listdir(self.folder))
    all_files_tdmaapssize = all_files[_np.char.find(all_files, 'tdmaapssize') > -1]

    test_done = False
    for fname_tdmaapssize in all_files_tdmaapssize:
        # Test mode: process only self.test_file, then stop at the next file.
        if self.test:
            if fname_tdmaapssize == self.test_file:
                test_done = True
            elif test_done:
                break
            else:
                continue

        if time_window:
            # Fix: pass time_window through; previously verbose was handed in
            # as the window argument (see the 3-arg call in load_netCDF).
            if not _atm_arm._is_in_time_window(fname_tdmaapssize, time_window, verbose):
                continue

        splitname = _splitup_filename(fname_tdmaapssize)
        site = splitname['site']
        date = splitname['date']

        # '.' would collide with the filename's field separators, so dots in
        # the settings are replaced with 'o'. 'patchy' quality keeps the
        # legacy short name without the quality suffix.
        if self.data_quality == 'patchy':
            name_addon = ('RI%s_%s_%snm' % (self.refractive_index,
                                            self.diameter_cutoff,
                                            self.wavelength)).replace('.', 'o')
        else:
            name_addon = ('RI%s_%s_%snm_%s' % (self.refractive_index,
                                               self.diameter_cutoff,
                                               self.wavelength,
                                               self.data_quality)).replace('.', 'o')
        my_prod_name = (self.folder_out + site + 'tdmaaps2scatteringcoeff_' +
                        name_addon + '.' + date + '.000000.cdf')

        if not overwrite and _os.path.isfile(my_prod_name):
            if verbose:
                print('product %s already exists' % my_prod_name)
            continue

        # Resolve the refractive index: companion product or fixed float.
        if self.refractive_index == 'aosacsm':
            fname_others = _get_other_filenames(fname_tdmaapssize,
                                                ['aosacsm'], all_files)
            if not fname_others:
                continue
            aosacsm = _atm_arm.read_cdf(self.folder + fname_others['aosacsm']['fname'],
                                        data_quality=self.data_quality,
                                        verbose=verbose)
            refractive_index = aosacsm.refractive_index
        elif isinstance(self.refractive_index, float):
            refractive_index = self.refractive_index
        else:
            # Fix: previously an unknown setting left refractive_index
            # undefined and crashed with a NameError below.
            raise ValueError('Unknown refractive_index setting (%s).'
                             % self.refractive_index)

        tdmaapssize = _atm_arm.read_cdf(self.folder + fname_tdmaapssize,
                                        data_quality=self.data_quality,
                                        verbose=verbose)
        extcoeff = self._calculate_one(tdmaapssize, refractive_index)
        extcoeff_list.append(extcoeff)
        if not self.test:
            extcoeff.save_netCDF(my_prod_name)
        print(my_prod_name)

    # Fix: previously an empty list fell through to extcoeff_list[0] and
    # raised a bare IndexError; fail with the same message as load_netCDF.
    if not extcoeff_list:
        raise ValueError('no file meets criteria')

    if len(extcoeff_list) > 1:
        extcoeff_cat = _timeseries.concat(extcoeff_list)
        self.result = extcoeff_cat.close_gaps(verbose=False)
    else:
        self.result = extcoeff_list[0]