def _calculate_all(self, overwrite, time_window=False, verbose=False):
    """Calculate the f(RH) product for every matching file set in ``self.folder``.

    Iterates over all ``tdmaapssize`` files, pairs each with its matching
    ``aosacsm`` and ``tdmahyg`` files, computes f(RH) via
    ``self._calculate_one`` and saves each result as a netCDF product file.
    The concatenation of all results (gaps closed) is stored in
    ``self.result``.

    Parameters
    ----------
    overwrite : bool
        If False, files whose product file already exists are skipped.
    time_window : bool, optional
        If truthy, only files accepted by ``_atm_arm._is_in_time_window``
        are processed.
    verbose : bool, optional
        Print progress information.
    """
    fofrh_list = []
    all_files = _np.array(_os.listdir(self.folder))
    all_files_tdmaapssize = all_files[_np.char.find(all_files, 'tdmaapssize') > -1]

    test_done = False
    for e, fname_tdmaapssize in enumerate(all_files_tdmaapssize):
        # In test mode only self.test_file is processed; stop right after it.
        if self.test:
            if fname_tdmaapssize == self.test_file:
                test_done = True
            else:
                if test_done:
                    break
                else:
                    continue

        if time_window:
            if not _atm_arm._is_in_time_window(fname_tdmaapssize, verbose):
                continue

        fname_others = _get_other_filenames(fname_tdmaapssize, ['aosacsm', 'tdmahyg'], all_files)
        if not fname_others:
            continue

        # e.g. '1um_hyg400_rh85v40'
        name_addon = '%s_hyg%s_rh%sv%s' % (self.diameter_cutoff, self.hygroscopicity_diameter, self.RH_wet, self.RH_dry)
        my_prod_name = (self.folder_out + '/' + fname_others['site'] + '_HT_tdmaapshyg_' +
                        name_addon + '.' + fname_others['date'] + '.000000.cdf')

        if not overwrite:
            if _os.path.isfile(my_prod_name):
                if verbose:
                    print('product %s already exists' % my_prod_name)
                continue

        # NOTE(review): verbose is force-disabled for the read/calculate phase
        # below, overriding the caller's argument -- looks like a leftover
        # debug line; confirm this is intended.
        verbose = False
        tdmaapssize = _atm_arm.read_cdf(self.folder + fname_tdmaapssize,
                                        data_quality=self.data_quality,
                                        verbose=verbose)
        aosacsm = _atm_arm.read_cdf(self.folder + fname_others['aosacsm']['fname'],
                                    data_quality=self.data_quality,
                                    verbose=verbose)
        tdmahyg = _atm_arm.read_cdf(self.folder + fname_others['tdmahyg']['fname'],
                                    data_quality=self.data_quality,
                                    verbose=verbose)

        fofrh = self._calculate_one(tdmaapssize, tdmahyg, aosacsm,
                                    diameter_cutoff=self.diameter_cutoff,
                                    hygroscopicity_diameter=self.hygroscopicity_diameter,
                                    RH_dry=self.RH_dry,
                                    RH_wet=self.RH_wet)
        fofrh_list.append(fofrh)
        fofrh.save_netCDF(my_prod_name)
        print(my_prod_name)

    fofrh_cat = _timeseries.concat(fofrh_list)
    self.result = fofrh_cat.close_gaps(verbose=False)
    return
def test_1twr10xC1():
    """Regression test: compare the 1twr10xC1 product against stored reference CSVs."""
    out = read_data.read_cdf(test_data_folder, data_product='1twr10xC1')['1twr10xC1']

    # (reference csv, column dtypes, product timeseries) -- checked in order:
    # relative humidity, temperature, vapor pressure.
    checks = (
        ('1twr10xC1_rh.csv',
         {'rh_25m': np.float32, 'rh_60m': np.float32},
         out.relative_humidity),
        ('1twr10xC1_temp.csv',
         {'temp_25m': np.float32, 'temp_60m': np.float32},
         out.temperature),
        ('1twr10xC1_p_vapor.csv',
         {'vap_pres_25m': np.float32, 'vap_pres_60m': np.float32},
         out.vapor_pressure),
    )
    for csv_name, dtypes, have in checks:
        soll = pd.read_csv(test_data_folder + csv_name, index_col=0, dtype=dtypes)
        assert np.all(have.data == soll)
def _calculate_all(self, overwrite, time_window=False, verbose=False):
    """Calculate the kappa product for every ``tdmahyg`` file in ``self.folder``.

    Each kappa result is saved as a netCDF product (unless in test mode).
    The concatenation of all results (gaps closed) -- or the single result
    if only one file was processed -- is stored in ``self.result``.

    Parameters
    ----------
    overwrite : bool
        If False, files whose product file already exists are skipped.
    time_window : bool, optional
        If truthy, only files accepted by ``_atm_arm._is_in_time_window``
        are processed.
    verbose : bool, optional
        Print progress information.
    """
    kappa_list = []
    all_files = _np.array(_os.listdir(self.folder))
    all_files_tdmahyg = all_files[_np.char.find(all_files, 'tdmahyg') > -1]

    test_done = False
    for e, fname_tdmahyg in enumerate(all_files_tdmahyg):
        # In test mode only self.test_file is processed; stop right after it.
        if self.test:
            if fname_tdmahyg == self.test_file:
                test_done = True
            else:
                if test_done:
                    break
                else:
                    continue

        if time_window:
            if not _atm_arm._is_in_time_window(fname_tdmahyg, verbose):
                continue

        splitname = _splitup_filename(fname_tdmahyg)
        site = splitname['site']
        date = splitname['date']

        # '.' in parameter values is replaced so it cannot be mistaken for a
        # field separator in the ARM-style product file name.
        name_addon = ('%s_d%s_%s' % (self.method, self.diameter, self.data_quality)).replace('.', 'o')
        my_prod_name = self.folder_out + site + 'tdmahyg2kappa_' + name_addon + '.' + date + '.000000.cdf'

        if not overwrite:
            if _os.path.isfile(my_prod_name):
                if verbose:
                    print('product %s already exists' % my_prod_name)
                continue

        tdmahyg = _atm_arm.read_cdf(self.folder + fname_tdmahyg,
                                    data_quality=self.data_quality,
                                    verbose=verbose)
        kappa = self._calculate_one(tdmahyg)
        kappa_list.append(kappa)
        if not self.test:
            kappa.save_netCDF(my_prod_name)
        print(my_prod_name)

    if len(kappa_list) > 1:
        # renamed from the misleading copy-paste name 'extcoeff_cat'
        kappa_cat = _timeseries.concat(kappa_list)
        self.result = kappa_cat.close_gaps(verbose=False)
    else:
        self.result = kappa_list[0]
def test_1twr10xC1():
    """Check rh, temperature and vapor pressure of 1twr10xC1 against reference CSVs."""
    data = read_data.read_cdf(test_data_folder, data_product='1twr10xC1')
    data = data['1twr10xC1']

    # product attribute, reference csv file, csv columns (all float32)
    cases = [
        ('relative_humidity', '1twr10xC1_rh.csv', ('rh_25m', 'rh_60m')),
        ('temperature', '1twr10xC1_temp.csv', ('temp_25m', 'temp_60m')),
        ('vapor_pressure', '1twr10xC1_p_vapor.csv', ('vap_pres_25m', 'vap_pres_60m')),
    ]
    for attr, fname, columns in cases:
        reference = pd.read_csv(test_data_folder + fname,
                                index_col=0,
                                dtype={col: np.float32 for col in columns})
        assert np.all(getattr(data, attr).data == reference)
def _calculate_all(self, overwrite, time_window=False, verbose=False):
    """Calculate the kappa product for every ``aipfitrh1ogrenC1`` file in ``self.folder``.

    For each file the matching size distribution (and, if configured, the
    ``aosacsm`` refractive index) is read, kappa is computed via
    ``self._calculate_one`` and saved as a netCDF product (unless in test
    mode). The concatenation of all results (gaps closed) -- or the single
    result if only one file was processed -- is stored in ``self.result``.

    Parameters
    ----------
    overwrite : bool
        If False, files whose product file already exists are skipped.
    time_window : bool, optional
        If truthy, only files accepted by ``_atm_arm._is_in_time_window``
        are processed.
    verbose : bool, optional
        Print progress information.

    Raises
    ------
    ValueError
        If ``self.f_of_rh_product``, ``self.sizedistribution`` or
        ``self.refractive_index`` holds an unsupported value.
    """
    if self.f_of_rh_product == 'f_RH_scatt_2p_85_40':
        RH_for_name = 'RH_85_40'
    else:
        # Fail early with a clear message; previously an unsupported product
        # caused a NameError when RH_for_name was used further down.
        raise ValueError('Unknown f_of_rh_product (%s).' % self.f_of_rh_product)

    kappa_list = []
    all_files = _np.array(_os.listdir(self.folder))
    all_files_aipfitrh = all_files[_np.char.find(all_files, 'aipfitrh1ogrenC1') > -1]

    test_done = False
    for e, fname_aipfitrh in enumerate(all_files_aipfitrh):
        # In test mode only self.test_file is processed; stop right after it.
        if self.test:
            if fname_aipfitrh == self.test_file:
                test_done = True
            else:
                if test_done:
                    break
                else:
                    continue

        if time_window:
            if not _atm_arm._is_in_time_window(fname_aipfitrh, verbose):
                continue

        splitname = _splitup_filename(fname_aipfitrh)
        site = splitname['site']
        date = splitname['date']

        # '.' in parameter values is replaced so it cannot be mistaken for a
        # field separator in the ARM-style product file name.
        name_addon = ('%s_%s_RI%s_%s_%snm_%s' % (RH_for_name, self.sizedistribution,
                                                 self.refractive_index, self.diameter_cutoff,
                                                 self.wavelength, self.data_quality)).replace('.', 'o')
        my_prod_name = self.folder_out + site + 'aipfitrh2kappa_' + name_addon + '.' + date + '.000000.cdf'

        if not overwrite:
            if _os.path.isfile(my_prod_name):
                if verbose:
                    print('product %s already exists' % my_prod_name)
                continue

        if self.sizedistribution == 'tdmaapssize':
            fname_others = _get_other_filenames(fname_aipfitrh, ['tdmaapssize'], all_files)
            if not fname_others:
                continue
            else:
                tdmaaps = _atm_arm.read_cdf(self.folder + fname_others['tdmaapssize']['fname'],
                                            data_quality=self.data_quality,
                                            verbose=verbose)
                sizedist = tdmaaps.size_distribution
        else:
            txt = "Unknown sizedistribution type (%s). Try one of these: 'tdmaapssize'" % (self.sizedistribution)
            raise ValueError(txt)

        if self.refractive_index == 'aosacsm':
            fname_others = _get_other_filenames(fname_aipfitrh, ['aosacsm'], all_files)
            if not fname_others:
                continue
            else:
                aosacsm = _atm_arm.read_cdf(self.folder + fname_others['aosacsm']['fname'],
                                            data_quality=self.data_quality,
                                            verbose=verbose)
                refractive_index = aosacsm.refractive_index
        elif isinstance(self.refractive_index, float):
            refractive_index = self.refractive_index
        else:
            # Previously an unsupported value caused a NameError below.
            raise ValueError("refractive_index must be 'aosacsm' or a float, got %s"
                             % repr(self.refractive_index))

        aipfitrh = _atm_arm.read_cdf(self.folder + fname_aipfitrh,
                                     data_quality=self.data_quality,
                                     verbose=verbose)

        kappa = self._calculate_one(aipfitrh, sizedist, refractive_index)
        kappa_list.append(kappa)
        if not self.test:
            kappa.save_netCDF(my_prod_name)
        print(my_prod_name)

    if len(kappa_list) > 1:
        # renamed from the misleading copy-paste name 'extcoeff_cat'
        kappa_cat = _timeseries.concat(kappa_list)
        self.result = kappa_cat.close_gaps(verbose=False)
    else:
        self.result = kappa_list[0]
def _calculate_all(self, overwrite, time_window=False, verbose=False):
    """Calculate the scattering-coefficient product for every ``tdmaapssize`` file.

    For each file the configured refractive index (fixed float or read from
    the matching ``aosacsm`` file) is obtained, the scattering coefficient is
    computed via ``self._calculate_one`` and saved as a netCDF product
    (unless in test mode). The concatenation of all results (gaps closed) --
    or the single result if only one file was processed -- is stored in
    ``self.result``.

    Parameters
    ----------
    overwrite : bool
        If False, files whose product file already exists are skipped.
    time_window : bool, optional
        If truthy, only files accepted by ``_atm_arm._is_in_time_window``
        are processed.
    verbose : bool, optional
        Print progress information.

    Raises
    ------
    ValueError
        If ``self.refractive_index`` is neither 'aosacsm' nor a float.
    """
    extcoeff_list = []
    all_files = _np.array(_os.listdir(self.folder))
    all_files_tdmaapssize = all_files[_np.char.find(all_files, 'tdmaapssize') > -1]

    test_done = False
    for e, fname_tdmaapssize in enumerate(all_files_tdmaapssize):
        # In test mode only self.test_file is processed; stop right after it.
        if self.test:
            if fname_tdmaapssize == self.test_file:
                test_done = True
            else:
                if test_done:
                    break
                else:
                    continue

        if time_window:
            if not _atm_arm._is_in_time_window(fname_tdmaapssize, verbose):
                continue

        splitname = _splitup_filename(fname_tdmaapssize)
        site = splitname['site']
        date = splitname['date']

        # '.' in parameter values is replaced so it cannot be mistaken for a
        # field separator in the ARM-style product file name.
        if self.data_quality == 'patchy':
            name_addon = ('RI%s_%s_%snm' % (self.refractive_index, self.diameter_cutoff,
                                            self.wavelength)).replace('.', 'o')
        else:
            name_addon = ('RI%s_%s_%snm_%s' % (self.refractive_index, self.diameter_cutoff,
                                               self.wavelength, self.data_quality)).replace('.', 'o')
        my_prod_name = self.folder_out + site + 'tdmaaps2scatteringcoeff_' + name_addon + '.' + date + '.000000.cdf'

        if not overwrite:
            if _os.path.isfile(my_prod_name):
                if verbose:
                    print('product %s already exists' % my_prod_name)
                continue

        if self.refractive_index == 'aosacsm':
            fname_others = _get_other_filenames(fname_tdmaapssize, ['aosacsm'], all_files)
            if not fname_others:
                continue
            else:
                aosacsm = _atm_arm.read_cdf(self.folder + fname_others['aosacsm']['fname'],
                                            data_quality=self.data_quality,
                                            verbose=verbose)
                refractive_index = aosacsm.refractive_index
        elif isinstance(self.refractive_index, float):
            refractive_index = self.refractive_index
        else:
            # Previously an unsupported value caused a NameError below.
            raise ValueError("refractive_index must be 'aosacsm' or a float, got %s"
                             % repr(self.refractive_index))

        tdmaapssize = _atm_arm.read_cdf(self.folder + fname_tdmaapssize,
                                        data_quality=self.data_quality,
                                        verbose=verbose)

        extcoeff = self._calculate_one(tdmaapssize, refractive_index)
        extcoeff_list.append(extcoeff)
        if not self.test:
            extcoeff.save_netCDF(my_prod_name)
        print(my_prod_name)

    if len(extcoeff_list) > 1:
        extcoeff_cat = _timeseries.concat(extcoeff_list)
        self.result = extcoeff_cat.close_gaps(verbose=False)
    else:
        self.result = extcoeff_list[0]