def test_configer(self):
    from configer import Configer
    default_ps_fname = pkg_resources.resource_filename('tests', 'sample_settings.ini')

    ps1 = Configer(default_ps_fname=default_ps_fname)
    self.assertIsNone(ps1.status)

    ps2 = Configer(default_ps_fname=default_ps_fname, status=False)
    self.assertFalse(ps2.status)
    ps2.status = True
    self.assertTrue(ps2.status)
    ps2.new_status = True
    self.assertTrue(ps2.new_status)

    ps3 = Configer(default_ps_fname=default_ps_fname, **{'status': True})
    self.assertTrue(ps3.status)

    ps4 = Configer(default_ps_fname=default_ps_fname, status=False, **{'somethings': [1.0, 2.0]})
    self.assertFalse(ps4.status)

    ps5 = ps4 + ps3
    self.assertFalse(ps5.status)
    self.assertEqual(ps5.somethings, [1.0, 2.0])

    ps6 = ps4.overload(ps3)
    self.assertTrue(ps6.status)
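# The assertions above pin down the two merge behaviours: `+` keeps the
# left-hand operand's value on key clashes, while `overload` takes the
# argument's. A minimal sketch of that reading (the sample .ini path is
# reused from the test; nothing else is assumed):
from configer import Configer

a = Configer(default_ps_fname='tests/sample_settings.ini', status=False)
b = Configer(default_ps_fname='tests/sample_settings.ini', status=True)

merged = a + b              # left operand wins on clashes: merged.status is False
overloaded = a.overload(b)  # argument wins on clashes: overloaded.status is True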
def test_samples(self):
    '''Given the same network weights, the random pose generator must produce the same pose for a seed.'''
    from configer import Configer
    ps = Configer(default_ps_fname='../human_body_prior/train/vposer_smpl_defaults.ini')
    vposer = VPoser(num_neurons=ps.num_neurons, latentD=ps.latentD, data_shape=ps.data_shape)
    body_pose_rnd = vposer.sample_poses(num_poses=1, seed=100)
    body_pose_gt = np.load('samples/body_pose_rnd.npz')['data']
    self.assertAlmostEqual(np.square(c2c(body_pose_rnd) - body_pose_gt).sum(), 0.0)
def __init__(self, dataset_dir, data_fields=None):
    assert os.path.exists(dataset_dir), 'Could not find dataset directory: %s' % dataset_dir
    data_fields = data_fields or []  # avoid a mutable default argument

    self.ds = {}
    for data_fname in glob.glob(os.path.join(dataset_dir, '*.pt')):
        k = os.path.basename(data_fname).replace('.pt', '')
        if len(data_fields) != 0 and k not in data_fields:
            continue
        self.ds[k] = torch.load(data_fname).type(torch.float32)

    dataset_ps_fname = glob.glob(os.path.join(dataset_dir, '..', '*.ini'))
    if len(dataset_ps_fname):
        self.ps = Configer(default_ps_fname=dataset_ps_fname[0], dataset_dir=dataset_dir)
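# Hedged usage sketch for the loader above: the class name `VPoserDS` and the
# dataset path are illustrative assumptions, not taken from the source.
ds = VPoserDS(dataset_dir='vposer_dataset/train', data_fields=['pose_body'])
print({k: v.shape for k, v in ds.ds.items()})  # e.g. {'pose_body': torch.Size([N, 63])}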
def expid2model(expr_dir):
    from configer import Configer

    if not os.path.exists(expr_dir):
        raise ValueError('Could not find the experiment directory: %s' % expr_dir)

    best_model_fname = sorted(glob.glob(os.path.join(expr_dir, 'snapshots', '*.pt')),
                              key=os.path.getmtime)[-1]
    print('Found CCPCM Trained Model: %s' % best_model_fname)

    default_ps_fname = glob.glob(os.path.join(expr_dir, '*.ini'))[0]
    if not os.path.exists(default_ps_fname):
        raise ValueError('Could not find the appropriate ccpcm_settings: %s' % default_ps_fname)

    ps = Configer(default_ps_fname=default_ps_fname, work_dir=expr_dir,
                  best_model_fname=best_model_fname)

    return ps, best_model_fname
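# Hedged usage sketch: loading the snapshot that expid2model resolves. Only
# torch.load on the returned .pt path is assumed; the experiment directory
# name is a placeholder.
import torch

ps, best_model_fname = expid2model('experiments/SOME_EXPR_ID')
state = torch.load(best_model_fname)  # newest snapshot, picked by mtime above
print('work_dir = %s' % ps.work_dir)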
def run_vposer_trainer(ps):
    if not isinstance(ps, Configer):
        ps = Configer(default_ps_fname=ps)

    vp_trainer = VPoserTrainer(ps.work_dir, ps)

    ps.dump_settings(os.path.join(ps.work_dir, 'TR%02d_%s.ini' % (ps.try_num, ps.expr_code)))
    vp_trainer.logger(ps.expr_message)

    vp_trainer.perform_training()

    ps.dump_settings(os.path.join(ps.work_dir, 'TR%02d_%s.ini' % (ps.try_num, ps.expr_code)))
    vp_trainer.logger(ps.expr_message)

    test_loss_dict = vp_trainer.evaluate(split_name='test')
    vp_trainer.logger('Final loss on test set is %s' %
                      (' | '.join(['%s = %.2e' % (k, v) for k, v in test_loss_dict.items()])))
def __init__(self):
    self.distributions = ["stable", "testing", "unstable"]
    self.config = Configer().config
    self.monitors = []
    for dist in self.distributions:
        self.monitors.append(Fmonitor(self.config.workdir + dist, dist))
passwords = result["passwords"]
# Heuristic: if every entry has the same length, assume the column stores
# fixed-length hashes rather than plain-text passwords.
if all(len(i) == len(passwords[0]) for i in passwords):
    print(bcolors.CYAN +
          "Password column contains hashes, no plain passwords detected")
    self.db_file = file_
    return True
else:
    print(bcolors.FAIL +
          f"Password column in {file_} contains plain passwords!" +
          bcolors.OKGREEN)
    self.db_file = file_
    return False


if __name__ == "__main__":
    from configer import Configer

    settings = {
        "local": False,
        "page_limit": None,
        "vocabulary": False,
    }
    c = Configer("http://leafus.com.ua/", settings)
    log = Loginer(c)
    # log.bruteforce_attack(['https://id.bigmir.net/', 'http://leafus.com.ua/wp-admin', 'https://www.ukr.net/'])
    log.start_hack()
#
# If you use this code in a research publication please consider citing the following:
#
# Expressive Body Capture: 3D Hands, Face, and Body from a Single Image <https://arxiv.org/abs/1904.05866>
#
#
# Code Developed by:
# Nima Ghorbani <https://nghorbani.github.io/>
#
# 2018.01.02

from human_body_prior.train.vposer_smpl import run_vposer_trainer
from configer import Configer

expr_code = 'SOME_UNIQUE_ID'

args = {
    'expr_code': expr_code,
    'base_lr': 0.005,
    'dataset_dir': 'VPOSER_DATA_DIR_PRODUCED_BEFORE',
    'work_dir': 'BASE_WORKing_DIR/%s' % expr_code,  # later you will give this path to the vposer loader to load the model
}

ps = Configer(default_ps_fname='./vposer_smpl_defaults.ini', **args)  # this is the default configuration

# Make a message to describe the purpose of this experiment
expr_message = '\n[%s] %d H neurons, latentD=%d, batch_size=%d, kl_coef = %.1e\n' \
               % (ps.expr_code, ps.num_neurons, ps.latentD, ps.batch_size, ps.kl_coef)
expr_message += '\n'

ps.expr_message = expr_message
run_vposer_trainer(ps)
def __init__(self, help=False, version=False, rebuild=False, update=False,
             generate=False, default=False, md5=False, sha=False, kill=False,
             watch=False, time=10.0, path=None, config=None,
             exclude=set(), increase=set()):
    try:
        # Print version information
        if version:
            return jprint('version: 1.0.0.000 (20150621)')

        # Print help information
        if help:
            # TODO: add version number to help text
            _, width = (int(v) for v in
                        check_output('stty size', shell=True).decode('utf-8').split())
            return help_printer(_HELP, width, _INDENT)

        # Set working-path
        path = abspath(expanduser(expandvars(path or getcwd())))
        chdir(path)
        jprint('Sets work path:\n{}{}'.format(_INDENT, path))

        # Create cache directory if it does not exist
        cache_dir = join(path, _CACHE_DIR)
        makedirs(cache_dir, exist_ok=True)
        jprint('Sets cache directory:\n{}{}'.format(_INDENT, cache_dir))

        # Remove everything and return
        if kill:
            jprint('Removes every janitor data from work path')
            rmtree(cache_dir, ignore_errors=True)
            raise Janitor.FinishedWithoutError

        # Create sample config file if specified
        if generate:
            configer = Configer.from_default(config_dir_path=path)
            jprint('Generates configuration file:'
                   '\n{}{}'.format(_INDENT, configer.to_file()))
            raise Janitor.FinishedWithoutError

        # Set configuration
        try:
            if default:
                configer = Configer.from_default()
                jprint('Uses default configuration')
            elif config:
                config = expanduser(expandvars(config))
                configer = Configer(config_file_path=config)
                jprint('Uses manually specified configuration file:'
                       '\n{}{}'.format(_INDENT, config))
            else:
                configer = Configer(config_dir_path=path)
                jprint('Uses configuration file:'
                       '\n{}{}'.format(_INDENT, join(path, Configer.FILE_NAME)))
        except Configer.InvalidConfigFileFormat as e:
            jerror('Invalid JSON format in the configuration file')
            jerror(e)
            exit(EX_CONFIG)

        ## If use the `versioner` module
        #if configer['versioner']['use']:
        #    pass

        ## Check increase options
        #for option in increase:
        #    if option not in _SPEC_VALID['increase']:
        #        return jerror("{!r} is invalid for 'increase'".format(option))

        ## Increase version number
        #if increase:
        #    versioner = Versioner(path=cache_dir,
        #                          options=increase,
        #                          **configer['versioner'])
        #    versioner.to_file()
        #    return jprint('Current version is: {}'.format(versioner.version))

        # Import hashing algorithm
        if md5:
            hash_id = 0
            from hashlib import md5 as hasher
            jprint('Uses MD5 hashing algorithm')
        elif sha:
            hash_id = 1
            from hashlib import sha1 as hasher
            jprint('Uses SHA1 hashing algorithm')
        else:
            hash_id = 2
            try:
                from pyhashxx import Hashxx as hasher
                jprint('Uses xxHash hashing algorithm')
            except ImportError:
                jerror('cannot use default hashing: '
                       'pyhashxx is not installed')
                exit(EX_CONFIG)

        # Create checker
        checker = Checker(cache_dir, hash_id, hasher)

        # Rebuild or update
        if rebuild:
            jprint('Rebuilds cache files')
            checker.rebuild()

        # Collect all-excludes
        all_exclude = configer['exclude']
        for module in _MOD_USE_FILE:
            if configer[module]['use']:
                mod_exclude = configer[module]['exclude']
                for type in all_exclude:
                    try:
                        all_exclude[type] &= mod_exclude[type]
                    except KeyError:
                        pass

        # If watching, look for time
        if watch:
            try:
                time = float(time)
                jprint('Sets watch time interval to: {} seconds'.format(time))
            except ValueError:
                jerror('Invalid value for `time`: '
                       '{!r} is not a floating point number'.format(time))
                exit(EX_CONFIG)

        # Go through each module and pass the necessary infos to them
        first_cycle = True
        while True:
            if first_cycle:
                jprint('Walks through all files and folders in work path:')
            changed_files = []
            for root, dirs, files in walk(path):
                # If skip this folder and all subfolders for every module
                if root in all_exclude['folders']:
                    if first_cycle:
                        jskip(_INDENT, '<ALL>', join(root, '*'), sep='')
                    dirs.clear()
                    continue
                # If skip subfolder (iterate over a copy: removing items from
                # the list being iterated would silently skip entries)
                for dir in dirs[:]:
                    if dir in all_exclude['folders']:
                        if first_cycle:
                            jskip(_INDENT, '<ALL>', join(root, dir, '*'))
                        dirs.remove(dir)
                # Go through all files
                for file in files:
                    _, ext = splitext(file)
                    # If extension or the filepath is on the black-list
                    if (ext in all_exclude['extensions'] or
                            ext[1:] in all_exclude['extensions'] or
                            file in all_exclude['names']):
                        if first_cycle:
                            jskip(_INDENT, '<ALL>', join(root, file))
                        continue
                    # If file changed
                    file = join(root, file)
                    if checker.is_changed(file):
                        changed_files.append(file)
                    # If this is an update cycle only
                    if update:
                        continue
                    # Go through each module
                    for module in _MOD_USE_FILE:
                        mod_exclude = configer[module]['exclude']
                        # If file should be processed
                        try:
                            # If this folder is excluded for module
                            exclude = mod_exclude.get('folders', ())
                            if root in exclude or basename(root) in exclude:
                                raise Janitor.Skip
                            # If this extension is excluded for module
                            exclude = mod_exclude.get('extensions', ())
                            if ext in exclude or ext[1:] in exclude:
                                raise Janitor.Skip
                            # If this file is excluded for module
                            exclude = mod_exclude.get('names', ())
                            if file in exclude:
                                raise Janitor.Skip
                            # Use this file
                            juse(_INDENT, module, file)
                        # If file should be skipped
                        except Janitor.Skip:
                            jskip(_INDENT, module, file)
                            continue

            # If any file changed since last
            # check, update and save cache
            if changed_files:
                #for module in _MODULES:
                #    module.done()
                #    module.to_file()
                if update:
                    jprint('Updates cache files')
                checker.update(changed_files)
                checker.to_file()
                if watch and not first_cycle:
                    jprint('Watching for changes...')

            # If constantly watching
            if watch:
                if first_cycle:
                    jprint('Watching for changes...')
                    first_cycle = False
                update = False
                sleep(time)
            else:
                break

    # If no error occurred
    except KeyboardInterrupt:
        print()
    except Janitor.FinishedWithoutError:
        pass

    # Report to user
    return jprint('Finished')
params = {
    'n_neurons': 256,
    'batch_size': 64,  # each batch will be 120 frames
    'n_workers': 10,
    'cuda_id': 0,
    'use_multigpu': False,
    'reg_coef': 5e-4,
    'base_lr': 5e-3,
    'best_model_fname': None,
    'log_every_epoch': 2,
    'expr_code': expr_code,
    'work_dir': work_dir,
    'num_epochs': 100,
    'dataset_dir': '../data/dataset',
}

supercap_trainer = CCPCM_Trainer(work_dir, ps=Configer(default_ps_fname=default_ps_fname, **params))
ps = supercap_trainer.ps

ps.dump_settings(os.path.join(work_dir, 'TR%02d_%s' % (ps.try_num, os.path.basename(default_ps_fname))))

expr_message = '\n[%s] batch_size=%d\n' % (ps.expr_code, ps.batch_size)
expr_message += 'Using sigmoid instead of softmax.\n'
expr_message += '\n'
supercap_trainer.logger(expr_message)

supercap_trainer.perform_training()
ps.dump_settings(os.path.join(work_dir, 'TR%02d_%s' % (ps.try_num, os.path.basename(default_ps_fname))))
def prepare_vposer_datasets(vposer_dataset_dir, amass_splits, amass_dir, logger=None):
    if dataset_exists(vposer_dataset_dir):
        if logger is not None:
            logger('VPoser dataset already exists at {}'.format(vposer_dataset_dir))
        return

    ds_logger = log2file(makepath(vposer_dataset_dir, 'dataset.log', isfile=True),
                         write2file_only=True)
    logger = ds_logger if logger is None else logger_sequencer([ds_logger, logger])

    logger('Creating pytorch dataset at %s' % vposer_dataset_dir)
    logger('Using AMASS body parameters from {}'.format(amass_dir))

    shutil.copy2(__file__, vposer_dataset_dir)

    # class AMASS_ROW(pytables.IsDescription):
    #
    #     # gender = pytables.Int16Col(1)  # 1-character String
    #     root_orient = pytables.Float32Col(3)  # float (single-precision)
    #     pose_body = pytables.Float32Col(21 * 3)  # float (single-precision)
    #     # pose_hand = pytables.Float32Col(2 * 15 * 3)  # float (single-precision)
    #
    #     # betas = pytables.Float32Col(16)  # float (single-precision)
    #     # trans = pytables.Float32Col(3)  # float (single-precision)

    def fetch_from_amass(ds_names):
        keep_rate = 0.3

        npz_fnames = []
        for ds_name in ds_names:
            mosh_stageII_fnames = glob.glob(osp.join(amass_dir, ds_name, '*/*_poses.npz'))
            npz_fnames.extend(mosh_stageII_fnames)
            logger('Found {} sequences from {}.'.format(len(mosh_stageII_fnames), ds_name))

        for npz_fname in npz_fnames:
            cdata = np.load(npz_fname)
            N = len(cdata['poses'])

            # skip first and last frames to avoid initial standard poses, e.g. T pose
            cdata_ids = np.random.choice(list(range(int(0.1 * N), int(0.9 * N), 1)),
                                         int(keep_rate * 0.8 * N), replace=False)
            if len(cdata_ids) < 1:
                continue
            fullpose = cdata['poses'][cdata_ids].astype(np.float32)
            yield {'pose_body': fullpose[:, 3:66], 'root_orient': fullpose[:, :3]}

    for split_name, ds_names in amass_splits.items():
        if dataset_exists(vposer_dataset_dir, split_names=[split_name]):
            continue
        logger('Preparing VPoser data for split {}'.format(split_name))

        data_fields = {}
        for data in fetch_from_amass(ds_names):
            for k in data.keys():
                if k not in data_fields:
                    data_fields[k] = []
                data_fields[k].append(data[k])

        for k, v in data_fields.items():
            outpath = makepath(vposer_dataset_dir, split_name, '{}.pt'.format(k), isfile=True)
            v = np.concatenate(v)
            torch.save(torch.tensor(v), outpath)

        logger('{} datapoints dumped for split {}. ds_meta_pklpath: {}'.format(
            len(v), split_name, osp.join(vposer_dataset_dir, split_name)))

    Configer(**{
        'amass_splits': amass_splits.toDict(),
        'amass_dir': amass_dir,
    }).dump_settings(makepath(vposer_dataset_dir, 'settings.ini', isfile=True))

    logger('Dumped final pytorch dataset at %s' % vposer_dataset_dir)
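# Hedged invocation sketch for prepare_vposer_datasets. Since the function
# calls amass_splits.toDict(), the real argument is presumably an
# attribute-style dict; a plain dict is shown only to illustrate the expected
# split -> dataset-name mapping. All names and paths below are placeholders.
amass_splits = {
    'train': ['CMU', 'MPI_Limits'],
    'vald': ['HumanEva'],
    'test': ['SSM_synced'],
}
prepare_vposer_datasets(vposer_dataset_dir='VPOSER_DATA_DIR',
                        amass_splits=amass_splits,
                        amass_dir='AMASS_NPZ_DIR')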
def do_():
    rc('font', **{'family': 'serif'})
    rc('text', usetex=True)
    rc('text.latex', unicode=True)
    # Successive rc('text.latex', preamble=...) calls overwrite one another,
    # so the packages must be combined into a single preamble value.
    rc('text.latex', preamble=[r"\usepackage[T2A]{fontenc}",
                               r"\usepackage[utf8]{inputenc}",
                               r"\usepackage[russian]{babel}"])

    path = easygui.fileopenbox()
    p = Parser1("tb_timeP", path)
    DATA = p.parse()
    p.shift_tb(DATA)

    QW_freqs = [18, 21, 22, 27]
    freqs2show = [18, 19.2, 20.4, 21.6, 22.2, 22.4, 23.2, 24.4, 25.6, 26.8]

    Drawer().draw(DATA, freqs2show,
                  title=u"$T_{b}$ - uncalibrated (experimental data)",
                  xlabel=u"time (s)", ylabel="")

    config_path = easygui.fileopenbox()
    info = Configer(config_path)
    info.get_info()

    # ==================================================================
    theta = info.theta
    model_ = Model(Temperature=info.T, Pressure=info.P, Rho=info.rho)
    proc = Proc(DATA, model_, T_avg=info.Tavg, T_obl=info.Tobl,
                start_coordinate=info.start, stop_coordinate=info.stop,
                is_data_calibred=False)
    # ==================================================================

    plt.title("")
    Freqs, TauO, TauH2O = [], [], []
    for f in np.arange(5, 350, 0.5):
        Freqs.append(f)
        TauO.append(model_.tauO_theory(f, theta=51) / model_.dB2np)
        TauH2O.append(model_.tauRho_theory(f, theta=51) / model_.dB2np)
    plt.plot(Freqs, TauO, label=u"Oxygen", color="red")
    plt.plot(Freqs, TauH2O, label=u"Water vapor", color="blue")
    ax = plt.axes()
    ax.set_xscale("log", nonposx='clip')
    ax.set_yscale("log", nonposy='clip')
    plt.xlabel(u"GHz")
    plt.ylabel(u"dB")
    plt.legend(loc="best")
    plt.show()
    plt.close()

    plt.title(u"$T_{b}$ - model values (cloudless atmosphere)\n Temperature " +
              str(model_.T) + u" °C, pressure " + str(int(model_.P / 1.33322)) +
              u" mmHg, abs. humidity " + str(model_.rho) + u" $g/m^{3}$")
    FREQS, TBR, TBRnQ = [], [], []
    for freq in np.arange(18, 27.2, 0.2):
        T_br = model_.get_Tb_Q(freq, proc.T_avg, theta)
        Tbrnq = model_.get_Tb(freq, proc.T_avg, theta)
        FREQS.append(freq)
        TBR.append(T_br)
        TBRnQ.append(Tbrnq)
    Q = model_.get_Q()
    plt.plot(FREQS, TBRnQ, "b+", label=u'$T_{b}$ : $T_{avg}(1-e^{-tau})$')
    plt.plot(FREQS, TBR, "r+", label=u'$T_{b}$ : Q = ' + str(Q) + u' $g/m^{2}$')
    print("Q_model = ", Q)
    plt.xlabel(u'GHz')
    plt.ylabel(u'K')
    plt.legend()
    plt.show()
    plt.close()

    plt.title(u"Absorption by oxygen - model (cloudless atm.)")
    Freqs, Tbs = [], []
    for f in np.arange(18.0, 27.2, 0.2):
        Freqs.append(f)
        Tbs.append(model_.tauO_theory(f, theta))
    plt.plot(Freqs, Tbs, "r+")
    plt.xlabel(u"GHz")
    plt.ylabel(u"nepers")
    plt.show()
    plt.close()

    plt.title(u"Absorption by water vapor - model (cloudless atm.)")
    Freqs, Tbs = [], []
    for f in np.arange(18.0, 27.2, 0.2):
        Freqs.append(f)
        Tbs.append(model_.tauRho_theory(f, theta))
    plt.plot(Freqs, Tbs, "r+")
    plt.xlabel(u"GHz")
    plt.ylabel(u"nepers")
    plt.show()
    plt.close()

    plt.title("$k_{w}$")
    Freqs, K = [], []
    for f in np.arange(18.0, 27.2, 0.01):
        Freqs.append(f)
        K.append(model_.kw(f, -2))
    plt.plot(Freqs, K)
    plt.xlabel(u"GHz")
    plt.ylabel(u"Value")
    plt.show()
    plt.close()

    print(info.nclbeg, info.nclend)
    proc.set_T_t0_nocluds_interval(time_begin=info.nclbeg, time_end=info.nclend)
    proc.calibr_Tb(theta=theta)
    Drawer().draw(proc.data, freqs2show,
                  title=u"$T_{b}$ - brightness temperatures",
                  xlabel=u"time", ylabel=u"temperature [K]")

    # ==================================================================== #
    # ======================= Generate DataSet =========================== #
    # ==================================================================== #
    proc.generate_dataset("QW__f1_18GHz__f2_21GHz.txt", freq1=18, freq2=21, theta=theta)
    proc.generate_dataset("QW__f1_18GHz__f2_22GHz.txt", freq1=18, freq2=22, theta=theta)
    proc.generate_dataset("QW__f1_21GHz__f2_27GHz.txt", freq1=21, freq2=27, theta=theta)
    proc.generate_dataset("QW__f1_22GHz__f2_27GHz.txt", freq1=22, freq2=27, theta=theta)
    # ==================================================================== #

    QW = defaultdict(list)
    QW["18, 21"] = proc.get_resolved_QW(freq1=18, freq2=21, theta=theta)
    QW["18, 22"] = proc.get_resolved_QW(freq1=18, freq2=22, theta=theta)
    QW["21, 27"] = proc.get_resolved_QW(freq1=21, freq2=27, theta=theta)
    QW["22, 27"] = proc.get_resolved_QW(freq1=22, freq2=27, theta=theta)
    # QW["18, 27"] = proc.get_resolved_QW(freq1=18, freq2=27, theta=theta)
    # QW["18, 27"] = proc.get_QW(freq1=18, freq2=27, theta=theta)

    QW_err = defaultdict(list)
    QW_err["18, 21"] = proc.get_QW_errors(freq1=18, freq2=21, theta=theta, Tbr_error=1, Tavg_error=2)
    QW_err["18, 22"] = proc.get_QW_errors(freq1=18, freq2=22, theta=theta, Tbr_error=1, Tavg_error=2)
    QW_err["21, 27"] = proc.get_QW_errors(freq1=21, freq2=27, theta=theta, Tbr_error=1, Tavg_error=2)
    QW_err["22, 27"] = proc.get_QW_errors(freq1=22, freq2=27, theta=theta, Tbr_error=1, Tavg_error=2)
    # QW_err["18, 27"] = proc.get_QW_errors(freq1=18, freq2=27, theta=theta, Tbr_error=1, Tavg_error=2)

    plt.figure(1)
    plt.title(u"Q - total water vapor mass")
    k = 0
    for key in QW.keys():
        TIME, Q = [], []
        dTIME, dQ, dQ_err = [], [], []
        i = 0
        for time, q, _ in QW[key]:
            TIME.append(time)
            Q.append(q)
            if (i - k) % 100 == 0:
                dTIME.append(time)
                _, dq, _ = QW_err[key][i]
                dQ.append(q)
                dq = dq / 10  # !!!
                dQ_err.append(dq)
            i += 1
        plt.plot(TIME, Q, label=key + u" GHz")
        ecolor = ""
        if k == 0:
            ecolor = "blue"
        if k == 25:
            ecolor = "orange"
        if k == 50:
            ecolor = "green"
        if k == 75:
            ecolor = "red"
        if k == 100:
            ecolor = "purple"
        # plt.errorbar(dTIME, dQ, yerr=dQ_err, fmt='o', ecolor="black", mfc=ecolor, mec=ecolor, ms=2, mew=3, capsize=2, capthick=3, elinewidth=3)
        k += 25
    plt.xlabel(u"time")
    plt.ylabel("$g/cm^{2}$")
    axes = plt.gca()
    # axes.set_ylim([1., 2.5])
    plt.legend()

    plt.figure(2)
    plt.title(u"W - cloud liquid water content")
    k = 0
    for key in QW.keys():
        TIME, W = [], []
        dTIME, dW, dW_err = [], [], []
        i = 0
        for time, _, w in QW[key]:
            TIME.append(time)
            w = w * 10
            W.append(w)
            if (i - k) % 100 == 0:
                dTIME.append(time)
                _, _, dw = QW_err[key][i]
                dW.append(w)
                dW_err.append(dw)
            i += 1
        plt.plot(TIME, W, label=key + u" GHz")
        ecolor = ""
        if k == 0:
            ecolor = "blue"
        if k == 25:
            ecolor = "orange"
        if k == 50:
            ecolor = "green"
        if k == 75:
            ecolor = "red"
        if k == 100:
            ecolor = "purple"
        # plt.errorbar(dTIME, dW, yerr=dW_err, fmt='o', ecolor="black", mfc=ecolor, mec=ecolor, ms=2, mew=3, capsize=2, capthick=3, elinewidth=3)
        k += 25
    axes = plt.gca()
    # axes.set_ylim([-0.5, 1])
    plt.xlabel(u"time")
    plt.ylabel("$kg/m^{2}$")
    plt.legend()
    plt.show()

    QW_opt = proc.get_opt_QW(QW_freqs, theta)
    plt.figure(1)
    TIME, Q, W = [], [], []
    for time, q, w in QW_opt:
        TIME.append(time)
        Q.append(q)
        W.append(w * 10)
    plt.plot(TIME, Q, color="red", label="Q")
    plt.plot(TIME, W, color="blue", label="W")
    plt.xlabel(u"time")
    plt.ylabel("Q [$g/cm^{2}$], W [$kg/m^{2}$]")
    plt.legend()

    '''
    plt.figure(3)
    plt.title("dQ")
    for key in QW_err:
        TIME, dQ = [], []
        for time, dq, _ in QW_err[key]:
            TIME.append(time)
            dQ.append(dq)
        plt.plot(TIME, dQ, label=key + " GHz")
    plt.xlabel("time")
    plt.ylabel("$value$")
    plt.legend()

    plt.figure(4)
    plt.title("dW")
    for key in QW_err:
        TIME, dW = [], []
        for time, _, dw in QW_err[key]:
            TIME.append(time)
            dW.append(dw)
        plt.plot(TIME, dW, label=key + " GHz")
    plt.xlabel("time")
    plt.ylabel("$value$")
    plt.legend()
    '''
    plt.show()

    '''
    plt.title("")
    TIME, TAU1821, TAU1822, TAU2721, TAU2722, TAU1827 = [], [], [], [], [], []
    min_len = len(proc.data[18])
    for f in [18, 21, 22, 27]:
        if len(proc.data[f]) < min_len:
            min_len = len(proc.data[f])
    for i in range(min_len):
        t18, Tb18 = proc.data[18][i]
        t21, Tb21 = proc.data[21][i]
        t22, Tb22 = proc.data[22][i]
        t27, Tb27 = proc.data[27][i]
        tau18 = model_.tau_experiment(Tb18, proc.T_avg + 273, theta)
        tau21 = model_.tau_experiment(Tb21, proc.T_avg + 273, theta)
        tau22 = model_.tau_experiment(Tb22, proc.T_avg + 273, theta)
        tau27 = model_.tau_experiment(Tb27, proc.T_avg + 273, theta)
        tau18 = model_.tauRho_theory(18, theta)
        tau21 = model_.tauRho_theory(21, theta)
        tau22 = model_.tauRho_theory(22, theta)
        tau27 = model_.tauRho_theory(27, theta)
        theta = 0
        tau1821 = math.fabs(model_.krho(18, theta) * model_.kw(21, -2) -
                            model_.krho(21, theta) * model_.kw(18, -2))
        tau1822 = math.fabs(model_.krho(18, theta) * model_.kw(22, -2) -
                            model_.krho(22, theta) * model_.kw(18, -2))
        tau2721 = math.fabs(model_.krho(21, theta) * model_.kw(27, -2) -
                            model_.krho(27, theta) * model_.kw(21, -2))
        tau2722 = math.fabs(model_.krho(22, theta) * model_.kw(27, -2) -
                            model_.krho(27, theta) * model_.kw(22, -2))
        tau1827 = math.fabs(model_.krho(18, theta) * model_.kw(27, -2) -
                            model_.krho(27, theta) * model_.kw(18, -2))
        t = (t18 + t21 + t22 + t27) / 4
        TIME.append(t)
        TAU1821.append(tau1821)
        TAU1822.append(tau1822)
        TAU1827.append(tau1827)
        TAU2721.append(tau2721)
        TAU2722.append(tau2722)
    plt.plot(TIME, TAU1821, label="18 - 21")
    plt.plot(TIME, TAU1822, label="18 - 22")
    plt.plot(TIME, TAU2721, label="21 - 27")
    plt.plot(TIME, TAU2722, label="22 - 27")
    plt.plot(TIME, TAU1827, label="18 - 27")
    plt.legend()
    plt.show()
    print("18-21", TAU1821[0])
    print("18-22", TAU1822[0])
    print("21-27", TAU2721[0])
    print("22-27", TAU2722[0])
    print("18-27", TAU1827[0])
    '''

    print("QW avg's - no clouds interval")
    avgQ, avgW = 0, 0
    qavg, wavg = proc.get_QW_err_avg_noclouds(18, 21, theta, 1, 2)
    avgQ += math.fabs(qavg / 10)
    avgW += math.fabs(wavg)
    print("18-21:\tQ = ", qavg / 10, "\tW = ", wavg)
    qavg, wavg = proc.get_QW_err_avg_noclouds(18, 22, theta, 1, 2)
    avgQ += math.fabs(qavg / 10)
    avgW += math.fabs(wavg)
    print("18-22:\tQ = ", qavg / 10, "\tW = ", wavg)
    qavg, wavg = proc.get_QW_err_avg_noclouds(21, 27, theta, 1, 2)
    avgQ += math.fabs(qavg / 10)
    avgW += math.fabs(wavg)
    print("21-27:\tQ = ", qavg / 10, "\tW = ", wavg)
    qavg, wavg = proc.get_QW_err_avg_noclouds(22, 27, theta, 1, 2)
    avgQ += math.fabs(qavg / 10)
    avgW += math.fabs(wavg)
    print("22-27:\tQ = ", qavg / 10, "\tW = ", wavg)
    avgQ /= 4
    avgW /= 4
    print("AVG:\tQ = ", avgQ, "\tW = ", avgW)

    print("QW avg's - clouds interval")
    avgQ, avgW = 0, 0
    qavg, wavg = proc.get_QW_err_avg_clouds(18, 21, theta, 1, 2)
    avgQ += math.fabs(qavg / 10)
    avgW += math.fabs(wavg)
    print("18-21:\tQ = ", qavg / 10, "\tW = ", wavg)
    qavg, wavg = proc.get_QW_err_avg_clouds(18, 22, theta, 1, 2)
    avgQ += math.fabs(qavg / 10)
    avgW += math.fabs(wavg)
    print("18-22:\tQ = ", qavg / 10, "\tW = ", wavg)
    qavg, wavg = proc.get_QW_err_avg_clouds(21, 27, theta, 1, 2)
    avgQ += math.fabs(qavg / 10)
    avgW += math.fabs(wavg)
    print("21-27:\tQ = ", qavg / 10, "\tW = ", wavg)
    qavg, wavg = proc.get_QW_err_avg_clouds(22, 27, theta, 1, 2)
    avgQ += math.fabs(qavg / 10)
    avgW += math.fabs(wavg)
    print("22-27:\tQ = ", qavg / 10, "\tW = ", wavg)
    avgQ /= 4
    avgW /= 4
    print("AVG:\tQ = ", avgQ, "\tW = ", avgW)