def get_config(author_name='oliver2213', app_name='mqn'):
    confname = app_name + ".conf"
    # dir is a path to files included with the application, and should work
    # whether or not the app is bundled
    if getattr(sys, 'frozen', False):
        # we are frozen
        dir = sys._MEIPASS
    else:
        dir = os.path.dirname(os.path.abspath(__file__))
    # check the working directory for a config first
    cwd_conf = os.path.join(os.getcwd(), confname)
    if os.path.exists(cwd_conf) and os.path.isfile(cwd_conf):
        with open(cwd_conf, 'r') as f:
            config = toml.load(f)
        return config, cwd_conf  # return the configuration in the current working directory
    # then check the user's config directory
    ucd = appdirs.AppDirs(appname=app_name, appauthor=author_name).user_config_dir
    ucd_conf = os.path.join(ucd, confname)
    if os.path.exists(ucd_conf) and os.path.isfile(ucd_conf):
        with open(ucd_conf, 'r') as f:
            config = toml.load(f)
        return config, ucd_conf
    # then check the program directory (if running from source, this will be the
    # directory containing this program; if bundled, it will be the directory of
    # the bundle or the temp directory for a one-file bundle)
    dir_conf = os.path.join(dir, confname)
    if os.path.exists(dir_conf) and os.path.isfile(dir_conf):
        with open(dir_conf, 'r') as f:
            config = toml.load(f)
        return config, dir_conf  # from app directory
    # if none of that worked
    return None, None  # no config found
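# Hedged usage sketch for get_config above (requires toml and appdirs at module
# level). The search order is: current working directory, then the user config
# directory, then the application directory; (None, None) means no mqn.conf was
# found anywhere.
config, config_path = get_config(author_name='oliver2213', app_name='mqn')
if config is None:
    print('no config found, falling back to defaults')
else:
    print('loaded config from', config_path)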
def get_results(self, cfg_file):
    # Ugly hack: replace analyzers configuration with user's analyzer
    # configuration
    with open('config.toml') as user_cfg:
        user_conf = pytoml.load(user_cfg)
    with open(cfg_file) as data_cfg:
        data_conf = pytoml.load(data_cfg)
    data_conf['analyzers'] = user_conf['analyzers']
    with tempfile.NamedTemporaryFile(mode='w+') as final_cfg:
        pytoml.dump(data_conf, final_cfg)
        final_cfg.flush()
        inv_idx = metapy.index.make_inverted_index(final_cfg.name)
        fwd_idx = metapy.index.make_forward_index(final_cfg.name)
    dset = metapy.classify.MulticlassDataset(fwd_idx)
    training_bounds = data_conf['training-set']
    testing_bounds = data_conf['testing-set']
    train_view = dset[training_bounds[0]:training_bounds[1]]
    test_view = dset[testing_bounds[0]:testing_bounds[1]]
    classifier = make_classifier(train_view, inv_idx, fwd_idx)
    return [classifier.classify(inst.weights) for inst in test_view]
def load_config(path, defaults_path='simulation.defaults.toml'):
    with open(defaults_path) as f:
        defaults = toml.load(f)['collimator']
    with open(path) as f:
        config = toml.load(f)
    # defaults fill in missing keys; values from the user's config win
    merged = dict(defaults)
    merged.update(config)
    return merged
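# Sanity-check sketch for the merge order in load_config above (the path is
# hypothetical): every key from the [collimator] defaults is present, and any
# key the user's file also defines keeps the user's value.
config = load_config('my-simulation.toml')
print(config)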
def _load_configs(path) -> tuple:
    # returns (confs, gui_confs), both keyed by each config's short name
    confs = {}
    gui_confs = {}
    for item in os.listdir(path):
        if os.path.isdir(os.path.join(path, item)):
            name_short = ""
            try:
                with open(os.path.join(path, item, "config.toml")) as conffile:
                    config = toml.load(conffile)
                    name_short = config["name_short"]
                    confs[name_short] = config
            except IOError:
                continue
            try:
                with open(os.path.join(path, item, "gui_config.toml")) as conffile:
                    config = toml.load(conffile)
                    gui_confs[name_short] = config
            except IOError:
                print("Failed to open GUI conf file.")
                continue
    return confs, gui_confs
def _main():
    ap = argparse.ArgumentParser()
    ap.add_argument('-d', '--dir', action='append')
    ap.add_argument('testcase', nargs='*')
    args = ap.parse_args()
    if not args.dir:
        args.dir = [os.path.join(os.path.split(__file__)[0], 'toml-test/tests')]
    succeeded = []
    failed = []
    for path in args.dir:
        if not os.path.isdir(path):
            print('error: not a dir: {0}'.format(path))
            return 2
        for top, dirnames, fnames in os.walk(path):
            for fname in fnames:
                if not fname.endswith('.toml'):
                    continue
                if args.testcase and not any(arg in fname for arg in args.testcase):
                    continue
                parse_error = None
                try:
                    with open(os.path.join(top, fname), 'rb') as fin:
                        parsed = toml.load(fin)
                except toml.TomlError:
                    parsed = None
                    parse_error = sys.exc_info()
                else:
                    dumped = toml.dumps(parsed)
                    parsed2 = toml.loads(dumped)
                    if parsed != parsed2:
                        # record the round-trip mismatch as a full 4-tuple so the
                        # report loop below can unpack it
                        failed.append((fname, parsed, parsed2, None))
                        continue
                    with open(os.path.join(top, fname), 'rb') as fin:
                        parsed = toml.load(fin, translate=_testbench_literal)
                try:
                    with io.open(os.path.join(top, fname[:-5] + '.json'), 'rt', encoding='utf-8') as fin:
                        bench = json.load(fin)
                except IOError:
                    bench = None
                if parsed != adjust_bench(bench):
                    failed.append((fname, parsed, bench, parse_error))
                else:
                    succeeded.append(fname)
    for f, parsed, bench, e in failed:
        print('failed: {}\n{}\n{}'.format(f, json.dumps(parsed, indent=4), json.dumps(bench, indent=4)))
        if e:
            traceback.print_exception(*e)
    print('succeeded: {0}'.format(len(succeeded)))
    return 1 if failed or not succeeded else 0
async def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='screenshots.toml')
    # parser.add_argument('scad')
    args = parser.parse_args()
    with open(args.config) as fp:
        shots = toml.load(fp)['shots']
    with open('simulations.toml') as fp:
        simulations = toml.load(fp)['simulations']
    for sim in simulations:
        d = os.path.abspath(os.path.join('reports', sim['name'].replace(' - ', '-').replace(' ', '-')))
        scad_path = os.path.join(d, 'collimator.scad')
        await make_screenshots(shots, scad_path)
def parse_settings_toml(config_filepath, defaults=None):
    """
    Grabs key/value pairs from a TOML format config file, returns an
    argparse options-style data container object with the config.

    A dictionary of defaults can be provided; any option specified in the
    config file will override it. (The defaults dictionary can be generated
    by passing a pre-configured argparse.ArgumentParser into
    get_parser_defaults.)

    :param config_filepath: The path to a TOML config file.
    :type config_filepath: basestring
    :param defaults: A dictionary of defaults
    :type defaults: dict
    :return: A data container object of config options.
    :rtype: object
    """
    if defaults is None:
        defaults = dict()
    else:
        # Copy so we don't mutate the caller's dictionary
        defaults = dict(defaults)
    with open(config_filepath, 'r') as f:
        config = toml.load(f)
    config = recursive_dict_merge(defaults, config)
    config = AttrDict(config)
    return config
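# Hedged usage sketch for parse_settings_toml above: settings.toml is a
# hypothetical path and the defaults dict stands in for get_parser_defaults()
# output. File values override the defaults; untouched defaults survive the merge.
opts = parse_settings_toml('settings.toml', defaults={'verbose': False, 'workers': 4})
print(opts.workers)  # AttrDict presumably exposes config keys as attributes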
def process_files(filenames):
    features = {}
    errors = []
    for filename in filenames:
        try:
            with open(filename, "r") as f:
                feature_data = pytoml.load(f)
            voluptuous.humanize.validate_with_humanized_errors(feature_data, feature_schema)
            for feature_id, feature in feature_data.items():
                feature["id"] = feature_id
                features[feature_id] = expand_feature(feature)
        except (voluptuous.error.Error, IOError, FeatureGateException) as e:
            # Wrap errors in enough information to know which file they came from
            errors.append(FeatureGateException(e, filename))
        except pytoml.TomlError as e:
            # Toml errors have file information already
            errors.append(e)
    if errors:
        raise ExceptionGroup(errors)
    return features
def load_rule_files(verbose=True, paths=None):
    """Load the rule TOML files, but without parsing the EQL query portion."""
    file_lookup = {}  # type: dict[str, dict]
    if verbose:
        print("Loading rules from {}".format(RULES_DIR))
    if paths is None:
        paths = sorted(glob.glob(os.path.join(RULES_DIR, '**', '*.toml'), recursive=True))
    for rule_file in paths:
        try:
            # use pytoml instead of toml because of annoying bugs
            # https://github.com/uiri/toml/issues/152
            # might also be worth looking at https://github.com/sdispater/tomlkit
            with io.open(rule_file, "r", encoding="utf-8") as f:
                file_lookup[rule_file] = pytoml.load(f)
        except Exception:
            print(u"Error loading {}".format(rule_file))
            raise
    if verbose:
        print("Loaded {} rules".format(len(file_lookup)))
    return file_lookup
def _load_local_version_data() -> dict:
    if not os.path.isfile(path_finder.get_local_version_data_path()):
        _create_dummy_version_data_file()
    with open(path_finder.get_local_version_data_path()) as local_version_data_file:
        data_as_dict = toml.load(local_version_data_file)
    return data_as_dict
def get_data(hostname):
    # parse config
    file_name = "/usr/local/dstat/config.toml"
    with open(file_name, 'rb') as fin:
        obj = toml.load(fin)
    print(obj)
    # connect to database
    try:
        database = obj['database']
        username = database['user']
        password = database['password']
        host = database['host']
        conn_str = "dbname='stats' user=" + username + " host=" + host + " password=" + password
        # NOTE: the connect call was redacted in the source; psycopg2 is an
        # assumption here, inferred from the libpq-style connection string.
        conn = psycopg2.connect(conn_str)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
    # get cursor
    cur = conn.cursor()
    # get and print rows
    now = datetime.datetime.now()
    d = now.strftime("%Y-%m-%d")
    types = ["cpu", "memory", "disk"]
    data = {}
    for t in types:
        query = ("SELECT * from metrics "
                 "WHERE datetime::date >= to_date('{}', 'YYYY-MM-DD') "
                 "and datetime::date <= to_date('{}', 'YYYY-MM-DD') "
                 "AND hostname = '{}' and type = '{}'").format(d, d, hostname, t)
        cur.execute(query)
        rows = cur.fetchall()
        data[t] = rows
    return data
def read_conf(folder: Path) -> Dict[str, Any]:
    conf_path = folder / CONF_NAME
    if not conf_path.exists():
        return {}
    with open(conf_path, "rb") as fd:
        return toml.load(fd)[_SECTION]
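# Minimal usage sketch for read_conf above. CONF_NAME and _SECTION are
# module-level constants not shown in the snippet; the values here are
# hypothetical stand-ins (e.g. CONF_NAME = "tool.toml", _SECTION = "tool").
from pathlib import Path

conf = read_conf(Path('/tmp/some-project'))  # {} when the conf file is absent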
def persistent_loop(f, path, initialstate=()):
    def load():
        from pickle import load as _load
        # use fp for the handle so the callback parameter f isn't shadowed
        with open(path, 'rb') as fp:
            return _load(fp)

    def save(st):
        from pickle import dump
        from os import rename
        tmppath = path + '.tmp'
        with open(tmppath, 'wb') as fp:
            dump(st, fp)
        rename(tmppath, path)  # atomic replace of the old state file

    if not exists(path):
        save(initialstate)
    while True:
        state = load()
        if isinstance(state, EndLoop):
            return state.value
        state = f(*state)
        # Support returning just EndLoop (without instantiation)
        if state is EndLoop:
            state = EndLoop(None)
        save(state)
def main():
    '''Run test'''
    config_filename = "load.toml"
    if len(sys.argv) > 1:
        config_filename = sys.argv[1]
    filename_components = config_filename.split('.')
    if len(filename_components) > 1:
        extension = filename_components[-1]
        with open(config_filename, 'rb') as fin:
            if extension == 'toml':
                config = pytoml.load(fin)
            elif extension in ['yaml', 'yml']:
                config = yaml.safe_load(fin)  # safe_load: config files need no arbitrary tags
            elif extension == 'json':
                config = json.load(fin)
            else:
                print("Config file has unsupported format: %s" % extension)
                return 1  # bail out instead of falling through with config undefined
    else:
        print("Config file should have one of the following extensions:"
              " .toml, .json, .yaml")
        return 1
    init_logging()
    lt = LoadTest(config)
    lt.run_test()
def parseTomlModule(modulefile):
    modules = list()
    with open(modulefile, 'r') as f:
        content = toml.load(f)
    for name in content['module']:
        fields = content['module'][name]
        mod = Module(name, modulefile)
        for field in fields:
            if field.endswith('files'):
                mod.files[field.upper()] = set(fields[field])
            elif field == 'copy':
                mod.copyfiles = set(fields['copy'])
            elif field == 'requires':
                mod.requires = set(fields['requires'])
            elif field == 'provides':
                mod.provides = set(fields['provides'])
            elif field == 'modules':
                mod.modules = set(fields['modules'])
            elif field == 'destdir':
                mod.destdir = fields['destdir']
            elif field == 'makefile_head':
                mod.makefile_head = fields['makefile_head']
            elif field == 'makefile_body':
                mod.makefile_body = fields['makefile_body']
            else:
                mod.extra[field] = fields[field]
        # mod.provides.add(name)
        # should not require specific modules, just include them directly!
        mod.init()
        modules.append(mod)
    return modules
def from_serialized(cls, config_path, result_to_extract=None):
    """`result_to_extract` is a total lazy hack for the training tool."""
    import pytoml
    with Path(config_path).open('rt') as f:
        info = pytoml.load(f)
    inst = cls()
    inst.pmaps = []
    inst.rmaps = []
    extracted_info = None
    for subinfo in info['params']:
        inst.pmaps.append(mapping_from_dict(subinfo))
    for i, subinfo in enumerate(info['results']):
        if result_to_extract is not None and subinfo['name'] == result_to_extract:
            extracted_info = subinfo
            extracted_info['_index'] = i
        inst.rmaps.append(mapping_from_dict(subinfo))
    inst.n_params = len(inst.pmaps)
    inst.n_results = len(inst.rmaps)
    if result_to_extract is not None:
        return inst, extracted_info
    return inst
def load_casefile(path):
    result = {}
    with open(path) as casefile:
        cases = pytoml.load(casefile)
    for case_name, case_data in cases.items():
        # print(case_name)
        # print(case_data)
        # Each case looks like:
        #   11clincomplete_body {
        #       'data': 'POST / HTTP/1.1\r\nContent-Length: 5\r\n\r\nI',
        #       'method': 'POST', 'path': '/', 'version': '1.1',
        #       'headers': {'Content-Length': '5'},
        #       'error': 'incomplete_body'
        #   }
        case_data['data'] = case_data['data'].encode('utf-8')
        case_data['body'] = case_data['body'].encode('utf-8') if 'body' in case_data else None
        case = HttpTestCase._make(case_data.get(f) for f in testcase_fields.split(','))
        result[case_name] = case
    return result
def compare_formatted(self, data, callback=None, kwargs=None):
    """Compare formatted vs expected."""
    try:
        toml_write(copy.deepcopy(data), tmp_file)
        with open(tmp_file, 'r') as f:
            formatted_contents = pytoml.load(f)
        # callbacks such as nested normalize leave in line breaks, so this must be manually done
        query = data.get('rule', {}).get('query')
        if query:
            data['rule']['query'] = query.strip()
        original = json.dumps(copy.deepcopy(data), sort_keys=True)
        if callback:
            kwargs = kwargs or {}
            formatted_contents = callback(formatted_contents, **kwargs)
        # callbacks such as nested normalize leave in line breaks, so this must be manually done
        query = formatted_contents.get('rule', {}).get('query')
        if query:
            formatted_contents['rule']['query'] = query.strip()
        formatted = json.dumps(formatted_contents, sort_keys=True)
        self.assertEqual(original, formatted, 'Formatting may be modifying contents')
    finally:
        os.remove(tmp_file)
def iter_code_lessons(path='.', unit_glob='unit-*', grep=None, units=None,
                      lesson_glob='lesson-*', rmotr_toml_name='.rmotr'):
    grep = grep or []
    p = Path(path)
    for unit in p.glob(unit_glob):
        unit_number = _get_unit_number(unit.name)
        if units and unit_number not in units:
            continue
        for lesson_path in unit.glob(lesson_glob):
            rmotr_toml = lesson_path / rmotr_toml_name
            if not rmotr_toml.exists():
                raise InvalidLessonException(
                    ("Lessons must contain a .rmotr file. "
                     "Lesson {} doesn't contain any").format(lesson_path.name))
            with rmotr_toml.open() as rmotr_f:
                lesson_data = toml.load(rmotr_f)
            lesson_name = lesson_data['name']
            if lesson_data['type'] == 'assignment':
                add_lesson = True
                for keyword in grep:
                    if keyword not in lesson_name.lower():
                        add_lesson = False
                        break
                if not add_lesson:
                    continue
                logger.info("Added lesson", extra={'lesson_name': lesson_name})
                yield Lesson(name=lesson_name,
                             uuid=lesson_data['uuid'],
                             path=lesson_path,
                             _lesson_data=lesson_data)
def update_agents(self):
    """Update the agents list with the new data."""
    LOG.debug('updating agents')
    agents = []
    for path in self.get_file_paths():
        try:
            with open(path, 'rb') as f:
                data = pytoml.load(f)  # named data, not toml, to avoid shadowing the module name
            escalation = data['escalation'][0]
            agent = Agent(
                name="{} {}".format(data['first_name'], data['last_name']),
                telegram=data['telegram'].strip("@"),
                backup=escalation['backup'],
                backup_time=escalation['backup_time'],
                escalation_path=escalation['escalation_path'],
                working_period=escalation['period'],
                exclude_period=escalation['exclude'],
                reports_into=data['reports_into'],
            )
            agents.append(agent)
        except Exception:
            LOG.error("Can not load toml file at {}".format(path))
    self.agents = agents
def load_config(path):
    """
    Load the config file and return the configuration parameters dict.
    """
    path = get_full_path(path)
    file_extension = os.path.splitext(path)[-1]
    config = DEFAULT_CONFIG
    if not os.path.exists(path):
        warning_message = """The "{file}" does not exist! The default config will be used.""".format(file=path)
        logging.warning(warning_message)
        return config
    with open(path, 'r') as config_file:
        if file_extension == ".json":
            config = json.load(config_file)
        elif file_extension == ".toml":
            config = pytoml.load(config_file)
        else:
            warning_message = """The "{file}" is neither in toml nor in json format! The default config will be used.""".format(file=path)
            logging.warning(warning_message)
    return config
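# Usage sketch for load_config above (hypothetical file names): the loader
# dispatches on the file extension and falls back to DEFAULT_CONFIG when the
# file is missing or in an unrecognized format.
print(load_config('service.toml'))  # parsed with pytoml
print(load_config('service.json'))  # parsed with json
print(load_config('service.ini'))   # warning logged, DEFAULT_CONFIG returned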
def main():
    '''Run test'''
    config_filename = "load.yaml"
    if len(sys.argv) > 1:
        config_filename = sys.argv[1]
    filename_components = config_filename.split('.')
    if len(filename_components) > 1:
        extension = filename_components[-1]
        with open(config_filename, 'rb') as fin:
            if extension == 'toml':
                config = pytoml.load(fin)
            elif extension in ['yaml', 'yml']:
                config = yaml.safe_load(fin)  # safe_load: config files need no arbitrary tags
            elif extension == 'json':
                config = json.load(fin)
            else:
                print("Config file has unsupported format: %s" % extension)
                return 1  # bail out instead of falling through with config undefined
    else:
        print("Config file should have one of the following extensions:"
              " .toml, .json, .yaml")
        return 1
    init_logging()
    lt = LoadTest(config)
    lt.run_test()
def setUp(self):
    conf_path = os.path.join(os.path.dirname(__file__), 'common_unittest.toml')
    with open(conf_path, 'rb') as config_file:
        config = pytoml.load(config_file)
    conf = get_config(config)
    log.init_log(conf, logging.DEBUG)
def parse_toml(self, file):
    with open(file, 'rb') as fi:
        conf = toml.load(fi)
    print(conf)

    masker_conf = conf['mask_loader']
    masker_conf['mode'] = masker_conf.get('mode', 'NN')
    masker_conf['use_gpu'] = masker_conf.get('use_gpu', True)
    masker_conf['nn_weight_file'] = masker_conf.get('nn_weight_file', 'None')

    net_conf = conf['network']
    net_conf['model'] = net_conf.get('model', 'Wide_ResNet')
    net_conf['model_saved_dir'] = net_conf.get('model_saved_dir', 'model')
    net_conf['model_save_steps'] = net_conf.get('model_save_steps', 500)

    train_conf = conf['train']
    train_conf['learning_rate'] = train_conf.get('learning_rate', 0.01)
    train_conf['tensorboard_folder'] = train_conf.get('tensorboard_folder', 'runs/exp1')

    data_aug_conf = conf['data_augmentation']
    data_aug_conf['transform'] = data_aug_conf.get('transform', ['vflip', 'hflip', 'rot90'])

    self.data_aug_conf = data_aug_conf
    self.masker_conf = masker_conf
    self.net_conf = net_conf
    self.dataset_conf = conf['dataset']
    self.train_conf = train_conf
    self.conf = conf
def get_available_encounters(self):
    available_encounter_files = glob.glob(f"{self.base_dir}/*.toml")
    encounters = []
    for filename in sorted(available_encounter_files):
        # use a context manager so each file handle is closed promptly
        with open(filename, 'r') as f:
            encounters.append(Encounter(**toml.load(f)))
    return encounters
def loadConfigs(self):
    config_path = os.path.join(self.target.git.workdir, "config.toml")
    with open(config_path) as config_file:  # context manager closes the handle
        repo_configs = toml.load(config_file)["repo"]
    self.repos = [
        SourceRepository(config, root=self.source) for config in repo_configs
    ]
def load(cls) -> 'Config':
    with default_file.open() as f:
        config = pytoml.load(f)
    user_config = cls._load_user_config()
    if user_config:
        config.update(user_config)
    return Config(config)
def __init__(self):
    data_config = os.path.join(os.path.dirname(__file__), 'config.toml')
    with open(data_config, "rb") as file:
        self.config = toml.load(file)
    if self.config:
        self.k_path = os.path.join(self.config['config']['path'], self.config['config']['k'])
        self.profile_path = os.path.join(self.config['config']['path'], self.config['config']['profile'])
        self.tick_path = os.path.join(self.config['config']['path'], self.config['config']['tick'])
        if not os.path.exists(self.k_path):
            os.mkdir(self.k_path)
        if not os.path.exists(self.profile_path):
            os.mkdir(self.profile_path)
        if not os.path.exists(self.tick_path):
            os.mkdir(self.tick_path)
    # the user value was redacted in the source; '******' is left as a placeholder
    config = {'db': 'fregata', 'user': '******', 'passwd': 'root', 'host': '127.0.0.1', 'port': 3306}
    db = MySQLdb.connect(**config)
    self.db = db
def parse_config(path: str) -> Dict[str, Any]:
    if not os.path.isfile(path):
        log.critical("parse_config: config %s is not a file", path)
        raise SystemExit(1)
    with open(path) as handle:
        return dict(pytoml.load(handle))
def loadConfigs():
    global mqttServer, mqttPort, siteId, hotwordId
    if os.path.isfile(SNIPS_CONFIG_PATH):
        with open(SNIPS_CONFIG_PATH) as confFile:
            configs = pytoml.load(confFile)
        if 'mqtt' in configs['snips-common']:
            if ':' in configs['snips-common']['mqtt']:
                mqttServer = configs['snips-common']['mqtt'].split(':')[0]
                mqttPort = int(configs['snips-common']['mqtt'].split(':')[1])
            elif '@' in configs['snips-common']['mqtt']:
                mqttServer = configs['snips-common']['mqtt'].split('@')[0]
                mqttPort = int(configs['snips-common']['mqtt'].split('@')[1])
        if 'bind' in configs['snips-audio-server']:
            if ':' in configs['snips-audio-server']['bind']:
                siteId = configs['snips-audio-server']['bind'].split(':')[0]
            elif '@' in configs['snips-audio-server']['bind']:
                siteId = configs['snips-audio-server']['bind'].split('@')[0]
        if 'hotword_id' in configs['snips-hotword']:
            hotwordId = configs['snips-hotword']['hotword_id']
    else:
        logger.warning('Snips configs not found')
def read_config_file(args):
    filename = None
    if args.config:
        filename = args.config
        if not os.path.exists(filename):
            raise ConfigError('%s' % filename, [], 'file not found')
    else:
        env_config = 'PUBLIC_WRAPPERS_CONFIG'
        attempt_files = ([os.environ[env_config]] if env_config in os.environ else []) + [
            os.path.expanduser('~/.public-wrappers.toml'),
            '/etc/public-wrappers.toml',
        ]
        for attempt in attempt_files:
            if os.path.exists(attempt):
                filename = attempt
                break
        if filename is None:
            raise ConfigError('', [], 'none of %s found' % ', '.join(attempt_files))
    with open(filename, 'rb') as f:
        try:
            obj = toml.load(f)
        except toml.TomlError as e:
            raise ConfigError(filename, [], 'TOML error at line %d' % e.line)
    return ConfigObject(filename, [], obj)
def load_user_config_file(config_filepath) -> Dict[str, Any]:
    if not os.path.isfile(config_filepath):
        print(f"No config file {config_filepath}", file=sys.stderr)
        exit(1)
    with open(config_filepath) as config_file:
        return toml.load(config_file)
def test_init():
    responses = [
        'foo',                  # Module name
        'Test Author',          # Author
        '*****@*****.**',       # Author email (redacted in the source)
        'http://example.com/',  # Home page
        '1',                    # License (1 -> MIT)
    ]
    with TemporaryDirectory() as td, \
            patch_data_dir(), \
            faking_input(responses):
        ti = init.TerminalIniter(td)
        ti.initialise()
        generated = Path(td) / 'pyproject.toml'
        assert_isfile(generated)
        with generated.open() as f:
            data = pytoml.load(f)
        assert data['tool']['flit']['metadata']['author-email'] == "*****@*****.**"
        license = Path(td) / 'LICENSE'
        assert_isfile(license)
        with license.open() as f:
            license_text = f.read()
        assert license_text.startswith("The MIT License (MIT)")
        assert "{year}" not in license_text
        assert "Test Author" in license_text
def parse_toml(self, file):
    with open(file, 'rb') as fi:
        conf = toml.load(fi)
    print(conf)

    masker_conf = conf['mask_loader']
    masker_conf['mode'] = masker_conf.get('mode', 'NN')
    masker_conf['nn_model'] = masker_conf.get('nn_model', 'MDUnetDilat')
    masker_conf['use_gpu'] = masker_conf.get('use_gpu', True)
    # masker_conf['nn_weight_file'] = masker_conf.get('nn_weight_file', 'None')

    net_conf = conf['network']
    net_conf['model'] = net_conf.get('model', 'DUnet')
    net_conf['first_out_ch'] = net_conf.get('first_out_ch', 32)
    net_conf['model_saved_dir'] = net_conf.get('model_saved_dir', 'model')
    net_conf['load_train_iter'] = net_conf.get('load_train_iter', None)
    net_conf['model_save_steps'] = net_conf.get('model_save_steps', 500)
    net_conf['patch_size'] = net_conf.get('patch_size', [320, 320, 3])
    # net_conf['trained_file'] = masker_conf.get('trained_file', 'None')
    # net_conf['learning_rate'] = net_conf.get('learning_rate', 0.01)

    train_conf = conf['train']
    train_conf['learning_rate'] = train_conf.get('learning_rate', 0.01)
    train_conf['tensorboard_folder'] = train_conf.get('tensorboard_folder', 'runs/exp1')

    data_aug_conf = conf['data_augmentation']
    data_aug_conf['transform'] = data_aug_conf.get('transform', ['vflip', 'hflip', 'rot90'])

    self.data_aug_conf = data_aug_conf
    self.masker_conf = masker_conf
    self.net_conf = net_conf
    self.dataset_conf = conf['dataset']
    self.train_conf = train_conf
    self.conf = conf
def Parse(self, path='COMAKE'):
    try:
        with codecs.open(path, 'r', 'utf-8') as f:
            comake = toml.load(f)
    except toml.TomlError:
        print(RedIt("[ERROR] {} load failed".format(path)))
        raise InvalidComake("COMAKE has something wrong")
    else:
        if 'output' in comake:
            size = len(comake['output'])
            for i in range(size):
                sources_set = _parsePath(comake['output'][i]['sources'])
                comake['output'][i]['sources'] = ' '.join(sources_set)
                headers_set = _parsePath(comake['output'][i]['headers'])
                comake['output'][i]['headers'] = ' '.join(headers_set)
                comake['output'][i]['a'] = comake['output'][i]['a'].strip()
                comake['output'][i]['so'] = comake['output'][i]['so'].strip()
                self.total_sources.update(sources_set)
                self.total_headers.update(headers_set)
        comake['total_sources'] = ' '.join(self.total_sources)
        comake['total_headers'] = ' '.join(self.total_headers)
        comake['include_path'] = ' '.join(['-I' + s for s in comake['include_path'].split()])
        comake['library_path'] = ' '.join(['-L' + s for s in comake['library_path'].split()])
        self.comake = comake
        if 'use_local_makefile' not in self.comake:
            self.comake['use_local_makefile'] = 0
        if 'use_local_copy' not in self.comake:
            self.comake['use_local_copy'] = 1
        self._parseDepPath()
        return self.comake
def read_params(self, user=None, cid=None):
    '''Read the TOML file and return it as a dictionary.'''
    if user is None or cid is None:
        sim_dir = self.appdir
    else:
        sim_dir = os.path.join(user_dir, user, self.appname, cid)
    file_name = os.path.join(sim_dir, self.simfn)
    if not os.path.isfile(file_name):
        print('ERROR: input file does not exist: {}'.format(file_name))
    params = {}
    blockmap = {}
    blockorder = []
    with open(file_name, 'r') as f:
        toml_dict = toml.load(f)
    blockorder = list(toml_dict.keys())
    for k, v in toml_dict.items():
        blockmap[k] = list(v.keys())
        params.update(v)
    return params, blockmap, blockorder
def main(options, args):
    with open(options.config, "rb") as file:
        config = pytoml.load(file)  # named config rather than toml to avoid shadowing the module name
    if not config:
        print("Bad toml file")
        exit()
    perform(config, options)
def _main():
    succeeded = []
    failed = []
    for top, dirnames, fnames in os.walk('.'):
        for fname in fnames:
            if not fname.endswith('.toml'):
                continue
            if sys.argv[1:] and not any(arg in fname for arg in sys.argv[1:]):
                continue
            parse_error = None
            try:
                with open(os.path.join(top, fname), 'rb') as fin:
                    parsed = toml.load(fin)
            except toml.TomlError:
                parsed = None
                parse_error = sys.exc_info()
            else:
                dumped = toml.dumps(parsed)
                parsed2 = toml.loads(dumped)
                if parsed != parsed2:
                    # record the round-trip mismatch as a full 4-tuple so the
                    # report loop below can unpack it
                    failed.append((fname, parsed, parsed2, None))
                    continue
                with open(os.path.join(top, fname), 'rb') as fin:
                    parsed = toml.load(fin, translate=_testbench_literal)
            try:
                with io.open(os.path.join(top, fname[:-5] + '.json'), 'rt', encoding='utf-8') as fin:
                    bench = json.load(fin)
            except IOError:
                bench = None
            if parsed != bench:
                failed.append((fname, parsed, bench, parse_error))
            else:
                succeeded.append(fname)
    for f, parsed, bench, e in failed:
        print('failed: {}\n{}\n{}'.format(f, json.dumps(parsed, indent=4), json.dumps(bench, indent=4)))
        if e:
            traceback.print_exception(*e)
    print('succeeded: {}'.format(len(succeeded)))
    return 1 if failed else 0
def load_settings_table(path):
    """Loads the settings table into a dict."""
    try:
        with open(path, 'r') as table_file:  # close the handle instead of leaking it
            return pytoml.load(table_file)
    except FileNotFoundError:
        return None
def main(argv):
    with open("Builder.toml", "r") as fin:
        obj = toml.load(fin)
    project = parse(obj)
    transform(project, "Ninja")
    if len(argv) > 1:
        sys.exit(0)
    build("Ninja")
def generate(file):
    '''Generate a JSON file from a TOML file.'''
    input_file = file
    output_file = input_file.replace('.toml', '.json')
    with open(input_file, 'r') as tom, open(output_file, 'w') as son:
        tom_tree = toml.load(tom)
        son.write(json.dumps(tom_tree, indent=4, sort_keys=True))
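# Usage sketch for generate above (settings.toml is a hypothetical input);
# the JSON twin is written next to the source file.
generate('settings.toml')  # writes settings.json with sorted, indented keys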
def parse_config(path):
    if path is None or not os.path.exists(path):
        return {}
    try:
        with io.open(path, 'r', encoding='utf-8') as stream:
            return pytoml.load(stream)['general']
    except IOError as error:
        raise click.ClickException(error)
def _load_config(cls):
    if cls._config is None:
        cfg_path = os.path.expanduser('~/.cliist.toml')
        if not os.path.isfile(cfg_path):
            msg = "Configuration not found! Please run 'cliist configure'!"
            raise ConfigurationError(msg)
        with open(cfg_path) as fs:
            cls._config = pytoml.load(fs)
    return cls._config
def load_blog(path):
    from overviewer.models import BlogPost
    import pytoml
    with open(path) as f:
        dat = pytoml.load(f)
    for p in dat['posts']:
        db.session.add(BlogPost(**p))
        print('adding', p['title'])
    db.session.commit()
def parse_config():
    with open('config.toml', 'rb') as fin:
        obj = pytoml.load(fin)
    if not obj.get('key'):  # .get() also covers the key being absent entirely
        raise Exception('Key must be defined in config file')
    if 'start' not in obj:
        obj['start'] = 0
    if 'end' not in obj:
        obj['end'] = 10000
    return obj
def from_file(cls, file):
    """Load settings from the given ``file`` and instantiate a
    :class:`Configuration` instance from that.

    :param file: the file object that contains TOML settings
    :return: an instantiated configuration
    :rtype: :class:`Configuration`

    """
    return cls(load(file))
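# Hedged usage sketch for from_file above: it reads as a classmethod on a
# Configuration class, with `load` imported from the toml library at module
# level; app.toml is a hypothetical path.
with open('app.toml') as f:
    config = Configuration.from_file(f)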
def loadConfig(stages=[], configDir="./"):
    for stage_name in stages:
        configPath = os.path.join(configDir, "%s.toml" % stage_name)
        with open(configPath, 'rb') as fin:
            config = toml.load(fin)
        # round-trip through json.dumps/expandvars to substitute $VARS in values
        dump = json.dumps(config)
        json_str = os.path.expandvars(dump)
        config = json.loads(json_str)
        env.roledefs[stage_name] = config
def __init__(self, config_filename, event_loop):
    self.event_loop = event_loop
    with open(config_filename, 'rb') as fin:
        self.config = pytoml.load(fin)
    self.factories = {
        'schedule': ScheduleFactory(self),
        'ammo': AmmoFactory(self),
        'gun': GunFactory(self),
        'bfg': BFGFactory(self),
        'aggregator': AggregatorFactory(self),
    }
def get():
    global _CONFIG
    if _CONFIG is not None:
        return _CONFIG
    c_path = _config_fpath()
    if not os.path.isfile(c_path):
        _CONFIG = {}
    else:
        with open(c_path, 'r') as fs:
            _CONFIG = pytoml.load(fs)
    return _CONFIG
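# Usage note for get() above: _CONFIG is a module-level cache, so the TOML
# file is parsed at most once per process; later calls return the same object.
cfg_a = get()
cfg_b = get()
assert cfg_a is cfg_b  # both names point at the cached dict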
def __init__(self, f, **kwargs):
    super(Settings, self).__init__()
    d = toml.load(f)
    # Ordering is important to consistently build a hash for caching
    self._schema = OrderedDict(sorted(d.items(), key=lambda t: t[0]))
    self._aliases = self._index_aliases()
    self._user_settings = {}
    # Self-validate
    self.validate()
    # Add any user settings
    self.update(kwargs)
def read_config(configfile):
    """
    Parses the config file.

    Parameters:
        configfile - path to the config file

    Returns:
        a dictionary with config options
    """
    with open(configfile, 'rb') as fin:
        return toml.load(fin)
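# Usage sketch for read_config above (app.toml is a hypothetical path):
settings = read_config('app.toml')
print(settings.get('database', {}))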
def simulator_face(cfgfile):
    global config
    cwd = os.path.dirname(os.path.realpath(__file__))
    cfg_path = os.path.join(cwd, cfgfile + '.cfg')
    if os.path.isfile(cfg_path):
        # separate names for the path and the handle, so the path isn't clobbered
        with open(cfg_path, 'rb') as fin:
            config = toml.load(fin)
        config['datadir'] = os.path.join(cwd, 'data', cfgfile)
    else:
        print("config file [%s] not found, simulator not changed" % cfg_path)
def _main():
    succeeded = []
    failed = []
    for top, dirnames, fnames in os.walk('.'):
        for fname in fnames:
            if not fname.endswith('.toml'):
                continue
            try:
                with open(os.path.join(top, fname), 'rb') as fin:
                    parsed = toml.load(fin)
            except toml.TomlError:
                parsed = None
            else:
                dumped = toml.dumps(parsed)
                parsed2 = toml.loads(dumped)
                if parsed != parsed2:
                    failed.append(fname)
                    continue
                with open(os.path.join(top, fname), 'rb') as fin:
                    parsed = toml.load(fin, _testbench_literal, _testbench_array)
            try:
                with io.open(os.path.join(top, fname[:-5] + '.json'), 'rt', encoding='utf-8') as fin:
                    bench = json.load(fin)
            except IOError:
                bench = None
            if parsed != bench:
                failed.append(fname)
            else:
                succeeded.append(fname)
    for f in failed:
        print('failed: {}'.format(f))
    print('succeeded: {}'.format(len(succeeded)))
    return 1 if failed else 0
def _extract_info_from_package(dependency,
                               extract_type=None,
                               debug=False,
                               include_build_requirements=False):
    """Internal function to extract metainfo from a package.

    Currently supported info types:
    - name
    - dependencies (a list of dependencies)
    """
    output_folder = tempfile.mkdtemp(prefix="pythonpackage-metafolder-")
    try:
        extract_metainfo_files_from_package(dependency, output_folder, debug=debug)
        with open(os.path.join(output_folder, "METADATA"), "r", encoding="utf-8") as f:
            # Get metadata and cut away description (is after 2 linebreaks)
            metadata_entries = f.read().partition("\n\n")[0].splitlines()
        if extract_type == "name":
            name = None
            for meta_entry in metadata_entries:
                if meta_entry.lower().startswith("name:"):
                    return meta_entry.partition(":")[2].strip()
            if name is None:
                raise ValueError("failed to obtain package name")
            return name
        elif extract_type == "dependencies":
            requirements = []
            if (os.path.exists(os.path.join(output_folder, 'pyproject.toml'))
                    and include_build_requirements):
                with open(os.path.join(output_folder, 'pyproject.toml')) as f:
                    build_sys = pytoml.load(f)['build-system']
                if "requires" in build_sys:
                    requirements += build_sys["requires"]
            # Add requirements from metadata:
            requirements += [
                entry.rpartition("Requires-Dist:")[2].strip()
                for entry in metadata_entries
                if entry.startswith("Requires-Dist")
            ]
            return list(set(requirements))  # remove duplicates
    finally:
        shutil.rmtree(output_folder)
def _AddWheel(env, tomlfile, pyver='36'):
    import enscons
    import pytoml
    with open(File(tomlfile).srcnode().abspath) as fp:
        metadata = pytoml.load(fp)['tool']['enscons']
    name = metadata['name']
    version = metadata['version']
    # obtain wheel tag using specified python version
    wmod = 'wheel' if pyver.startswith('2') else 'setuptools'
    exe = 'python%s' % '.'.join(pyver)
    tag = subprocess.check_output([
        exe, '-c',
        'import %s.pep425tags as wp; tags = wp.get_supported(); '
        'best = [t for t in tags if "manylinux" not in "".join(t)][0]; '
        'print("-".join(best))' % wmod
    ]).strip()
    # set things up for enscons
    env.Replace(
        PACKAGE_NAME=name,
        PACKAGE_NAME_SAFE=name,
        PACKAGE_VERSION=version,
        PACKAGE_METADATA=metadata,
        WHEEL_TAG=tag,
        ROOT_IS_PURELIB=False,
        WHEEL_BASE='dist',
        DIST_BASE='dist',
    )
    env.Append(WHEEL_PYVER=[pyver])
    wheel_meta = enscons.init_wheel(env)
    wheel_targets = list()
    for category, elems in _wheel_targets.items():
        if category == 'platlib':
            target_dir = env['WHEEL_PATH'].get_path()
        else:
            target_dir = env['WHEEL_DATA_PATH'].Dir(category).get_path()
        for targets, prefix in elems:
            for tgts in targets:
                for node in env.arg2nodes(tgts):
                    relpath = os.path.relpath(node.get_path(), prefix)
                    args = (os.path.join(target_dir, relpath), node)
                    wheel_targets.append(env.InstallAs(*args))
    whl = env.Zip(
        target=env['WHEEL_FILE'],
        source=wheel_meta + wheel_targets,
        ZIPROOT=env['WHEEL_PATH'])
    env.AddPostAction(whl, Action(enscons.add_manifest))
    if env.get('PREFIX'):
        out = env.Install('$PREFIX/dist/wheel', whl)
        env.Alias('install', out)
    return whl
def flags_changed(self, ext):
    toml_file = self.build_toml(ext.name)  # path variable, not the toml module
    if not os.path.exists(toml_file):
        return True
    with open(toml_file) as f:
        flags = pytoml.load(f)
    ext_flags = {
        "extra_compile_args": ext.extra_compile_args,
        "extra_link_args": ext.extra_link_args,
        "define_macros": dict(ext.define_macros),
        "sources": ext.sources,
    }
    return flags != ext_flags
def load_config(name):
    base = os.path.dirname(os.path.realpath(__file__))
    fmt = '{base}{sep}environment{sep}{name}.cfg'
    config_file_path = fmt.format(sep=os.sep, base=base, name=name)
    try:
        with open(config_file_path, 'rb') as fin:
            return toml.load(fin)
    except OSError:  # narrowed from a bare except: a missing/unreadable file is the expected failure
        print("ERROR: Did you remember to generate config files with credstmpl?", file=sys.stderr)
        print("Check out credstmpl at https://github.com/qadium/credstmpl", file=sys.stderr)
        print("you'll need to run `credstmpl filename.extension.j2`", file=sys.stderr)