def config(file_params='database.ini', section='postgresql'):
    """Read *section* from the INI file *file_params* as a plain dict.

    Returns an empty dict when the file lacks the requested section.
    """
    parser = CP()
    parser.read(file_params)
    if not parser.has_section(section):
        return {}
    # parser.items(section) yields (option, value) pairs.
    return {option: value for option, value in parser.items(section)}
def add_library(self, library):
    """Append *library* as a ``[library.<name>]`` section to the FuseSoC
    config file at ``self._path`` and initialize it through its provider.

    Logs a warning and returns unchanged when the section already exists.

    Raises:
        RuntimeError: when no config file path is known, or when
            ``library.sync_type`` does not map to a known provider.
    """
    from fusesoc.provider import get_provider

    if not hasattr(self, "_path"):
        raise RuntimeError("No FuseSoC config file found - can't add library")
    section_name = "library." + library.name

    config = CP()
    config.read(self._path)

    if section_name in config.sections():
        logger.warning(
            "Not adding library. {} already exists in configuration file".format(
                library.name
            )
        )
        return

    config.add_section(section_name)
    config.set(section_name, "location", library.location)
    if library.sync_type:
        config.set(section_name, "sync-uri", library.sync_uri)
        if library.sync_version is not None:
            config.set(section_name, "sync-version", library.sync_version)
        config.set(section_name, "sync-type", library.sync_type)
    _auto_sync = "true" if library.auto_sync else "false"
    config.set(section_name, "auto-sync", _auto_sync)

    try:
        provider = get_provider(library.sync_type)
    except ImportError as e:
        # BUG FIX: the original did library["sync-type"], but Library is
        # attribute-accessed everywhere else in this file (library.name,
        # library.sync_type), so building the error message would itself
        # raise TypeError. Also chain the ImportError for diagnosability.
        raise RuntimeError(
            "Invalid sync-type '{}'".format(library.sync_type)
        ) from e

    provider.init_library(library)
    # Only write the config file once the provider initialized successfully.
    with open(self._path, "w") as conf_file:
        config.write(conf_file)
def main():
    """Generate per-node daemon.ini files for a local test network
    (service nodes + client nodes) and write a supervisord config
    (--out) that launches all of them.
    """
    ap = AP()
    ap.add_argument('--dir', type=str, default='testnet_tmp')
    ap.add_argument('--svc', type=int, default=20, help='number of service nodes')
    ap.add_argument('--baseport', type=int, default=19000)
    ap.add_argument('--clients', type=int, default=200, help='number of client nodes')
    ap.add_argument('--bin', type=str, required=True)
    ap.add_argument('--out', type=str, required=True)
    ap.add_argument('--connect', type=int, default=10)
    args = ap.parse_args()
    basedir = os.path.abspath(args.dir)
    # --- service nodes ---------------------------------------------------
    for nodeid in range(args.svc):
        config = CP()
        # Each service node binds its own port (baseport + id) on 'lo'.
        config['bind'] = {
            'lo': str(args.baseport + nodeid)
        }
        config['netdb'] = {
            'dir': 'netdb'
        }
        config['connect'] = {}
        # Full mesh: point every service node at every other node's
        # 'rc.signed' file (presumably a signed router contact — verify).
        for otherid in range(args.svc):
            if otherid != nodeid:
                name = svcNodeName(otherid)
                config['connect'][name] = os.path.join(
                    basedir, name, 'rc.signed')
        d = os.path.join(args.dir, svcNodeName(nodeid))
        if not os.path.exists(d):
            os.mkdir(d)
        fp = os.path.join(d, 'daemon.ini')
        with open(fp, 'w') as f:
            config.write(f)
    # --- client nodes ----------------------------------------------------
    for nodeid in range(args.clients):
        config = CP()
        config['netdb'] = {
            'dir': 'netdb'
        }
        config['connect'] = {}
        # Each client connects to args.connect service nodes, chosen by
        # wrapping around the service-node list starting at nodeid.
        for otherid in range(args.connect):
            otherid = (nodeid + otherid) % args.svc
            name = svcNodeName(otherid)
            config['connect'][name] = os.path.join(
                basedir, name, 'rc.signed')
        d = os.path.join(args.dir, clientNodeName(nodeid))
        if not os.path.exists(d):
            os.mkdir(d)
        fp = os.path.join(d, 'daemon.ini')
        with open(fp, 'w') as f:
            config.write(f)
    # --- supervisord config ----------------------------------------------
    with open(args.out, 'w') as f:
        f.write('''[program:svc-node]
directory = {}
command = {}
redirect_stderr=true
stdout_logfile=/dev/fd/1
stdout_logfile_maxbytes=0
process_name = svc-node-%(process_num)03d
numprocs = {}
'''.format(os.path.join(args.dir, 'svc-node-%(process_num)03d'), args.bin,
           args.svc))
        f.write('''[program:client-node]
directory = {}
command = {}
redirect_stderr=true
stdout_logfile=/dev/fd/1
stdout_logfile_maxbytes=0
process_name = client-node-%(process_num)03d
numprocs = {}
'''.format(os.path.join(args.dir, 'client-node-%(process_num)03d'), args.bin,
           args.clients))
        f.write('[supervisord]\ndirectory=.\n')
def __init__(self, conf_path): self.fpath = conf_path # 配置文件路径,要求是绝对路径 self.cf = CP() # ConfigParser对象实例 self.cf.read(self.fpath) # 一启动就读取配置文件
def __init__(self, path=None, file=None):
    """Load FuseSoC configuration.

    Reads the config from *file* (an open file object) when given,
    otherwise from *path*, otherwise from the default locations
    (/etc/fusesoc, $XDG_CONFIG_HOME, current directory).  Populates the
    various root directories and the list of core libraries.
    """
    self.build_root = None
    self.cache_root = None
    cores_root = []
    systems_root = []
    self.library_root = None
    self.libraries = []

    config = CP()
    if file is None:
        if path is None:
            xdg_config_home = os.environ.get(
                "XDG_CONFIG_HOME") or os.path.join(os.path.expanduser("~"), ".config")
            config_files = [
                "/etc/fusesoc/fusesoc.conf",
                os.path.join(xdg_config_home, "fusesoc", "fusesoc.conf"),
                "fusesoc.conf",
            ]
        else:
            logger.debug(f"Using config file '{path}'")
            # Create an empty file if the explicit path doesn't exist yet,
            # so later write-backs have something to update.
            if not os.path.isfile(path):
                with open(path, "a"):
                    pass
            config_files = [path]

        logger.debug("Looking for config files from " + ":".join(config_files))
        files_read = config.read(config_files)
        logger.debug("Found config files in " + ":".join(files_read))
        if files_read:
            # Last file read wins; that is where modifications are written.
            self._path = files_read[-1]
    else:
        logger.debug("Using supplied config file")
        config.read_file(file)
        # Rewind so the caller can re-read the file object afterwards.
        file.seek(0)
        self._path = file.name

    # Copy [main] options onto attributes of the same name; missing
    # options/sections are simply skipped.
    for item in [
            "build_root", "cache_root", "systems_root", "library_root"
    ]:
        try:
            setattr(self, item, os.path.expanduser(config.get("main", item)))
            if item == "systems_root":
                systems_root = [
                    os.path.expanduser(config.get("main", item))
                ]
                logger.warning(
                    "The systems_root option in fusesoc.conf is deprecated. Please migrate to libraries instead"
                )
        except configparser.NoOptionError:
            pass
        except configparser.NoSectionError:
            pass
    try:
        cores_root = config.get("main", "cores_root").split()
        logger.warning(
            "The cores_root option in fusesoc.conf is deprecated. Please migrate to libraries instead"
        )
    except configparser.NoOptionError:
        pass
    except configparser.NoSectionError:
        pass

    # Set fallback values
    if self.build_root is None:
        self.build_root = os.path.abspath("build")
    if self.cache_root is None:
        xdg_cache_home = os.environ.get("XDG_CACHE_HOME") or os.path.join(
            os.path.expanduser("~"), ".cache")
        self.cache_root = os.path.join(xdg_cache_home, "fusesoc")
        os.makedirs(self.cache_root, exist_ok=True)
    if not cores_root and os.path.exists("cores"):
        cores_root = [os.path.abspath("cores")]
    if (not systems_root) and os.path.exists("systems"):
        systems_root = [os.path.abspath("systems")]
    if self.library_root is None:
        xdg_data_home = os.environ.get("XDG_DATA_HOME") or os.path.join(
            os.path.expanduser("~"), ".local/share")
        self.library_root = os.path.join(xdg_data_home, "fusesoc")

    # Parse library sections (named "library.<name>").
    libraries = []
    library_sections = [
        x for x in config.sections() if x.startswith("library")
    ]
    for section in library_sections:
        name = section.partition(".")[2]
        try:
            location = config.get(section, "location")
        except configparser.NoOptionError:
            # Default to a directory under library_root.
            location = os.path.join(self.library_root, name)

        try:
            auto_sync = config.getboolean(section, "auto-sync")
        except configparser.NoOptionError:
            auto_sync = True
        except ValueError as e:
            # Malformed boolean: skip this library entirely.
            _s = "Error parsing auto-sync '{}'. Ignoring library '{}'"
            logger.warning(_s.format(str(e), name))
            continue
        try:
            sync_uri = config.get(section, "sync-uri")
        except configparser.NoOptionError:
            # sync-uri is absent for local libraries
            sync_uri = None
        try:
            sync_type = config.get(section, "sync-type")
        except configparser.NoOptionError:
            # sync-uri is absent for local libraries
            sync_type = None
        libraries.append(
            Library(name, location, sync_type, sync_uri, auto_sync))

    # Get the environment variable for further cores
    env_cores_root = []
    if os.getenv("FUSESOC_CORES"):
        env_cores_root = os.getenv("FUSESOC_CORES").split(":")
        env_cores_root.reverse()

    # Legacy roots (cores_root/systems_root/env var) become plain local
    # libraries; explicitly configured libraries are appended after them.
    for root in cores_root + systems_root + env_cores_root:
        self.libraries.append(Library(root, root))
    self.libraries += libraries

    logger.debug("cache_root=" + self.cache_root)
    logger.debug("library_root=" + self.library_root)
def __init__(self,_filename):
    """Load the INI file at *_filename* (UTF-8) into a ConfigParser."""
    # Keep the source path around for later reference.
    self._filename = _filename
    # allow_no_value=True accepts keys that have no '=' value.
    self.Config = CP(allow_no_value=True)
    # Preserve option-name case (the default optionxform lowercases).
    self.Config.optionxform = str
    self.Config.read(_filename,encoding='utf-8')
def config_parser() -> CP:
    """Create a ConfigParser that accepts value-less keys and leaves
    option-name case untouched."""
    parser = CP(allow_no_value=True)
    # Identity transform: keep option names exactly as written.
    parser.optionxform = str
    return parser
def main():
    """Generate per-node daemon.ini configs for a test network (service
    nodes + client nodes), per-client hidden-service configs, and a
    supervisord file (--out) that runs every node, optionally under
    valgrind.
    """
    ap = AP()
    ap.add_argument('--valgrind', type=bool, default=False)
    ap.add_argument('--dir', type=str, default='testnet_tmp')
    ap.add_argument('--svc', type=int, default=20, help='number of service nodes')
    ap.add_argument('--baseport', type=int, default=19000)
    ap.add_argument('--clients', type=int, default=200, help='number of client nodes')
    ap.add_argument('--bin', type=str, required=True)
    ap.add_argument('--out', type=str, required=True)
    ap.add_argument('--connect', type=int, default=10)
    ap.add_argument('--ip', type=str, default=None)
    ap.add_argument('--ifname', type=str, default='lo')
    ap.add_argument('--netid', type=str, default=None)
    args = ap.parse_args()
    # Wrap the binary in valgrind when requested.
    if args.valgrind:
        exe = 'valgrind {}'.format(args.bin)
    else:
        exe = args.bin
    basedir = os.path.abspath(args.dir)
    # --- service nodes ---------------------------------------------------
    for nodeid in range(args.svc):
        config = CP()
        config['router'] = {
            'net-threads': '1',
            'worker-threads': '4',
            'nickname': svcNodeName(nodeid)
        }
        if args.netid:
            config['router']['netid'] = args.netid
        # Advertise a public endpoint only when an IP was given.
        if args.ip:
            config['router']['public-ip'] = args.ip
            config['router']['public-port'] = str(args.baseport + nodeid)
        config['bind'] = {args.ifname: str(args.baseport + nodeid)}
        config['netdb'] = {'dir': 'netdb'}
        config['network'] = {'type': 'null'}
        d = os.path.join(args.dir, svcNodeName(nodeid))
        if not os.path.exists(d):
            os.mkdir(d)
        fp = os.path.join(d, 'daemon.ini')
        with open(fp, 'w') as f:
            config.write(f)
            # All nodes after the first bootstrap from node 0's signed RC.
            if nodeid > 0:
                f.write("[bootstrap]\nadd-node={}\n".format(
                    os.path.join(basedir, svcNodeName(0), 'rc.signed')))
    # --- client nodes ----------------------------------------------------
    for nodeid in range(args.clients):
        config = CP()
        config['router'] = {
            'net-threads': '1',
            'worker-threads': '2',
            'nickname': clientNodeName(nodeid)
        }
        if args.netid:
            config['router']['netid'] = args.netid
        config['netdb'] = {'dir': 'netdb'}
        config['network'] = {'type': 'null'}
        d = os.path.join(args.dir, clientNodeName(nodeid))
        if not os.path.exists(d):
            os.mkdir(d)
        # Each client gets a hidden-service config referenced from its
        # [services] section.
        hiddenservice = os.path.join(d, 'service.ini')
        config['services'] = {'testnet': hiddenservice}
        fp = os.path.join(d, 'daemon.ini')
        with open(fp, 'w') as f:
            config.write(f)
            f.write("[bootstrap]\nadd-node={}\n".format(
                os.path.join(basedir, svcNodeName(0), 'rc.signed')))
        with open(hiddenservice, 'w') as f:
            f.write('''[test-service]
tag=test
prefetch-tag=test
type=null
''')
    # --- supervisord config ----------------------------------------------
    with open(args.out, 'w') as f:
        f.write('''[program:svc-node]
directory = {}
command = {} daemon.ini
autorestart=true
redirect_stderr=true
#stdout_logfile=/dev/fd/1
stdout_logfile={}/svc-node-%(process_num)03d-log.txt
stdout_logfile_maxbytes=0
process_name = svc-node-%(process_num)03d
numprocs = {}
'''.format(os.path.join(args.dir, 'svc-node-%(process_num)03d'), exe,
           args.dir, args.svc))
        f.write('''[program:client-node]
directory = {}
command = {} daemon.ini
autorestart=true
redirect_stderr=true
#stdout_logfile=/dev/fd/1
stdout_logfile={}/client-node-%(process_num)03d-log.txt
stdout_logfile_maxbytes=0
process_name = client-node-%(process_num)03d
numprocs = {}
'''.format(os.path.join(args.dir, 'client-node-%(process_num)03d'), exe,
           args.dir, args.clients))
        f.write('[supervisord]\ndirectory=.\n')
from configparser import ConfigParser as CP

# Load runtime settings from config.ini in the working directory.
cp = CP()
cp.read("config.ini")

SKIP = cp['Default'].getint('skip_frames')   # presumably frames skipped between detections — verify against consumer
SHOW = cp['Default'].getboolean('show')      # presumably toggles on-screen display — verify against consumer
detector_name = cp['Default']['detector_name']
tracker_name = cp['Default']['tracker_name']

# Creating the desired detector
# Detector implementations are imported lazily so only the selected
# backend's dependencies need to be installed.
if detector_name == "FaceboxesTensorflow":
    from detectors.tensorflow_detectors import FaceboxesTensorflow
    DETECTOR = FaceboxesTensorflow(cp['FaceboxesTensorflow']['weights'],
                                   cp['FaceboxesTensorflow'].getfloat('score_threshold'))
elif detector_name == "OpenCVHaar":
    from detectors.opencv_detectors import OpenCVHaarFaceDetector
    DETECTOR = OpenCVHaarFaceDetector(model_path=cp["OpenCVHaarCascades"]["path"])
elif detector_name == "DlibHog":
    from detectors.dlib_detectors import DlibHogDetector
    DETECTOR = DlibHogDetector()
else:
    # Unknown detector name: report and bail out.
    print("Invalid Detector")
    exit(0)

# Creating desired tracker
if tracker_name == "MEDIANFLOW":
    from trackers.opencv_trackers import OpenCVMultitrackerMedianFlow
    TRACKER = OpenCVMultitrackerMedianFlow()
elif tracker_name == "BOOSTING":
    from trackers.opencv_trackers import OpenCVMultitrackerBoosting
    TRACKER = OpenCVMultitrackerBoosting()