def test_hierarchical_configparser6():
    """Exercise recursive includes rooted at child/hcf_file6.ini and verify
    which file each value ultimately comes from."""
    path = MYDIR + "/hcf_test/child/hcf_file6.ini"
    cfg = HierarchicalConfigParser(debug=True)
    cfg.read(path)
    print("and we got:")
    cfg.write(sys.stdout)
    cfg.explain()
    # Originally present in child/hcf_file4:
    assert cfg['level']['location'] == 'child'
    # child has a section [file5] that includes ../../hcf_file5.ini, which has a file5 section:
    assert cfg['file5']['special'] == '10'
    # child [default] includes ../hcf_file6.ini:
    assert cfg['parent']['children'] == '1'
    # ../hcf_file6.ini has a DEFAULT include to ../hcf_file5.ini:
    assert cfg['b']['filename'] == 'hcf_file5'
    # ../hcf_file5.ini has a DEFAULT include back to hcf_file6.ini:
    for section in ('a', 'c', 'd'):
        assert cfg[section]['filename'] == 'hcf_file6'
    print("Explaination:")
    cfg.explain(sys.stdout)
def test_hierarchical_configparser1():
    """Reading hcf_file2.ini alone yields sections a/b/c, each tagged with
    both 'color' and 'second' values naming that file and section."""
    cfg = HierarchicalConfigParser()
    cfg.read(MYDIR + "/hcf_file2.ini")
    assert sorted(cfg.sections()) == ['a', 'b', 'c']
    for sect in ('a', 'b', 'c'):
        expected = 'file2-' + sect
        assert cfg[sect]['color'] == expected
        assert cfg[sect]['second'] == expected
def test_hierarchical_configparser2():
    """hcf_file1.ini includes hcf_file2.ini inside section [a] only; check
    what was merged, what was kept, and what was excluded."""
    path = MYDIR + "/hcf_file1.ini"  # includes hcf_file2.ini as a default
    assert os.path.exists(path)
    cfg = HierarchicalConfigParser()
    cfg.read(path)
    # Values written directly in hcf_file1.ini:
    assert cfg['a']['INCLUDE'] == 'hcf_file2.ini'
    assert cfg['a']['color'] == 'file1-a'
    assert cfg['b']['color'] == 'file1-b'
    # Included into section 'a' and not overwritten:
    assert cfg['a']['second'] == 'file2-a'
    # The extra tag in section 'b' must NOT have been pulled in:
    assert 'second' not in cfg['b']
    # Section 'c' must NOT have been pulled in:
    assert 'c' not in cfg.sections()
def test_hierarchical_configparser4():
    """Chained includes: hcf_file4 -> hcf_file5 -> hcf_file6; each section
    records which file supplied it."""
    path = MYDIR + "/hcf_file4.ini"
    cfg = HierarchicalConfigParser(debug=True)
    cfg.read(path)
    print("and we got:")
    cfg.write(sys.stdout)
    expected = {'a': 'hcf_file4', 'b': 'hcf_file5', 'c': 'hcf_file6', 'd': 'hcf_file6'}
    for sect, fname in expected.items():
        assert cfg[sect]['filename'] == fname
    print("Explaination:")
    cfg.explain(sys.stdout)
def test_hierarchical_configparser5():
    """hcf_filea.ini pulls values from fileb (section include) and filec
    ([default] include); verify merge precedence."""
    path = MYDIR + "/hcf_filea.ini"
    cfg = HierarchicalConfigParser(debug=True)
    cfg.read(path)
    print("and we got:")
    cfg.write(sys.stdout)
    cfg.explain()
    # Originally written in hcf_filea.ini:
    assert cfg['a']['file'] == 'hcf_filea'
    # Merged into [a] from fileb.ini:
    assert cfg['a']['name'] == 'hcf_fileb'
    # Merged via the [default] include of filec.ini:
    for sect, value in (('b', 'hcf_filec'), ('c', 'hcf_filec'), ('d', 'hcf_filed')):
        assert cfg[sect]['file'] == value
def test_hierarchical_configparser3():
    """Read hcf_file3.ini and verify the values merged in from hcf_file2.

    NOTE(review): a second function with this exact name is defined later in
    this module, so under pytest the later definition shadows this one and
    this version never runs (and its expectation for ['d']['color'] conflicts
    with the later version's) — confirm which version is intended and rename
    or delete the other.
    """
    hcf = HierarchicalConfigParser()
    hcf.read(MYDIR + "/hcf_file3.ini")
    print("and we got:")
    # BUGFIX: was hcf.write(open("/dev/stdout", "w")) — leaked the file
    # handle and is non-portable (no /dev/stdout on Windows). Write to
    # sys.stdout directly, as the sibling tests do.
    hcf.write(sys.stdout)
    assert hcf['a']['color'] == 'file2-a'
    assert hcf['a']['second'] == 'file2-a'
    assert hcf['b']['color'] == 'file2-b'
    assert hcf['b']['second'] == 'file2-b'
    assert hcf['c']['color'] == 'file2-c'
    assert hcf['c']['second'] == 'file2-c'
    assert hcf['d']['color'] == 'file1-d'
def test_hierarchical_configparser3():
    """hcf_file3.ini has a DEFAULT include of hcf_file2.ini: local values win,
    everything else is inherited from the included file."""
    fname = MYDIR + "/hcf_file3.ini"
    print("Original config file:")
    # BUGFIX: was print(open(fname, "r").read()) — leaked the file handle;
    # use a context manager so it is closed deterministically.
    with open(fname, "r") as f:
        print(f.read())
    print("--------------------------\n\n")
    hcf = HierarchicalConfigParser(debug=True)
    hcf.read(fname)
    print("and we got:")
    hcf.write(sys.stdout)
    # The include is in DEFAULT, so we start with ALL of the tags in hcf_file3.ini:
    assert hcf['c']['sound'] == 'file3-c'
    assert hcf['d']['color'] == 'file3-d'
    # ...and then we get those in hcf_file2 that were not overwritten:
    assert hcf['a']['color'] == 'file2-a'
    assert hcf['a']['second'] == 'file2-a'
    assert hcf['b']['color'] == 'file2-b'
    assert hcf['b']['second'] == 'file2-b'
    assert hcf['c']['color'] == 'file2-c'
    assert hcf['c']['second'] == 'file2-c'
    print("Explaination:")
    hcf.explain(sys.stdout)
B: 2. C: true D: sssstttrrring E: F: [lists] A: a,b , c, d B: e f g h C: i;j; k; l D: m-n-o-p E: 1,2,3,4 F: 1 2,3;4 """ config = HierarchicalConfigParser() config.read_file(io.StringIO(s_config)) class ARGH: def __init__(self, d): self.setup = d self.reader = d self.engine = d self.writer = d self.validator = d self.takedown = d assert config[driver.SETUP][driver.SETUP] == "demo_pandas.setup" assert config[driver.READER][driver.READER] == "demo_pandas.reader"
def main_setup(additional_args=None):
    """Set up the DAS system logging, parse arguments, and load the
    configuration file, returning the (args, config) pair.

    :param additional_args: optional iterable of (args, kwargs) tuples that
        are forwarded verbatim to parser.add_argument() so callers can add
        their own options.  BUGFIX: the default used to be a mutable list
        ([]); a None sentinel avoids state shared across calls.
    :returns: (args, config) — the parsed argparse Namespace and the loaded
        config parser.
    :raises RuntimeError: if the config file does not exist.
    :raises ValueError: if --get is not of the form section:option:default.
    """
    if additional_args is None:
        additional_args = []

    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("config", help="Main Config File")
    parser.add_argument("--print_bom", help="Output a bill of materials", action='store_true')
    parser.add_argument("--make_release",
                        help="Create a zip file with all of the files necessary to run the DAS. Similar to print_bom")
    parser.add_argument("--experiment",
                        help="Run an experiment according to the [experiment] section, with the results in this directory")
    parser.add_argument("--isolation", help="Specifies isolation mode for experiments",
                        choices=['sameprocess', 'subprocess'], default='sameprocess')
    parser.add_argument("--graphdata", help="Just draw the graph from the data that was already collected.",
                        action='store_true')
    parser.add_argument("--logfilename", help="Specify logfilename, otherwise auto-generate")
    parser.add_argument("--nohierconfig",
                        help='Use regular Python ConfigParser instead of ctools.HierarchicalConfigParser',
                        action="store_true")
    parser.add_argument("--dump_config", help="dump the config file, then exit", action='store_true')
    parser.add_argument("--get", help="output the section:option:default from the config file, then exit")
    parser.add_argument("--dry-run", help="Dry run; do not run the algorithm", action='store_true')
    for (arg_names, arg_kwargs) in additional_args:
        parser.add_argument(*arg_names, **arg_kwargs)
    clogging.add_argument(parser)
    args = parser.parse_args()

    if not os.path.exists(args.config):
        raise RuntimeError("{} does not exist".format(args.config))
    if args.graphdata and args.experiment is None:
        parser.error("--graphdata requires --experiment")

    ###
    ### Read the configuration file and handle config-related options
    ###
    config = ConfigParser() if args.nohierconfig else HierarchicalConfigParser()
    config.read(args.config)

    if args.dump_config:
        config.write(sys.stdout)
        exit(0)

    if args.get:
        if args.get.count(":") != 2:
            raise ValueError("Specify section:option:default as the --get argument")
        (section, option, default) = args.get.split(":")
        if (section in config) and (option in config[section]):
            print(config[section][option])
        else:
            print(default)
        exit(0)

    ###
    ### Logging must be set up before any logging is done
    ### By default it is in the current directory, but if we run an experiment, put the logfile in that directory
    ### Added option to put logs in a subfolder specified in the config
    if not args.logfilename:
        isodate = datetime.datetime.now().isoformat()[0:19]
        if (config.has_section(LOGGING_SECTION)
                and config.has_option(LOGGING_SECTION, LOGFOLDER_OPTION)
                and config.has_option(LOGGING_SECTION, LOGFILENAME_OPTION)):
            args.logfilename = (f"{config[LOGGING_SECTION][LOGFOLDER_OPTION]}/"
                                f"{config[LOGGING_SECTION][LOGFILENAME_OPTION]}-{isodate}-{os.getpid()}.log")
        else:
            args.logfilename = f"{isodate}-{os.getpid()}.log"

    # CB: Code needs to be removed.
    # Left here for backward compatibility, to be removed in future versions
    if args.experiment:
        if not os.path.exists(args.experiment):
            os.makedirs(args.experiment)
        if not os.path.isdir(args.experiment):
            raise RuntimeError("{} is not a directory".format(args.experiment))
        config[config.default_section][ROOT] = args.experiment
        args.logfilename = os.path.join(args.experiment, args.logfilename)
        if EXPERIMENT not in config:
            config.add_section(EXPERIMENT)
        config[EXPERIMENT][RUN_EXPERIMENT_FLAG] = "1"

    # If we are making the BOM, make a DAS object so the config file gets processed, then make the bom and exit
    if args.print_bom:
        print_bom(config=config, args=args)
        exit(0)

    if args.make_release:
        make_release(config=config, zipfilename=args.make_release, args=args)
        print("Release: {}".format(args.make_release))
        exit(0)

    # Make sure the directory for the logfile exists. If not, make it.
    # BUGFIX: os.mkdir() fails when the configured log folder is nested
    # (e.g. "logs/run1"); os.makedirs(exist_ok=True) creates intermediate
    # directories and tolerates a concurrent creator.
    logdirname = os.path.dirname(args.logfilename)
    if logdirname and not os.path.exists(logdirname):
        os.makedirs(logdirname, exist_ok=True)

    clogging.setup(args.loglevel, syslog=False, filename=args.logfilename)
    logging.info("Config path: {}".format(os.path.abspath(args.config)))
    return args, config
def main():
    """Driver. Typically run from __main__ in the program that uses the driver.

    Parses command-line arguments, loads the config, sets up logging, then
    either runs a DAS experiment (if configured or --experiment was given)
    or runs the DAS directly.
    """
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("config", help="Main Config File")
    parser.add_argument(
        "--experiment",
        help="Run an experiment according to the [experiment] section, with the results in this directory",
        action='store_true')
    parser.add_argument("--isolation", help="Specifies isolation mode for experiments",
                        choices=['sameprocess', 'subprocess'], default='sameprocess')
    parser.add_argument(
        "--graphdata",
        help="Just draw the graph from the data that was already collected.",
        action='store_true')
    ctools.clogging.add_argument(parser)
    args = parser.parse_args()

    if not os.path.exists(args.config):
        raise RuntimeError("{} does not exist".format(args.config))
    if args.graphdata and args.experiment is None:
        parser.error("--graphdata requires --experiment")

    ###
    ### Read the configuration file
    ###
    config = HierarchicalConfigParser()
    config.read(args.config)

    ###
    ### Logging must be set up before any logging is done
    ### By default it is in the current directory, but if we run an experiment, put the logfile in that directory
    ### Added option to put logs in a subfolder specified in the config
    isodate = datetime.datetime.now().isoformat()[0:19]
    if config.has_section(LOGGING_SECTION) and config.has_option(
            LOGGING_SECTION, LOGFOLDER_OPTION) and config.has_option(
            LOGGING_SECTION, LOGFILENAME_OPTION):
        logfname = f"{config[LOGGING_SECTION][LOGFOLDER_OPTION]}/{config[LOGGING_SECTION][LOGFILENAME_OPTION]}-{isodate}-{os.getpid()}.log"
    else:
        logfname = f"{isodate}-{os.getpid()}.log"

    # DFXML provenance record alongside the logfile.
    dfxml = DFXMLWriter(filename=logfname.replace(".log", ".dfxml"), prettyprint=True)

    # Left here for backward compatibility, to be removed in future versions
    if args.experiment:
        if not os.path.exists(args.experiment):
            os.makedirs(args.experiment)
        if not os.path.isdir(args.experiment):
            raise RuntimeError("{} is not a directory".format(args.experiment))
        config['DEFAULT'][ROOT] = args.experiment
        logfname = os.path.join(args.experiment, logfname)

    ####
    # Make sure the directory for the logfile exists. If not, make it.
    # BUGFIX: os.mkdir() fails for nested paths; use makedirs(exist_ok=True).
    logdirname = os.path.dirname(logfname)
    if logdirname and not os.path.exists(logdirname):
        print("driver.py: os.mkdir({})".format(logdirname))
        os.makedirs(logdirname, exist_ok=True)

    ctools.clogging.setup(args.loglevel, syslog=True, filename=logfname)
    logging.info("START {} log level: {}".format(os.path.abspath(__file__), args.loglevel))
    t0 = time.time()
    log_testpoint("T03-002S")

    #########################
    # Set up the experiment #
    #########################
    # if there is no experiment section in the config file, add one
    if EXPERIMENT not in config:
        config.add_section(EXPERIMENT)
    # If there is no run experiment flag in the config section, default to 0
    run_experiment = config[EXPERIMENT].getint(RUN_EXPERIMENT_FLAG, 0)
    # If --experiment was specified, set run_experiment to run
    if args.experiment:
        run_experiment = 1

    ### Now validate and apply the config file
    config_validate(config)
    config_apply_environment(config)

    #############################
    # Create the DAS
    #############################
    das = DAS(config)

    #############################
    # DAS Running Section.
    # Option 1 - run_experiment
    # Option 2 - just run the das
    #############################
    logging.debug("Just before Experiment")
    if run_experiment:
        # set up the Experiment module; fall back to the built-in abstract
        # experiment when the config does not name one.
        try:
            (experiment_file, experiment_class_name) = config[EXPERIMENT][EXPERIMENT].rsplit(".", 1)
        except KeyError:
            (experiment_file, experiment_class_name) = ('driver', 'AbstractDASExperiment')
        try:
            experiment_module = __import__(experiment_file, fromlist=[experiment_class_name])
        except ImportError as e:
            print("Module import failed.")
            print("current directory: {}".format(os.getcwd()))
            print("__file__: {}".format(__file__))
            raise e
        experiment = getattr(experiment_module, experiment_class_name)(das=das,
                                                                       config=das.config,
                                                                       name=EXPERIMENT)
        logging.debug("Running DAS Experiment. Logfile: {}".format(logfname))
        experiment_data = experiment.runExperiment()
    else:
        #### Run the DAS without an experiment
        logging.debug("Running DAS without an experiment. Logfile: {}".format(logfname))
        try:
            data = das.run()
        except Exception as e:
            log_testpoint("T03-005F")
            # BUGFIX: was `raise (e)`, which re-raises but resets the point
            # of failure; a bare raise preserves the original traceback.
            raise

    ###
    ### Shutdown
    ###
    t1 = time.time()
    t = t1 - t0
    # BUGFIX: format was "{:.2}" (2 significant digits, e.g. "1.2e+02");
    # "{:.2f}" matches the final print below.
    logging.info("Elapsed time: {:.2f} seconds".format(t))
    logging.info("END {}".format(os.path.abspath(__file__)))
    logging.shutdown()
    print("*****************************************************")
    print("driver.py: Run completed in {:,.2f} seconds. Logfile: {}".format(t, logfname))