    def runValidator(self, original_data, written_data):
        # Now run the validator on the read data and the written results
        logging.info("Creating and running DAS validator")
        if self.validator.willValidate() == False:
            logging.info("self.validator.willValidate() returned false")
            raise RuntimeError("validator willValidate() returned False")
        log_testpoint("T03-004S", "Running Validator module")
        valid = self.validator.validate(original_data, written_data)
        logging.debug("valid={}".format(valid))
        if not valid:
            logging.info("self.validator.validate() returned false")
            raise RuntimeError("Did not validate.")
        self.validator.didValidate()

        # If we were asked to get graphx and graphy, collect them from the validator.
        data = {}
        if EXPERIMENT in self.config:
            for var in ['graphx', 'graphy']:
                if var in self.config[EXPERIMENT]:
                    (a, b) = self.config[EXPERIMENT][var].split('.')
                    assert a == 'validator'
                    func = getattr(self.validator, b)
                    data[var] = func()
        return data
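    # The graphx/graphy options follow a "validator.<method_name>" convention:
    # the part after the dot is looked up on the validator with getattr() and
    # called with no arguments. A minimal sketch of such a config (the method
    # names below are hypothetical; only the "validator." prefix is required
    # by the assert above):
    #
    #   [experiment]
    #   graphx = validator.getEpsilons
    #   graphy = validator.getL1Errors
    #
    # With those settings, data['graphx'] holds the result of
    # self.validator.getEpsilons() and data['graphy'] the result of
    # self.validator.getL1Errors().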
    def runWriter(self, privatized_data):
        logging.info("Creating and running DAS writer")
        if self.writer.willWrite() == False:
            logging.info("self.writer.willWrite() returned false")
            raise RuntimeError("writer willWrite() returned False")
        log_testpoint("T03-004S", "Running Writer module")
        written_data = self.writer.write(privatized_data)
        logging.debug("written_data={}".format(written_data))
        self.writer.didWrite()
        return written_data
    def runErrorMetrics(self, privatized_data):
        logging.info("Creating and running DAS error_metrics")
        if self.error_metrics.willRun() == False:
            logging.info("self.error_metrics.willRun() returned false")
            raise RuntimeError("error_metrics willRun() returned False")
        log_testpoint("T03-004S", "Running Error Metrics module")
        errorMetrics_data = self.error_metrics.run(privatized_data)
        logging.debug("Error Metrics data = {}".format(errorMetrics_data))
        self.error_metrics.didRun()
        return errorMetrics_data
    def runEngine(self, original_data):
        logging.info("Creating and running DAS engine")
        if self.engine.willRun() == False:
            logging.info("self.engine.willRun() returned false")
            raise RuntimeError("engine willRun() returned False")
        log_testpoint("T03-004S", "Running Engine module")
        privatized_data = self.engine.run(original_data)
        logging.debug("privatized_data={}".format(privatized_data))
        self.engine.didRun()
        return privatized_data
    def runReader(self):
        logging.info("Creating and running DAS reader")
        if self.reader.willRead() == False:
            logging.info("self.reader.willRead() returned false")
            raise RuntimeError("reader willRead() returned False")
        log_testpoint("T03-004S", "Running Reader module")
        original_data = self.reader.read()
        logging.debug("original_data={}".format(original_data))
        self.reader.didRead()
        return original_data
    def run(self):
        """Run the DAS. Returns data collected as a dictionary if an
        EXPERIMENT section is specified in the config file."""
        # Run the modules in order: read, privatize, compute error metrics,
        # write, validate, and take down. The module instances themselves
        # were created in __init__.
        original_data = self.runReader()
        privatized_data = self.runEngine(original_data)
        errorMetrics_data = self.runErrorMetrics(privatized_data)
        written_data = self.runWriter(privatized_data)
        data = self.runValidator(original_data, written_data)
        self.runTakedown(written_data)
        log_testpoint("T03-005S")
        return data
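    # For reference, each module driven above only needs to implement the
    # will*/do-the-work/did* protocol that the run*() methods call. A minimal
    # sketch of a pass-through engine (the class and module names are
    # hypothetical; the engine section of the config -- whatever the ENGINE
    # constant resolves to -- would point at it as mymodule.PassThroughEngine):
    #
    #   class PassThroughEngine(AbstractDASEngine):
    #       def willRun(self):
    #           return True
    #       def run(self, original_data):
    #           # Return the reader's data unchanged (no privatization).
    #           return original_data
    #       def didRun(self):
    #           return True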
def test_gurobi():
    # Get the model using our environment variables
    try:
        env = Env.OtherEnv("gurobi.log",
                           os.environ.get('GRB_ISV_NAME', ''),
                           os.environ.get('GRB_APP_NAME', ''), 0, "")
        log_testpoint('T01-001S')
    except GurobiError as err:
        log_testpoint('T01-001F')
        raise err

    m = Model("diet", env=env)

    # Create decision variables for the foods to buy
    buy = m.addVars(foods, name="buy")

    # You could use Python looping constructs and m.addVar() to create
    # these decision variables instead. The following would be equivalent:
    #
    # buy = {}
    # for f in foods:
    #     buy[f] = m.addVar(name=f)

    # The objective is to minimize the costs
    m.setObjective(buy.prod(cost), GRB.MINIMIZE)

    # Using looping constructs, the preceding statement would be:
    #
    # m.setObjective(sum(buy[f]*cost[f] for f in foods), GRB.MINIMIZE)

    # Nutrition constraints
    m.addConstrs((quicksum(nutritionValues[f, c] * buy[f] for f in foods)
                  == [minNutrition[c], maxNutrition[c]]
                  for c in categories), "_")

    # Using looping constructs, the preceding statement would be:
    #
    # for c in categories:
    #     m.addRange(sum(nutritionValues[f, c] * buy[f] for f in foods),
    #                minNutrition[c], maxNutrition[c], c)

    def printSolution():
        if m.status == GRB.Status.OPTIMAL:
            print('\nCost: %g' % m.objVal)
            print('\nBuy:')
            buyx = m.getAttr('x', buy)
            for f in foods:
                if buy[f].x > 0.0001:
                    print('%s %g' % (f, buyx[f]))
        else:
            print('No solution')

    # Solve
    m.optimize()
    printSolution()
    assert m.status == GRB.Status.OPTIMAL

    print('\nAdding constraint: at most 6 servings of dairy')
    m.addConstr(buy.sum(['milk', 'ice cream']) <= 6, "limit_dairy")

    # Solve
    m.optimize()
    printSolution()
    assert m.status == GRB.Status.INFEASIBLE
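# test_gurobi() reads the Gurobi ISV license fields from the environment. A
# sketch of running it standalone, assuming this file is importable as
# "driver" (the exported values are placeholders, not real license data):
#
#   export GRB_ISV_NAME='<isv name>'
#   export GRB_APP_NAME='<application name>'
#   python -c 'import driver; driver.test_gurobi()'
#
# If the variables are unset, empty strings are passed to Env.OtherEnv().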
def main():
    """Driver. Typically run from __main__ in the program that uses the driver."""
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("config", help="Main Config File")
    parser.add_argument("--experiment",
                        help="Run an experiment according to the [experiment] section, "
                        "with the results in this directory",
                        action='store_true')
    parser.add_argument("--isolation",
                        help="Specifies isolation mode for experiments",
                        choices=['sameprocess', 'subprocess'],
                        default='sameprocess')
    parser.add_argument("--graphdata",
                        help="Just draw the graph from the data that was already collected.",
                        action='store_true')
    ctools.clogging.add_argument(parser)
    args = parser.parse_args()

    if not os.path.exists(args.config):
        raise RuntimeError("{} does not exist".format(args.config))

    if args.graphdata and not args.experiment:
        parser.error("--graphdata requires --experiment")

    ###
    ### Read the configuration file
    ###
    config = HierarchicalConfigParser()
    config.read(args.config)

    ###
    ### Logging must be set up before any logging is done.
    ### By default it is in the current directory, but if we run an experiment,
    ### put the logfile in that directory.
    ### Added option to put logs in a subfolder specified in the config.
    isodate = datetime.datetime.now().isoformat()[0:19]

    if (config.has_section(LOGGING_SECTION)
            and config.has_option(LOGGING_SECTION, LOGFOLDER_OPTION)
            and config.has_option(LOGGING_SECTION, LOGFILENAME_OPTION)):
        logfname = f"{config[LOGGING_SECTION][LOGFOLDER_OPTION]}/{config[LOGGING_SECTION][LOGFILENAME_OPTION]}-{isodate}-{os.getpid()}.log"
    else:
        logfname = f"{isodate}-{os.getpid()}.log"

    dfxml = DFXMLWriter(filename=logfname.replace(".log", ".dfxml"),
                        prettyprint=True)

    # Left here for backward compatibility, to be removed in future versions
    if args.experiment:
        if not os.path.exists(args.experiment):
            os.makedirs(args.experiment)
        if not os.path.isdir(args.experiment):
            raise RuntimeError("{} is not a directory".format(args.experiment))
        config['DEFAULT'][ROOT] = args.experiment
        logfname = os.path.join(args.experiment, logfname)

    ####
    # Make sure the directory for the logfile exists. If not, make it.
    logdirname = os.path.dirname(logfname)
    if logdirname and not os.path.exists(logdirname):
        print("driver.py: os.mkdir({})".format(logdirname))
        os.mkdir(logdirname)

    ctools.clogging.setup(args.loglevel, syslog=True, filename=logfname)
    logging.info("START {} log level: {}".format(os.path.abspath(__file__),
                                                 args.loglevel))
    t0 = time.time()
    log_testpoint("T03-002S")

    #########################
    # Set up the experiment #
    #########################

    # If there is no experiment section in the config file, add one
    if EXPERIMENT not in config:
        config.add_section(EXPERIMENT)

    # If there is no run-experiment flag in the config section, default to off
    run_experiment = config[EXPERIMENT].getint(RUN_EXPERIMENT_FLAG, 0)

    # If --experiment was specified, set run_experiment to run
    if args.experiment:
        run_experiment = 1

    ### Now validate and apply the config file
    config_validate(config)
    config_apply_environment(config)

    #############################
    # Create the DAS
    #############################
    das = DAS(config)

    #############################
    # DAS Running Section.
    # Option 1 - run_experiment
    # Option 2 - just run the das
    #############################
    logging.debug("Just before Experiment")

    if run_experiment:
        # Set up the Experiment module
        try:
            (experiment_file,
             experiment_class_name) = config[EXPERIMENT][EXPERIMENT].rsplit(".", 1)
        except KeyError:
            (experiment_file,
             experiment_class_name) = ('driver', 'AbstractDASExperiment')

        try:
            experiment_module = __import__(experiment_file,
                                           fromlist=[experiment_class_name])
        except ImportError as e:
            print("Module import failed.")
            print("current directory: {}".format(os.getcwd()))
            print("__file__: {}".format(__file__))
            raise e

        experiment = getattr(experiment_module,
                             experiment_class_name)(das=das,
                                                    config=das.config,
                                                    name=EXPERIMENT)
        logging.debug("Running DAS Experiment. Logfile: {}".format(logfname))
        experiment_data = experiment.runExperiment()
    else:
        #### Run the DAS without an experiment
        logging.debug("Running DAS without an experiment. Logfile: {}".format(logfname))
        try:
            data = das.run()
        except Exception as e:
            log_testpoint("T03-005F")
            raise e

    ###
    ### Shutdown
    ###
    t1 = time.time()
    t = t1 - t0
    logging.info("Elapsed time: {:.2f} seconds".format(t))
    logging.info("END {}".format(os.path.abspath(__file__)))
    logging.shutdown()
    print("*****************************************************")
    print("driver.py: Run completed in {:,.2f} seconds. Logfile: {}".format(t, logfname))
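# A sketch of a typical invocation of this driver (the config filename is a
# placeholder, and the log-level option is whatever ctools.clogging.add_argument()
# installs on the parser):
#
#   python driver.py myconfig.ini
#
# To run an experiment, either pass --experiment or set the run-experiment flag
# in the config. main() then loads the class named by the experiment option of
# the [experiment] section, e.g. (hypothetical class):
#
#   [experiment]
#   experiment = mypackage.experiment.MyExperiment
#
# and calls its runExperiment() method; if the option is absent it falls back
# to driver.AbstractDASExperiment.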
    def __init__(self, config):
        """Initialize a DAS given a config file. This creates all of the
        objects that will be used."""
        assert type(config) in [HierarchicalConfigParser, ConfigParser]
        self.config = config

        # Get the input file and the class for each module.
        logging.debug("Reading filenames and class names from config file")

        # This section could be combined with the following section, which imports
        # the modules and creates the objects, so that the default objects could be
        # created by just calling the AbstractDASxxxxxx() constructor.
        try:
            (setup_file, setup_class_name) = config[SETUP][SETUP].rsplit(".", 1)
        except KeyError:
            (setup_file, setup_class_name) = ('driver', 'AbstractDASSetup')

        try:
            (reader_file, reader_class_name) = config[READER][READER].rsplit(".", 1)
        except KeyError:
            (reader_file, reader_class_name) = ('driver', 'AbstractDASReader')

        try:
            (engine_file, engine_class_name) = config[ENGINE][ENGINE].rsplit(".", 1)
        except KeyError:
            (engine_file, engine_class_name) = ('driver', 'AbstractDASEngine')

        try:
            (error_metrics_file,
             error_metrics_class_name) = config[ERROR_METRICS][ERROR_METRICS].rsplit(".", 1)
        except KeyError:
            (error_metrics_file,
             error_metrics_class_name) = ('driver', 'AbstractDASErrorMetrics')

        try:
            (writer_file, writer_class_name) = config[WRITER][WRITER].rsplit(".", 1)
        except KeyError:
            (writer_file, writer_class_name) = ('driver', 'AbstractDASWriter')

        try:
            (validator_file,
             validator_class_name) = config[VALIDATOR][VALIDATOR].rsplit(".", 1)
        except KeyError:
            (validator_file, validator_class_name) = ('driver', 'AbstractDASValidator')

        try:
            (takedown_file,
             takedown_class_name) = config[TAKEDOWN][TAKEDOWN].rsplit(".", 1)
        except KeyError:
            (takedown_file, takedown_class_name) = ('driver', 'AbstractDASTakedown')

        logging.debug("classes: {} {} {} {} {} {} {}".format(
            setup_class_name, engine_class_name, error_metrics_class_name,
            reader_class_name, writer_class_name, validator_class_name,
            takedown_class_name))

        # Import the modules
        logging.debug("__import__ files: {} {} {} {} {} {} {}".format(
            setup_file, engine_file, error_metrics_file, reader_file,
            writer_file, validator_file, takedown_file))
        try:
            setup_module = __import__(setup_file, fromlist=[setup_class_name])
            engine_module = __import__(engine_file, fromlist=[engine_class_name])
            reader_module = __import__(reader_file, fromlist=[reader_class_name])
            error_metrics_module = __import__(error_metrics_file,
                                              fromlist=[error_metrics_class_name])
            writer_module = __import__(writer_file, fromlist=[writer_class_name])
            validator_module = __import__(validator_file,
                                          fromlist=[validator_class_name])
            takedown_module = __import__(takedown_file,
                                         fromlist=[takedown_class_name])
        except ImportError as e:
            print("Module import failed.")
            print("current directory: {}".format(os.getcwd()))
            print("__file__: {}".format(__file__))
            raise e

        # Create the instances
        logging.debug("modules: {} {} {} {} {} {} {}".format(
            setup_module, engine_module, error_metrics_module, reader_module,
            writer_module, validator_module, takedown_module))

        logging.info("Creating and running DAS setup object")
        setup_obj = getattr(setup_module, setup_class_name)(config=config, name=SETUP)
        setup_data = setup_obj.setup_func()
        logging.debug("DAS setup returned {}".format(setup_data))

        # Now create the other objects
        self.reader = getattr(reader_module, reader_class_name)(
            config=config, setup=setup_data, name=READER)
        self.engine = getattr(engine_module, engine_class_name)(
            config=config, setup=setup_data, name=ENGINE)
        self.error_metrics = getattr(error_metrics_module, error_metrics_class_name)(
            config=config, setup=setup_data, name=ERROR_METRICS)
        self.writer = getattr(writer_module, writer_class_name)(
            config=config, setup=setup_data, name=WRITER)
        self.validator = getattr(validator_module, validator_class_name)(
            config=config, setup=setup_data, name=VALIDATOR)
        self.takedown = getattr(takedown_module, takedown_class_name)(
            config=config, setup=setup_data, name=TAKEDOWN)

        log_testpoint("T03-003S")
        logging.debug("DAS object complete")
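    # For reference, a minimal sketch of the config sections that __init__
    # expects, assuming the section/option constants resolve to the lower-case
    # names shown (each option names a "module_file.ClassName" to import; the
    # mypackage.* entries are hypothetical, and any section or option left out
    # falls back to the corresponding AbstractDASxxxxx class in this module):
    #
    #   [setup]
    #   setup = driver.AbstractDASSetup
    #   [reader]
    #   reader = mypackage.reader.MyReader
    #   [engine]
    #   engine = mypackage.engine.MyEngine
    #   [error_metrics]
    #   error_metrics = driver.AbstractDASErrorMetrics
    #   [writer]
    #   writer = mypackage.writer.MyWriter
    #   [validator]
    #   validator = driver.AbstractDASValidator
    #   [takedown]
    #   takedown = driver.AbstractDASTakedown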