def run(self): logger.info("WST visualization subcommand") logger.info("---------------------------") # set start time nStartTime = time.time() # validate input logger.info("Validating configuration file") self.validate() if self.bInputValidationFailed: for sError in self.dError: print "ERROR: " + sError return # logger.info("Creating HTML") svg = inp2svg.inp2svg(self.dInpFile) svg.setBackgroundColor(self.dBackgroundColor) # svg.setNodeOpacity(self.dNodeOpacity) svg.setLinkOpacity(self.dLinkOpacity) # svg.setNodeSize(self.dNodeSize) svg.setLinkSize(self.dLinkSize) # svg.setNodeColor(self.dNodeColor) svg.setLinkColor(self.dLinkColor) # svg.setReservoirSize(self.dReservoirSize) svg.setTankSize(self.dTankSize) svg.setJunctionSize(self.dJunctionSize) svg.setPumpSize(self.dPumpSize) svg.setValveSize(self.dValveSize) svg.setPipeSize(self.dPipeSize) # svg.setReservoirColor(self.dReservoirColor) svg.setTankColor(self.dTankColor) svg.setJunctionColor(self.dJunctionColor) svg.setPumpColor(self.dPumpColor) svg.setValveColor(self.dValveColor) svg.setPipeColor(self.dPipeColor) # svg.setReservoirOpacity(self.dReservoirOpacity) svg.setTankOpacity(self.dTankOpacity) svg.setJunctionOpacity(self.dJunctionOpacity) svg.setPumpOpacity(self.dPumpOpacity) svg.setValveOpacity(self.dValveOpacity) svg.setPipeOpacity(self.dPipeOpacity) # svg.setLegendNodes(self.bLegendNodes) svg.setLegendLinks(self.bLegendLinks) svg.setLegendReservoirs(self.bLegendReservoirs) svg.setLegendTanks(self.bLegendTanks) svg.setLegendJunctions(self.bLegendJunctions) svg.setLegendPumps(self.bLegendPumps) svg.setLegendValves(self.bLegendValves) svg.setLegendPipes(self.bLegendPipes) # svg.useEpanetIcons(self.bUseEpanetShapes) svg.showLegend(self.bShowLegend) svg.setLegendColor(self.dLegendColor) svg.setLegendScale(self.dLegendScale) svg.setLegendXY(self.dLegendX, self.dLegendY) # i = 0 for layer in self.Layers: if layer.hide: continue #print i,') Layer = ', layer layer.visualize(svg) i += 1 # write output file prefix = os.path.basename(self.opts['configure']['output prefix']) logfilename = logger.parent.handlers[0].baseFilename outfilename = logger.parent.handlers[0].baseFilename.replace( '.log', '.yml') visfilename = logger.parent.handlers[0].baseFilename.replace( '.log', '.html') # svg.writeFile(width=self.nWidth, height=self.nHeight, output_file_name=visfilename) config = wst_config.output_config() module_blocks = ['general'] template_options = { 'general': { 'cpu time': round(time.time() - nStartTime, 3), 'directory': os.path.dirname(logfilename), 'log file': os.path.basename(logfilename) } } if outfilename != None: self.saveOutput(outfilename, config, module_blocks, template_options) # print solution to screen logger.info("\nWST normal termination") logger.info("---------------------------") logger.info("Directory: " + os.path.dirname(logfilename)) logger.info("Results file: " + os.path.basename(outfilename)) logger.info("Log file: " + os.path.basename(logfilename)) logger.info("Visualization file: " + os.path.basename(visfilename) + '\n') return
def run(self): logger.info("WST grabsample subcommand") logger.info("---------------------------") # set start time self.startTime = time.time() # validate input logger.info("Validating configuration file") self.validate() if self.getEventsOption('signals') not in self.none_list: files_folder = self.getEventsOption('signals') #TODO LIST SCENARIOS optimal_locations, objective = self.runSIGNALSanalysis( files_folder) # Convert node ID's to node names in Solution Solution = list() Solution = [objective, optimal_locations] else: try: enData = pyepanet.ENepanet() enData.ENopen(self.opts['network']['epanet file'], 'tmp.rpt') enData.ENclose() except: raise RuntimeError("EPANET inp file not loaded using pyepanet") # write tmp TSG file if ['scenario']['tsg file'] == none if self.opts['scenario'][ 'tsi file'] in pywst.common.problem.none_list and self.opts[ 'scenario'][ 'tsg file'] in pywst.common.problem.none_list: tmpdir = os.path.dirname( self.opts['configure']['output prefix']) tmpprefix = 'tmp_' + os.path.basename( self.opts['configure']['output prefix']) tmpTSGFile = pyutilib.services.TempfileManager.create_tempfile( prefix=tmpprefix, dir=tmpdir, suffix='.tsg') # Check if scenario list contains one or less nodes. wst_util.write_tsg(self.opts['scenario']['location'],\ self.opts['scenario']['type'],\ self.opts['scenario']['species'],\ self.opts['scenario']['strength'],\ self.opts['scenario']['start time'],\ self.opts['scenario']['end time'],\ tmpTSGFile) self.opts['scenario']['tsg file'] = tmpTSGFile # expand tsg file extTSGfile = wst_util.expand_tsg(self.opts) self.opts['scenario']['tsg file'] = extTSGfile # Run samplelocation executable nodemap = self.runSamplelocation() Solution = () greedy_solution = {} # if self.getConfigureOption('output prefix') not in self.none_list: json_result_file = self.getConfigureOption( 'output prefix') + '_grabsample.json' else: json_result_file = 'grabsample.json' # # For greedy algorithm get solution directly from the json file if self.getSampleLocationOption('greedy selection'): data_from_results = open(json_result_file).read() greedy_solution = json.loads(data_from_results) Solution = (greedy_solution['objective'], [str(i['id']) for i in greedy_solution['Nodes']]) #else run optimization algorithms else: #Get fixed sensor list . 
Not to be included in final solution fixed_sensor_ID = [] inv_nodemap = dict((v, k) for k, v in nodemap.iteritems()) if self.getSampleLocationOption( 'fixed sensors') not in self.none_list: sensors_filename = self.getSampleLocationOption( 'fixed sensors') sensor_file = open(sensors_filename) fixed_sensor_list = [line.strip() for line in sensor_file] sensor_file.close() for node_name in fixed_sensor_list: if len(node_name) > 0: fixed_sensor_ID.append(inv_nodemap[node_name]) solve_timelimit = None p = ( 1, "There was a problem with the 'model type' or 'model format' options" ) cmd = None not_allowed_nodes_set = set() """ if self.getSampleLocationOption('not feasible nodes') not in self.none_list \ and len(open(self.getSampleLocationOption('not feasible nodes'),'r').readlines())!=0: label_map_file = "_MERLION_LABEL_MAP.txt" name_to_id={} f = open(self.getConfigureOption('output prefix')+label_map_file,'r') for line in f: t = line.split() name_to_id[t[0]] = t[1] f.close() for line in open(self.getSampleLocationOption('not feasible nodes'),'r'): l=line.split() for n_ in l: if name_to_id.has_key(n_)!=True: print '\nERROR: Nodename ',n_,' specified in ',self.getSampleLocationOption('not feasible nodes')\ ,' is not part of the network' exit(1) not_allowed_nodes_set.add(int(name_to_id[n_])) """ #run pyomo or ampl if self.getSampleLocationOption('model format') == 'AMPL': exe = self.getConfigureOption('ampl executable') if self.getConfigureOption( 'output prefix') not in self.none_list: inp = self.getConfigureOption( 'output prefix') + '_ampl.run' out = self.getConfigureOption( 'output prefix') + '_ampl.out' else: inp = 'ampl.run' out = 'ampl.out' results_file = self.createAMPLRun(inp, not_allowed_nodes_set) print results_file cmd = '%s %s' % (exe, inp) logger.info("Launching AMPL ...") sub_logger = logging.getLogger( 'wst.grabsample.models.ampl') sub_logger.setLevel(logging.DEBUG) fh = logging.FileHandler(out, mode='w') sub_logger.addHandler(fh) p = pyutilib.subprocess.run( cmd, timelimit=solve_timelimit, stdout=pywst.common.problem.LoggingFile(sub_logger)) if (p[0] or not os.path.isfile(results_file)): message = 'An error occured when running the optimization problem.\n Error Message: ' + p[ 1] + '\n Command: ' + cmd + '\n' logger.error(message) raise RuntimeError(message) #try to load the results file #print "results file: "+ results_file filereader = open(results_file, 'r') line1 = filereader.readline().split() nodes = [] for l in xrange(0, len(line1)): if l > 0 and line1[l] not in fixed_sensor_ID: nodes.append(line1[l]) objective = float(filereader.readline().split()[1]) filereader.close() Solution = (objective, nodes) #print Solution elif self.getSampleLocationOption('model format') == 'PYOMO': logger.info("Launching PYOMO ...") Solution = self.runPYOMOmodel(fixed_sensor_ID, not_allowed_nodes_set) # Convert node ID's to node names in Solution for i in xrange(0, len(Solution[1])): Solution[1][i] = nodemap[str(Solution[1][i])] # Get information to print results to the screen if self.getEventsOption('scn file') not in self.none_list: events_filename = self.getEventsOption('scn file') scnfile = open(events_filename) content_scn = scnfile.read() scnfile.close() number_events = content_scn.count('scenario') elif self.getEventsOption('tsg file') not in self.none_list: events_filename = self.getEventsOption('tsg file') number_events = len([line for line in open(events_filename, 'r')]) elif self.getEventsOption('signals') not in self.none_list: events_filename = self.getEventsOption('signals') 
number_events = self.signal_scenarios # remove temporary files if debug = 0 if self.opts['configure']['debug'] == 0: pyutilib.services.TempfileManager.clear_tempfiles() # write output file prefix = os.path.basename(self.opts['configure']['output prefix']) logfilename = logger.parent.handlers[0].baseFilename outfilename = logger.parent.handlers[0].baseFilename.replace( '.log', '.yml') visymlfilename = logger.parent.handlers[0].baseFilename.replace( '.log', '_vis.yml') # Get node list from Solution node_list = Solution[1] #for i in xrange(0,len(Solution[1])): # if self.getSampleLocationOption('greedy selection'): # node_list.append(Solution[1][i]) # else: # node_list.append(nodemap[str(Solution[1][i])]) sample_time = self.getSampleLocationOption('sample time') N_samples = self.getSampleLocationOption('num samples') threshold = self.getSampleLocationOption('threshold') # Write output yml file config = wst_config.output_config() module_blocks = ("general", "grabsample") template_options = { 'general': { 'cpu time': round(time.time() - self.startTime, 3), 'directory': os.path.dirname(logfilename), 'log file': os.path.basename(logfilename) }, 'grabsample': { 'nodes': node_list, 'objective': Solution[0], 'threshold': threshold, 'count': N_samples, 'time': sample_time } } if outfilename != None: self.saveOutput(outfilename, config, module_blocks, template_options) # Write output visualization yml file config = wst_config.master_config() module_blocks = ("network", "visualization", "configure") template_options = { 'network': { 'epanet file': os.path.abspath(self.opts['network']['epanet file']) }, 'visualization': { 'layers': [{ 'label': 'Optimal sample locations', 'locations': "['grabsample']['nodes'][i]", 'file': outfilename, 'location type': 'node', 'shape': 'circle', 'fill': { 'color': 'blue', 'size': 15 } }] }, 'configure': { 'output prefix': os.path.abspath(self.opts['configure']['output prefix']) } } #os.path.join('vis', os.path.basename(self.opts['configure']['output prefix']))}} if visymlfilename != None: self.saveVisOutput(visymlfilename, config, module_blocks, template_options) # Run visualization cmd = ['wst', 'visualization', visymlfilename] p = pyutilib.subprocess.run( cmd) # logging information should not be printed to the screen # print solution to screen logger.info("\nWST normal termination") logger.info("---------------------------") logger.info("Directory: " + os.path.dirname(logfilename)) logger.info("Results file: " + os.path.basename(outfilename)) logger.info("Log file: " + os.path.basename(logfilename)) logger.info("Visualization configuration file: " + os.path.basename(visymlfilename) + '\n') return Solution
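# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption, not in the original source): run() returns
# the Solution pair (objective, node names), so a caller could consume it as
# follows, where `prob` is a hypothetical, already-configured grabsample
# Problem instance.
#
#   objective, sample_nodes = prob.run()
#   print "objective value:", objective
#   for name in sample_nodes:
#       print "collect a grab sample at node", name
# ---------------------------------------------------------------------------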
def run(self, **kwds): logger.info("WST tevasim subcommand") logger.info("---------------------------") # set start time self.startTime = time.time() # validate input logger.info("Validating configuration file") self.validate() # create tevasim command line cmd = self.opts['configure']['tevasim executable'] if self.opts['scenario'][ 'tsi file'] not in pywst.common.problem.none_list: cmd = cmd + " --tsi=" + self.opts['scenario']['tsi file'] elif self.opts['scenario'][ 'tsg file'] not in pywst.common.problem.none_list: cmd = cmd + " --tsg=" + self.opts['scenario']['tsg file'] else: # write tmp TSG file tmpdir = os.path.dirname(self.opts['configure']['output prefix']) tmpTSGFile = pyutilib.services.TempfileManager.create_tempfile( dir=tmpdir, prefix='tmp_', suffix='.tsg') wst_util.write_tsg(self.opts['scenario']['location'],\ self.opts['scenario']['type'],\ self.opts['scenario']['species'],\ self.opts['scenario']['strength'],\ self.opts['scenario']['start time'],\ self.opts['scenario']['end time'],\ tmpTSGFile) self.opts['scenario']['tsg file'] = tmpTSGFile cmd = cmd + " --tsg=" + self.opts['scenario']['tsg file'] if self.opts['scenario'][ 'dvf file'] not in pywst.common.problem.none_list: cmd = cmd + " --dvf=" + self.opts['scenario']['dvf file'] if self.opts['scenario'][ 'msx file'] not in pywst.common.problem.none_list: cmd = cmd + " --msx=" + self.opts['scenario']['msx file'] if self.opts['scenario'][ 'msx species'] not in pywst.common.problem.none_list: cmd = cmd + " --mss=" + self.opts['scenario']['msx species'] if self.opts['scenario']['merlion'] is True: cmd = cmd + " --merlion" cmd = cmd + " --merlion-nsims=" + str( self.opts['scenario']['merlion nsims']) if self.opts['scenario']['ignore merlion warnings'] is True: cmd += " --merlion-ignore-warnings" if self.opts['scenario']['erd compression'] in ['rle', 'RLE']: cmd += " --rle" elif self.opts['scenario'][ 'erd compression'] in pywst.common.problem.none_list: # To maintain backward compatibility with those using EPANET # we leave the erd compression scheme as lzma when no option is # specified. In the case that Merlion is used, we want to perform # simulations as fast as possible so we default to rle. 
if self.opts['scenario']['merlion']: cmd += " --rle" cmd = cmd + " " + self.opts['network']['epanet file'] cmd = cmd + " " + self.opts['configure']['output prefix'] + ".rpt" cmd = cmd + " " + self.opts['configure']['output prefix'] # run tevasim logger.info("Running contaminant transport simulations") logger.debug(cmd) sub_logger = logging.getLogger('wst.tevasim.exec') sub_logger.setLevel(logging.DEBUG) p = pyutilib.subprocess.run( cmd, stdout=pywst.common.problem.LoggingFile(sub_logger)) if (p[0]): msg = 'An error occured when running the tevasim executable.\n Error Message: ' + p[ 1] + '\n Command: ' + cmd + '\n' logger.error(msg) raise RuntimeError(msg) # remove temporary files if debug = 0 if self.opts['configure']['debug'] == 0: pyutilib.services.TempfileManager.clear_tempfiles() if os.path.exists('./hydraulics.hyd'): os.remove('./hydraulics.hyd') # write output file prefix = os.path.basename(self.opts['configure']['output prefix']) logfilename = logger.parent.handlers[0].baseFilename outfilename = logger.parent.handlers[0].baseFilename.replace( '.log', '.yml') config = wst_config.output_config() module_blocks = ("general", "tevasim") template_options = { 'general': { 'cpu time': round(time.time() - self.startTime, 3), 'directory': os.path.dirname(logfilename), 'log file': os.path.basename(logfilename) }, 'tevasim': { 'report file': prefix + '.rpt', 'header file': prefix + '.erd', 'hydraulic file': prefix + '-1.hyd.erd', 'water quality file': prefix + '-1.qual.erd', 'index file': prefix + '.index.erd' } } if outfilename != None: self.saveOutput(outfilename, config, module_blocks, template_options) # print solution to screen logger.info("\nWST normal termination") logger.info("---------------------------") logger.info("Directory: " + os.path.dirname(logfilename)) logger.info("Results file: " + os.path.basename(outfilename)) logger.info("Log file: " + os.path.basename(logfilename) + '\n') return self.opts['configure']['output prefix'] + '.erd'
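# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption, not in the original source): run() returns
# the path prefix of the ERD database written by tevasim, which is the same
# artifact the sim2Impact subcommand reads through its 'erd file' option.
# Chaining the two steps might look like this, where `tevasim_prob` and
# `impact_prob` are hypothetical, already-configured Problem instances:
#
#   erd_file = tevasim_prob.run()
#   impact_prob.opts['impact']['erd file'] = erd_file
#   impact_prob.run()
# ---------------------------------------------------------------------------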
def run(prob, addDate=False):
    """
    This is a master script that guides the execution of sensor placement
    optimization.
    """
    pyutilib.services.TempfileManager.push()

    logger.info("WST sp subcommand")
    logger.info("---------------------------")

    # set start time
    startTime = time.time()

    # Call the validate() routine to initialize the problem data
    logger.info("Validating configuration file")
    prob.validate()
    #
    # Setup the temporary data directory
    #
    if not prob.getConfigureOption('temp directory') in prob.none_list:
        pyutilib.services.TempfileManager.tempdir = os.path.abspath(
            prob.getConfigureOption('temp directory'))
    else:
        pyutilib.services.TempfileManager.tempdir = os.path.abspath(os.getcwd())
    #
    # Setup object to store global data
    #
    data = Options()
    data.prefix = pyutilib.services.TempfileManager.create_tempfile(
        prefix='wst_sp_')
    #
    # Collect problem statistics
    #
    ##nobj = len(prob.getProblemOption('objective'))
    ##ncon = len(prob.getProblemOption('constraint'))
    ##imperfect = prob.getProblemOption('imperfect') not in Problem.none_list
    ##aggregate = prob.getProblemOption('aggregate') not in Problem.none_list
    ##stages = 1     # TODO: figure out stages later
    ##bound = False  # TODO: figure out bounding formulation later
    #
    # Call the preprocess() routine
    logger.info("Preprocessing data")
    prob.preprocess(data.prefix)
    #
    # The problem type, modeling language and solver type are used to select
    # the manner in which optimization is performed.
    #
    data.ptype = lower(prob.getProblemOption('type'))
    data.lang = lower(prob.getProblemOption('modeling language'))
    data.solvertype = lower(prob.getSolverOption('type', 0))
    #
    if (data.solvertype, data.lang, data.ptype) in problems:
        logger.info('Optimizing sensor locations with WST...')
        logger.debug(' Solver: %s' % data.solvertype)
        logger.debug(' Modeling Language: %s' % str(data.lang))
        logger.debug(' Problem Type: %s' % data.ptype)
        functor = FunctorAPIFactory(problems[data.solvertype, data.lang,
                                             data.ptype])
        if not functor is None:
            data = functor(data, prob=prob).data
    elif (data.solvertype, 'pyomo', data.ptype) in problems:
        data.lang = 'pyomo'
        logger.info('Optimizing sensor locations with WST...')
        logger.debug(' Solver: %s' % data.solvertype)
        logger.debug(' Modeling Language: %s' % str(data.lang))
        logger.debug(' Problem Type: %s' % data.ptype)
        #
        # The default modeling language is 'none'. If that doesn't exist, then
        # try out 'pyomo'.
        #
        functor = FunctorAPIFactory(problems[data.solvertype, data.lang,
                                             data.ptype])
        if not functor is None:
            data = functor(data, prob=prob).data
    else:
        #
        # Anything else, we use the old SP command
        #
        logger.info('Optimizing sensor locations with an external script...')
        logger.debug(' Solver: %s' % data.solvertype)
        logger.debug(' Modeling Language: %s' % str(data.lang))
        logger.debug(' Problem Type: %s' % data.ptype)
        prob.run_sp(addDate=addDate)
        pyutilib.services.TempfileManager.pop(
            not prob.getConfigureOption('keepfiles'))
        return
    #
    # TODO: should we print the optimization log here, or print it as it's
    # generated?
    #
    #if problem.printlog:
    #    print self.run_log
    #
    # TODO: other postprocess steps
    #
    logger.debug('Optimization Results')
    #
    output_prefix = ""
    evalsensor_output = ""
    if prob.opts['configure']['output prefix'] not in prob.none_list:
        output_prefix = prob.opts['configure']['output prefix'] + "_"
    results_fname = wst_util.get_tempfile(None, 'sp.json')
    #results_fname = os.path.join(os.path.abspath(os.curdir), output_prefix + 'sp.json')
    #
    if data.solutions is None:
        logger.debug('No results recorded!')
    else:
        if type(data.objective) is dict:
            pass  # TODO for multi-objective
        elif not data.objective is None:
            logger.debug('Objective: ' + str(data.objective))
        if not data.lower_bound is None:
            logger.debug('Lower Bound: ' + str(data.lower_bound))
        if not data.upper_bound is None:
            logger.debug('Upper Bound: ' + str(data.upper_bound))
        #
        # Translate the solutions if a junction map was provided
        #
        data.locations = prob.translate_solutions(data.solutions)
        for i in range(len(data.solutions)):
            data.solutions[i] = sorted(data.solutions[i])
        for i in range(len(data.locations)):
            data.locations[i] = sorted(data.locations[i])
        logger.debug('Solutions: ' + str(data.solutions))
        logger.debug('Locations: ' + str(data.locations))
        #
        # Summarize results
        #
        if not prob.getProblemOption('compute bound'):
            #
            # Run evalsensors
            #
            evalsensor_output = evalsensors(prob, data)
            logger.debug('Evalsensor output:')
            logger.debug(evalsensor_output)
        #
        # Translate stage2 data
        #
        if not data.stage2_solutions is None:
            data.stage2_locations = {}
            print '---------------'
            print "Stage 2 Results"
            print '---------------'
            for key in data.stage2_solutions:
                data.stage2_locations[key] = prob.translate_solution(
                    data.stage2_solutions[key])
                print "Scenario ", key
                print "Node IDs ", data.stage2_solutions[key]
                print "Junctions", data.stage2_locations[key]
                print ""

    data.CPU_time = time.time() - startTime
    #
    # Store final results in a JSON file.
    #
    Solution = {}
    Solution['run date'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    Solution['CPU time'] = time.time() - startTime
    Solution['objective'] = data.objective
    Solution['lower bound'] = data.lower_bound
    Solution['upper bound'] = data.upper_bound
    Solution['node ID'] = data.solutions
    Solution['EPANET node ID'] = data.locations
    Solution['modeling language'] = data.lang
    Solution['solver type'] = data.solvertype
    Solution['problem type'] = data.ptype

    f = open(results_fname, 'w')
    json.dump(Solution, f, indent=2)
    f.close()
    #OUTPUT = open(results_fname, 'w')
    #print >>OUTPUT, json.dumps(data, indent=2, sort_keys=True)
    #OUTPUT.close()

    pyutilib.services.TempfileManager.pop(
        not prob.getConfigureOption('keepfiles'))

    # remove temporary files if debug == 0
    if prob.opts['configure']['debug'] == 0:
        pyutilib.services.TempfileManager.clear_tempfiles()

    # write output file
    prefix = os.path.basename(prob.opts['configure']['output prefix'])
    logfilename = os.path.join(
        os.path.dirname(prob.opts['configure']['output prefix']),
        os.path.basename(logger.parent.handlers[0].baseFilename))
    outfilename = logfilename.replace('.log', '.yml')
    visymlfilename = logfilename.replace('.log', '_vis.yml')

    fid = open(prob.opts['configure']['output prefix'] + '_evalsensor.out', 'w')
    fid.write(evalsensor_output)
    fid.close()

    # Write output yml file
    config = wst_config.output_config()
    module_blocks = ("general", "sensor placement")
    template_options = {
        'general': {
            'cpu time': round(time.time() - startTime, 3),
            'directory': os.path.dirname(logfilename),
            'log file': os.path.basename(logfilename)},
        'sensor placement': {
            'nodes': data.locations,
            'objective': data.objective,
            'lower bound': data.lower_bound,
            'upper bound': data.upper_bound,
            'greedy ranking': prefix + '_evalsensor.out',
            'stage 2': []}}
    if outfilename != None:
        prob.saveOutput(outfilename, config, module_blocks, template_options)

    # Write output visualization yml file
    shape = ["square", "circle", "triangle", "diamond"]
    color = ["navy", "maroon", "green", "yellow", "aqua",
             "lime", "magenta", "red", "blue", "black"]
    config = wst_config.master_config()
    module_blocks = ("network", "visualization", "configure")
    template_options = {
        'network': {
            'epanet file': "<REQUIRED INPUT>"},
        'visualization': {
            'layers': []},
        'configure': {
            'output prefix': os.path.abspath(
                prob.opts['configure']['output prefix'])}}
    i = 0
    for solution in data.locations:
        if len(data.locations) == 1:
            label = "Sensor placement"
        else:
            label = "Sensor placement " + str(i + 1)
        template_options['visualization']['layers'].append({
            'label': label,
            'locations': "['sensor placement']['nodes'][" + str(i) + "][i]",
            'file': os.path.abspath(outfilename),
            'location type': 'node',
            'shape': shape[i % len(shape)],  # modulus wraps around the shape list
            'fill': {
                'color': color[i % len(color)],  # modulus wraps around the color list
                'size': 15,
                'opacity': 0},
            'line': {
                'color': color[i % len(color)],  # modulus wraps around the color list
                'size': 2,
                'opacity': 0.6}})
        i += 1
    if visymlfilename != None:
        prob.saveVisOutput(visymlfilename, config, module_blocks,
                           template_options)

    # Run visualization
    cmd = ['wst', 'visualization', visymlfilename]
    p = pyutilib.subprocess.run(cmd)  # logging information should not be printed to the screen

    # print solution to screen
    logger.info("\nWST normal termination")
    logger.info("---------------------------")
    dir_ = os.path.dirname(logfilename)
    if dir_ == "":
        dir_ = '.'
    logger.info("Directory: " + dir_)
    logger.info("Results file: " + os.path.basename(outfilename))
    logger.info("Log file: " + os.path.basename(logfilename))
    logger.info("Visualization configuration file: " +
                os.path.basename(visymlfilename))
    logger.info('WARNING: EPANET input file required to create HTML network graphic\n')
def run(self): logger.info("WST sim2Impact subcommand") logger.info("---------------------------") # set start time self.startTime = time.time() # validate input logger.info("Validating configuration file") self.validate() # create sim2Impact command line cmd = self.opts['configure']['sim2Impact executable'] # required options if self.opts['impact']['metric'].__class__ is list: for s in self.opts['impact']['metric']: cmd = cmd + " --" + s.lower() else: cmd = cmd + " --" + self.opts['impact']['metric'].lower() # optional options useDVF = False if self.opts['impact']['metric'].__class__ is list: for s in self.opts['impact']['metric']: if s not in ['EC', 'TEC']: useDVF = True else: if self.opts['impact']['metric'] not in ['EC', 'TEC']: useDVF = True if useDVF: if self.opts['impact'][ 'dvf file'] not in pywst.common.problem.none_list: cmd = cmd + " --dvf=" + self.opts['impact']['dvf file'] if self.opts['impact'][ 'detection confidence'] not in pywst.common.problem.none_list: cmd = cmd + " --detectionConfidence=" + str( self.opts['impact']['detection confidence']) if self.opts['impact']['detection limit'].__class__ is list: for s in self.opts['impact']['detection limit']: cmd = cmd + " --detectionLimit=" + str(s) #if self.opts['impact']['detection limit'] not in pywst.common.problem.none_list: # cmd = cmd + " --detectionLimit=" + str(self.opts['impact']['detection limit']) if self.opts['impact'][ 'response time'] not in pywst.common.problem.none_list: cmd = cmd + " --responseTime=" + str( self.opts['impact']['response time']) if self.opts['impact'][ 'msx species'] not in pywst.common.problem.none_list: cmd = cmd + " --species=" + str(self.opts['impact']['msx species']) # arguments cmd = cmd + " " + self.opts['configure']['output prefix'] if self.opts['impact']['erd file'].__class__ is list: for s in self.opts['impact']['erd file']: cmd = cmd + " " + s else: cmd = cmd + " " + self.opts['impact']['erd file'] if self.opts['impact']['metric'].__class__ is list: for s in self.opts['impact']['metric']: if s in ['DPD', 'DPE', 'DPK', 'PD', 'PE', 'PK']: cmd = cmd + " " + self.opts['impact']['tai file'] break else: if self.opts['impact']['metric'] in [ 'DPD', 'DPE', 'DPK', 'PD', 'PE', 'PK' ]: cmd = cmd + " " + self.opts['impact']['tai file'] # run sim2Impact logger.info("Computing impact assessment") logger.debug(cmd) sub_logger = logging.getLogger('wst.sim2Impact.exec') sub_logger.setLevel(logging.DEBUG) p = pyutilib.subprocess.run( cmd, stdout=pywst.common.problem.LoggingFile(sub_logger)) if p[0]: message = 'An error occured when running the sim2Impact executable.\n Error Message: ' + p[ 1] + '\n Command: ' + cmd + '\n' logger.error(message) raise RuntimeError(message) # remove temporary files if debug = 0 if self.opts['configure']['debug'] == 0: pyutilib.services.TempfileManager.clear_tempfiles() # write output file prefix = os.path.basename(self.opts['configure']['output prefix']) logfilename = logger.parent.handlers[0].baseFilename outfilename = logger.parent.handlers[0].baseFilename.replace( '.log', '.yml') impact_files = [] id_files = [] prefix = os.path.basename(self.opts['configure']['output prefix']) for s in self.opts['impact']['metric']: impact_files.append(prefix + '_' + s.lower() + '.impact') id_files.append(prefix + '_' + s.lower() + '.id') config = wst_config.output_config() module_blocks = ("general", "sim2Impact") template_options = { 'general': { 'cpu time': round(time.time() - self.startTime, 3), 'directory': os.path.dirname(logfilename), 'log file': os.path.basename(logfilename) }, 
'sim2Impact': { 'impact file': impact_files, 'id file': id_files, 'nodemap file': prefix + '.nodemap', 'scenariomap file': prefix + '.scenariomap' } } self.saveOutput(outfilename, config, module_blocks, template_options) # print solution to screen logger.info("\nWST normal termination") logger.info("---------------------------") logger.info("Directory: " + os.path.dirname(logfilename)) logger.info("Results file: " + os.path.basename(outfilename)) logger.info("Log file: " + os.path.basename(logfilename) + '\n') return
def run(self):
    # setup logger
    logger = logging.getLogger('wst.booster_msx')

    logger.info("WST booster_msx subcommand")
    logger.info("---------------------------")

    # set start time
    self.startTime = time.time()

    # validate input
    logger.info("Validating configuration file")
    self.validate()

    # open inp file, set feasible nodes
    try:
        enData = pyepanet.ENepanet()
        enData.ENopen(self.opts['network']['epanet file'], 'tmp.rpt')
    except:
        msg = "Error: EPANET inp file not loaded using pyepanet"
        logger.error(msg)
        raise RuntimeError(msg)

    nlinks = enData.ENgetcount(pyepanet.EN_LINKCOUNT)
    self.all_link_ids = [enData.ENgetlinkid(i + 1) for i in range(nlinks)]
    self.all_link_endpoints = dict(
        (i + 1, enData.ENgetlinknodes(i + 1)) for i in range(nlinks))

    self.node_names, self.node_indices = wst_util.feasible_nodes(
        self.opts['booster msx']['feasible nodes'],
        self.opts['booster msx']['infeasible nodes'],
        self.opts['booster msx']['max boosters'], enData)
    #if len(self.node_names) == 0:
    #    logger.warn('List of feasible node locations is empty. Booster msx '
    #                'will default to using all nzd junctions as feasible '
    #                'node locations.')
    if len(self.node_names) < self.opts['booster msx']['max boosters']:
        logger.warn('Max nodes reduced to match number of feasible locations.')
        self.opts['booster msx']['max boosters'] = len(self.node_names)
    enData.ENclose()

    # write a temporary TSG file if neither a TSI nor a TSG file was given
    if self.opts['scenario']['tsi file'] in pywst.common.problem.none_list:
        if self.opts['scenario']['tsg file'] in pywst.common.problem.none_list:
            tmpdir = os.path.dirname(self.opts['configure']['output prefix'])
            tmpTSGFile = pyutilib.services.TempfileManager.create_tempfile(
                dir=tmpdir, prefix='tmp_', suffix='.tsg')
            wst_util.write_tsg(self.opts['scenario']['location'],
                               self.opts['scenario']['type'],
                               self.opts['scenario']['species'],
                               self.opts['scenario']['strength'],
                               self.opts['scenario']['start time'],
                               self.opts['scenario']['end time'],
                               tmpTSGFile)
            self.opts['scenario']['tsg file'] = tmpTSGFile

        # expand tsg file
        extTSGfile = wst_util.expand_tsg(self.opts)
        self.opts['scenario']['tsg file'] = extTSGfile

    # get detection times
    self.time_detect = wst_util.eventDetection_tevasim(
        self.opts, self.opts['booster msx']['detection'])

    self.results = {
        'dateOfLastRun': '',
        'nodesToBoost': [],
        'finalMetric': -999,
        'runTime': None}
    self.results['dateOfLastRun'] = datetime.datetime.now().strftime(
        "%Y-%m-%d %H:%M:%S")

    # define tmp filenames for booster_msx
    tmpdir = os.path.dirname(self.opts['configure']['output prefix'])
    #ftevasim = pyutilib.services.TempfileManager.create_tempfile(
    #    dir=tmpdir, prefix='tmp_', suffix='_tevasim.yml')
    #fsim2Impact = pyutilib.services.TempfileManager.create_tempfile(
    #    dir=tmpdir, prefix='tmp_', suffix='_sim2Impact.yml')
    finp = pyutilib.services.TempfileManager.create_tempfile(
        dir=tmpdir, prefix='tmp_', suffix='.in')
    fout = pyutilib.services.TempfileManager.create_tempfile(
        dir=tmpdir, prefix='tmp_', suffix='.out')
    ffwd = pyutilib.services.TempfileManager.create_tempfile(
        dir=tmpdir, prefix='tmp_', suffix='_fwd.py')

    _solver_type = self.opts['solver']['type']

    # coliny_ea solver (through DAKOTA)
    if _solver_type == 'dakota:coliny_ea' or _solver_type == 'coliny_ea':
        self.createDakotaInput(filename=finp, fwdRunFilename=ffwd)
        self.createDriverScripts(filename=ffwd, driver='dakota')
        logger.info('Launching Dakota ...')
        dexe = self.opts['configure']['dakota executable']
        if dexe is None:
            msg = "Error: Cannot find the dakota executable on the system PATH"
            logger.error(msg)
            raise RuntimeError(msg)
        cmd = ' '.join([dexe, '-input', finp, '-output', fout])
        logger.debug(cmd)
        sub_logger = logging.getLogger('wst.booster_msx.dakota')
        sub_logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler(fout, mode='w')
        sub_logger.addHandler(fh)
        p = pyutilib.subprocess.run(
            cmd, stdout=pywst.common.problem.LoggingFile(sub_logger))
        sub_logger.removeHandler(fh)
        fh.close()
        self.parseDakotaOutput(fout)

    # coliny_ea solver (through COLINY)
    elif _solver_type == 'coliny:ea':
        self.createColinyInput(
            'sco:ea',
            ['max_iterations', 'max_function_evaluations', 'population_size',
             'initialization_type', 'fitness_type', 'crossover_rate',
             'crossover_type', 'mutation_rate', 'mutation_type', 'seed'],
            filename=finp, fwdRunFilename=ffwd)
        self.createDriverScripts(filename=ffwd, driver='coliny')
        logger.info('Launching Coliny ...')
        cexe = self.opts['configure']['coliny executable']
        if cexe is None:
            msg = "Error: Cannot find the coliny executable on the system PATH"
            logger.error(msg)
            raise RuntimeError(msg)
        cmd = ' '.join([cexe, finp])
        logger.debug(cmd)
        sub_logger = logging.getLogger('wst.booster_msx.coliny')
        sub_logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler(fout, mode='w')
        sub_logger.addHandler(fh)
        p = pyutilib.subprocess.run(
            cmd, stdout=pywst.common.problem.LoggingFile(sub_logger))
        sub_logger.removeHandler(fh)
        fh.close()
        self.parseColinyOutput(fout)

    # StateMachine LS (through COLINY)
    elif _solver_type == 'coliny:StateMachineLS':
        try:
            enData = pyepanet.ENepanet()
            enData.ENopen(self.opts['network']['epanet file'], 'tmp.rpt')
        except:
            msg = "Error: EPANET inp file not loaded using pyepanet"
            logger.error(msg)
            raise RuntimeError(msg)
        nJunctions = enData.ENgetcount(pyepanet.EN_NODECOUNT) \
            - enData.ENgetcount(pyepanet.EN_TANKCOUNT)
        enData.ENclose()

        init_pts = self.opts['solver']['initial points']
        self.createStateMachineInput()
        self.createColinyInput(
            'sco:StateMachineLS',
            ['verbosity', 'max_iterations', 'max_fcn_evaluations'],
            filename=finp, fwdRunFilename=ffwd,
            init_pts=init_pts, repn='bin')
        self.createDriverScripts(filename=ffwd, driver='coliny', repn='bin')
        logger.info('Launching Coliny ...')
        cexe = self.opts['configure']['coliny executable']
        cmd = ' '.join([cexe, finp])
        logger.debug(cmd)
        sub_logger = logging.getLogger('wst.booster_msx.coliny')
        sub_logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler(fout, mode='w')
        sub_logger.addHandler(fh)
        p = pyutilib.subprocess.run(
            cmd, stdout=pywst.common.problem.LoggingFile(sub_logger))
        sub_logger.removeHandler(fh)
        fh.close()
        self.parseColinyOutput(fout, repn='bin')

    elif _solver_type == 'EVALUATE':
        self.createDakotaInput(filename=finp, fwdRunFilename=ffwd)
        self.createDriverScripts(filename=ffwd, driver='dakota')
        logger.info('Evaluate Placement ...')
        # create dummy params.in
        fid = open('params.in', 'w')
        fid.write('%d variables\n' % len(self.node_names))
        for i in range(len(self.node_names)):
            fid.write('%s x%d\n' % (i + 1, i + 1))
        #fid.write('1 functions\n')
        #fid.write('1 ASV_1\n')
        #fid.write('2 derivative_variables\n')
        #fid.write('1 DVV_1\n')
        #fid.write('2 DVV_2\n')
        #fid.write('0 analysis_components\n')
        fid.close()
        eval_out_file = pyutilib.services.TempfileManager.create_tempfile(
            dir=tmpdir, prefix='tmp_', suffix='_eval.out')
        cmd = ' '.join(['python', ffwd, 'params.in', eval_out_file])
        logger.debug(cmd)
        sub_logger = logging.getLogger('wst.booster_msx.exec')
        sub_logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler(fout, mode='w')
        sub_logger.addHandler(fh)
        p = pyutilib.subprocess.run(
            cmd, stdout=pywst.common.problem.LoggingFile(sub_logger))
        sub_logger.removeHandler(fh)
        fh.close()
        fid = open(eval_out_file, 'r')
        self.results['finalMetric'] = float(fid.read())
        self.results['nodesToBoost'] = self.opts['booster msx']['feasible nodes']
        fid.close()

    else:
        raise Exception("ERROR: Unknown or unsupported solver, '%s'" % _solver_type)

    # remove temporary files if debug == 0
    if self.opts['configure']['debug'] == 0:
        pyutilib.services.TempfileManager.clear_tempfiles()
        if os.path.exists('./hydraulics.hyd'):
            os.remove('./hydraulics.hyd')
        if os.path.exists('./tmp.rpt'):
            os.remove('./tmp.rpt')

    # write output file
    prefix = os.path.basename(self.opts['configure']['output prefix'])
    logfilename = logger.parent.handlers[0].baseFilename
    outfilename = logger.parent.handlers[0].baseFilename.replace('.log', '.yml')
    visymlfilename = logger.parent.handlers[0].baseFilename.replace('.log', '_vis.yml')

    # Write output yml file
    config = wst_config.output_config()
    module_blocks = ("general", "booster")
    template_options = {
        'general': {
            'cpu time': round(time.time() - self.startTime, 3),
            'directory': os.path.dirname(logfilename),
            'log file': os.path.basename(logfilename)},
        'booster': {
            'nodes': self.results['nodesToBoost'],
            'objective': self.results['finalMetric']}}
    self.saveOutput(outfilename, config, module_blocks, template_options)

    # Write output visualization yml file
    config = wst_config.master_config()
    module_blocks = ("network", "visualization", "configure")
    template_options = {
        'network': {
            'epanet file': os.path.abspath(self.opts['network']['epanet file'])},
        'visualization': {
            'layers': []},
        'configure': {
            'output prefix': os.path.abspath(
                self.opts['configure']['output prefix'])}}
    if len(self.opts['booster msx']['detection']) > 0:
        template_options['visualization']['layers'].append({
            'label': 'Sensor locations',
            'locations': self.opts['booster msx']['detection'],
            'location type': 'node',
            'shape': 'square',
            'fill': {
                'color': '#000099',
                'size': 15,
                'opacity': 0},
            'line': {
                'color': '#000099',
                'size': 2,
                'opacity': 0.6}})
    if len(self.results['nodesToBoost']) > 0:
        template_options['visualization']['layers'].append({
            'label': 'Booster locations',
            'locations': "['booster']['nodes'][i]",
            'file': outfilename,
            'location type': 'node',
            'shape': 'circle',
            'fill': {
                'color': '#aa0000',
                'size': 15,
                'opacity': 0.6},
            'line': {
                'color': '#000099',
                'size': 1,
                'opacity': 0}})
    if visymlfilename != None:
        self.saveVisOutput(visymlfilename, config, module_blocks,
                           template_options)

    # Run visualization
    cmd = ['wst', 'visualization', visymlfilename]
    p = pyutilib.subprocess.run(cmd)  # logging information should not be printed to the screen

    # print solution to screen
    logger.info("\nWST Normal Termination")
    logger.info("---------------------------")
    logger.info("Directory: " + os.path.dirname(logfilename))
    logger.info("Results file: " + os.path.basename(outfilename))
    logger.info("Log file: " + os.path.basename(logfilename))
    logger.info("Visualization configuration file: " +
                os.path.basename(visymlfilename) + '\n')

    return
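# ---------------------------------------------------------------------------
# Hedged sketch (assumption, not in the original source): if the YAML file
# written by saveOutput() mirrors the template_options built above, the
# booster results could be read back with PyYAML, e.g.
#
#   import yaml
#   with open(outfilename) as f:
#       results = yaml.safe_load(f)
#   print results['booster']['nodes'], results['booster']['objective']
# ---------------------------------------------------------------------------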