def test_parse(self):
    """Exercise FileDownloader.parse_args(): defaults, options, and errors.

    Fix: ``assertRaisesRegexp`` is a deprecated alias that was removed in
    Python 3.12; use ``assertRaisesRegex`` (available since Python 3.2).
    """
    # No arguments: all options default to unset.
    f = FileDownloader()
    f.parse_args([])
    self.assertFalse(f.insecure)
    self.assertIsNone(f.cacert)
    self.assertIsNone(f.target)
    # --insecure flag alone.
    f = FileDownloader()
    f.parse_args(['--insecure'])
    self.assertTrue(f.insecure)
    self.assertIsNone(f.cacert)
    self.assertIsNone(f.target)
    # --cacert must name an existing file; this test file qualifies.
    f = FileDownloader()
    f.parse_args(['--insecure', '--cacert', this_file()])
    self.assertTrue(f.insecure)
    self.assertEqual(f.cacert, this_file())
    self.assertIsNone(f.target)
    # A positional argument is recorded as the download target.
    f = FileDownloader()
    f.parse_args(['--insecure', 'bar', '--cacert', this_file()])
    self.assertTrue(f.insecure)
    self.assertEqual(f.cacert, this_file())
    self.assertEqual(f.target, 'bar')
    # --cacert with no value: argparse exits and reports the error.
    f = FileDownloader()
    with capture_output() as io:
        with self.assertRaises(SystemExit):
            f.parse_args(['--cacert'])
    self.assertIn('argument --cacert: expected one argument',
                  io.getvalue())
    # --cacert immediately followed by another option is also
    # treated as a missing value.
    f = FileDownloader()
    with capture_output() as io:
        with self.assertRaises(SystemExit):
            f.parse_args(['--cacert', '--insecure'])
    self.assertIn('argument --cacert: expected one argument',
                  io.getvalue())
    # --cacert pointing at a nonexistent file raises RuntimeError.
    # (The misspelling 'nonexistant' matches the downloader's actual
    # error-message text and must not be "corrected" here.)
    f = FileDownloader()
    with self.assertRaisesRegex(
            RuntimeError,
            "--cacert='nonexistant_file_name' does "
            "not refer to a valid file"):
        f.parse_args(['--cacert', 'nonexistant_file_name'])
    # Unknown options are rejected.
    f = FileDownloader()
    with capture_output() as io:
        with self.assertRaises(SystemExit):
            f.parse_args(['--foo'])
    self.assertIn('error: unrecognized arguments: --foo', io.getvalue())
def test_warm_start(self):
    """Verify CBC consumes a user-supplied warm start (mipstart file)."""
    model = ConcreteModel()
    model.x = Var()
    model.z = Var(domain=Integers)
    model.w = Var(domain=Boolean)
    model.c = Constraint(expr=model.x + model.z + model.w >= 0)
    model.o = Objective(expr=model.x + model.z + model.w)
    # Seed the variables so the solver has a starting point to read.
    model.x.set_value(10)
    model.z.set_value(5)
    model.w.set_value(1)
    with SolverFactory("cbc") as solver:
        with capture_output() as log:
            solver.solve(model, tee=True, warmstart=True)
    cbc_log = log.getvalue()
    # CBC should report that it read the mipstart file ...
    self.assertIn('opening mipstart file', cbc_log)
    # ... covering only the integer/binary variables (x is continuous) ...
    self.assertIn('MIPStart values read for 2 variables.', cbc_log)
    # ... yielding an initial incumbent of z + w = 5 + 1 = 6.
    self.assertIn('MIPStart provided solution with cost 6', cbc_log)
def solver_log(logger, level=logging.ERROR): """Context manager to send solver output to a logger. This uses a separate thread to log solver output while the solver is running""" # wait 3 seconds to join thread. Should be plenty of time. In case # something goes horribly wrong though don't want to hang. The logging # thread is daemonic, so it will shut down with the main process even if it # stays around for some mysterious reason while the model is running. join_timeout = 3 tee = logger.isEnabledFor(level) if not solver_capture(): yield SolverLogInfo(tee=tee) else: with capture_output() as s: lt = IOToLogTread(s, logger=logger, level=level) lt.start() try: yield SolverLogInfo(tee=tee, thread=lt) except: lt.stop.set() lt.join(timeout=join_timeout) raise # thread should end when s is closed, but setting stop makes sure # the last of the output gets logged before closing s lt.stop.set() lt.join(timeout=join_timeout)
def test_tee_and_logfile(self):
    """Solving with tee=True and a logfile writes output to both places."""
    with SolverFactory("gams", solver_io="python") as solver:
        with capture_output() as captured:
            solver.solve(self.m, logfile=self.logfile, tee=True)
    # Both the captured stdout and the logfile should contain the log.
    self._check_stdout(captured.getvalue(), exists=True)
    self._check_logfile(exists=True)
def test_no_tee(self):
    """With tee=False and no logfile, no solver output should appear."""
    with SolverFactory("gams", solver_io="python") as solver:
        with capture_output() as captured:
            solver.solve(self.m, tee=False)
    # Neither stdout nor a logfile should have received any output.
    self._check_stdout(captured.getvalue(), exists=False)
    self._check_logfile(exists=False)
def run_convergence_evaluation(sample_file_dict, conv_eval):
    """
    Run convergence evaluation and generate the statistics based on
    information in the sample_file.

    Parameters
    ----------
    sample_file_dict : dict
        Dictionary created by ConvergenceEvaluationSpecification that
        contains the input and sample point information
    conv_eval : ConvergenceEvaluation
        The ConvergenceEvaluation object that should be used

    Returns
    -------
    tuple : (inputs, samples, global_results), where global_results is a
        list of per-sample OrderedDicts with keys 'name', 'sample_point',
        'solved', 'iters', and 'time'
    """
    inputs = sample_file_dict['inputs']
    samples = sample_file_dict['samples']
    # current parallel task manager code does not work with dictionaries, so
    # convert samples to a list
    # ToDo: fix and test parallel task manager with dictionaries and change
    # this
    samples_list = list()
    for k, v in samples.items():
        # stash the sample's key on the record so it survives the
        # dict -> list conversion
        v['_name'] = k
        samples_list.append(v)
    n_samples = len(samples_list)
    # distribute the samples across the parallel workers; each rank only
    # solves its local share
    task_mgr = mpiu.ParallelTaskManager(n_samples)
    local_samples_list = task_mgr.global_to_local_data(samples_list)
    results = list()
    for (si, ss) in enumerate(local_samples_list):
        sample_name = ss['_name']
        # print progress on the rank-0 process
        if task_mgr.is_root():
            _progress_bar(
                float(si) / float(len(local_samples_list)),
                'Root Process: {}'.format(sample_name))
        # capture the output
        # ToDo: make this an option and turn off for single sample execution
        output_buffer = StringIO()
        with LoggingIntercept(output_buffer, 'idaes', logging.ERROR):
            with capture_output():  # as str_out:
                # build a fresh model, apply this sample's parameter
                # values, and solve it, collecting solver statistics
                model = conv_eval.get_initialized_model()
                _set_model_parameters_from_sample(model, inputs, ss)
                solver = conv_eval.get_solver()
                (status_obj, solved, iters, time) = \
                    _run_ipopt_with_stats(model, solver)
        if not solved:
            _log.error(f'Sample: {sample_name} failed to converge.')
        # record the outcome for this sample
        results_dict = OrderedDict()
        results_dict['name'] = sample_name
        results_dict['sample_point'] = ss
        results_dict['solved'] = solved
        results_dict['iters'] = iters
        results_dict['time'] = time
        results.append(results_dict)
    # merge the per-rank results back into one global list
    global_results = task_mgr.gather_global_data(results)
    return inputs, samples, global_results