Example #1
 def test_solve_distributed(self):
     ns_host = '127.0.0.1'
     ns_process = None
     dispatcher_process = None
     scenariotreeserver_processes = []
     try:
         ns_process, ns_port = \
             _get_test_nameserver(ns_host=ns_host)
         self.assertNotEqual(ns_process, None)
         dispatcher_process, dispatcher_port = \
             _get_test_dispatcher(ns_host=ns_host,
                                  ns_port=ns_port)
         self.assertNotEqual(dispatcher_process, None)
         scenariotreeserver_processes = []
         for i in range(3):
             scenariotreeserver_processes.append(\
                 subprocess.Popen(["scenariotreeserver", "--traceback"] + \
                                  ["--pyro-host="+str(ns_host)] + \
                                  ["--pyro-port="+str(ns_port)]))
         cmd = ('python ' + \
                join(examples_dir, 'solve_distributed.py') + \
                ' %d') % (ns_port)
         print("Testing command: "+cmd)
         time.sleep(2)
         [_poll(proc) for proc in scenariotreeserver_processes]
         _run_cmd(cmd, shell=True)
     finally:
         _kill(ns_process)
         _kill(dispatcher_process)
         [_kill(proc) for proc in scenariotreeserver_processes]
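Note: every example in this listing shells out through a module-level _run_cmd helper rather than calling subprocess directly. The listing never shows its definition, but Examples #13 and #21 compare it by identity against subprocess.check_call and subprocess.check_output, so a minimal sketch of how the test module presumably binds it (the environment-variable switch is an assumption, not from the source) is:

    import os
    import subprocess

    # Hypothetical binding: the identity checks in Examples #13 and #21
    # imply _run_cmd is exactly one of these two stdlib functions.
    if os.environ.get('CAPTURE_OUTPUT'):  # assumed switch
        _run_cmd = subprocess.check_output
    else:
        _run_cmd = subprocess.check_call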
Example #2
 def test_scenarios_MPS_1server(self):
     self._setup(self.options, servers=1)
     self.options['--core-format'] = 'mps'
     cmd = self._get_cmd()
     _run_cmd(cmd, shell=True)
     self._diff(os.path.join(thisDir, self.baseline_basename+'_MPS_baseline'),
                self.options['--output-directory'])
Example #3
 def _run_cmd_with_pyro(self, cmd, num_servers):
     ns_host = '127.0.0.1'
     ns_process = None
     dispatcher_process = None
     scenariotreeserver_processes = []
     try:
         ns_process, ns_port = \
             _get_test_nameserver(ns_host=ns_host)
         self.assertNotEqual(ns_process, None)
         dispatcher_process, dispatcher_port = \
             _get_test_dispatcher(ns_host=ns_host,
                                  ns_port=ns_port)
         self.assertNotEqual(dispatcher_process, None)
         scenariotreeserver_processes = []
         for i in range(num_servers):
             scenariotreeserver_processes.append(\
                 subprocess.Popen(["scenariotreeserver", "--traceback"] + \
                                  ["--pyro-host="+str(ns_host)] + \
                                  ["--pyro-port="+str(ns_port)]))
         cmd += " --scenario-tree-manager=pyro"
         cmd += " --pyro-host=" + ns_host
         cmd += " --pyro-port=" + str(ns_port)
         print("Testing command: " + cmd)
         time.sleep(2)
         [_poll(proc) for proc in scenariotreeserver_processes]
         _run_cmd(cmd, shell=True)
     finally:
         _kill(ns_process)
         _kill(dispatcher_process)
         [_kill(proc) for proc in scenariotreeserver_processes]
         if os.path.exists(os.path.join(thisDir, 'Pyro_NS_URI')):
             try:
                 os.remove(os.path.join(thisDir, 'Pyro_NS_URI'))
             except OSError:
                 pass
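The _run_cmd_with_pyro harness above always follows the same shape: start a Pyro name server, then a dispatcher, then num_servers scenariotreeserver workers; append the --scenario-tree-manager=pyro, --pyro-host, and --pyro-port flags to the command; poll the workers; run the command; and tear every process down in the finally block no matter what happened. A test method would presumably delegate to it in one line; the sketch below is hypothetical and borrows its option names from Examples #2 and #4:

 def test_scenarios_LP_3servers(self):
     # Hypothetical test: build the usual command, then let the harness
     # manage the Pyro processes with three scenariotreeserver workers.
     self._setup(self.options)
     self.options['--core-format'] = 'lp'
     cmd = self._get_cmd()
     self._run_cmd_with_pyro(cmd, num_servers=3)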
Example #4
 def test_scenarios_LP(self):
     self._setup(self.options)
     self.options['--core-format'] = 'lp'
     cmd = self._get_cmd()
     _run_cmd(cmd, shell=True)
     self._diff(os.path.join(thisDir, self.basename + '_LP_baseline'),
                self.options['--output-directory'])
Example #5
 def test_scenarios_LP_symbolic_names_1server(self):
     self._setup(self.options, servers=1)
     self.options['--core-format'] = 'lp'
     self.options['--symbolic-solver-labels'] = ''
     cmd = self._get_cmd()
     _run_cmd(cmd, shell=True)
     self._diff(os.path.join(thisDir, self.baseline_basename+'_LP_symbolic_names_baseline'),
                self.options['--output-directory'])
Example #6
 def test_bad_objective_constant_MPS(self):
     cmd, output_dir = self._get_cmd(join(
         thisDir, "model_bad_objective_constant.py"),
                                     options={'--core-format': 'mps'})
     with self.assertRaises(subprocess.CalledProcessError):
         try:
             _run_cmd(cmd, shell=True)
         except:
             shutil.rmtree(output_dir, ignore_errors=True)
             raise
Example #7
 def test_bad_variable_bounds_LP(self):
     cmd, output_dir = self._get_cmd(join(thisDir,
                                          "model_bad_variable_bounds.py"),
                                     options={'--core-format': 'lp'})
     with self.assertRaises(subprocess.CalledProcessError):
         try:
             _run_cmd(cmd, shell=True)
         except:
             shutil.rmtree(output_dir, ignore_errors=True)
             raise
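Examples #6 and #7 share one idiom: the command is expected to fail, so assertRaises(subprocess.CalledProcessError) asserts the nonzero exit while the inner try/except removes the partially written output directory before re-raising, leaving no stale artifacts behind. The same pattern can be factored into a small context manager; this helper is hypothetical, not part of the test module:

    import contextlib
    import shutil

    @contextlib.contextmanager
    def _cleanup_on_failure(output_dir):
        # If the body raises, delete the partial output, then re-raise
        # so the surrounding assertRaises still sees the error.
        try:
            yield
        except:
            shutil.rmtree(output_dir, ignore_errors=True)
            raise

Usage inside a test would then collapse to:

    with self.assertRaises(subprocess.CalledProcessError), \
         _cleanup_on_failure(output_dir):
        _run_cmd(cmd, shell=True)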
Example #8
 def test_generate_distributed_NL(self):
     class_name, test_name = self.id().split('.')[-2:]
     tmpdir = os.path.join(thisDir, class_name+"_"+test_name)
     shutil.rmtree(tmpdir, ignore_errors=True)
     self.assertEqual(os.path.exists(tmpdir), False)
     cmd = 'python '+join(examples_dir, 'apps', 'generate_distributed_NL.py')
     cmd += " -m "+join(pysp_examples_dir, "networkx_scenariotree", "ReferenceModel.py")
     cmd += " --output-directory="+tmpdir
     print("Testing command: "+cmd)
     _run_cmd(cmd, shell=True)
     self.assertEqual(os.path.exists(tmpdir), True)
     shutil.rmtree(tmpdir, ignore_errors=True)
Example #9
 def test_scenario_tree_image(self):
     class_name, test_name = self.id().split('.')[-2:]
     tmpfname = os.path.join(thisDir, class_name+"_"+test_name)+".pdf"
     try:
         os.remove(tmpfname)
     except OSError:
         pass
     self.assertEqual(os.path.exists(tmpfname), False)
     cmd = 'python '+join(examples_dir, 'apps', 'scenario_tree_image.py')
     cmd += " -m "+join(pysp_examples_dir, "networkx_scenariotree", "ReferenceModel.py")
     cmd += " --output-file="+tmpfname
     print("Testing command: "+cmd)
     _run_cmd(cmd, shell=True)
     self.assertEqual(os.path.exists(tmpfname), True)
     os.remove(tmpfname)
Example #10
 def test_scenarios_1server(self):
     self._setup(self.options, servers=1)
     cmd = self._get_cmd()
     _run_cmd(cmd, shell=True)
     self.assertMatchesJsonBaseline(
         self.options['--jsonsaver-output-name'],
         join(thisDir, self.basename+'_ef_solution.json'),
         tolerance=_diff_tolerance,
         delete=True,
         exact=_json_exact_comparison)
     self.assertMatchesJsonBaseline(
         self.options['--output-scenario-costs'],
         join(thisDir, self.basename+'_ef_costs.json'),
         tolerance=_diff_tolerance,
         delete=True,
         exact=_json_exact_comparison)
Example #11
 def test_scenarios(self):
     if not testing_solvers['cplex', 'lp']:
         self.skip("cplex is not available")
     self._setup(self.options)
     cmd = self._get_cmd()
     _run_cmd(cmd, shell=True)
     self.assertMatchesJsonBaseline(
         self.options['--jsonsaver-output-name'],
         join(thisDir, self.basename + '_ef_solution.json'),
         tolerance=_diff_tolerance,
         delete=True,
         exact=_json_exact_comparison)
     self.assertMatchesJsonBaseline(self.options['--output-scenario-costs'],
                                    join(thisDir,
                                         self.basename + '_ef_costs.json'),
                                    tolerance=_diff_tolerance,
                                    delete=True,
                                    exact=_json_exact_comparison)
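The testing_solvers lookup above is a mapping keyed by (solver_name, solver_io) tuples. The listing never shows how it is filled in, but a plausible sketch using Pyomo's public SolverFactory (the probing logic is an assumption; the real test module may do this differently) looks like:

    from pyomo.opt import SolverFactory

    # Hypothetical availability map; real tests may cover more
    # (solver, io) combinations.
    testing_solvers = {('cplex', 'lp'): False}
    for solver_name, io in list(testing_solvers):
        opt = SolverFactory(solver_name, solver_io=io)
        if (opt is not None) and opt.available(exception_flag=False):
            testing_solvers[solver_name, io] = True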
Example #12
def _run(*arg, quiet=False, check_returncode=True, **kwargs):
    """
    Call command in subprocess

    Args:
        quiet (bool): Hide commands output.
        check_returncode (bool): Check return code.

    Returns:
        subprocess.CompletedProcess: call result.
    """
    if quiet:
        kwargs.update(dict(stdout=_PIPE, stderr=_PIPE,
                           universal_newlines=True))

    result = _run_cmd(*arg, **kwargs)
    if check_returncode:
        result.check_returncode()
    return result
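Because _run forwards everything else to _run_cmd, and the CompletedProcess return value plus the check_returncode() call indicate that _run_cmd here wraps subprocess.run, callers can mix the two keyword switches with ordinary subprocess.run arguments. A hypothetical usage:

    # Capture output as text and raise on a nonzero exit status.
    result = _run(['ls', '-l'], quiet=True)
    print(result.stdout)

    # Tolerate failure and inspect the return code manually.
    result = _run(['false'], quiet=True, check_returncode=False)
    assert result.returncode != 0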
Example #13
    driver.perform_build('pyomo',
                         coverage=True,
                         omit=coverage_omit,
                         config='pyomo_all.ini')

elif config == "core":
    # Install
    print("-" * 60)
    print("Installing Pyomo")
    print("-" * 60)
    driver.perform_install('pyomo', config='pyomo_all.ini')
    print("-" * 60)
    print("Running 'pyomo install-extras' ...")
    print("-" * 60)
    if _run_cmd is subprocess.check_call:
        _run_cmd("python/bin/pyomo install-extras", shell=True)
    elif _run_cmd is subprocess.check_output:
        output = _run_cmd("python/bin/pyomo install-extras", shell=True)
        # check_output returns bytes under Python 3; decode before
        # printing (Example #21 handles the same case explicitly).
        if hasattr(output, 'decode'):
            output = output.decode('utf-8', 'replace')
        print(output)
    else:
        assert False
    # Test
    os.environ['TEST_PACKAGES'] = ' '.join([
        'pyomo.checker', 'pyomo.common', 'pyomo.core', 'pyomo.environ',
        'pyomo.opt', 'pyomo.repn', 'pyomo.scripting', 'pyomo.solvers',
        'pyomo.util', 'pyomo.version'
    ])
    print("-" * 60)
    print("Performing tests")
    print("-" * 60)
    driver.perform_tests('pyomo', coverage=True, omit=coverage_omit)
Example #14
 def test_scenarios(self):
     self._setup(self.options)
     cmd = self._get_cmd()
     _run_cmd(cmd, shell=True)
Example #15
 def test_benders_scripting(self):
     cmd = 'python '+join(examples_dir, 'benders_scripting.py')
     print("Testing command: "+cmd)
     _run_cmd(cmd, shell=True)
Example #16
 def test_ef_duals(self):
     cmd = 'python '+join(examples_dir, 'ef_duals.py')
     print("Testing command: "+cmd)
     _run_cmd(cmd, shell=True)
Example #17
 def test_admm(self):
     cmd = 'python '+join(examples_dir, 'apps', 'admm.py')
     cmd += " -m "+join(pysp_examples_dir, "farmer", "models")
     cmd += " -s "+join(pysp_examples_dir, "farmer", "scenariodata")
     print("Testing command: "+cmd)
     _run_cmd(cmd, shell=True)
Example #18
    def _baseline_test(self,
                       options_string="",
                       validation_options_string="",
                       cleanup_func=None,
                       rename_func=None,
                       check_baseline_func=None):

        global _test_name_wildcard_include
        global _test_name_wildcard_exclude
        class_name, test_name = self.id().split('.')[-2:]
        if all(not fnmatch.fnmatch(class_name+'.'+test_name,inc_wildcard) \
               for inc_wildcard in _test_name_wildcard_include):
            self.skipTest("Test %s.%s does not match any include wildcard in '%s'" \
                          % (class_name, test_name, _test_name_wildcard_include))
        if any(fnmatch.fnmatch(class_name+'.'+test_name,exc_wildcard) \
               for exc_wildcard in _test_name_wildcard_exclude):
            self.skipTest("Test %s.%s matches at least one exclude wildcard in '%s'" \
                          % (class_name, test_name, _test_name_wildcard_exclude))
        if not testing_solvers[self.solver_name, self.solver_io]:
            self.skipTest("Solver (%s,%s) not available" %
                          (self.solver_name, self.solver_io))
        prefix = class_name + "." + test_name
        argstring = self.get_cmd_base()+" "\
                    "--model-directory="+self.model_directory+" "\
                    "--instance-directory="+self.instance_directory+" "\
                    "--solution-writer=pysp.plugins.jsonsolutionwriter "\
                    +options_string+" "\
                    "&> "+join(thisDir,prefix+".out")
        print("Testing command(" + basename(prefix) + "): " + argstring)
        self.safe_delete(join(thisDir, prefix + ".out"))
        self.safe_delete(join(thisDir, "ef_solution.json"))
        self.safe_delete(join(thisDir, prefix + ".ef_solution.json.out"))
        if cleanup_func is not None:
            cleanup_func(self, class_name, test_name)
        _run_cmd(argstring, shell=True)
        self.assertTrue(os.path.exists(join(thisDir, "ef_solution.json")))
        os.rename(join(thisDir, "ef_solution.json"),
                  join(thisDir, prefix + ".ef_solution.json.out"))
        if rename_func is not None:
            rename_func(self, class_name, test_name)

        validate_ef_main([join(thisDir,prefix+".ef_solution.json.out"),
                          '-t',repr(_diff_tolerance)]\
                         +validation_options_string.split())

        if self.baseline_group is not None:
            group_prefix = self.baseline_group + "." + test_name
            # Disable automatic deletion of the ef_solution output
            # file on passing test just in case the optional
            # check_baseline_func wants to look at it.
            self.assertMatchesJsonBaseline(
                join(thisDir, prefix + ".ef_solution.json.out"),
                join(baselineDir,
                     group_prefix + ".ef_solution.json.baseline.gz"),
                tolerance=_diff_tolerance,
                delete=False,
                exact=_json_exact_comparison)

        if check_baseline_func is not None:
            assert self.baseline_group is not None
            check_baseline_func(self, class_name, test_name)
        else:
            # Now we can safely delete this file because the test has
            # passed if we are here
            self.safe_delete(join(thisDir, prefix + ".ef_solution.json.out"))
        self.safe_delete(join(thisDir, prefix + ".out"))
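_baseline_test is the workhorse behind these baseline comparisons: it filters the test name against the global include/exclude wildcards, skips when the requested solver is unavailable, assembles the command line, runs it through _run_cmd, renames the emitted ef_solution.json to a per-test name, validates it with validate_ef_main, and finally (when a baseline group is set) diffs it against a gzipped JSON baseline. A concrete test would presumably reduce to a short delegation; the test name and options string below are illustrative, not taken from the source:

 def test_quadratic(self):
     # Hypothetical test: all test-specific behavior rides in through
     # the options string; the harness runs, validates, and compares.
     self._baseline_test(options_string="--traceback")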
Example #19
    def _baseline_test(self,
                       options_string="",
                       validation_options_string="",
                       cleanup_func=None,
                       rename_func=None,
                       check_baseline_func=None):

        global _test_name_wildcard_include
        global _test_name_wildcard_exclude
        class_name, test_name = self.id().split('.')[-2:]
        if all(not fnmatch.fnmatch(class_name+'.'+test_name,inc_wildcard) \
               for inc_wildcard in _test_name_wildcard_include):
            self.skipTest("Test %s.%s does not match any include wildcard in '%s'" \
                          % (class_name, test_name, _test_name_wildcard_include))
        if any(fnmatch.fnmatch(class_name+'.'+test_name,exc_wildcard) \
               for exc_wildcard in _test_name_wildcard_exclude):
            self.skipTest("Test %s.%s matches at least one exclude wildcard in '%s'" \
                          % (class_name, test_name, _test_name_wildcard_exclude))
        if not testing_solvers[self.solver_name,self.solver_io]:
            self.skipTest("Solver (%s,%s) not available"
                          % (self.solver_name, self.solver_io))
        prefix = class_name+"."+test_name
        argstring = self.get_cmd_base()+" "\
                    "--model-directory="+self.model_directory+" "\
                    "--instance-directory="+self.instance_directory+" "\
                    "--solution-writer=pyomo.pysp.plugins.jsonsolutionwriter "\
                    +options_string+" "\
                    "&> "+join(thisDir,prefix+".out")
        print("Testing command("+basename(prefix)+"): " + argstring)
        self.safe_delete(join(thisDir,prefix+".out"))
        self.safe_delete(join(thisDir,"ef_solution.json"))
        self.safe_delete(join(thisDir,prefix+".ef_solution.json.out"))
        if cleanup_func is not None:
            cleanup_func(self, class_name, test_name)
        _run_cmd(argstring, shell=True)
        self.assertTrue(os.path.exists(join(thisDir,"ef_solution.json")))
        os.rename(join(thisDir,"ef_solution.json"),
                  join(thisDir,prefix+".ef_solution.json.out"))
        if rename_func is not None:
            rename_func(self, class_name, test_name)

        validate_ef_main([join(thisDir,prefix+".ef_solution.json.out"),
                          '-t',repr(_diff_tolerance)]\
                         +validation_options_string.split())

        if self.baseline_group is not None:
            group_prefix = self.baseline_group+"."+test_name
            # Disable automatic deletion of the ef_solution output
            # file on passing test just in case the optional
            # check_baseline_func wants to look at it.
            self.assertMatchesJsonBaseline(
                join(thisDir,prefix+".ef_solution.json.out"),
                join(baselineDir,group_prefix+".ef_solution.json.baseline.gz"),
                tolerance=_diff_tolerance,
                delete=False,
                exact=_json_exact_comparison)

        if check_baseline_func is not None:
            assert self.baseline_group is not None
            check_baseline_func(self, class_name, test_name)
        else:
            # Now we can safely delete this file because the test has
            # passed if we are here
            self.safe_delete(join(thisDir,prefix+".ef_solution.json.out"))
        self.safe_delete(join(thisDir,prefix+".out"))
Example #20
 def test_scenarios_1server(self):
     self._setup(self.options, servers=1)
     cmd = self._get_cmd()
     _run_cmd(cmd, shell=True)
Example #21
elif config == "default":
    driver.perform_build('pyomo', coverage=True, omit=coverage_omit,
                         config='pyomo_all.ini')

elif config == "core":
    # Install
    print("-" * 60)
    print("Installing Pyomo")
    print("-" * 60)
    driver.perform_install('pyomo', config='pyomo_all.ini')
    print("-" * 60)
    print("Running 'pyomo install-extras' ...")
    print("-" * 60)
    if _run_cmd is subprocess.check_call:
        _run_cmd("python/bin/pyomo install-extras", shell=True)
    elif _run_cmd is subprocess.check_output:
        output = _run_cmd("python/bin/pyomo install-extras", shell=True)
        try:
            print(output)
        except:
            if hasattr(output, 'encode'):
                output = output.encode('utf-8','replace')
            print(output.decode('utf-8'))
    else:
        assert False
    # Test
    os.environ['TEST_PACKAGES'] = ' '.join([
            'pyomo.checker','pyomo.common','pyomo.core','pyomo.environ',
            'pyomo.opt','pyomo.repn','pyomo.scripting','pyomo.solvers',
            'pyomo.util','pyomo.version'])