示例#1
0
 def from_dict(cls, d):
     """Rebuild the handler from the dict produced by its as_dict counterpart."""
     adapter_dict = d['queue_adapter']
     # The serialized adapter may legitimately be absent (stored as None).
     adapter = None if adapter_dict is None else QueueAdapter.from_dict(adapter_dict)
     return cls(job_rundir=d['job_rundir'],
                qout_file=d['qout_file'],
                qerr_file=d['qerr_file'],
                queue_adapter=adapter,
                max_mem_per_proc_mb=d['max_mem_per_proc_mb'],
                mem_per_proc_increase_mb=d['mem_per_proc_increase_mb'],
                max_master_mem_overhead_mb=d['max_master_mem_overhead_mb'],
                master_mem_overhead_increase_mb=d['master_mem_overhead_increase_mb'])
示例#2
0
    def test_walltime_handler(self):
        """Check WalltimeHandler both from a queue_adapter and from a given Firework.

        Fix: the original opened the JSON fixtures with bare open()/close() pairs,
        which leaks the file handle if json.load raises; use context managers.
        """
        # Simple test from a queue_adapter
        with open(os.path.join(test_dir, "fw_handlers", "walltime", "PBS_qadapter.json")) as f:
            dd = json.load(f)
        qad = QueueAdapter.from_dict(dd)
        walltime_handler = WalltimeHandler(job_rundir=os.path.join(test_dir, "fw_handlers", "walltime"),
                                           qout_file='queue.qout', qerr_file='queue.qerr', queue_adapter=qad,
                                           max_timelimit=1200, timelimit_increase=600)
        self.assertTrue(walltime_handler.check())

        # Test with a given Firework
        walltime_handler = WalltimeHandler(job_rundir='.',
                                           qout_file='queue.qout', qerr_file='queue.qerr', queue_adapter=None,
                                           max_timelimit=1200, timelimit_increase=600)
        # Get the Firework from json file
        with open(os.path.join(test_dir, 'fw_handlers', 'walltime', 'sleep_fw.json')) as f:
            dd = json.load(f)
        fw_to_check = Firework.from_dict(dd)

        # Hack the launch_dir so that it points to the directory where the queue.qout is
        fw_to_check.launches[-1].launch_dir = os.path.join(test_dir, fw_to_check.launches[-1].launch_dir)
        walltime_handler.src_setup(fw_spec={}, fw_to_check=fw_to_check)

        self.assertTrue(walltime_handler.check())
        actions = walltime_handler.correct()
        # The handler is expected to propose raising the timelimit on the
        # qtk_queueadapter object stored in the fw_spec.
        self.assertEqual(actions, {'errors': ['WalltimeHandler'],
                                   'actions': [{'action': {'_set': {'timelimit': 660.0}},
                                                'object': {'source': 'fw_spec', 'key': 'qtk_queueadapter'},
                                                'action_type': 'modify_object'}]})
示例#3
0
 def from_dict(cls, d):
     """Reconstruct the handler from its dictionary representation."""
     qad_dict = d['queue_adapter']
     # A missing adapter is serialized as None and restored as None.
     qad = None if qad_dict is None else QueueAdapter.from_dict(qad_dict)
     return cls(job_rundir=d['job_rundir'], qout_file=d['qout_file'],
                qerr_file=d['qerr_file'], queue_adapter=qad,
                max_timelimit=d['max_timelimit'],
                timelimit_increase=d['timelimit_increase'])
示例#4
0
    def test_manager(self):
        """Exercise the abirun.py manager-related commands."""
        env = self.get_env()

        common_opts = ["--no-logo", "--no-colors"]

        # doc_manager: first the generic form, then one invocation per queue type.
        env.run(self.script, ".", "doc_manager", self.loglevel, self.verbose,
                *common_opts)
        for qtype in QueueAdapter.all_qtypes():
            env.run(self.script, ".", "doc_manager", qtype, self.loglevel,
                    self.verbose, *common_opts)

        # doc_scheduler
        env.run(self.script, ".", "doc_scheduler", self.loglevel, self.verbose,
                *common_opts)
示例#5
0
    def test_manager(self):
        """Run the abirun.py manager documentation commands."""
        env = self.get_env()

        base_flags = ["--no-logo", "--no-colors"]

        # Test doc_manager without a qtype, then once for every registered qtype.
        env.run(self.script, ".", "doc_manager", self.loglevel, self.verbose, *base_flags)
        for qtype in QueueAdapter.all_qtypes():
            env.run(self.script, ".", "doc_manager", qtype, self.loglevel, self.verbose, *base_flags)

        # Test doc_scheduler
        env.run(self.script, ".", "doc_scheduler", self.loglevel, self.verbose, *base_flags)
示例#6
0
    def test_without_flow(self):
        """Exercise the abirun.py commands that do not require a flow."""
        env = self.get_env()
        flags = ["--no-logo", "--no-colors"]

        # doc_manager: generic call, then one invocation per known queue type.
        # NOTE(review): only the per-qtype calls pass "." — preserved as-is.
        result = env.run(self.script, "doc_manager", self.loglevel, self.verbose, *flags,
                         expect_stderr=self.expect_stderr)
        for qtype in QueueAdapter.all_qtypes():
            result = env.run(self.script, ".", "doc_manager", qtype, self.loglevel, self.verbose, *flags,
                             expect_stderr=self.expect_stderr)

        # doc_scheduler
        result = env.run(self.script, "doc_scheduler", self.loglevel, self.verbose, *flags,
                         expect_stderr=self.expect_stderr)

        # abibuild
        result = env.run(self.script, "abibuild", self.loglevel, self.verbose, *flags,
                         expect_stderr=self.expect_stderr)
示例#7
0
    def test_without_flow(self):
        """Run the flow-less abirun.py commands and make sure they complete."""
        env = self.get_env()
        opts = ["--no-logo", "--no-colors"]

        # Test doc_manager (generic form, then per queue type with an explicit ".").
        ret = env.run(self.script, "doc_manager", self.loglevel, self.verbose, *opts,
                      expect_stderr=self.expect_stderr)
        for qtype in QueueAdapter.all_qtypes():
            ret = env.run(self.script, ".", "doc_manager", qtype, self.loglevel, self.verbose, *opts,
                          expect_stderr=self.expect_stderr)

        # Test doc_scheduler
        ret = env.run(self.script, "doc_scheduler", self.loglevel, self.verbose, *opts,
                      expect_stderr=self.expect_stderr)

        # Test abibuild
        ret = env.run(self.script, "abibuild", self.loglevel, self.verbose, *opts,
                      expect_stderr=self.expect_stderr)
示例#8
0
    def test_base(self):
        """unit tests for Qadapter subclasses. A more complete coverage would require integration testing."""
        self.maxDiff = None
        check_eq, check_true, check_false = self.assertEqual, self.assertTrue, self.assertFalse
        adapter_classes = QueueAdapter.__subclasses__()

        # Every concrete subclass must be buildable through the abc protocol.
        for adapter_cls in adapter_classes:
            print("subclass: ", adapter_cls)

            # Build an adapter of this concrete type from the template dict.
            self.QDICT["queue"]["qtype"] = adapter_cls.QTYPE
            adapter = make_qadapter(**self.QDICT)
            print(adapter)
            hardware = adapter.hw
            one_gb = 1024

            # Programmatic interface used to change job parameters.
            check_eq(adapter.num_launches, 0)
            check_false(adapter.has_omp)
            check_true(adapter.has_mpi)
            adapter.set_mpi_procs(2)
            check_eq(adapter.mpi_procs, 2)
            check_true(adapter.pure_mpi)
            check_false(adapter.pure_omp)
            check_false(adapter.hybrid_mpi_omp)
            check_eq(adapter.mem_per_proc, one_gb)
            adapter.set_mem_per_proc(2 * one_gb)
            check_eq(adapter.mem_per_proc, 2 * one_gb)
            check_eq(adapter.timelimit, 120)

            # Enabling OMP turns the job into a hybrid MPI+OMP one.
            adapter.set_omp_threads(2)
            check_eq(adapter.omp_threads, 2)
            check_true(adapter.has_omp)
            check_false(adapter.pure_mpi)
            check_false(adapter.pure_omp)
            check_true(adapter.hybrid_mpi_omp)

            cores_per_node = hardware.sockets_per_node * hardware.cores_per_socket
            check_true(adapter.hw.can_use_omp_threads(cores_per_node))
            check_false(adapter.hw.can_use_omp_threads(cores_per_node + 1))

            # Creation of the submission script.
            script = adapter.get_script_str("job.sh", "/launch/dir", "executable",
                                            "qout_path", "qerr_path",
                                            stdin="STDIN", stdout="STDOUT", stderr="STDERR")

            # The adapter must survive a Pickle round trip and emit the same script.
            for restored in self.serialize_with_pickle(adapter, test_eq=False):
                restored_script = restored.get_script_str("job.sh", "/launch/dir", "executable",
                                                          "qout_path", "qerr_path",
                                                          stdin="STDIN", stdout="STDOUT", stderr="STDERR")
                check_eq(restored_script, script)

            # Test can_run and distribute
            # The hardware has num_nodes=3, sockets_per_node=2, cores_per_socket=4, mem_per_node="8 Gb"
            check_false(adapter.can_run_pconf(
                ParalConf(mpi_ncpus=hardware.num_cores + 1, omp_ncpus=1, mem_per_cpu=0.1)))
            check_false(adapter.can_run_pconf(
                ParalConf(mpi_ncpus=4, omp_ncpus=9, mem_per_cpu=0.1)))
            check_false(adapter.can_run_pconf(
                ParalConf(mpi_ncpus=4, omp_ncpus=1, mem_per_cpu=10 * one_gb)))

            dist = adapter.distribute(mpi_procs=4, omp_threads=1, mem_per_proc=one_gb)
            assert dist.num_nodes == 1 and dist.mpi_per_node == 4 and dist.exact

            dist = adapter.distribute(mpi_procs=16, omp_threads=1, mem_per_proc=one_gb)
            assert dist.num_nodes == 2 and dist.mpi_per_node == 8 and dist.exact

            # not enough memory per node but can distribute.
            dist = adapter.distribute(mpi_procs=8, omp_threads=1, mem_per_proc=2 * one_gb)
            assert dist.num_nodes == 2 and dist.mpi_per_node == 4 and not dist.exact

            # mem_per_proc > mem_per_node!
            with self.assertRaises(adapter.Error):
                dist = adapter.distribute(mpi_procs=9, omp_threads=1, mem_per_proc=10 * one_gb)

            # TODO
            # not commensurate with node
            #d = qad.distribute(mpi_procs=9, omp_threads=1, mem_per_proc=giga)
            #assert d.num_nodes == 3 and d.mpi_per_node == 3 and not d.exact

            # Out-of-range settings must be rejected by validate().
            for make_invalid in (lambda: adapter.set_mpi_procs(25),
                                 lambda: adapter.set_mpi_procs(100),
                                 lambda: adapter.set_omp_threads(10),
                                 lambda: adapter.set_mem_per_proc(9 * one_gb)):
                with self.assertRaises(adapter.Error):
                    make_invalid()
                    adapter.validate()

        # A customized subclass can be registered at runtime.
        class MyAdapter(SlurmAdapter):
            QTYPE = "myslurm"

        SlurmAdapter.register(MyAdapter)
        assert issubclass(MyAdapter, QueueAdapter)

        self.QDICT["queue"]["qtype"] = "myslurm"
        adapter = make_qadapter(**self.QDICT)
        assert isinstance(adapter, MyAdapter)
示例#9
0
    def test_base(self):
        """unit tests for Qadapter subclasses. A more complete coverage would require integration testing."""
        self.maxDiff = None
        # Short aliases to keep the many assertion lines below readable.
        aequal, atrue, afalse = self.assertEqual, self.assertTrue, self.assertFalse
        sub_classes = QueueAdapter.__subclasses__()

        # Test if we can instantiate the concrete classes with the abc protocol.
        for subc in sub_classes:
            print("subclass: ", subc)

            # Create the adapter subclass.
            self.QDICT["queue"]["qtype"] = subc.QTYPE
            qad = make_qadapter(**self.QDICT)
            print(qad)
            hw = qad.hw
            # NOTE(review): mem values appear to be in MB so this is 1 GB — confirm units.
            giga = 1024

            # Test the programmatic interface used to change job parameters.
            # A fresh adapter starts with MPI enabled, OMP disabled, no launches.
            aequal(qad.num_launches, 0)
            afalse(qad.has_omp)
            atrue(qad.has_mpi)
            qad.set_mpi_procs(2)
            aequal(qad.mpi_procs, 2)
            atrue(qad.pure_mpi)
            afalse(qad.pure_omp)
            afalse(qad.hybrid_mpi_omp)
            aequal(qad.mem_per_proc, giga)
            qad.set_mem_per_proc(2 * giga)
            aequal(qad.mem_per_proc, 2 * giga)
            aequal(qad.timelimit, 120)

            # Enable OMP
            # With both MPI and OMP active the adapter reports a hybrid job.
            qad.set_omp_threads(2)
            aequal(qad.omp_threads, 2)
            atrue(qad.has_omp)
            afalse(qad.pure_mpi)
            afalse(qad.pure_omp)
            atrue(qad.hybrid_mpi_omp)

            # OMP threads are capped at the number of cores in one node.
            atrue(qad.hw.can_use_omp_threads(hw.sockets_per_node * hw.cores_per_socket))
            afalse(qad.hw.can_use_omp_threads(hw.sockets_per_node * hw.cores_per_socket + 1))

            # Test the creation of the script
            script = qad.get_script_str("job.sh", "/launch/dir", "executable", "qout_path", "qerr_path", 
                                        stdin="STDIN", stdout="STDOUT", stderr="STDERR")

            # Test whether qad can be serialized with Pickle.
            deserialized_qads = self.serialize_with_pickle(qad, test_eq=False)

            # A deserialized adapter must generate exactly the same script.
            for new_qad in deserialized_qads:
                new_script = new_qad.get_script_str("job.sh", "/launch/dir", "executable", "qout_path", "qerr_path", 
                                                    stdin="STDIN", stdout="STDOUT", stderr="STDERR")
                aequal(new_script, script)

            # Test can_run and distribute
            # The hardware has num_nodes=3, sockets_per_node=2, cores_per_socket=4, mem_per_node="8 Gb"
            # Rejected: too many cores, too many OMP threads, too much memory.
            afalse(qad.can_run_pconf(ParalConf(mpi_ncpus=hw.num_cores+1, omp_ncpus=1, mem_per_cpu=0.1)))
            afalse(qad.can_run_pconf(ParalConf(mpi_ncpus=4, omp_ncpus=9, mem_per_cpu=0.1)))
            afalse(qad.can_run_pconf(ParalConf(mpi_ncpus=4, omp_ncpus=1, mem_per_cpu=10 * giga)))

            d = qad.distribute(mpi_procs=4, omp_threads=1, mem_per_proc=giga)
            assert d.num_nodes == 1 and d.mpi_per_node == 4 and d.exact

            d = qad.distribute(mpi_procs=16, omp_threads=1, mem_per_proc=giga)
            assert d.num_nodes == 2 and d.mpi_per_node == 8 and d.exact

            # not enough memory per node but can distribute.
            d = qad.distribute(mpi_procs=8, omp_threads=1, mem_per_proc=2 * giga)
            assert d.num_nodes == 2 and d.mpi_per_node == 4 and not d.exact

            # mem_per_proc > mem_per_node!
            with self.assertRaises(qad.Error):
                d = qad.distribute(mpi_procs=9, omp_threads=1, mem_per_proc=10 * giga)

            # TODO
            # not commensurate with node
            #d = qad.distribute(mpi_procs=9, omp_threads=1, mem_per_proc=giga)
            #assert d.num_nodes == 3 and d.mpi_per_node == 3 and not d.exact

            # Out-of-range settings: the setter may succeed but validate() must raise.
            with self.assertRaises(qad.Error): 
                qad.set_mpi_procs(25)
                qad.validate()
            with self.assertRaises(qad.Error): 
                qad.set_mpi_procs(100)
                qad.validate()
            with self.assertRaises(qad.Error): 
                qad.set_omp_threads(10)
                qad.validate()
            with self.assertRaises(qad.Error): 
                qad.set_mem_per_proc(9 * giga)
                qad.validate()

        # Test if one can register a customized class.
        class MyAdapter(SlurmAdapter):
            QTYPE = "myslurm"

        SlurmAdapter.register(MyAdapter)
        assert issubclass(MyAdapter, QueueAdapter)

        # make_qadapter must now dispatch the custom qtype to the new subclass.
        self.QDICT["queue"]["qtype"] = "myslurm"
        qad = make_qadapter(**self.QDICT)
        assert isinstance(qad, MyAdapter)
示例#10
0
 def from_dict(cls, d):
     """Recreate the handler from the dict generated by as_dict."""
     if d['queue_adapter'] is None:
         # No adapter was serialized; keep it unset.
         queue_adapter = None
     else:
         queue_adapter = QueueAdapter.from_dict(d['queue_adapter'])
     return cls(job_rundir=d['job_rundir'],
                qout_file=d['qout_file'],
                qerr_file=d['qerr_file'],
                queue_adapter=queue_adapter,
                max_timelimit=d['max_timelimit'],
                timelimit_increase=d['timelimit_increase'])