Example #1
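This snippet (like Examples #8 and #9 below) is not self-contained: it leans on project-local helpers. A plausible preamble is sketched here; the module imported as gen (a reference-data generator exposing PROJECT_NAME, ARCHIVE_DIR, get_masked_flowproject, and _store_bundled) and the _env_name helper are assumptions inferred from how they are used below. Example #9 additionally uses FakeScheduler from flow's scheduling subpackage.

import io
import operator
import os
import sys
from contextlib import redirect_stderr, redirect_stdout

import signac

import flow

# Assumed project-local module that generates the template reference data.
import generate_template_reference_data as gen


def _env_name(env):
    # Assumed helper: a stable, unique identifier for an environment class.
    return f"{env.__module__}.{env.__name__}"
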
def test_env(env):
    # Force asserts to show the full file when failures occur; this is
    # useful when debugging errors that arise.

    # Must import the data into the project.
    with signac.TemporaryProject(name=gen.PROJECT_NAME) as p:
        fp = gen.get_masked_flowproject(p)
        fp.import_from(origin=gen.ARCHIVE_DIR)
        jobs = fp.find_jobs(dict(environment=_env_name(env)))
        if not len(jobs):
            raise RuntimeError(
                "No reference data for environment {}!".format(_env_name(env))
            )
        reference = []
        generated = []
        for job in jobs:
            parameters = job.sp.parameters()
            if 'bundle' in parameters:
                bundle = parameters.pop('bundle')
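                # Capture the pretend submission's stdout in a seekable text
                # buffer; stderr is discarded via os.devnull.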
                tmp_out = io.TextIOWrapper(
                    io.BytesIO(), sys.stdout.encoding)
                with open(os.devnull, 'w') as devnull:
                    with redirect_stderr(devnull):
                        with redirect_stdout(tmp_out):
                            fp.submit(
                                env=env, jobs=[job], names=bundle, pretend=True,
                                force=True, bundle_size=len(bundle), **parameters)
                tmp_out.seek(0)
                msg = "---------- Bundled submission of job {}".format(job)
                generated.extend([msg] + tmp_out.read().splitlines())

                with open(job.fn('script_{}.sh'.format('_'.join(bundle)))) as file:
                    reference.extend([msg] + file.read().splitlines())
            else:
                for op in fp.operations:
                    if 'partition' in parameters:
                        # Don't try to submit GPU operations to CPU partitions
                        # and vice versa.  We should be able to relax this
                        # requirement if we make our error checking more
                        # consistent.
                        if operator.xor(
                            'gpu' in parameters['partition'].lower(),
                            'gpu' in op.lower(),
                        ):
                            continue
                    tmp_out = io.TextIOWrapper(
                        io.BytesIO(), sys.stdout.encoding)
                    with open(os.devnull, 'w') as devnull:
                        with redirect_stderr(devnull):
                            with redirect_stdout(tmp_out):
                                fp.submit(
                                    env=env, jobs=[job],
                                    names=[op], pretend=True, force=True, **parameters)
                    tmp_out.seek(0)
                    msg = "---------- Submission of operation {} for job {}.".format(op, job)
                    generated.extend([msg] + tmp_out.read().splitlines())

                    with open(job.fn('script_{}.sh'.format(op))) as file:
                        reference.extend([msg] + file.read().splitlines())

        assert '\n'.join(reference) == '\n'.join(generated)
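
The unittest-style methods that follow are a separate snippet: the self.assert* calls and the mock_project() helper mark them as members of a unittest.TestCase subclass. A wrapper with assumed names restores that context:

import unittest
from io import StringIO


class MockFlowProjectTest(unittest.TestCase):  # class name is an assumption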
    def test_run_operations_implicit_argument(self):
        project = self.mock_project()
        for job in project:
            ops = project.next_operations(job)
            with suspend_logging():
                with redirect_stderr(StringIO()):
                    project.run(ops)
        for job in project:
            self.assertIn('said_hello', list(project.labels(job)))

    def test_print_status(self):
        project = self.mock_project()
        for job in project:
            list(project.classify(job))
            self.assertEqual(project.next_operation(job).name, 'a_op')
            self.assertEqual(project.next_operation(job).job, job)
        fd = StringIO()
        with redirect_stderr(StringIO()):
            with redirect_stdout(StringIO()):
                project.print_status(file=fd, err=fd)

    def test_submit_limited(self):
        env = get_environment()
        sched = env.scheduler_type()
        sched.reset()
        project = self.mock_project()
        self.assertEqual(len(list(sched.jobs())), 0)
        with suspend_logging():
            with redirect_stderr(StringIO()):
                project.submit(env, num=1)
                self.assertEqual(len(list(sched.jobs())), 1)
                project.submit(env, num=1)
                self.assertEqual(len(list(sched.jobs())), 2)

    def test_bundles(self):
        env = get_environment()
        sched = env.scheduler_type()
        sched.reset()
        project = self.mock_project()
        self.assertEqual(len(list(sched.jobs())), 0)
        with suspend_logging():
            with redirect_stderr(StringIO()):
                project.submit(bundle_size=2, num=2)
                self.assertEqual(len(list(sched.jobs())), 1)
                project.submit(bundle_size=2, num=4)
                self.assertEqual(len(list(sched.jobs())), 3)
                sched.reset()
                project._fetch_scheduler_status(file=StringIO())
                project.submit(bundle_size=0)
                self.assertEqual(len(list(sched.jobs())), 1)

    def test_submit_operations(self):
        env = get_environment()
        sched = env.scheduler_type()
        sched.reset()
        project = self.mock_project()
        operations = []
        for job in project:
            operations.extend(project.next_operations(job))
        self.assertEqual(len(list(sched.jobs())), 0)
        cluster_job_id = project._store_bundled(operations)
        with suspend_logging():
            with redirect_stderr(StringIO()):
                project.submit_operations(_id=cluster_job_id,
                                          env=env,
                                          operations=operations)
        self.assertEqual(len(list(sched.jobs())), 1)
        sched.reset()

    def test_run(self):
        project = self.mock_project()
        with redirect_stderr(StringIO()):
            project.run()
        for job in project:
            self.assertIn('said_hello', list(project.labels(job)))
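
The methods above also rely on helpers that are not shown: mock_project() builds a small test FlowProject, get_environment() returns the active environment, and suspend_logging() silences log output. A minimal sketch of the latter, assuming it simply disables logging for the duration of the block:

import logging
from contextlib import contextmanager


@contextmanager
def suspend_logging():
    # Assumed implementation: disable all logging while the block runs,
    # then restore normal behavior on exit.
    logging.disable(logging.CRITICAL)
    try:
        yield
    finally:
        logging.disable(logging.NOTSET)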
Example #8
def test_env(env, monkeypatch):
    monkeypatch.setattr(flow.FlowProject, "_store_bundled", gen._store_bundled)
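    # Presumably gen._store_bundled produces deterministic bundle ids so that
    # the generated script filenames match the stored reference scripts.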

    # Force asserts to show the full file when failures occur; this is
    # useful when debugging errors that arise.

    # Must import the data into the project.
    with signac.TemporaryProject(name=gen.PROJECT_NAME) as p:
        fp = gen.get_masked_flowproject(p)
        # Set an explicit executable for all operations. This is necessary
        # because otherwise the default executable used at submission time
        # could vary with the environment running the test.
        executable = "/usr/local/bin/python"
        for group in fp.groups.values():
            for op_key in group.operations:
                if op_key in group.operation_directives:
                    group.operation_directives[op_key][
                        "executable"] = executable
        fp.import_from(origin=gen.ARCHIVE_DIR)
        jobs = fp.find_jobs(dict(environment=_env_name(env)))
        if not len(jobs):
            raise RuntimeError("No reference data for environment {}!".format(
                _env_name(env)))
        reference = []
        generated = []
        for job in jobs:
            parameters = job.sp.parameters()
            if "bundle" in parameters:
                bundle = parameters.pop("bundle")
                tmp_out = io.TextIOWrapper(io.BytesIO(), sys.stdout.encoding)
                with open(os.devnull, "w") as devnull:
                    with redirect_stderr(devnull):
                        with redirect_stdout(tmp_out):
                            fp.submit(
                                env=env,
                                jobs=[job],
                                names=bundle,
                                pretend=True,
                                force=True,
                                bundle_size=len(bundle),
                                **parameters,
                            )
                tmp_out.seek(0)
                msg = f"---------- Bundled submission of job {job}"
                generated.extend([msg] + tmp_out.read().splitlines())

                with open(job.fn("script_{}.sh".format(
                        "_".join(bundle)))) as file:
                    reference.extend([msg] + file.read().splitlines())
            else:
                for op in {**fp.operations, **fp.groups}:
                    if "partition" in parameters:
                        # Don't try to submit GPU operations to CPU partitions
                        # and vice versa.  We should be able to relax this
                        # requirement if we make our error checking more
                        # consistent.
                        if operator.xor(
                                "gpu" in parameters["partition"].lower(),
                                "gpu" in op.lower(),
                        ):
                            continue
                    tmp_out = io.TextIOWrapper(io.BytesIO(),
                                               sys.stdout.encoding)
                    with open(os.devnull, "w") as devnull:
                        with redirect_stderr(devnull):
                            with redirect_stdout(tmp_out):
                                fp.submit(
                                    env=env,
                                    jobs=[job],
                                    names=[op],
                                    pretend=True,
                                    force=True,
                                    **parameters,
                                )
                    tmp_out.seek(0)
                    msg = f"---------- Submission of operation {op} for job {job}."
                    generated.extend([msg] + tmp_out.read().splitlines())

                    with open(job.fn(f"script_{op}.sh")) as file:
                        reference.extend([msg] + file.read().splitlines())
        assert "\n".join(reference) == "\n".join(generated)
Example #9
def test_env(env, monkeypatch):
    monkeypatch.setattr(flow.FlowProject, "_store_bundled", gen._store_bundled)
    # We need to set the scheduler manually. The FakeScheduler is used for two
    # reasons. First, the FakeScheduler prints scripts to screen on submission
    # and we can capture that output. Second, the FakeScheduler won't try to
    # call any cluster executable (e.g. squeue) associated with the real
    # schedulers used on supported clusters. Otherwise submission would fail
    # when attempting to determine what jobs already exist on the scheduler.
    monkeypatch.setattr(env, "scheduler_type", FakeScheduler)

    # Force asserts to show the full file when failures occur; this is
    # useful when debugging errors that arise.

    # Must import the data into the project.
    with signac.TemporaryProject(name=gen.PROJECT_NAME) as p:
        with gen.get_masked_flowproject(p, environment=env) as fp:
            # Set an explicit executable for all operations. This is
            # necessary because otherwise the default executable used at
            # submission time could vary with the environment running the
            # test.
            for group in fp.groups.values():
                for op_key in group.operations:
                    if op_key in group.operation_directives:
                        monkeypatch.setitem(
                            group.operation_directives[op_key],
                            "executable",
                            gen.MOCK_EXECUTABLE,
                        )
            fp.import_from(origin=gen.ARCHIVE_DIR)
            jobs = fp.find_jobs(dict(environment=_env_name(env)))
            if not len(jobs):
                raise RuntimeError(
                    "No reference data for environment {}!".format(
                        _env_name(env)))
            reference = []
            generated = []
            for job in jobs:
                parameters = job.sp.parameters()
                if "bundle" in parameters:
                    bundle = parameters.pop("bundle")
                    tmp_out = io.TextIOWrapper(io.BytesIO(),
                                               sys.stdout.encoding)
                    with open(os.devnull, "w") as devnull:
                        with redirect_stderr(devnull):
                            with redirect_stdout(tmp_out):
                                fp.submit(
                                    jobs=[job],
                                    names=bundle,
                                    pretend=True,
                                    force=True,
                                    bundle_size=len(bundle),
                                    **parameters,
                                )
                    tmp_out.seek(0)
                    msg = f"---------- Bundled submission of job {job}"
                    generated.extend([msg] + tmp_out.read().splitlines())

                    with open(job.fn("script_{}.sh".format(
                            "_".join(bundle)))) as file:
                        reference.extend([msg] + file.read().splitlines())
                else:
                    for op in {**fp.operations, **fp.groups}:
                        if "partition" in parameters:
                            # Don't try to submit GPU operations to CPU partitions
                            # and vice versa.  We should be able to relax this
                            # requirement if we make our error checking more
                            # consistent.
                            if operator.xor(
                                    "gpu" in parameters["partition"].lower(),
                                    "gpu" in op.lower(),
                            ):
                                continue
                        tmp_out = io.TextIOWrapper(io.BytesIO(),
                                                   sys.stdout.encoding)
                        with open(os.devnull, "w") as devnull:
                            with redirect_stderr(devnull):
                                with redirect_stdout(tmp_out):
                                    fp.submit(
                                        jobs=[job],
                                        names=[op],
                                        pretend=True,
                                        force=True,
                                        **parameters,
                                    )
                        tmp_out.seek(0)
                        msg = f"---------- Submission of operation {op} for job {job}."
                        generated.extend([msg] + tmp_out.read().splitlines())

                        with open(job.fn(f"script_{op}.sh")) as file:
                            reference.extend([msg] + file.read().splitlines())
            assert "\n".join(generated) == "\n".join(reference)