예제 #1
0
 def test_init_context_manager_constructor(self):
     """Jobs created inside a TemporaryProject disappear with its workspace."""
     num_jobs = 10
     with signac.TemporaryProject() as tmp_project:
         # Workspace directory must exist while the context is active.
         assert os.path.isdir(tmp_project.root_directory())
         for value in range(num_jobs):
             tmp_project.open_job({"a": value}).init()
         assert len(tmp_project) == num_jobs
     # Leaving the context manager removes the temporary workspace.
     assert not os.path.isdir(tmp_project.root_directory())
예제 #2
0
def test_env(env):
    """Compare pretend-submission output for ``env`` with archived reference scripts.

    For every reference job matching the environment, re-run
    ``fp.submit(pretend=True, force=True)`` while capturing stdout, and
    assert that the captured script text equals the ``script_*.sh`` files
    stored in the job workspace.
    """
    # Force asserts to show the full file when failures occur.
    # Useful to debug errors that arise.

    # Must import the data into the project.
    with signac.TemporaryProject(name=gen.PROJECT_NAME) as p:
        fp = gen.get_masked_flowproject(p)
        fp.import_from(origin=gen.ARCHIVE_DIR)
        jobs = fp.find_jobs(dict(environment=_env_name(env)))
        if not len(jobs):
            raise RuntimeError(
                "No reference data for environment {}!".format(_env_name(env))
                )
        reference = []
        generated = []
        for job in jobs:
            parameters = job.sp.parameters()
            if 'bundle' in parameters:
                # Bundled submission: all named operations go into one script.
                bundle = parameters.pop('bundle')
                # Capture the pretend-submission script printed to stdout;
                # stderr is discarded via os.devnull.
                tmp_out = io.TextIOWrapper(
                    io.BytesIO(), sys.stdout.encoding)
                with open(os.devnull, 'w') as devnull:
                    with redirect_stderr(devnull):
                        with redirect_stdout(tmp_out):
                            fp.submit(
                                env=env, jobs=[job], names=bundle, pretend=True,
                                force=True, bundle_size=len(bundle), **parameters)
                tmp_out.seek(0)
                msg = "---------- Bundled submission of job {}".format(job)
                generated.extend([msg] + tmp_out.read().splitlines())

                # Reference script was archived alongside the job.
                with open(job.fn('script_{}.sh'.format('_'.join(bundle)))) as file:
                    reference.extend([msg] + file.read().splitlines())
            else:
                # Individual submission, one script per operation.
                for op in fp.operations:
                    if 'partition' in parameters:
                        # Don't try to submit GPU operations to CPU partitions
                        # and vice versa.  We should be able to relax this
                        # requirement if we make our error checking more
                        # consistent.
                        if operator.xor(
                            'gpu' in parameters['partition'].lower(),
                                'gpu' in op.lower()):
                            continue
                    tmp_out = io.TextIOWrapper(
                        io.BytesIO(), sys.stdout.encoding)
                    with open(os.devnull, 'w') as devnull:
                        with redirect_stderr(devnull):
                            with redirect_stdout(tmp_out):
                                fp.submit(
                                    env=env, jobs=[job],
                                    names=[op], pretend=True, force=True, **parameters)
                    tmp_out.seek(0)
                    msg = "---------- Submission of operation {} for job {}.".format(op, job)
                    generated.extend([msg] + tmp_out.read().splitlines())

                    with open(job.fn('script_{}.sh'.format(op))) as file:
                        reference.extend([msg] + file.read().splitlines())

        # Single comparison so a failure shows the full diff of all scripts.
        assert '\n'.join(reference) == '\n'.join(generated)
예제 #3
0
 def test_init_context_manager_constructor(self):
     """A TemporaryProject's workspace exists only while the context is open."""
     num_jobs = 10
     with signac.TemporaryProject() as tmp_project:
         self.assertTrue(os.path.isdir(tmp_project.root_directory()))
         for index in range(num_jobs):
             tmp_project.open_job(dict(a=index)).init()
         self.assertEqual(len(tmp_project), num_jobs)
     # Exiting the context manager deletes the temporary workspace.
     self.assertFalse(os.path.isdir(tmp_project.root_directory()))
예제 #4
0
 def test_init_project_method(self):
     """project.temporary_project() yields a project cleaned up on exit."""
     with signac.TemporaryProject() as project:
         with project.temporary_project() as tmp_project:
             # The nested temporary project gets its own workspace directory.
             assert os.path.isdir(tmp_project.root_directory())
             for value in range(10):
                 tmp_project.open_job({"a": value}).init()
             assert len(tmp_project) == 10
         # The nested project's workspace is removed once its context ends.
         assert not os.path.isdir(tmp_project.root_directory())
예제 #5
0
 def test_init_project_method(self):
     """Nested temporary projects are torn down when their context exits."""
     num_jobs = 10
     with signac.TemporaryProject() as project:
         with project.temporary_project() as tmp_project:
             self.assertTrue(os.path.isdir(tmp_project.root_directory()))
             for index in range(num_jobs):
                 tmp_project.open_job(dict(a=index)).init()
             self.assertEqual(len(tmp_project), num_jobs)
         # After leaving the inner context, the workspace must be gone.
         self.assertFalse(os.path.isdir(tmp_project.root_directory()))
def cli(num_jobs, bundle, parallel, entrypoint):
    """Generate a project with specific set of operations and pretend to submit.

    Usage example:

        \b ./interactive_template_test.py -n 28 -b 28 -p

    You can use the `SIGNAC_FLOW_ENVIRONMENT` environment variable to specify
    the environment to test, e.g.:

        \b
        $ SIGNAC_FLOW_ENVIRONMENT=Bridges2Environment ./interactive_template_test.py

    See `./interactive_template_test.py --help` for more information.
    """
    import flow.environments

    class Project(flow.FlowProject):
        pass

    # Register op_0, op_1, ... until add_operation signals there are no more.
    for i in count():
        if not add_operation(Project, f"op_{i}"):
            break

    with signac.TemporaryProject() as tmp_project:
        for i in range(num_jobs):
            tmp_project.open_job(dict(foo=i)).init()
        flow_project = Project.get_project(root=tmp_project.root_directory())
        flow_project._entrypoint.setdefault("path", entrypoint)

        partition = ""
        force = False
        # Interactive loop: prompt for a partition, pretend-submit, and page
        # the generated script. Exit via CTRL-C (click raises Abort).
        while True:
            if force:
                click.echo("Pretend submit with --force.")
            partition = click.prompt(
                "Partition (Hit CTRL-C to cancel.)",
                default=partition,
                show_default=True,
            )
            try:
                out = io.StringIO()
                # Capture stdout for paging; discard stderr. Open os.devnull
                # in a context manager so the handle is closed after use —
                # the previous version leaked one file handle per iteration.
                with open(os.devnull, "w") as devnull:
                    with redirect_stdout(out):
                        with redirect_stderr(devnull):
                            flow_project.submit(
                                pretend=True,
                                partition=partition,
                                bundle_size=bundle,
                                parallel=parallel,
                                force=force,
                            )
                click.echo_via_pager(out.getvalue())
            except (jinja2.exceptions.TemplateError, RuntimeError) as error:
                # Show the error and offer to retry the same partition with
                # --force; on success the flag is reset below.
                click.secho(str(error), fg="yellow")
                force = click.prompt("Use --force?", default=False)
            else:
                force = False
def main(args):
    """Regenerate the reference submission-script archive at ARCHIVE_DIR.

    For every job in a freshly initialized temporary project, pretend-submit
    each operation (or bundle) for the environment named in the job's
    statepoint, write the captured scheduler script to ``script_*.sh`` in the
    job workspace, and finally export everything to ARCHIVE_DIR.
    """
    # If the ARCHIVE_DIR already exists, only recreate if forced.
    if os.path.exists(ARCHIVE_DIR):
        if args.force:
            print("Removing existing archive '{}'.".format(ARCHIVE_DIR))
            # NOTE(review): os.unlink removes files only — presumably
            # ARCHIVE_DIR is a single archive file (e.g. a tarball), not a
            # directory; confirm against export_to below.
            os.unlink(ARCHIVE_DIR)
        else:
            print("Archive '{}' already exists, exiting. "
                  "Use `-f/--force` to overwrite.".format(ARCHIVE_DIR))
            return

    # NOTE: We should replace the below line with
    # with signac.TemporaryProject(name=PROJECT_NAME, cls=TestProject) as fp:
    # once the next version of signac is released, and we can then remove
    # the additional FlowProject instantiation below
    with signac.TemporaryProject(name=PROJECT_NAME) as p:
        init(p)
        fp = get_masked_flowproject(p)

        for job in fp:
            with job:
                kwargs = job.statepoint()
                # Resolve the environment class named in the statepoint.
                env = get_nested_attr(flow, kwargs['environment'])
                parameters = kwargs['parameters']
                if 'bundle' in parameters:
                    # Bundled submission: one script covering all operations.
                    bundle = parameters.pop('bundle')
                    fn = 'script_{}.sh'.format('_'.join(bundle))
                    # Capture the pretend-submission output printed to stdout.
                    tmp_out = io.TextIOWrapper(io.BytesIO(),
                                               sys.stdout.encoding)
                    with redirect_stdout(tmp_out):
                        try:
                            fp.submit(env=env,
                                      jobs=[job],
                                      names=bundle,
                                      pretend=True,
                                      force=True,
                                      bundle_size=len(bundle),
                                      **parameters)
                        except jinja2.TemplateError as e:
                            print('ERROR:',
                                  e)  # Shows template error in output script

                    # Filter out non-header lines
                    tmp_out.seek(0)
                    with open(fn, 'w') as f:
                        with redirect_stdout(f):
                            print(tmp_out.read(), end='')
                else:
                    # One reference script per operation.
                    for op in fp.operations:
                        if 'partition' in parameters:
                            # Don't try to submit GPU operations to CPU partitions
                            # and vice versa.  We should be able to relax this
                            # requirement if we make our error checking more
                            # consistent.
                            if operator.xor(
                                    'gpu' in parameters['partition'].lower(),
                                    'gpu' in op.lower()):
                                continue
                        fn = 'script_{}.sh'.format(op)
                        tmp_out = io.TextIOWrapper(io.BytesIO(),
                                                   sys.stdout.encoding)
                        with redirect_stdout(tmp_out):
                            try:
                                fp.submit(env=env,
                                          jobs=[job],
                                          names=[op],
                                          pretend=True,
                                          force=True,
                                          **parameters)
                            except jinja2.TemplateError as e:
                                print(
                                    'ERROR:',
                                    e)  # Shows template error in output script

                        # Filter out non-header lines and the job-name line
                        tmp_out.seek(0)
                        with open(fn, 'w') as f:
                            with redirect_stdout(f):
                                print(tmp_out.read(), end='')

        # For compactness, we move the output into an ARCHIVE_DIR then delete the original data.
        fp.export_to(target=ARCHIVE_DIR)
예제 #8
0
def test_env(env, monkeypatch):
    """Validate pretend-submission output for ``env`` against archived scripts.

    ``FlowProject._store_bundled`` is patched so bundled submissions match
    the behavior used when the reference archive was generated.  Each
    matching reference job is re-submitted with ``pretend=True`` and the
    captured stdout is compared against the stored ``script_*.sh`` files.
    """
    monkeypatch.setattr(flow.FlowProject, "_store_bundled", gen._store_bundled)

    # Force asserts to show the full file when failures occur.
    # Useful to debug errors that arise.

    # Must import the data into the project.
    with signac.TemporaryProject(name=gen.PROJECT_NAME) as p:
        fp = gen.get_masked_flowproject(p)
        # Here we set the appropriate executable for all the operations. This
        # is necessary as otherwise the default executable between submitting
        # and running could look different depending on the environment.
        executable = "/usr/local/bin/python"
        for group in fp.groups.values():
            for op_key in group.operations:
                if op_key in group.operation_directives:
                    group.operation_directives[op_key][
                        "executable"] = executable
        fp.import_from(origin=gen.ARCHIVE_DIR)
        jobs = fp.find_jobs(dict(environment=_env_name(env)))
        if not len(jobs):
            raise RuntimeError("No reference data for environment {}!".format(
                _env_name(env)))
        reference = []
        generated = []
        for job in jobs:
            parameters = job.sp.parameters()
            if "bundle" in parameters:
                # Bundled submission: all named operations in one script.
                bundle = parameters.pop("bundle")
                # Capture the pretend-submission script; discard stderr.
                tmp_out = io.TextIOWrapper(io.BytesIO(), sys.stdout.encoding)
                with open(os.devnull, "w") as devnull:
                    with redirect_stderr(devnull):
                        with redirect_stdout(tmp_out):
                            fp.submit(
                                env=env,
                                jobs=[job],
                                names=bundle,
                                pretend=True,
                                force=True,
                                bundle_size=len(bundle),
                                **parameters,
                            )
                tmp_out.seek(0)
                msg = f"---------- Bundled submission of job {job}"
                generated.extend([msg] + tmp_out.read().splitlines())

                with open(job.fn("script_{}.sh".format(
                        "_".join(bundle)))) as file:
                    reference.extend([msg] + file.read().splitlines())
            else:
                # Individual submission for each operation and each group.
                for op in {**fp.operations, **fp.groups}:
                    if "partition" in parameters:
                        # Don't try to submit GPU operations to CPU partitions
                        # and vice versa.  We should be able to relax this
                        # requirement if we make our error checking more
                        # consistent.
                        if operator.xor(
                                "gpu" in parameters["partition"].lower(),
                                "gpu" in op.lower(),
                        ):
                            continue
                    tmp_out = io.TextIOWrapper(io.BytesIO(),
                                               sys.stdout.encoding)
                    with open(os.devnull, "w") as devnull:
                        with redirect_stderr(devnull):
                            with redirect_stdout(tmp_out):
                                fp.submit(
                                    env=env,
                                    jobs=[job],
                                    names=[op],
                                    pretend=True,
                                    force=True,
                                    **parameters,
                                )
                    tmp_out.seek(0)
                    msg = f"---------- Submission of operation {op} for job {job}."
                    generated.extend([msg] + tmp_out.read().splitlines())

                    with open(job.fn(f"script_{op}.sh")) as file:
                        reference.extend([msg] + file.read().splitlines())
        # Single comparison so a failure shows the full diff of all scripts.
        assert "\n".join(reference) == "\n".join(generated)
예제 #9
0
def test_env(env, monkeypatch):
    """Validate pretend-submission output for ``env`` against archived scripts.

    Patches ``_store_bundled`` and swaps in the ``FakeScheduler`` (see
    comments below), then re-submits every matching reference job with
    ``pretend=True`` and compares the captured stdout to the stored
    ``script_*.sh`` files.
    """
    monkeypatch.setattr(flow.FlowProject, "_store_bundled", gen._store_bundled)
    # We need to set the scheduler manually. The FakeScheduler is used for two
    # reasons. First, the FakeScheduler prints scripts to screen on submission
    # and we can capture that output. Second, the FakeScheduler won't try to
    # call any cluster executable (e.g. squeue) associated with the real
    # schedulers used on supported clusters. Otherwise submission would fail
    # when attempting to determine what jobs already exist on the scheduler.
    monkeypatch.setattr(env, "scheduler_type", FakeScheduler)

    # Force asserts to show the full file when failures occur.
    # Useful to debug errors that arise.

    # Must import the data into the project.
    with signac.TemporaryProject(name=gen.PROJECT_NAME) as p:
        with gen.get_masked_flowproject(p, environment=env) as fp:
            # Here we set the appropriate executable for all the operations. This
            # is necessary as otherwise the default executable between submitting
            # and running could look different depending on the environment.
            for group in fp.groups.values():
                for op_key in group.operations:
                    if op_key in group.operation_directives:
                        monkeypatch.setitem(
                            group.operation_directives[op_key],
                            "executable",
                            gen.MOCK_EXECUTABLE,
                        )
            fp.import_from(origin=gen.ARCHIVE_DIR)
            jobs = fp.find_jobs(dict(environment=_env_name(env)))
            if not len(jobs):
                raise RuntimeError(
                    "No reference data for environment {}!".format(
                        _env_name(env)))
            reference = []
            generated = []
            for job in jobs:
                parameters = job.sp.parameters()
                if "bundle" in parameters:
                    # Bundled submission: all named operations in one script.
                    bundle = parameters.pop("bundle")
                    # Capture the pretend-submission script; discard stderr.
                    tmp_out = io.TextIOWrapper(io.BytesIO(),
                                               sys.stdout.encoding)
                    with open(os.devnull, "w") as devnull:
                        with redirect_stderr(devnull):
                            with redirect_stdout(tmp_out):
                                fp.submit(
                                    jobs=[job],
                                    names=bundle,
                                    pretend=True,
                                    force=True,
                                    bundle_size=len(bundle),
                                    **parameters,
                                )
                    tmp_out.seek(0)
                    msg = f"---------- Bundled submission of job {job}"
                    generated.extend([msg] + tmp_out.read().splitlines())

                    with open(job.fn("script_{}.sh".format(
                            "_".join(bundle)))) as file:
                        reference.extend([msg] + file.read().splitlines())
                else:
                    # Individual submission for each operation and each group.
                    for op in {**fp.operations, **fp.groups}:
                        if "partition" in parameters:
                            # Don't try to submit GPU operations to CPU partitions
                            # and vice versa.  We should be able to relax this
                            # requirement if we make our error checking more
                            # consistent.
                            if operator.xor(
                                    "gpu" in parameters["partition"].lower(),
                                    "gpu" in op.lower(),
                            ):
                                continue
                        tmp_out = io.TextIOWrapper(io.BytesIO(),
                                                   sys.stdout.encoding)
                        with open(os.devnull, "w") as devnull:
                            with redirect_stderr(devnull):
                                with redirect_stdout(tmp_out):
                                    fp.submit(
                                        jobs=[job],
                                        names=[op],
                                        pretend=True,
                                        force=True,
                                        **parameters,
                                    )
                        tmp_out.seek(0)
                        msg = f"---------- Submission of operation {op} for job {job}."
                        generated.extend([msg] + tmp_out.read().splitlines())

                        with open(job.fn(f"script_{op}.sh")) as file:
                            reference.extend([msg] + file.read().splitlines())
            # Single comparison so a failure shows the full diff of all scripts.
            assert "\n".join(generated) == "\n".join(reference)
def main(args):
    """Regenerate the reference submission-script archive at ARCHIVE_DIR.

    For each job in a freshly initialized temporary project, pretend-submit
    every operation/group (or bundle) for the environment named in the job's
    statepoint, write the captured script to ``script_*.sh`` in the job
    workspace, then export the project to ARCHIVE_DIR.
    """
    # If the ARCHIVE_DIR already exists, only recreate if forced.
    if os.path.exists(ARCHIVE_DIR):
        if args.force:
            print(f"Removing existing archive '{ARCHIVE_DIR}'.")
            # NOTE(review): os.unlink removes files only — presumably
            # ARCHIVE_DIR is an archive file, not a directory; confirm.
            os.unlink(ARCHIVE_DIR)
        else:
            print(
                "Archive '{}' already exists, exiting. "
                "Use `-f/--force` to overwrite.".format(ARCHIVE_DIR)
            )
            return

    with signac.TemporaryProject(name=PROJECT_NAME) as p:
        init(p)
        fp = get_masked_flowproject(p)

        for job in fp:
            with job:
                kwargs = job.statepoint()
                # Resolve the environment class named in the statepoint.
                env = get_nested_attr(flow, kwargs["environment"])
                parameters = kwargs["parameters"]
                if "bundle" in parameters:
                    # Bundled submission: one script covering all operations.
                    bundle = parameters.pop("bundle")
                    fn = "script_{}.sh".format("_".join(bundle))
                    # Capture the pretend-submission output printed to stdout.
                    tmp_out = io.TextIOWrapper(io.BytesIO(), sys.stdout.encoding)
                    with redirect_stdout(tmp_out):
                        try:
                            fp.submit(
                                env=env,
                                jobs=[job],
                                names=bundle,
                                pretend=True,
                                force=True,
                                bundle_size=len(bundle),
                                **parameters,
                            )
                        except jinja2.TemplateError as e:
                            print("ERROR:", e)  # Shows template error in output script

                    # Filter out non-header lines
                    tmp_out.seek(0)
                    with open(fn, "w") as f:
                        with redirect_stdout(f):
                            print(tmp_out.read(), end="")
                else:
                    # One reference script per operation and per group.
                    for op in {**fp.operations, **fp.groups}:
                        if "partition" in parameters:
                            # Don't try to submit GPU operations to CPU partitions
                            # and vice versa.  We should be able to relax this
                            # requirement if we make our error checking more
                            # consistent.
                            if operator.xor(
                                "gpu" in parameters["partition"].lower(),
                                "gpu" in op.lower(),
                            ):
                                continue
                        fn = f"script_{op}.sh"
                        tmp_out = io.TextIOWrapper(io.BytesIO(), sys.stdout.encoding)
                        with redirect_stdout(tmp_out):
                            try:
                                fp.submit(
                                    env=env,
                                    jobs=[job],
                                    names=[op],
                                    pretend=True,
                                    force=True,
                                    **parameters,
                                )
                            except jinja2.TemplateError as e:
                                print(
                                    "ERROR:", e
                                )  # Shows template error in output script

                        # Filter out non-header lines and the job-name line
                        tmp_out.seek(0)
                        with open(fn, "w") as f:
                            with redirect_stdout(f):
                                print(tmp_out.read(), end="")

        # For compactness, we move the output into an ARCHIVE_DIR then delete the original data.
        fp.export_to(target=ARCHIVE_DIR)
def main(args):
    """Regenerate the reference submission-script archive at ARCHIVE_DIR.

    Uses a masked flow project with mocked executables and the
    ``FakeScheduler`` so pretend-submission scripts are captured from stdout
    and written to ``script_*.sh`` files, then exported to ARCHIVE_DIR.
    """
    # If the ARCHIVE_DIR already exists, only recreate if forced.
    if os.path.exists(ARCHIVE_DIR):
        if args.force:
            print(f"Removing existing archive '{ARCHIVE_DIR}'.")
            # NOTE(review): os.unlink removes files only — presumably
            # ARCHIVE_DIR is an archive file, not a directory; confirm.
            os.unlink(ARCHIVE_DIR)
        else:
            print("Archive '{}' already exists, exiting. "
                  "Use `-f/--force` to overwrite.".format(ARCHIVE_DIR))
            return

    with signac.TemporaryProject(name=PROJECT_NAME) as p:
        init(p)
        with get_masked_flowproject(p) as fp:
            # Here we set the appropriate executable for all the operations. This
            # is necessary as otherwise the default executable between submitting
            # and running could look different depending on the environment.
            for group in fp.groups.values():
                for op_key in group.operations:
                    if op_key in group.operation_directives:
                        group.operation_directives[op_key][
                            "executable"] = MOCK_EXECUTABLE
            for job in fp:
                with job:
                    kwargs = job.statepoint()
                    # Resolve the environment class named in the statepoint.
                    env = get_nested_attr(flow, kwargs["environment"])
                    # We need to set the scheduler manually. The FakeScheduler
                    # is used for two reasons. First, the FakeScheduler prints
                    # scripts to screen on submission and we can capture that
                    # output. Second, the FakeScheduler won't try to call any
                    # cluster executable (e.g. squeue) associated with the real
                    # schedulers used on supported clusters. Otherwise
                    # submission would fail when attempting to determine what
                    # jobs already exist on the scheduler.
                    env.scheduler_type = FakeScheduler
                    fp._environment = env
                    parameters = kwargs["parameters"]
                    if "bundle" in parameters:
                        # Bundled submission: one script for all operations.
                        bundle = parameters.pop("bundle")
                        fn = "script_{}.sh".format("_".join(bundle))
                        # Capture the pretend-submission output from stdout.
                        tmp_out = io.TextIOWrapper(io.BytesIO(),
                                                   sys.stdout.encoding)
                        with redirect_stdout(tmp_out):
                            try:
                                fp.submit(
                                    jobs=[job],
                                    names=bundle,
                                    pretend=True,
                                    force=True,
                                    bundle_size=len(bundle),
                                    **parameters,
                                )
                            except jinja2.TemplateError as e:
                                print(
                                    "ERROR:",
                                    e)  # Shows template error in output script

                        # Filter out non-header lines
                        tmp_out.seek(0)
                        with open(fn, "w") as f:
                            with redirect_stdout(f):
                                print(tmp_out.read(), end="")
                    else:
                        # One reference script per operation and per group.
                        for op in {**fp.operations, **fp.groups}:
                            if "partition" in parameters:
                                # Don't try to submit GPU operations to CPU partitions
                                # and vice versa.  We should be able to relax this
                                # requirement if we make our error checking more
                                # consistent.
                                if operator.xor(
                                        "gpu"
                                        in parameters["partition"].lower(),
                                        "gpu" in op.lower(),
                                ):
                                    continue
                            fn = f"script_{op}.sh"
                            tmp_out = io.TextIOWrapper(io.BytesIO(),
                                                       sys.stdout.encoding)
                            with redirect_stdout(tmp_out):
                                try:
                                    fp.submit(
                                        jobs=[job],
                                        names=[op],
                                        pretend=True,
                                        force=True,
                                        **parameters,
                                    )
                                except jinja2.TemplateError as e:
                                    print(
                                        "ERROR:", e
                                    )  # Shows template error in output script

                            # Filter out non-header lines and the job-name line
                            tmp_out.seek(0)
                            with open(fn, "w") as f:
                                with redirect_stdout(f):
                                    print(tmp_out.read(), end="")

            # For compactness, we move the output into an ARCHIVE_DIR then delete the original data.
            fp.export_to(target=ARCHIVE_DIR)