Code Example #1
File: environment.py Project: egafni/GenomeKey2
    def __init__(self, config_path, reference_version):
        assert os.path.exists(config_path), '%s does not exist' % config_path
        assert reference_version in [
            'hg38', 'b37'
        ], 'bad reference_version: %s' % reference_version
        self.config_path = config_path
        self.config = ConfigParser(interpolation=ExtendedInterpolation())
        self.config.read(config_path)
        self.config.add_section('ref')
        for k, v in self.config['ref_%s' % reference_version].items():
            self.config.set('ref', k, v)

        assert len(self.config['ref'].items()) > 1
        # set_env_aws_credentials()

        os.environ['REQUESTS_CA_BUNDLE'] = '/etc/ssl/certs/ca-certificates.crt'
        # export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
        from cosmos.web.gemon.views import bprint as gemon_bprint
        from cosmos.api import Cosmos, default_get_submit_args
        from functools import partial
        from flask import Flask

        flask_app = Flask('genomekey',
                          template_folder=os.path.join(library_path,
                                                       'web/templates'))

        flask_app.secret_key = '\x16\x89\xf5-\tK`\xf5FY.\xb9\x9c\xb4qX\xfdm\x19\xbd\xdd\xef\xa9\xe2'
        flask_app.register_blueprint(gemon_bprint, url_prefix='/gemon')
        self.flask_app = flask_app
        self.cosmos_app = Cosmos(self.config['gk']['database_url'],
                                 default_drm=self.config['gk']['default_drm'],
                                 flask_app=flask_app,
                                 get_submit_args=partial(
                                     default_get_submit_args,
                                     parallel_env='orte'))
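The __init__ above expects the INI file at config_path to contain a ref_hg38 or ref_b37 section plus a gk section (it reads gk.database_url and gk.default_drm, and asserts the ref section has more than one key). A hypothetical minimal config, inlined as a Python string purely for illustration (the fasta/dbsnp keys and paths are assumptions):

# Hypothetical minimal config matching what __init__ above reads.
# Each ref_* section needs at least two keys to satisfy the assert.
CONFIG_TEXT = """
[gk]
database_url = sqlite:///genomekey.db
default_drm = local

[ref_b37]
fasta = /refs/b37/human_g1k_v37.fasta
dbsnp = /refs/b37/dbsnp.vcf

[ref_hg38]
fasta = /refs/hg38/GRCh38.fasta
dbsnp = /refs/hg38/dbsnp.vcf
"""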
Code Example #2
def main():
    cosmos = Cosmos("cosmos.sqlite").initdb()

    workflow = cosmos.start("ex1", skip_confirm=True)

    t = workflow.add_task(
        func=say,
        params=dict(text="Hello World", out_file="out.txt"),
        uid="my_task",
        time_req=None,
        core_req=1,
        mem_req=1024,
    )

    print(("task.params", t.params))
    print(("task.input_map", t.input_map))
    print(("task.output_map", t.output_map))
    print(("task.core_req", t.core_req))
    print(("task.time_req", t.time_req))
    print(("task.drm", t.drm))
    print(("task.uid", t.uid))

    workflow.run(cmd_wrapper=py_call)

    sys.exit(0 if workflow.successful else 1)
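Several of these examples pass func=say (and later func=sleep) together with cmd_wrapper=py_call, but never show those functions. With py_call the task is invoked as an ordinary Python function, so minimal sketches might look like the following (names and signatures are taken from the calls above; the bodies are assumptions):

import time


def say(text, out_file):
    # write the given text to out_file
    with open(out_file, "w") as fp:
        fp.write(text + "\n")


def sleep(secs):
    # block for the requested number of seconds
    time.sleep(secs)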
Code Example #3
def main():
    cosmos = Cosmos(
        "sqlite:///%s/sqlite.db" % os.path.dirname(os.path.abspath(__file__)),
        default_drm="local",
    )
    cosmos.initdb()

    sp.check_call("mkdir -p analysis_output/ex1", shell=True)
    os.chdir("analysis_output/ex1")
    workflow = cosmos.start("Example1", restart=True, skip_confirm=True)

    t = workflow.add_task(
        func=say,
        params=dict(text="Hello World", out_file="out.txt"),
        uid="my_task",
        time_req=None,
        core_req=1,
        mem_req=1024,
    )

    print(("task.params", t.params))
    print(("task.input_map", t.input_map))
    print(("task.output_map", t.output_map))
    print(("task.core_req", t.core_req))
    print(("task.time_req", t.time_req))
    print(("task.drm", t.drm))
    print(("task.uid", t.uid))

    workflow.run()

    sys.exit(0 if workflow.successful else 1)
Code Example #4
def main():
    # start cosmos engine
    cosmos = Cosmos(
        database_url="sqlite://",
        default_drm="local",
        # default_drm="ge",
        default_queue="dev-short",
        default_drm_options={},
        get_submit_args=partial(default_get_submit_args, parallel_env="smp"),
    )
    cosmos.initdb()

    # create cosmos workflow
    workflow = cosmos.start(
        # NOTE cosmos will make dirs in this path
        # primary_log_path=os.path.join("logs", "cosmos.log"),
        name="blah",
        restart=True,
        skip_confirm=True,
        fail_fast=True,
    )

    for i in range(100):
        print("add {}".format(i))
        silly_recipe(workflow, i, 100)

    workflow.make_output_dirs()

    # run cosmos workflow
    # with SGESignalHandler(workflow):
    workflow.run()
Code Example #5
File: env_variables.py Project: indraniel/COSMOS2
def main():
    cosmos = Cosmos()
    cosmos.initdb()
    workflow = cosmos.start("env_variables", skip_confirm=True)
    workflow.add_task(func=command_with_env_variables,
                      environment_variables=environment_variables_dict,
                      uid="special")
    workflow.run(cmd_wrapper=py_call)
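The snippet above references command_with_env_variables and environment_variables_dict without defining them. A plausible sketch, assuming the feature simply exports the dict into the task's environment (both bodies are assumptions):

import os

# Assumed contents; the real file defines its own values.
environment_variables_dict = {"MY_VAR": "hello", "OTHER_VAR": "world"}


def command_with_env_variables():
    # the variables passed via environment_variables should be visible here
    for key in ("MY_VAR", "OTHER_VAR"):
        print(key, "=", os.environ.get(key))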
Code Example #6
def test_zero_tasks():
    cosmos = Cosmos()
    cosmos.initdb()
    temp_dir = tempfile.mkdtemp()
    with cd(temp_dir):
        workflow = cosmos.start('workflow', skip_confirm=True)
        workflow.run(set_successful=False)
        workflow.run(cmd_wrapper=py_call)

    shutil.rmtree(temp_dir)
Code Example #7
def main():
    p = argparse.ArgumentParser()
    p.add_argument("-drm",
                   default="local",
                   help="",
                   choices=("local", "drmaa:ge", "ge", "slurm"))
    p.add_argument("-j",
                   "--job-class",
                   help="Submit to this job class if the DRM supports it")
    p.add_argument("-q",
                   "--queue",
                   help="Submit to this queue if the DRM supports it")

    args = p.parse_args()

    cosmos = Cosmos(
        "sqlite:///%s/sqlite.db" % os.path.dirname(os.path.abspath(__file__)),
        # example of how to change arguments if you're not using default_drm='local'
        get_submit_args=partial(default_get_submit_args, parallel_env="smp"),
        default_drm=args.drm,
        default_max_attempts=2,
        default_job_class=args.job_class,
        default_queue=args.queue,
    )
    cosmos.initdb()

    sp.check_call("mkdir -p analysis_output/1000tasks/", shell=True)
    os.chdir("analysis_output/1000tasks/")

    workflow = cosmos.start("1000_tasks", restart=True, skip_confirm=True)

    recipe(workflow)

    workflow.make_output_dirs()
    workflow.run(max_cores=100)

    # Note that if you want to inspect the outputs of any Tasks to decide how to generate the rest of a DAG,
    # you can do so here: add more tasks via workflow.add_task(), then call workflow.run() again.
    # Currently all Tasks in the DAG must run before the outputs of any Task are available;
    # we hope to address that limitation in the future.

    if pygraphviz_available:
        # These images can also be seen on the fly in the web-interface
        draw_stage_graph(workflow.stage_graph(),
                         "/tmp/ex1_stage_graph.png",
                         format="png")
        draw_task_graph(workflow.task_graph(),
                        "/tmp/ex1_task_graph.png",
                        format="png")
    else:
        print("Pygraphviz is not available :(")

    sys.exit(0 if workflow.successful else 1)
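The comment in Example #7 describes growing a DAG between runs: run what exists, inspect the outputs, add more tasks, then call workflow.run() again. A minimal sketch of that pattern, assuming py_call-style Python task functions (stage_one/stage_two are hypothetical):

def stage_one(out_file):
    # hypothetical first stage: write a few records
    with open(out_file, "w") as fp:
        fp.write("r1\nr2\n")


def stage_two(shard):
    # hypothetical per-record stage
    print("processing shard", shard)


def build_incrementally(workflow):
    t1 = workflow.add_task(func=stage_one, params=dict(out_file="a.txt"), uid="a")
    workflow.run(set_successful=False, cmd_wrapper=py_call)  # run what exists so far

    # t1's output now exists on disk; inspect it to decide what to add next
    with open("a.txt") as fp:
        n_records = len(fp.readlines())

    for i in range(n_records):
        workflow.add_task(func=stage_two, params=dict(shard=i), uid=str(i), parents=[t1])
    workflow.run(cmd_wrapper=py_call)  # picks up the newly added tasks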
Code Example #8
def main():
    cosmos = Cosmos()
    cosmos.initdb()
    workflow = cosmos.start('test', skip_confirm=True)
    for i, num_gpus in enumerate([1, 1, 2, 2, 3]):
        task = workflow.add_task(use_cuda_device,
                                 dict(some_arg=i, num_gpus=num_gpus),
                                 gpu_req=num_gpus,
                                 uid=str(i))

    workflow.run(
        max_gpus=len(os.environ['COSMOS_LOCAL_GPU_DEVICES'].split(',')),
        cmd_wrapper=py_call,
        cleanup_at_exit=False,
    )
Code Example #9
File: local_gpus.py Project: indraniel/COSMOS2
def main():
    cosmos = Cosmos().initdb()
    workflow = cosmos.start("gpu", skip_confirm=True)

    for i, num_gpus in enumerate([1, 1, 2, 2, 3]):
        task = workflow.add_task(
            use_cuda_device,
            dict(some_arg=i, num_gpus=num_gpus),
            gpu_req=num_gpus,
            uid=str(i),
        )

    workflow.run(
        max_gpus=len(os.environ["COSMOS_LOCAL_GPU_DEVICES"].split(",")),
        cmd_wrapper=py_call,
    )
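Both GPU examples call use_cuda_device without showing it. Presumably Cosmos allocates devices from COSMOS_LOCAL_GPU_DEVICES and exposes them to the task; a minimal sketch under that assumption:

import os


def use_cuda_device(some_arg, num_gpus):
    # Hypothetical body: report the devices this task was given. We assume
    # Cosmos exports CUDA_VISIBLE_DEVICES for tasks with a gpu_req.
    devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    assert len(devices.split(",")) == num_gpus, "expected %d devices" % num_gpus
    print("task", some_arg, "got devices:", devices)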
Code Example #10
File: ex3_pycall.py Project: indraniel/COSMOS2
def main():
    p = ArgumentParser()
    p.add_argument("--sleep", default=0, type=int)
    args = p.parse_args()

    cosmos = Cosmos("cosmos.sqlite").initdb()
    workflow = cosmos.start("ex3", restart=True, skip_confirm=True)

    t1 = workflow.add_task(func=say,
                           params=dict(text="Hello World", out_file="out.txt"),
                           uid="my_task")
    t2 = workflow.add_task(func=sleep,
                           params=dict(secs=args.sleep),
                           uid="my_task")

    workflow.make_output_dirs()
    workflow.run(cmd_wrapper=py_call)
Code Example #11
File: ex2_complete.py Project: indraniel/COSMOS2
def main():
    p = argparse.ArgumentParser()
    p.add_argument("-drm",
                   default="local",
                   help="",
                   choices=("local", "awsbatch", "slurm", "drmaa:ge", "ge"))
    p.add_argument("-q",
                   "--queue",
                   help="Submit to this queue if the DRM supports it")

    args = p.parse_args()

    cosmos = Cosmos("cosmos.sqlite",
                    default_drm=args.drm,
                    default_max_attempts=2,
                    default_queue=args.queue)
    cosmos.initdb()

    workflow = cosmos.start("Example2", skip_confirm=True)

    recipe(workflow)

    # any parameters that start with out_ are output directories, and will be created if
    # the user calls workflow.make_output_dirs
    workflow.make_output_dirs()
    workflow.run(max_cores=10, cmd_wrapper=py_call)

    # Note that if you want to inspect the outputs of any Tasks to decide how to generate the rest of a DAG,
    # you can do so here: add more tasks via workflow.add_task(), then call workflow.run() again.
    # Currently all Tasks in the DAG must run before the outputs of any Task are available;
    # we hope to address that limitation in the future.

    if pygraphviz_available:
        # These images can also be seen on the fly in the web-interface
        draw_stage_graph(workflow.stage_graph(),
                         "/tmp/ex1_stage_graph.png",
                         format="png")
        draw_task_graph(workflow.task_graph(),
                        "/tmp/ex1_task_graph.png",
                        format="png")
    else:
        print("Pygraphviz is not available :(")

    sys.exit(0 if workflow.successful else 1)
Code Example #12
File: ex_gpu.py Project: egafni/COSMOS2
def main(output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    cosmos = Cosmos()
    cosmos.initdb()
    workflow = cosmos.start(
        "test", skip_confirm=True, primary_log_path=os.path.join(output_dir, "workflow.log"),
    )
    for i, num_gpus in enumerate([1, 1, 2, 2, 3]):
        task = workflow.add_task(
            use_cuda_device, dict(some_arg=i, num_gpus=num_gpus), gpu_req=num_gpus, uid=str(i),
        )

    workflow.run(
        max_gpus=len(os.environ["COSMOS_LOCAL_GPU_DEVICES"].split(",")),
        cmd_wrapper=py_call_cmd_wrapper,
        do_cleanup_atexit=False,
        log_out_dir_func=partial(default_task_log_output_dir, prefix="%s" % output_dir),
    )
Code Example #13
File: ex_awsbatch.py Project: egafni/COSMOS2
def main():
    args = parse_args()

    cosmos = Cosmos(
        "sqlite:///%s/sqlite.db" % os.path.dirname(os.path.abspath(__file__)),
        default_drm="awsbatch",
        default_drm_options=dict(
            container_image=args.container_image,
            s3_prefix_for_command_script_temp_files=args.s3_prefix_for_command_script_temp_files,
            # only retry on spot instance death
            retry_only_if_status_reason_matches="Host EC2 .+ terminated.",
        ),
        default_queue=args.default_queue,
    )
    cosmos.initdb()

    sp.check_call("mkdir -p analysis_output/ex1", shell=True)
    os.chdir("analysis_output/ex1")
    workflow = cosmos.start("Example1", restart=True, skip_confirm=True)

    t = workflow.add_task(
        func=get_instance_info,
        params=dict(out_s3_uri=args.out_s3_uri, sleep=args.sleep),
        uid="",
        time_req=None,
        max_attempts=args.max_attempts,
        core_req=args.core_req,
        mem_req=1024,
    )
    workflow.run()

    print(("task.params", t.params))
    print(("task.input_map", t.input_map))
    print(("task.output_map", t.output_map))
    print(("task.core_req", t.core_req))
    print(("task.time_req", t.time_req))
    print(("task.drm", t.drm))
    print(("task.uid", t.uid))
    print(("task.drm_options", t.drm_options))
    print(("task.queue", t.queue))

    sys.exit(0 if workflow.successful else 1)
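get_instance_info is not shown; a hypothetical implementation that matches the params above (sleeps if requested, reads the EC2 instance type from the metadata service, and streams it to out_s3_uri with the AWS CLI):

import subprocess
import time
import urllib.request


def get_instance_info(out_s3_uri, sleep=0):
    # Hypothetical body: record which instance type ran this task.
    time.sleep(sleep)
    instance_type = urllib.request.urlopen(
        "http://169.254.169.254/latest/meta-data/instance-type", timeout=5
    ).read().decode()
    # stream the result to S3 via stdin ("aws s3 cp - <uri>")
    subprocess.run(["aws", "s3", "cp", "-", out_s3_uri],
                   input=instance_type.encode(), check=True)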
Code Example #14
File: ex3.py Project: egafni/COSMOS2
def main():
    p = ArgumentParser()
    p.add_argument("--sleep", default=0, type=int)
    args = p.parse_args()

    cosmos = Cosmos(
        "sqlite:///%s/sqlite.db" % os.path.dirname(os.path.abspath(__file__)),
        default_drm="local",
    )
    cosmos.initdb()
    workflow = cosmos.start("Example3", restart=True, skip_confirm=True)

    t1 = workflow.add_task(func=say,
                           params=dict(text="Hello World", out_file="out.txt"),
                           uid="my_task")
    t2 = workflow.add_task(func=sleep,
                           params=dict(secs=args.sleep),
                           uid="my_task")

    workflow.make_output_dirs()
    workflow.run(cmd_wrapper=py_call_cmd_wrapper)
Code Example #15
File: mie.py Project: vifehe/yaps2
    def __init__(self, config, drm, restart):
        self.config = config

        self.cosmos = Cosmos(
            database_url='sqlite:///{}'.format(self.config.db),
            get_submit_args=default_get_submit_args,
            default_drm=drm,
        )

        self.cosmos.initdb()

        primary_logfile = os.path.join(
            self.config.rootdir,
            '{}.log'.format(self.config.project_name),
        )

        self.workflow = self.cosmos.start(
            self.config.project_name,
            primary_log_path=primary_logfile,
            restart=restart,
        )

        self.setup_pipeline()
Code Example #16
File: duplicate_uids.py Project: indraniel/COSMOS2
def main():
    cosmos = Cosmos().initdb()
    workflow = cosmos.start("duplicate_uids", skip_confirm=True)
    task = workflow.add_task(func=prepare_data, params=dict(a=1), uid="x")

    # normally you can't add a task with the same uid to the same stage
    with pytest.raises(DuplicateUid):
        workflow.add_task(func=prepare_data, params=dict(a=1), uid="x")

    # set if_duplicate="return" to True to get the same task back that you added
    task2 = workflow.add_task(func=prepare_data,
                              params=dict(a=1),
                              uid="x",
                              if_duplicate="return")
    assert task == task2

    # this can be especially useful in loops to avoid repeating computation
    for _ in range(3):
        task = workflow.add_task(func=prepare_data,
                                 params=dict(a=1),
                                 uid="x",
                                 if_duplicate="return")
        workflow.add_task(func=train_machine_learning_model,
                          params=dict(a=1),
                          uid="x",
                          if_duplicate="return",
                          parents=task)

    # NOTE: parameters must be identical when using this feature
    with pytest.raises(InvalidParams):
        workflow.add_task(func=prepare_data,
                          params=dict(a=1000),
                          uid="x",
                          if_duplicate="return")
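To try this snippet you also need the two task functions (plus pytest and the DuplicateUid/InvalidParams exceptions from cosmos, whose import path is not shown here). Minimal placeholder bodies, matching the params=dict(a=...) calls above:

def prepare_data(a):
    # placeholder body; the real function is defined elsewhere
    print("preparing data with a =", a)


def train_machine_learning_model(a):
    # placeholder body; the real function is defined elsewhere
    print("training model with a =", a)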
Code Example #17
        execution.add_task(tools.filter_bed_by_contig,
                           tags=dict(in_bam=bam_path,
                                     in_bed=target_bed_path,
                                     contig=contig),
                           out_dir='work/{contig}') for contig in contigs
    ]

    freebayes_tasks = one2one(tools.freebayes, bed_tasks,
                              dict(max_complex_gap=max_complex_gap))

    merge_vcf_tasks = many2one(tools.vcf_concat_parts, freebayes_tasks)

    execution.run()


if __name__ == '__main__':
    p = argparse.ArgumentParser()
    p.add_argument('bam_path')
    p.add_argument('target_bed_path')
    p.add_argument('--max_complex_gap', type=int, default=2)
    add_execution_args(p)
    start_kwargs, variant_call_args = pop_execution_args(vars(p.parse_args()))

    cosmos = Cosmos('sqlite:///%s/sqlite.db' %
                    os.path.dirname(os.path.abspath(__file__)))
    cosmos.initdb()
    execution = cosmos.start(output_dir='../analysis_output/variant_calling',
                             **start_kwargs)

    variant_call(execution, **variant_call_args)
Code Example #18
"""
Basic demonstration of the structure of a Task instance
"""
import subprocess as sp
import os
import sys
from cosmos.api import Cosmos, default_get_submit_args
from functools import partial

cosmos = Cosmos('sqlite:///%s/sqlite.db' %
                os.path.dirname(os.path.abspath(__file__)),
                get_submit_args=partial(default_get_submit_args,
                                        parallel_env='smp'),
                default_drm='local')
cosmos.initdb()

sp.check_call('mkdir -p analysis_output/ex1', shell=True)
os.chdir('analysis_output/ex1')
workflow = cosmos.start('Example1', restart=True, skip_confirm=True)


def say(text, out_file, core_req=1):
    return r"""
        echo "{text}" > {out_file}
    """.format(text=text, out_file=out_file)


t = workflow.add_task(func=say,
                      params=dict(text='Hello World',
                                  out_file='out.txt',
                                  core_req=2),
                      uid='my_task')
Code Example #19
def add_one(out_file):
    if os.path.exists(out_file):
        with open(out_file) as fp:
            i = int(fp.read())
            i = int(fp.read())
    else:
        i = 0

    with open(out_file, "w") as fp:
        fp.write(str(i + 1))

    if i < 2:
        # fail the first 2 times so the task gets re-attempted
        raise RuntimeError("intentional failure on attempt %d" % (i + 1))


if __name__ == "__main__":
    cosmos = Cosmos(
        "sqlite:///%s/sqlite.db" % os.path.dirname(os.path.abspath(__file__)),
        default_drm="local",
    )
    cosmos.initdb()
    workflow = cosmos.start("ExampleReattempt",
                            restart=True,
                            skip_confirm=True)

    if os.path.exists("out.txt"):
        os.unlink("out.txt")

    t = workflow.add_task(func=add_one,
                          params=dict(out_file="out.txt"),
                          uid="my_task",
                          max_attempts=3)

    workflow.make_output_dirs()
    workflow.run(cmd_wrapper=py_call)
Code Example #20
"""
Basic demonstration of the structure of a Task instance
"""
import subprocess as sp
import os
import sys
from cosmos.api import Cosmos

cosmos = Cosmos('sqlite:///%s/sqlite.db' %
                os.path.dirname(os.path.abspath(__file__)),
                default_drm='local')
cosmos.initdb()

sp.check_call('mkdir -p analysis_output/ex1', shell=True)
os.chdir('analysis_output/ex1')
workflow = cosmos.start('Example1', restart=True, skip_confirm=True)


def say(text, out_file):
    return r"""
        echo "{text}" > {out_file}
    """.format(text=text, out_file=out_file)


t = workflow.add_task(func=say,
                      params=dict(
                          text='Hello World',
                          out_file='out.txt',
                      ),
                      uid='my_task',
                      time_req=None,
                      core_req=1,
                      mem_req=1024)
Code Example #21
def main():
    args = parse_args()

    cosmos = Cosmos(
        "sqlite:///%s/sqlite.db" % os.path.dirname(os.path.abspath(__file__)),
        default_drm="awsbatch",
        default_drm_options=dict(
            container_image=args.container_image,
            s3_prefix_for_command_script_temp_files=args.s3_prefix_for_command_script_temp_files,
            # only retry on spot instance death
            retry_only_if_status_reason_matches="Host EC2 .+ terminated.",
        ),
        default_queue=args.default_queue,
    )

    cosmos.initdb()

    # sp.check_call("mkdir -p analysis_output/ex1", shell=True)
    # os.chdir("analysis_output/ex1")
    workflow = cosmos.start(f"Evaluate_{args.id}", restart=True, skip_confirm=True)

    parameters = np.load(f"optimize_awsbatch/parameters/{args.id}.npy")

    for i, par in enumerate(parameters):
        parameters_ = dict(
            mean_weight=par[0],
            c_w=par[1],
            tau_pos=par[2],
            tau_neg=par[3],
            A_pos=par[4],
            A_neg=par[5],
            weight_decay=par[6],
            n_filters=25,
            time_max=250,
            crop=20,
            kernel_size=16,
            stride=4,
            intensity=127.5,
            c_w_min=None,
            c_l=True,
            network_type="LC_SNN",

        )
        workflow.add_task(
            func=evaluate,
            params=dict(
                parameters=parameters_,
                out_s3_uri=f"{args.out_s3_uri}/scores/{args.id}/{i}.json",
                sleep=args.sleep,
                train=args.train,
                calibrate=args.calibrate,
                test=args.test
            ),
            uid=str(i),
            time_req=None,
            max_attempts=args.max_attempts,
            core_req=args.core_req,
            mem_req=args.mem_req,
        )
    workflow.run()

    sys.exit(0 if workflow.successful else 1)
Code Example #22
File: ex2.py Project: alliemclean/COSMOS2
        stage_name='Summary_Analysis',
        uid='')  # It's the only Task in this Stage, so doesn't need a specific uid


if __name__ == '__main__':
    import argparse

    p = argparse.ArgumentParser()
    p.add_argument('-drm', default='local', help='', choices=('local', 'drmaa:ge', 'ge'))
    p.add_argument('-q', '--queue', help='Submit to this queue if the DRM supports it')

    args = p.parse_args()

    cosmos = Cosmos('sqlite:///%s/sqlite.db' % os.path.dirname(os.path.abspath(__file__)),
                    # example of how to change arguments if you're NOT using default_drm='local'
                    get_submit_args=partial(default_get_submit_args, parallel_env='smp'),
                    default_drm=args.drm,
                    default_queue=args.queue)
    cosmos.initdb()

    sp.check_call('mkdir -p analysis_output/ex2', shell=True)
    os.chdir('analysis_output/ex2')

    workflow = cosmos.start('Example2', restart=True, skip_confirm=True)

    recipe(workflow)

    workflow.make_output_dirs()
    workflow.run(max_attempts=1, max_cores=10)

    if pygraphviz_available:
Code Example #23
def main():
    args = parse_args()

    load_dotenv()

    def set_env_variables():

        return "\n".join([
            f'export {variable}="{os.getenv(variable)}"'
            for variable in env_variables
        ])

    cosmos = Cosmos(
        RDS_COSMOS_DATABASE,
        default_drm="awsbatch",
        default_drm_options=dict(
            container_image=os.getenv("ECR_CONTAINER_IMAGE"),
            s3_prefix_for_command_script_temp_files=os.path.join(
                S3_BUCKET_PATH, "cosmos-tmp"),
            shm_size=int(args.mem_req * 0.75),
            # only retry on spot instance death
            retry_only_if_status_reason_matches="Host EC2 .+ terminated.",
        ),
        default_queue=os.getenv("BATCH_QUEUE_NAME"),
    )
    cosmos.initdb()

    workflow_name = f"{args.name}-{uuid1().hex}"
    workflow = cosmos.start(workflow_name, restart=True, skip_confirm=True)

    task_name = uuid1().hex

    workflow.add_task(
        func=pretrain,
Code Example #24
def main():
    p = argparse.ArgumentParser()
    p.add_argument('-drm',
                   default='local',
                   help='',
                   choices=('local', 'drmaa:ge', 'ge', 'slurm'))
    p.add_argument('-j',
                   '--job-class',
                   help='Submit to this job class if the DRM supports it')
    p.add_argument('-q',
                   '--queue',
                   help='Submit to this queue if the DRM supports it')

    args = p.parse_args()

    cosmos = Cosmos(
        'sqlite:///%s/sqlite.db?check_same_thread=False' %
        os.path.dirname(os.path.abspath(__file__)),
        # example of how to change arguments if you're not using default_drm='local'
        get_submit_args=partial(default_get_submit_args, parallel_env='smp'),
        default_drm=args.drm,
        default_max_attempts=2,
        default_job_class=args.job_class,
        default_queue=args.queue)
    cosmos.initdb()

    sp.check_call('mkdir -p analysis_output/ex2', shell=True)
    os.chdir('analysis_output/ex2')

    workflow = cosmos.start('Example2', restart=True, skip_confirm=True)

    recipe(workflow)

    workflow.make_output_dirs()
    workflow.run(max_cores=10)
Code Example #25
def make_app(database_url):
    cosmos = Cosmos(database_url)
    flask = cosmos.init_flask()
    return flask
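A possible way to serve the returned app with Flask's built-in development server (the database URL is an example value):

if __name__ == "__main__":
    # run the Cosmos web interface on the development server
    app = make_app("sqlite:///cosmos.sqlite")
    app.run(host="0.0.0.0", port=5000, debug=True)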
Code Example #26
import os

from cosmos.api import Cosmos, py_call


def add_one(out_file):
    if os.path.exists(out_file):
        with open(out_file) as fp:
            i = int(fp.read())
    else:
        i = 0

    with open(out_file, "w") as fp:
        fp.write(str(i + 1))

    if i < 2:
        # fail the first 2 times so the task gets re-attempted
        raise RuntimeError("intentional failure on attempt %d" % (i + 1))


if __name__ == "__main__":
    cosmos = Cosmos("sqlite.db", default_drm="local",)
    cosmos.initdb()
    workflow = cosmos.start("reattempt", restart=True, skip_confirm=True)

    if os.path.exists("out.txt"):
        os.unlink("out.txt")

    t = workflow.add_task(func=add_one, params=dict(out_file="out.txt"), uid="my_task", max_attempts=3)

    workflow.make_output_dirs()
    workflow.run(cmd_wrapper=py_call)