Example #1
# Module context, as in bcbio-nextgen's bcbio/distributed/prun.py: `start`
# yields its runner once and is consumed as a context manager.
import contextlib
import os

from bcbio import utils
from bcbio.distributed import clusterk, ipython, multi, resources
from bcbio.log import logger
from bcbio.provenance import system


@contextlib.contextmanager
def start(parallel,
          items,
          config,
          dirs=None,
          name=None,
          multiplier=1,
          max_multicore=None):
    """Start a parallel cluster or machines to be used for running remote functions.

    Returns a function used to process items in parallel with a given function.

    Allows sharing of a single cluster across multiple functions with
    identical resource requirements. Uses local execution for non-distributed
    clusters or completed jobs.

    A checkpoint directory keeps track of finished tasks, avoiding spinning up clusters
    for sections that have been previously processed.

    multiplier -- Number of expected jobs per initial input item. Used to avoid
      underscheduling cores when an item is split during processing.
    max_multicore -- The maximum number of cores to use for each process. Can be
      used to limit multicore usage when jobs run faster as multiple single-core
      processes.
    """
    if name:
        checkpoint_dir = utils.safe_makedir(
            os.path.join(dirs["work"], "checkpoints_parallel"))
        checkpoint_file = os.path.join(checkpoint_dir, "%s.done" % name)
    else:
        checkpoint_file = None
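    # Probe machine/cluster resources, drop empty inputs, then size jobs
    # (cores and memory per job) from the remaining items.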
    sysinfo = system.get_info(dirs, parallel)
    items = [x for x in items if x is not None] if items else []
    parallel = resources.calculate(
        parallel,
        items,
        sysinfo,
        config,
        multiplier=multiplier,
        max_multicore=int(max_multicore or sysinfo.get("cores", 1)))
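    # Dispatch: a passed checkpoint short-circuits to a single local core;
    # "ipython" and "clusterk" start managed clusters; anything else runs
    # through local multiprocessing.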
    try:
        if checkpoint_file and os.path.exists(checkpoint_file):
            logger.info("run local -- checkpoint passed: %s" % name)
            parallel["cores_per_job"] = 1
            parallel["num_jobs"] = 1
            yield multi.runner(parallel, config)
        elif parallel["type"] == "ipython":
            with ipython.create(parallel, dirs, config) as view:
                yield ipython.runner(view, parallel, dirs, config)
        elif parallel["type"] == "clusterk":
            with clusterk.create(parallel) as queue:
                yield clusterk.runner(queue, parallel)
        else:
            yield multi.runner(parallel, config)
    except:
        raise
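    # On success only: drop per-run sizing keys so a later call recalculates,
    # and write the checkpoint so future runs skip cluster startup for `name`.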
    else:
        for x in ["cores_per_job", "num_jobs", "mem"]:
            parallel.pop(x, None)
        if checkpoint_file:
            with open(checkpoint_file, "w") as out_handle:
                out_handle.write("done\n")
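A minimal usage sketch, assuming `start` is consumed as a context manager (per
the `@contextlib.contextmanager` decorator above) and that the yielded runner
takes a function name plus the items to process; the "align" name, `parallel`
settings, and `dirs` path are illustrative only:

parallel = {"type": "local", "cores": 4}
dirs = {"work": "/tmp/work"}
with start(parallel, items, config, dirs=dirs, name="align") as run_parallel:
    # The runner dispatches "process_alignment" over items, locally or remotely.
    results = run_parallel("process_alignment", items)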
Example #2
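# Assumed module-level helpers (imports not shown in the original snippet):
# utils.safe_makedir, _check_items, _calculate_resources, config_default,
# create / wait_until_complete (IPython cluster helpers), is_done / flag_done
# (checkpoint flags), os, and log.logger.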
def send_job(fn, data, args, resources=None):
    """decide if send jobs with ipython or run locally"""
    utils.safe_makedir("checkpoint")
    _check_items(data)
    res = []
    dirs = {'work': os.path.abspath(os.getcwd())}
    config = data[0][0]['config']
    if not resources:
        resources = config_default
    step = resources['name']
    if 'mem' not in resources or 'cores' not in resources:
        raise ValueError("resources without mem or cores keys: %s" % resources)
    par = _calculate_resources(data, args, resources)
    # args.memory_per_job = resources['mem']
    # args.cores_per_job = resources['cores']
    # log.setup_log(args)
    log.logger.debug("doing %s" % step)
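    # Send to an IPython cluster only for the "ipython" parallel type and only
    # when this step is not already flagged done; otherwise run serially below.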
    if par['type'] == "ipython" and not is_done(step):
        with create(par, dirs, config) as view:
            for sample in data:
                res.append(view.apply_async(fn, sample[0], args))
            res = wait_until_complete(res)
        flag_done(step)
        return res
    for sample in data:
        res.append([fn(sample[0], args)])
    return res
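A hedged call sketch: per the code above, `data` must be a list of
single-element lists of sample dicts (each carrying a 'config' key) and
`resources` must supply 'name', 'mem', and 'cores'; the worker function and
step name here are hypothetical:

def count_reads(sample, args):
    # Hypothetical worker; any function taking (sample, args) will do.
    return sample

res = send_job(count_reads, data, args,
               resources={"name": "count", "mem": 4, "cores": 2})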
Example #3
# Same module context as Example #1 (imports above); again a context manager.
@contextlib.contextmanager
def start(parallel, items, config, dirs=None, name=None, multiplier=1,
          max_multicore=None):
    """Start a parallel cluster or machines to be used for running remote
    functions.

    Returns a function used to process items in parallel with a given function.

    Allows sharing of a single cluster across multiple functions with
    identical resource requirements. Uses local execution for non-distributed
    clusters or completed jobs.

    A checkpoint directory keeps track of finished tasks, avoiding spinning up
    clusters for sections that have been previously processed.

    multiplier -- Number of expected jobs per initial input item. Used to avoid
    underscheduling cores when an item is split during processing.
    max_multicore -- The maximum number of cores to use for each process. Can be
    used to limit multicore usage when jobs run faster as multiple single-core
    processes.
    """
    if name:
        checkpoint_dir = utils.safe_makedir(os.path.join(dirs["work"],
                                                         "checkpoints_parallel"))
        checkpoint_file = os.path.join(checkpoint_dir, "%s.done" % name)
    else:
        checkpoint_file = None
    sysinfo = system.get_info(dirs, parallel)
    items = [x for x in items if x is not None] if items else []
    max_multicore = int(max_multicore or sysinfo.get("cores", 1))
    parallel = resources.calculate(parallel, items, sysinfo, config,
                                   multiplier=multiplier,
                                   max_multicore=max_multicore)
    try:
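        # Track the cluster view so a failure can shut the IPython engines down.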
        view = None
        if checkpoint_file and os.path.exists(checkpoint_file):
            logger.info("run local -- checkpoint passed: %s" % name)
            parallel["cores_per_job"] = 1
            parallel["num_jobs"] = 1
            parallel["checkpointed"] = True
            yield multi.runner(parallel, config)
        elif parallel["type"] == "ipython":
            with ipython.create(parallel, dirs, config) as view:
                yield ipython.runner(view, parallel, dirs, config)
        elif parallel["type"] == "clusterk":
            with clusterk.create(parallel) as queue:
                yield clusterk.runner(queue, parallel)
        else:
            yield multi.runner(parallel, config)
    except:
        if view is not None:
            ipython.stop(view)
        raise
    else:
        for x in ["cores_per_job", "num_jobs", "mem"]:
            parallel.pop(x, None)
        if checkpoint_file:
            with open(checkpoint_file, "w") as out_handle:
                out_handle.write("done\n")