Example 1
def init(sc=None,
         app_name='Hail',
         master=None,
         local='local[*]',
         log=None,
         quiet=False,
         append=False,
         min_block_size=0,
         branching_factor=50,
         tmp_dir=None,
         default_reference='GRCh37',
         idempotent=False,
         global_seed=6348563392232659379,
         spark_conf=None,
         skip_logging_configuration=False,
         local_tmpdir=None,
         _optimizer_iterations=None):
    """Initialize Hail and Spark.

    Examples
    --------
    Import and initialize Hail using GRCh38 as the default reference genome:

    >>> import hail as hl
    >>> hl.init(default_reference='GRCh38')  # doctest: +SKIP

    Notes
    -----
    Hail is not only a Python library; most of Hail is written in Java/Scala
    and runs together with Apache Spark in the Java Virtual Machine (JVM).
    In order to use Hail, a JVM needs to run as well. The :func:`.init`
    function is used to initialize Hail and Spark.

    This function also sets global configuration parameters used for the Hail
    session, like the default reference genome and log file location.
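
    For instance, a log location and a non-default reference genome can be
    set in a single call (``hail.log`` is just an illustrative local path):

    >>> hl.init(log='hail.log', default_reference='GRCh38')  # doctest: +SKIP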

    This function will be called automatically (with default parameters) if
    any Hail functionality requiring the backend (most of the library!) is used.
    To initialize Hail explicitly with non-default arguments, be sure to do so
    directly after importing the module, as in the above example.

    To facilitate the migration from Spark to the ServiceBackend, this
    function calls :func:`.init_service` when the environment variable
    ``HAIL_QUERY_BACKEND`` is set to ``"service"``.

    Note
    ----
    If a :class:`pyspark.SparkContext` is already running, then Hail must be
    initialized with it as an argument:

    >>> hl.init(sc=sc)  # doctest: +SKIP

    See Also
    --------
    :func:`.stop`

    Parameters
    ----------
    sc : pyspark.SparkContext, optional
        Spark context. By default, a Spark context will be created.
    app_name : :class:`str`
        Spark application name.
    master : :class:`str`, optional
        URL identifying the Spark leader (master) node or `local[N]` for local clusters.
    local : :class:`str`
       Local-mode core limit indicator. Must either be `local[N]` where N is a
       positive integer or `local[*]`. The latter indicates Spark should use all
       cores available. `local[*]` does not respect most containerization CPU
       limits. This option is only used if `master` is unset and `spark.master`
       is not set in the Spark configuration.
    log : :class:`str`
        Local path for Hail log file. Does not currently support distributed
        file systems like Google Storage, S3, or HDFS.
    quiet : :obj:`bool`
        Print fewer log messages.
    append : :obj:`bool`
        Append to the end of the log file.
    min_block_size : :obj:`int`
        Minimum file block size in MB.
    branching_factor : :obj:`int`
        Branching factor for tree aggregation.
    tmp_dir : :class:`str`, optional
        Networked temporary directory.  Must be a network-visible file
        path.  Defaults to /tmp in the default scheme.
    default_reference : :class:`str`
        Default reference genome. Either ``'GRCh37'``, ``'GRCh38'``,
        ``'GRCm38'``, or ``'CanFam3'``.
    idempotent : :obj:`bool`
        If ``True``, calling this function is a no-op if Hail has already been initialized.
    global_seed : :obj:`int`, optional
        Global random seed.
    spark_conf : :obj:`dict` of :class:`str` to :class:`str`, optional
        Spark configuration parameters (see the example after this list).
    skip_logging_configuration : :obj:`bool`
        Skip logging configuration in Java and Python.
    local_tmpdir : :class:`str`, optional
        Local temporary directory.  Used on driver and executor nodes.
        Must use the file scheme.  Defaults to TMPDIR, or /tmp.
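
    Extra Spark properties can be passed through `spark_conf` (the key below
    is a standard Spark setting; the memory value is purely illustrative):

    >>> hl.init(spark_conf={'spark.executor.memory': '8g'})  # doctest: +SKIP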
    """
    if Env._hc:
        if idempotent:
            return
        else:
            warning(
                'Hail has already been initialized. If this call was intended to change configuration,'
                ' close the session with hl.stop() first.')

    if os.environ.get('HAIL_QUERY_BACKEND') == 'service':
        import asyncio
        # NB: do not use warning because that will initialize Env._hc, which we are trying to do right now.
        print(
            'When using the query service backend, use `await init_service\'',
            file=sys.stderr)
        return asyncio.get_event_loop().run_until_complete(
            init_service(
                log=log,
                quiet=quiet,
                append=append,
                tmpdir=tmp_dir,
                local_tmpdir=local_tmpdir,
                default_reference=default_reference,
                global_seed=global_seed,
                skip_logging_configuration=skip_logging_configuration))

    from hail.backend.spark_backend import SparkBackend

    # Normalize the log path and the remote and local temporary directories.
    log = _get_log(log)
    tmpdir = _get_tmpdir(tmp_dir)
    local_tmpdir = _get_local_tmpdir(local_tmpdir)
    # HAIL_OPTIMIZER_ITERATIONS may override the private knob; defaults to 3.
    optimizer_iterations = get_env_or_default(_optimizer_iterations,
                                              'HAIL_OPTIMIZER_ITERATIONS', 3)

    backend = SparkBackend(idempotent, sc, spark_conf, app_name, master, local,
                           log, quiet, append, min_block_size,
                           branching_factor, tmpdir, local_tmpdir,
                           skip_logging_configuration, optimizer_iterations)

    # Create the networked temporary directory if it does not already exist.
    if not backend.fs.exists(tmpdir):
        backend.fs.mkdir(tmpdir)

    HailContext.create(log, quiet, append, tmpdir, local_tmpdir,
                       default_reference, global_seed, backend)
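
The body above defers to `get_env_or_default` so that the private
`_optimizer_iterations` knob can be overridden via the environment. A minimal
sketch of that helper's apparent contract (a hypothetical reimplementation for
illustration, not Hail's actual utility):

import os

def get_env_or_default(value, env_var, default):
    # An explicit argument wins; otherwise fall back to the environment
    # variable, then to the hard-coded default.
    if value is not None:
        return value
    env = os.environ.get(env_var)
    # Assumes an integer-valued setting, as with HAIL_OPTIMIZER_ITERATIONS.
    return int(env) if env is not None else default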
Example 2
def init(sc=None,
         app_name='Hail',
         master=None,
         local='local[*]',
         log=None,
         quiet=False,
         append=False,
         min_block_size=0,
         branching_factor=50,
         tmp_dir='/tmp',
         default_reference='GRCh37',
         idempotent=False,
         global_seed=6348563392232659379,
         spark_conf=None,
         skip_logging_configuration=False,
         local_tmpdir=None,
         _optimizer_iterations=None):
    """Initialize Hail and Spark.

    Examples
    --------
    Import and initialize Hail using GRCh38 as the default reference genome:

    >>> import hail as hl
    >>> hl.init(default_reference='GRCh38')  # doctest: +SKIP

    Notes
    -----
    Hail is not only a Python library; most of Hail is written in Java/Scala
    and runs together with Apache Spark in the Java Virtual Machine (JVM).
    In order to use Hail, a JVM needs to run as well. The :func:`.init`
    function is used to initialize Hail and Spark.

    This function also sets global configuration parameters used for the Hail
    session, like the default reference genome and log file location.

    This function will be called automatically (with default parameters) if
    any Hail functionality requiring the backend (most of the library!) is used.
    To initialize Hail explicitly with non-default arguments, be sure to do so
    directly after importing the module, as in the above example.

    Note
    ----
    If a :class:`pyspark.SparkContext` is already running, then Hail must be
    initialized with it as an argument:

    >>> hl.init(sc=sc)  # doctest: +SKIP

    See Also
    --------
    :func:`.stop`

    Parameters
    ----------
    sc : pyspark.SparkContext, optional
        Spark context. By default, a Spark context will be created.
    app_name : :obj:`str`
        Spark application name.
    master : :obj:`str`, optional
        Spark master URL. If unset, `local` or the Spark configuration
        determines the master.
    local : :obj:`str`
       Local-mode master, used if `master` is not defined here or in the
       Spark configuration.
    log : :obj:`str`
        Local path for Hail log file. Does not currently support distributed
        file systems like Google Storage, S3, or HDFS.
    quiet : :obj:`bool`
        Print fewer log messages.
    append : :obj:`bool`
        Append to the end of the log file.
    min_block_size : :obj:`int`
        Minimum file block size in MB.
    branching_factor : :obj:`int`
        Branching factor for tree aggregation.
    tmp_dir : :obj:`str`, optional
        Networked temporary directory.  Must be a network-visible file
        path.  Defaults to /tmp in the default scheme.
    default_reference : :obj:`str`
        Default reference genome. Either ``'GRCh37'``, ``'GRCh38'``,
        ``'GRCm38'``, or ``'CanFam3'``.
    idempotent : :obj:`bool`
        If ``True``, calling this function is a no-op if Hail has already been initialized.
    global_seed : :obj:`int`, optional
        Global random seed.
    spark_conf : :obj:`dict[str, str]`, optional
        Spark configuration parameters.
    skip_logging_configuration : :obj:`bool`
        Skip logging configuration in Java and Python.
    local_tmpdir : :obj:`str`, optional
        Local temporary directory.  Used on driver and executor nodes.
        Must use the file scheme.  Defaults to TMPDIR, or /tmp.
    """
    from hail.backend.spark_backend import SparkBackend

    if Env._hc:
        if idempotent:
            return
        else:
            warning(
                'Hail has already been initialized. If this call was intended to change configuration,'
                ' close the session with hl.stop() first.')

    log = _get_log(log)
    tmpdir = _get_tmpdir(tmp_dir)
    local_tmpdir = _get_local_tmpdir(local_tmpdir)
    optimizer_iterations = get_env_or_default(_optimizer_iterations,
                                              'HAIL_OPTIMIZER_ITERATIONS', 3)

    backend = SparkBackend(idempotent, sc, spark_conf, app_name, master, local,
                           log, quiet, append, min_block_size,
                           branching_factor, tmpdir, local_tmpdir,
                           skip_logging_configuration, optimizer_iterations)

    # Pass the normalized tmpdir computed above, not the raw tmp_dir argument.
    HailContext(log, quiet, append, tmpdir, local_tmpdir, default_reference,
                global_seed, backend)
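
Both versions guard against repeated initialization: with `idempotent=True` a
second call is a no-op; otherwise it only emits a warning. A usage sketch of
the intended re-configuration flow (requires a working Hail/Spark
installation; shown for illustration only):

import hail as hl

hl.init()                            # first call starts Spark and Hail
hl.init(idempotent=True)             # no-op: Hail is already initialized
hl.stop()                            # close the session first...
hl.init(default_reference='GRCh38')  # ...then re-initialize with new config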