Example #1
    def _run(self,
             batch: 'batch.Batch',
             dry_run: bool,
             verbose: bool,
             delete_scratch_on_exit: bool,
             wait: bool = True,
             open: bool = False,
             disable_progress_bar: bool = False,
             callback: Optional[str] = None,
             token: Optional[str] = None,
             **backend_kwargs):  # pylint: disable-msg=too-many-statements
        """Execute a batch.

        Warning
        -------
        This method should not be called directly. Instead, use :meth:`.batch.Batch.run`
        and pass :class:`.ServiceBackend`-specific arguments as keyword arguments.

        Parameters
        ----------
        batch:
            Batch to execute.
        dry_run:
            If `True`, don't submit any jobs; only build and print the generated commands.
        verbose:
            If `True`, print debugging output.
        delete_scratch_on_exit:
            If `True`, delete temporary directories with intermediate files.
        wait:
            If `True`, wait for the batch to finish executing before returning.
        open:
            If `True`, open the UI page for the batch.
        disable_progress_bar:
            If `True`, disable the progress bar.
        callback:
            If not `None`, a URL that will receive at most one POST request
            after the entire batch completes.
        token:
            If not `None`, a string used for idempotency of batch submission.
        """

        if backend_kwargs:
            raise ValueError(f'ServiceBackend does not support any of these keywords: {backend_kwargs}')

        build_dag_start = time.time()

        uid = uuid.uuid4().hex[:6]
        remote_tmpdir = f'gs://{self._bucket_name}/batch/{uid}'
        local_tmpdir = f'/io/batch/{uid}'

        default_image = 'ubuntu:18.04'

        attributes = copy.deepcopy(batch.attributes)
        if batch.name is not None:
            attributes['name'] = batch.name

        bc_batch = self._batch_client.create_batch(attributes=attributes, callback=callback,
                                                   token=token, cancel_after_n_failures=batch._cancel_after_n_failures)

        n_jobs_submitted = 0
        used_remote_tmpdir = False

        job_to_client_job_mapping: Dict[_job.Job, bc.Job] = {}
        jobs_to_command = {}
        commands = []

        bash_flags = 'set -e' + ('x' if verbose else '')

        activate_service_account = 'gcloud -q auth activate-service-account ' \
                                   '--key-file=/gsa-key/key.json'

        def copy_input(r):
            if isinstance(r, resource.InputResourceFile):
                return [(r._input_path, r._get_path(local_tmpdir))]
            assert isinstance(r, (resource.JobResourceFile, resource.PythonResult))
            return [(r._get_path(remote_tmpdir), r._get_path(local_tmpdir))]

        def copy_internal_output(r):
            assert isinstance(r, (resource.JobResourceFile, resource.PythonResult))
            return [(r._get_path(local_tmpdir), r._get_path(remote_tmpdir))]

        def copy_external_output(r):
            if isinstance(r, resource.InputResourceFile):
                return [(r._input_path, dest) for dest in r._output_paths]
            assert isinstance(r, (resource.JobResourceFile, resource.PythonResult))
            return [(r._get_path(local_tmpdir), dest) for dest in r._output_paths]

        def symlink_input_resource_group(r):
            symlinks = []
            if isinstance(r, resource.ResourceGroup) and r._source is None:
                for name, irf in r._resources.items():
                    src = irf._get_path(local_tmpdir)
                    dest = f'{r._get_path(local_tmpdir)}.{name}'
                    symlinks.append(f'ln -sf {shq(src)} {shq(dest)}')
            return symlinks

        write_external_inputs = [x for r in batch._input_resources for x in copy_external_output(r)]
        if write_external_inputs:
            def _cp(src, dst):
                return f'gsutil -m cp -R {shq(src)} {shq(dst)}'

            write_cmd = f'''
{bash_flags}
{activate_service_account}
{' && '.join([_cp(*files) for files in write_external_inputs])}
'''

            if dry_run:
                commands.append(write_cmd)
            else:
                j = bc_batch.create_job(image='gcr.io/google.com/cloudsdktool/cloud-sdk:310.0.0-alpine',
                                        command=['/bin/bash', '-c', write_cmd],
                                        attributes={'name': 'write_external_inputs'})
                jobs_to_command[j] = write_cmd
                n_jobs_submitted += 1

        for job in batch._jobs:
            if isinstance(job, _job.PythonJob):
                if job._image is None:
                    version = sys.version_info
                    if version.major != 3 or version.minor not in (6, 7, 8):
                        raise BatchException(
                            f"You must specify 'image' for Python jobs if you are using a Python version other than 3.6, 3.7, or 3.8 (you are using {version})")
                    job._image = f'hailgenetics/python-dill:{version.major}.{version.minor}-slim'
                job._compile(local_tmpdir, remote_tmpdir)

            inputs = [x for r in job._inputs for x in copy_input(r)]

            outputs = [x for r in job._internal_outputs for x in copy_internal_output(r)]
            if outputs:
                used_remote_tmpdir = True
            outputs += [x for r in job._external_outputs for x in copy_external_output(r)]

            symlinks = [x for r in job._mentioned for x in symlink_input_resource_group(r)]

            env_vars = {
                **job._env,
                **{r._uid: r._get_path(local_tmpdir) for r in job._mentioned}}

            if job._image is None:
                if verbose:
                    print(f"Using image '{default_image}' since no image was specified.")

            make_local_tmpdir = f'mkdir -p {local_tmpdir}/{job._job_id}'

            job_command = [cmd.strip() for cmd in job._command]

            prepared_job_command = (f'{{\n{x}\n}}' for x in job_command)
            cmd = f'''
{bash_flags}
{make_local_tmpdir}
{"; ".join(symlinks)}
{" && ".join(prepared_job_command)}
'''

            if dry_run:
                commands.append(cmd)
                continue

            parents = [job_to_client_job_mapping[j] for j in job._dependencies]

            attributes = copy.deepcopy(job.attributes) if job.attributes else dict()
            if job.name:
                attributes['name'] = job.name

            resources: Dict[str, Any] = {}
            if job._cpu:
                resources['cpu'] = job._cpu
            if job._memory:
                resources['memory'] = job._memory
            if job._storage:
                resources['storage'] = job._storage
            if job._machine_type:
                resources['machine_type'] = job._machine_type
            if job._preemptible is not None:
                resources['preemptible'] = job._preemptible

            image = job._image if job._image else default_image
            image_ref = parse_docker_image_reference(image)
            if not is_google_registry_domain(image_ref.domain) and image_ref.name() not in HAIL_GENETICS_IMAGES:
                warnings.warn(f'Using an image {image} not in GCR. '
                              f'Jobs may fail due to Docker Hub rate limits.')

            j = bc_batch.create_job(image=image,
                                    command=[job._shell if job._shell else self._DEFAULT_SHELL, '-c', cmd],
                                    parents=parents,
                                    attributes=attributes,
                                    resources=resources,
                                    input_files=inputs if len(inputs) > 0 else None,
                                    output_files=outputs if len(outputs) > 0 else None,
                                    always_run=job._always_run,
                                    timeout=job._timeout,
                                    gcsfuse=job._gcsfuse if len(job._gcsfuse) > 0 else None,
                                    env=env_vars,
                                    requester_pays_project=batch.requester_pays_project,
                                    mount_tokens=True)

            n_jobs_submitted += 1

            job_to_client_job_mapping[job] = j
            jobs_to_command[j] = cmd

        if dry_run:
            print("\n\n".join(commands))
            return None

        if delete_scratch_on_exit and used_remote_tmpdir:
            parents = list(jobs_to_command.keys())
            rm_cmd = f'gsutil -m rm -r {remote_tmpdir}'
            cmd = f'''
{bash_flags}
{activate_service_account}
{rm_cmd}
'''
            j = bc_batch.create_job(
                image='gcr.io/google.com/cloudsdktool/cloud-sdk:310.0.0-alpine',
                command=['/bin/bash', '-c', cmd],
                parents=parents,
                attributes={'name': 'remove_tmpdir'},
                always_run=True)
            jobs_to_command[j] = cmd
            n_jobs_submitted += 1

        if verbose:
            print(f'Built DAG with {n_jobs_submitted} jobs in {round(time.time() - build_dag_start, 3)} seconds.')

        submit_batch_start = time.time()
        bc_batch = bc_batch.submit(disable_progress_bar=disable_progress_bar)

        jobs_to_command = {j.id: cmd for j, cmd in jobs_to_command.items()}

        if verbose:
            print(f'Submitted batch {bc_batch.id} with {n_jobs_submitted} jobs in {round(time.time() - submit_batch_start, 3)} seconds:')
            for jid, cmd in jobs_to_command.items():
                print(f'{jid}: {cmd}')

            print('')

        deploy_config = get_deploy_config()
        url = deploy_config.url('batch', f'/batches/{bc_batch.id}')
        print(f'Submitted batch {bc_batch.id}, see {url}')

        if open:
            webbrowser.open(url)
        if wait:
            print(f'Waiting for batch {bc_batch.id}...')
            status = bc_batch.wait()
            print(f'batch {bc_batch.id} complete: {status["state"]}')
        return bc_batch
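
A minimal usage sketch, not taken from the listing above: the docstring says `_run` should not be called directly and that the ServiceBackend-specific arguments (wait, open, disable_progress_bar, callback, token) are forwarded through :meth:`.batch.Batch.run`. Assuming the public hailtop.batch API, that looks roughly like this; the billing project and bucket names are hypothetical.

import hailtop.batch as hb

# Hypothetical billing project and scratch bucket.
backend = hb.ServiceBackend(billing_project='my-billing-project',
                            bucket='my-scratch-bucket')

b = hb.Batch(backend=backend, name='hello-batch')
j = b.new_job(name='hello')
j.command('echo "hello world"')

# ServiceBackend-specific keywords are passed to Batch.run, which forwards
# them to ServiceBackend._run as **backend_kwargs.
b.run(wait=True, disable_progress_bar=True)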
Example #2
def test_parse_docker_image_reference():
    x = parse_docker_image_reference('animage')
    assert x.domain is None
    assert x.path == 'animage'
    assert x.tag is None
    assert x.digest is None
    assert x.name() == 'animage'
    assert str(x) == 'animage'

    x = parse_docker_image_reference('hailgenetics/animage')
    assert x.domain == 'hailgenetics'
    assert x.path == 'animage'
    assert x.tag is None
    assert x.digest is None
    assert x.name() == 'hailgenetics/animage'
    assert str(x) == 'hailgenetics/animage'

    x = parse_docker_image_reference('localhost:5000/animage')
    assert x.domain == 'localhost:5000'
    assert x.path == 'animage'
    assert x.tag is None
    assert x.digest is None
    assert x.name() == 'localhost:5000/animage'
    assert str(x) == 'localhost:5000/animage'

    x = parse_docker_image_reference('localhost:5000/a/b/name')
    assert x.domain == 'localhost:5000'
    assert x.path == 'a/b/name'
    assert x.tag is None
    assert x.digest is None
    assert x.name() == 'localhost:5000/a/b/name'
    assert str(x) == 'localhost:5000/a/b/name'

    x = parse_docker_image_reference('localhost:5000/a/b/name:tag')
    assert x.domain == 'localhost:5000'
    assert x.path == 'a/b/name'
    assert x.tag == 'tag'
    assert x.digest is None
    assert x.name() == 'localhost:5000/a/b/name'
    assert str(x) == 'localhost:5000/a/b/name:tag'

    x = parse_docker_image_reference(
        'localhost:5000/a/b/name:tag@sha256:abc123')
    assert x.domain == 'localhost:5000'
    assert x.path == 'a/b/name'
    assert x.tag == 'tag'
    assert x.digest == 'sha256:abc123'
    assert x.name() == 'localhost:5000/a/b/name'
    assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123'

    x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123')
    assert x.domain == 'localhost:5000'
    assert x.path == 'a/b/name'
    assert x.tag is None
    assert x.digest == 'sha256:abc123'
    assert x.name() == 'localhost:5000/a/b/name'
    assert str(x) == 'localhost:5000/a/b/name@sha256:abc123'

    x = parse_docker_image_reference('name@sha256:abc123')
    assert x.domain is None
    assert x.path == 'name'
    assert x.tag is None
    assert x.digest == 'sha256:abc123'
    assert x.name() == 'name'
    assert str(x) == 'name@sha256:abc123'

    x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312')
    assert x.domain == 'gcr.io'
    assert x.path == 'hail-vdc/batch-worker'
    assert x.tag == '123fds312'
    assert x.digest is None
    assert x.name() == 'gcr.io/hail-vdc/batch-worker'
    assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312'

    x = parse_docker_image_reference(
        'us-docker.pkg.dev/my-project/my-repo/test-image')
    assert x.domain == 'us-docker.pkg.dev'
    assert x.path == 'my-project/my-repo/test-image'
    assert x.tag is None
    assert x.digest is None
    assert x.name() == 'us-docker.pkg.dev/my-project/my-repo/test-image'
    assert str(x) == 'us-docker.pkg.dev/my-project/my-repo/test-image'
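
For reference, the behavior exercised by these assertions can be captured by a small parser. This is only a sketch consistent with the test cases above, not the library's actual `parse_docker_image_reference` implementation; note that, per the tests, everything before the first '/' is treated as the domain, unlike the stricter Docker reference grammar.

class SketchImageReference:
    """Hypothetical stand-in for the object returned by parse_docker_image_reference."""
    def __init__(self, domain, path, tag, digest):
        self.domain, self.path, self.tag, self.digest = domain, path, tag, digest

    def name(self):
        # Domain plus path, without tag or digest.
        return f'{self.domain}/{self.path}' if self.domain else self.path

    def __str__(self):
        s = self.name()
        if self.tag is not None:
            s += f':{self.tag}'
        if self.digest is not None:
            s += f'@{self.digest}'
        return s


def parse_docker_image_reference_sketch(reference):
    domain = None
    rest = reference
    if '/' in rest:
        # Everything before the first '/' is treated as the domain.
        domain, rest = rest.split('/', 1)
    digest = None
    if '@' in rest:
        # A digest follows '@' (e.g. 'sha256:abc123').
        rest, digest = rest.split('@', 1)
    tag = None
    if ':' in rest:
        # What remains is 'path' or 'path:tag'.
        rest, tag = rest.split(':', 1)
    return SketchImageReference(domain, rest, tag, digest)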
Example #3
    async def _async_run(
            self,
            batch: 'batch.Batch',
            dry_run: bool,
            verbose: bool,
            delete_scratch_on_exit: bool,
            wait: bool = True,
            open: bool = False,
            disable_progress_bar: bool = False,
            callback: Optional[str] = None,
            token: Optional[str] = None,
            **backend_kwargs):  # pylint: disable-msg=too-many-statements
        if backend_kwargs:
            raise ValueError(
                f'ServiceBackend does not support any of these keywords: {backend_kwargs}'
            )

        build_dag_start = time.time()

        uid = uuid.uuid4().hex[:6]
        batch_remote_tmpdir = f'{self.remote_tmpdir}{uid}'
        local_tmpdir = f'/io/batch/{uid}'

        default_image = 'ubuntu:20.04'

        attributes = copy.deepcopy(batch.attributes)
        if batch.name is not None:
            attributes['name'] = batch.name

        bc_batch = self._batch_client.create_batch(
            attributes=attributes,
            callback=callback,
            token=token,
            cancel_after_n_failures=batch._cancel_after_n_failures)

        n_jobs_submitted = 0
        used_remote_tmpdir = False

        job_to_client_job_mapping: Dict[_job.Job, bc.Job] = {}
        jobs_to_command = {}
        commands = []

        bash_flags = 'set -e' + ('x' if verbose else '')

        def copy_input(r):
            if isinstance(r, resource.InputResourceFile):
                return [(r._input_path, r._get_path(local_tmpdir))]
            assert isinstance(
                r, (resource.JobResourceFile, resource.PythonResult))
            return [(r._get_path(batch_remote_tmpdir),
                     r._get_path(local_tmpdir))]

        def copy_internal_output(r):
            assert isinstance(
                r, (resource.JobResourceFile, resource.PythonResult))
            return [(r._get_path(local_tmpdir),
                     r._get_path(batch_remote_tmpdir))]

        def copy_external_output(r):
            if isinstance(r, resource.InputResourceFile):
                return [(r._input_path, dest) for dest in r._output_paths]
            assert isinstance(
                r, (resource.JobResourceFile, resource.PythonResult))
            return [(r._get_path(local_tmpdir), dest)
                    for dest in r._output_paths]

        def symlink_input_resource_group(r):
            symlinks = []
            if isinstance(r, resource.ResourceGroup) and r._source is None:
                for name, irf in r._resources.items():
                    src = irf._get_path(local_tmpdir)
                    dest = f'{r._get_path(local_tmpdir)}.{name}'
                    symlinks.append(f'ln -sf {shq(src)} {shq(dest)}')
            return symlinks

        write_external_inputs = [
            x for r in batch._input_resources for x in copy_external_output(r)
        ]
        if write_external_inputs:
            transfers_bytes = orjson.dumps([{
                "from": src,
                "to": dest
            } for src, dest in write_external_inputs])
            transfers = transfers_bytes.decode('utf-8')
            write_cmd = [
                'python3', '-m', 'hailtop.aiotools.copy', 'null', transfers
            ]
            if dry_run:
                commands.append(' '.join(shq(x) for x in write_cmd))
            else:
                j = bc_batch.create_job(
                    image=HAIL_GENETICS_HAIL_IMAGE,
                    command=write_cmd,
                    attributes={'name': 'write_external_inputs'})
                jobs_to_command[j] = ' '.join(shq(x) for x in write_cmd)
                n_jobs_submitted += 1

        pyjobs = [j for j in batch._jobs if isinstance(j, _job.PythonJob)]
        for job in pyjobs:
            if job._image is None:
                version = sys.version_info
                if version.major != 3 or version.minor not in (6, 7, 8):
                    raise BatchException(
                        f"You must specify 'image' for Python jobs if you are using a Python version other than 3.6, 3.7, or 3.8 (you are using {version})"
                    )
                job._image = f'hailgenetics/python-dill:{version.major}.{version.minor}-slim'

        with tqdm(total=len(batch._jobs),
                  desc='upload code',
                  disable=disable_progress_bar) as pbar:

            async def compile_job(job):
                used_remote_tmpdir = await job._compile(local_tmpdir,
                                                        batch_remote_tmpdir,
                                                        dry_run=dry_run)
                pbar.update(1)
                return used_remote_tmpdir

            used_remote_tmpdir_results = await bounded_gather(
                *[functools.partial(compile_job, j) for j in batch._jobs],
                parallelism=150)
            used_remote_tmpdir |= any(used_remote_tmpdir_results)

        for job in tqdm(batch._jobs,
                        desc='create job objects',
                        disable=disable_progress_bar):
            inputs = [x for r in job._inputs for x in copy_input(r)]

            outputs = [
                x for r in job._internal_outputs
                for x in copy_internal_output(r)
            ]
            if outputs:
                used_remote_tmpdir = True
            outputs += [
                x for r in job._external_outputs
                for x in copy_external_output(r)
            ]

            symlinks = [
                x for r in job._mentioned
                for x in symlink_input_resource_group(r)
            ]

            if job._image is None:
                if verbose:
                    print(
                        f"Using image '{default_image}' since no image was specified."
                    )

            make_local_tmpdir = f'mkdir -p {local_tmpdir}/{job._dirname}'

            job_command = [cmd.strip() for cmd in job._wrapper_code]
            prepared_job_command = (f'{{\n{x}\n}}' for x in job_command)
            cmd = f'''
{bash_flags}
{make_local_tmpdir}
{"; ".join(symlinks)}
{" && ".join(prepared_job_command)}
'''

            user_code = '\n\n'.join(job._user_code) if job._user_code else None

            if dry_run:
                formatted_command = f'''
================================================================================
# Job {job._job_id} {f": {job.name}" if job.name else ''}

--------------------------------------------------------------------------------
## USER CODE
--------------------------------------------------------------------------------
{user_code}

--------------------------------------------------------------------------------
## COMMAND
--------------------------------------------------------------------------------
{cmd}
================================================================================
'''
                commands.append(formatted_command)
                continue

            parents = [job_to_client_job_mapping[j] for j in job._dependencies]

            attributes = copy.deepcopy(
                job.attributes) if job.attributes else {}
            if job.name:
                attributes['name'] = job.name

            resources: Dict[str, Any] = {}
            if job._cpu:
                resources['cpu'] = job._cpu
            if job._memory:
                resources['memory'] = job._memory
            if job._storage:
                resources['storage'] = job._storage
            if job._machine_type:
                resources['machine_type'] = job._machine_type
            if job._preemptible is not None:
                resources['preemptible'] = job._preemptible

            image = job._image if job._image else default_image
            image_ref = parse_docker_image_reference(image)
            if image_ref.hosted_in('dockerhub') and image_ref.name() not in HAIL_GENETICS_IMAGES:
                warnings.warn(f'Using an image {image} from Docker Hub. '
                              f'Jobs may fail due to Docker Hub rate limits.')

            env = {**job._env, 'BATCH_TMPDIR': local_tmpdir}

            j = bc_batch.create_job(
                image=image,
                command=[
                    job._shell if job._shell else DEFAULT_SHELL, '-c', cmd
                ],
                parents=parents,
                attributes=attributes,
                resources=resources,
                input_files=inputs if len(inputs) > 0 else None,
                output_files=outputs if len(outputs) > 0 else None,
                always_run=job._always_run,
                timeout=job._timeout,
                cloudfuse=job._cloudfuse if len(job._cloudfuse) > 0 else None,
                env=env,
                requester_pays_project=batch.requester_pays_project,
                mount_tokens=True,
                user_code=user_code)

            n_jobs_submitted += 1

            job_to_client_job_mapping[job] = j
            jobs_to_command[j] = cmd

        if dry_run:
            print("\n\n".join(commands))
            return None

        if delete_scratch_on_exit and used_remote_tmpdir:
            parents = list(jobs_to_command.keys())
            rm_cmd = ['python3', '-m', 'hailtop.aiotools.delete', batch_remote_tmpdir]
            j = bc_batch.create_job(image=HAIL_GENETICS_HAIL_IMAGE,
                                    command=rm_cmd,
                                    parents=parents,
                                    attributes={'name': 'remove_tmpdir'},
                                    always_run=True)
            # Record the actual cleanup command; the loop variable `cmd` would be stale here.
            jobs_to_command[j] = ' '.join(shq(x) for x in rm_cmd)
            n_jobs_submitted += 1

        if verbose:
            print(
                f'Built DAG with {n_jobs_submitted} jobs in {round(time.time() - build_dag_start, 3)} seconds.'
            )

        submit_batch_start = time.time()
        batch_handle = bc_batch.submit(
            disable_progress_bar=disable_progress_bar)

        jobs_to_command = {j.id: cmd for j, cmd in jobs_to_command.items()}

        if verbose:
            print(
                f'Submitted batch {batch_handle.id} with {n_jobs_submitted} jobs in {round(time.time() - submit_batch_start, 3)} seconds:'
            )
            for jid, cmd in jobs_to_command.items():
                print(f'{jid}: {cmd}')
            print('')

        deploy_config = get_deploy_config()
        url = deploy_config.url('batch', f'/batches/{batch_handle.id}')
        print(f'Submitted batch {batch_handle.id}, see {url}')

        if open:
            webbrowser.open(url)
        if wait:
            print(f'Waiting for batch {batch_handle.id}...')
            status = batch_handle.wait()
            print(f'batch {batch_handle.id} complete: {status["state"]}')
        return batch_handle
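
A standalone sketch (with made-up paths) of the transfer specification built in the write_external_inputs step of Example #3: the backend serializes (src, dest) pairs to a JSON list of {"from": ..., "to": ...} objects and hands it to `python3 -m hailtop.aiotools.copy`; the literal 'null' first argument is passed through unchanged, exactly as in the listing.

import orjson
from shlex import quote as shq  # stand-in for the shq helper used in the listings

# Hypothetical (src, dest) pairs, as produced by copy_external_output() for
# external inputs that are written straight to their requested output paths.
write_external_inputs = [
    ('gs://source-bucket/data/sample.vcf', 'gs://dest-bucket/copies/sample.vcf'),
]

# The transfer spec is a JSON list of {"from": ..., "to": ...} objects.
transfers = orjson.dumps(
    [{"from": src, "to": dest} for src, dest in write_external_inputs]
).decode('utf-8')

# Same command shape as the write_external_inputs job in Example #3.
write_cmd = ['python3', '-m', 'hailtop.aiotools.copy', 'null', transfers]
print(' '.join(shq(x) for x in write_cmd))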