Example No. 1
    def containerize_command(self, command):
        def prop(name, default):
            destination_name = "docker_%s" % name
            return self.destination_info.get(destination_name, default)

        env_directives = []
        for pass_through_var in self.tool_info.env_pass_through:
            env_directives.append('"%s=$%s"' % (pass_through_var, pass_through_var))

        # Allow destinations to explicitly set environment variables just for
        # the docker container. A better approach, however, would be to set them
        # on the destination and pass through only what the tool needs. (See
        # TODO in ToolInfo.)
        for key, value in six.iteritems(self.destination_info):
            if key.startswith("docker_env_"):
                env = key[len("docker_env_"):]
                env_directives.append('"%s=%s"' % (env, value))

        working_directory = self.job_info.working_directory
        if not working_directory:
            raise Exception("Cannot containerize command [%s] without defined working directory." % command)

        volumes_raw = self._expand_volume_str(self.destination_info.get("docker_volumes", "$defaults"))
        preprocessed_volumes_list = preprocess_volumes(volumes_raw, self.container_type)
        # TODO: Remove redundant volumes...
        volumes = [DockerVolume.from_str(v) for v in preprocessed_volumes_list]
        # If the tool has a temp directory available, mount it as /tmp inside the
        # container for compatibility with CWL. This is part of that spec and
        # should make it easier to share containers between CWL and Galaxy.
        if self.job_info.tmp_directory is not None:
            volumes.append(DockerVolume.from_str("%s:/tmp:rw" % self.job_info.tmp_directory))
        volumes_from = self.destination_info.get("docker_volumes_from", docker_util.DEFAULT_VOLUMES_FROM)

        docker_host_props = dict(
            docker_cmd=prop("cmd", docker_util.DEFAULT_DOCKER_COMMAND),
            sudo=asbool(prop("sudo", docker_util.DEFAULT_SUDO)),
            sudo_cmd=prop("sudo_cmd", docker_util.DEFAULT_SUDO_COMMAND),
            host=prop("host", docker_util.DEFAULT_HOST),
        )

        cached_image_file = self.__get_cached_image_file()
        if not cached_image_file:
            # TODO: Add option to cache it once here and create cached_image_file.
            cache_command = docker_util.build_docker_cache_command(self.container_id, **docker_host_props)
        else:
            cache_command = self.__cache_from_file_command(cached_image_file, docker_host_props)
        run_command = docker_util.build_docker_run_command(
            command,
            self.container_id,
            volumes=volumes,
            volumes_from=volumes_from,
            env_directives=env_directives,
            working_directory=working_directory,
            net=prop("net", "none"),  # By default, docker instance has networking disabled
            auto_rm=asbool(prop("auto_rm", docker_util.DEFAULT_AUTO_REMOVE)),
            set_user=prop("set_user", docker_util.DEFAULT_SET_USER),
            run_extra_arguments=prop("run_extra_arguments", docker_util.DEFAULT_RUN_EXTRA_ARGUMENTS),
            **docker_host_props
        )
        return "%s\n%s" % (cache_command, run_command)
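For reference, the destination-driven lookups above reduce to plain dict access. Below is a minimal, self-contained sketch of how the `prop` helper and the `docker_env_` scan resolve values; the sample `destination_info` contents are hypothetical:

    destination_info = {
        "docker_sudo": "false",
        "docker_net": "bridge",
        "docker_env_FOO": "bar",
    }

    def prop(name, default):
        # Look up "docker_<name>" in the destination, falling back to the default.
        return destination_info.get("docker_%s" % name, default)

    assert prop("net", "none") == "bridge"     # explicitly configured
    assert prop("set_user", "$UID") == "$UID"  # falls back to the default

    # Environment directives are rendered as quoted NAME=VALUE strings for
    # interpolation into the generated docker run invocation.
    env_directives = [
        '"%s=%s"' % (key[len("docker_env_"):], value)
        for key, value in destination_info.items()
        if key.startswith("docker_env_")
    ]
    assert env_directives == ['"FOO=bar"']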
Example No. 2
    def containerize_command(self, command):

        env = []
        for pass_through_var in self.tool_info.env_pass_through:
            env.append((pass_through_var, "$%s" % pass_through_var))

        # Allow destinations to explicitly set environment variables just for
        # the singularity container. A better approach, however, would be to set
        # them on the destination and pass through only what the tool needs.
        # (See TODO in ToolInfo.)
        for key, value in self.destination_info.items():
            if key.startswith("singularity_env_"):
                real_key = key[len("singularity_env_"):]
                env.append((real_key, value))

        working_directory = self.job_info.working_directory
        if not working_directory:
            raise Exception("Cannot containerize command [%s] without defined working directory." % command)

        volumes_raw = self._expand_volume_str(self.destination_info.get("singularity_volumes", "$defaults"))
        preprocessed_volumes_list = preprocess_volumes(volumes_raw, self.container_type)
        # Note: the Singularity code path reuses DockerVolume to model bind mounts.
        volumes = [DockerVolume.from_str(v) for v in preprocessed_volumes_list]

        run_command = singularity_util.build_singularity_run_command(
            command,
            self.container_id,
            volumes=volumes,
            env=env,
            working_directory=working_directory,
            run_extra_arguments=self.prop("run_extra_arguments", singularity_util.DEFAULT_RUN_EXTRA_ARGUMENTS),
            guest_ports=self.tool_info.guest_ports,
            container_name=self.container_name,
            **self.get_singularity_target_kwds()
        )
        return run_command
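Note the difference from the Docker examples: the Singularity variant collects environment variables as `(name, value)` tuples and leaves rendering to `singularity_util`, while the Docker variant builds pre-quoted directive strings. A small sketch of the two representations, with hypothetical sample values:

    destination_info = {"singularity_env_LC_ALL": "C"}
    env_pass_through = ["GALAXY_SLOTS"]

    # Docker variant: pre-quoted "NAME=VALUE" directive strings.
    env_directives = ['"%s=$%s"' % (v, v) for v in env_pass_through]

    # Singularity variant: plain (name, value) tuples.
    env = [(v, "$%s" % v) for v in env_pass_through]
    env += [
        (key[len("singularity_env_"):], value)
        for key, value in destination_info.items()
        if key.startswith("singularity_env_")
    ]

    assert env_directives == ['"GALAXY_SLOTS=$GALAXY_SLOTS"']
    assert env == [("GALAXY_SLOTS", "$GALAXY_SLOTS"), ("LC_ALL", "C")]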
Example No. 3
    def containerize_command(self, command):
        env_directives = []
        for pass_through_var in self.tool_info.env_pass_through:
            env_directives.append(f'"{pass_through_var}=${pass_through_var}"')

        # Allow destinations to explicitly set environment variables just for
        # the docker container. A better approach, however, would be to set them
        # on the destination and pass through only what the tool needs. (See
        # TODO in ToolInfo.)
        for key, value in self.destination_info.items():
            if key.startswith("docker_env_"):
                env = key[len("docker_env_"):]
                env_directives.append(f'"{env}={value}"')

        working_directory = self.job_info.working_directory
        if not working_directory:
            raise Exception(f"Cannot containerize command [{command}] without defined working directory.")

        volumes_raw = self._expand_volume_str(self.destination_info.get("docker_volumes", "$defaults"))
        preprocessed_volumes_list = preprocess_volumes(volumes_raw, self.container_type)
        # TODO: Remove redundant volumes...
        volumes = [DockerVolume.from_str(v) for v in preprocessed_volumes_list]
        volumes_from = self.destination_info.get("docker_volumes_from", docker_util.DEFAULT_VOLUMES_FROM)

        docker_host_props = self.docker_host_props

        cached_image_file = self.__get_cached_image_file()
        if not cached_image_file:
            # TODO: Add option to cache it once here and create cached_image_file.
            cache_command = docker_util.build_docker_cache_command(self.container_id, **docker_host_props)
        else:
            cache_command = self.__cache_from_file_command(cached_image_file, docker_host_props)
        run_command = docker_util.build_docker_run_command(
            command,
            self.container_id,
            volumes=volumes,
            volumes_from=volumes_from,
            env_directives=env_directives,
            working_directory=working_directory,
            net=self.prop("net", None),  # No --net flag is emitted when this is None; Docker's default networking applies
            auto_rm=asbool(self.prop("auto_rm", docker_util.DEFAULT_AUTO_REMOVE)),
            set_user=self.prop("set_user", docker_util.DEFAULT_SET_USER),
            run_extra_arguments=self.prop("run_extra_arguments", docker_util.DEFAULT_RUN_EXTRA_ARGUMENTS),
            guest_ports=self.tool_info.guest_ports,
            container_name=self.container_name,
            **docker_host_props
        )
        kill_command = docker_util.build_docker_simple_command("kill", container_name=self.container_name, **docker_host_props)
        # Suppress standard error below in the kill command because it can cause jobs that otherwise would work
        # to fail. Likely, in these cases the container has been stopped normally and so cannot be stopped again.
        # A less hacky approach might be to check if the container is running first before trying to kill.
        # https://stackoverflow.com/questions/34228864/stop-and-delete-docker-container-if-its-running
        # Standard error is:
        #    Error response from daemon: Cannot kill container: 2b0b961527574ebc873256b481bbe72e: No such container: 2b0b961527574ebc873256b481bbe72e
        # Chain the cache, run, and kill commands below; capture the run
        # command's exit status before the cleanup kill so the job reports the
        # tool's return code rather than the kill's.
        return f"""
{cache_command}
{run_command}
return_code=$?
{kill_command} 2> /dev/null
sh -c "exit $return_code\""""
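With hypothetical stand-ins for the three generated commands, the returned script takes roughly the shape below. Capturing `$?` before the kill is what lets the job report the tool's exit status rather than the cleanup's:

    # All three command strings are hypothetical stand-ins.
    cache_command = "docker pull busybox"
    run_command = "docker run --name job1 busybox echo hi"
    kill_command = "docker kill job1"

    script = f"""
    {cache_command}
    {run_command}
    return_code=$?
    {kill_command} 2> /dev/null
    sh -c "exit $return_code"
    """
    print(script)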
Example No. 4
    def containerize_command(self, command):
        def prop(name, default):
            destination_name = "singularity_%s" % name
            return self.destination_info.get(destination_name, default)

        env = []
        for pass_through_var in self.tool_info.env_pass_through:
            env.append((pass_through_var, "$%s" % pass_through_var))

        # Allow destinations to explicitly set environment variables just for
        # the singularity container. A better approach, however, would be to set
        # them on the destination and pass through only what the tool needs.
        # (See TODO in ToolInfo.)
        for key, value in six.iteritems(self.destination_info):
            if key.startswith("singularity_env_"):
                real_key = key[len("singularity_env_"):]
                env.append((real_key, value))

        working_directory = self.job_info.working_directory
        if not working_directory:
            raise Exception("Cannot containerize command [%s] without defined working directory." % command)

        volumes_raw = self._expand_volume_str(self.destination_info.get("singularity_volumes", "$defaults"))
        preprocessed_volumes_list = preprocess_volumes(volumes_raw, self.container_type)
        volumes = [DockerVolume.from_str(v) for v in preprocessed_volumes_list]

        singularity_target_kwds = dict(
            singularity_cmd=prop("cmd", singularity_util.DEFAULT_SINGULARITY_COMMAND),
            sudo=asbool(prop("sudo", singularity_util.DEFAULT_SUDO)),
            sudo_cmd=prop("sudo_cmd", singularity_util.DEFAULT_SUDO_COMMAND),
        )
        run_command = singularity_util.build_singularity_run_command(
            command,
            self.container_id,
            volumes=volumes,
            env=env,
            working_directory=working_directory,
            run_extra_arguments=prop("run_extra_arguments", singularity_util.DEFAULT_RUN_EXTRA_ARGUMENTS),
            **singularity_target_kwds
        )
        return run_command
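Destination parameters typically arrive as strings, which is why `sudo` is wrapped in `asbool` above. A minimal stand-in for that coercion, assuming semantics along the lines of `galaxy.util.asbool`:

    def asbool(value):
        # Minimal stand-in: booleans pass through; common truthy string
        # spellings map to True, everything else to False.
        if isinstance(value, bool):
            return value
        return str(value).strip().lower() in ("true", "yes", "on", "y", "t", "1")

    assert asbool("false") is False
    assert asbool("True") is True
    assert asbool(True) is True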