Пример #1
0
def test_execute_with_called_process_error(
    capsys, docker_container, return_code, tmp_path
):
    """Streamed execute must raise CalledProcessError on a non-zero exit."""
    script_path = os.path.join(tmp_path, "CalledProcessError.sh")
    # One line on stdout, one on stderr, optionally a failing exit.
    tail = "exit 1" if return_code else ""
    script = f"""#!/bin/bash
echo "This is stdout"
>&2 echo "This is stderr"
{tail}
"""
    write_file(script_path, script)

    def consume_stream():
        # Iterating the stream is what surfaces the container's exit code.
        stream = Client.execute(
            docker_container[1], f"/bin/sh {script_path}", stream=True
        )
        for chunk in stream:
            print(chunk, "")

    if return_code:
        with pytest.raises(CalledProcessError):
            consume_stream()
    else:
        consume_stream()

    captured = capsys.readouterr()
    assert "stdout" in captured.out
    if return_code:
        assert "stderr" in captured.err
    else:
        assert "stderr" not in captured.err
Пример #2
0
    def test_instance_cmds(self, docker_container):
        """Exercise instance creation, listing, exec and teardown end-to-end."""
        img = docker_container[1]
        inst = Client.instance(img)
        assert inst.get_uri().startswith("instance://")

        print("...Case 2: List instances")
        assert len(Client.instances()) == 1
        listed = Client.instances(return_json=True)
        assert len(listed) == 1
        assert isinstance(listed[0], dict)

        print("...Case 3: Commands to instances")
        assert Client.execute(inst, ["echo", "hello"]) == "hello\n"

        print("...Case 4: Return value from instance")
        outcome = Client.execute(inst, "ls /", return_result=True)
        print(outcome)
        assert "tmp\nusr\nvar" in outcome["message"]
        assert outcome["return_code"] == 0

        print("...Case 5: Stop instances")
        inst.stop()
        assert Client.instances() == []
        first = Client.instance(img)
        second = Client.instance(img)
        assert first is not None
        assert second is not None
        assert len(Client.instances()) == 2
        Client.instance_stopall()
        assert Client.instances() == []
Пример #3
0
    def test_instance_cmds(self, docker_container):
        """Exercise instance creation, listing, exec and teardown end-to-end.

        Relies on the global spython Client instance registry, so the
        ordering of the cases below matters.
        """
        image = docker_container[1]
        # Case 1: a fresh instance reports an instance:// URI.
        myinstance = Client.instance(image)
        assert myinstance.get_uri().startswith('instance://')

        print("...Case 2: List instances")
        instances = Client.instances()
        assert len(instances) == 1
        # return_json=True yields plain dicts instead of Instance objects.
        instances = Client.instances(return_json=True)
        assert len(instances) == 1
        assert isinstance(instances[0], dict)

        print("...Case 3: Commands to instances")
        result = Client.execute(myinstance, ['echo', 'hello'])
        assert result == 'hello\n'

        print('...Case 4: Return value from instance')
        # return_result=True wraps stdout and the exit status in a dict.
        result = Client.execute(myinstance, 'ls /', return_result=True)
        print(result)
        assert 'tmp\nusr\nvar' in result['message']
        assert result['return_code'] == 0

        print("...Case 5: Stop instances")
        myinstance.stop()
        instances = Client.instances()
        assert instances == []
        # Two instances of the same image can coexist and be stopped at once.
        myinstance1 = Client.instance(image)
        myinstance2 = Client.instance(image)
        assert myinstance1 is not None
        assert myinstance2 is not None
        instances = Client.instances()
        assert len(instances) == 2
        Client.instance_stopall()
        instances = Client.instances()
        assert instances == []
Пример #4
0
def make_separate_directory(file_all, protein, source):
    """Create ./trajectories/model_<i> working directories, one per pose.

    Each pose string from ``file_all`` is written to its own model
    directory together with the protein and the helper inputs from
    ``source``; the pose is then converted from pdbqt to pdb inside the
    caverdock Singularity image and a combined complex.pdb is assembled
    with shell utilities.

    Parameters
    ----------
    file_all : iterable of str
        Contents of the ligand pose files, one string per model.
    protein : str
        Filename of the protein PDB file (also expected under ``source``).
    source : str
        Directory holding the auxiliary input files to copy.
    """
    # shutil.rmtree('trajectories')
    if not os.path.isdir('trajectories'):
        print('Trajectories do not exist')
        os.mkdir('trajectories')
    else:
        print('exist')

    for count, file in enumerate(file_all, start=0):
        model_dir = f'./trajectories/model_{count}'
        if not os.path.isdir(model_dir):
            os.mkdir(model_dir)
        with open(f'{model_dir}/position_ligand_{count}.pdbqt', 'w') as file_traj:
            file_traj.write(file)
        shutil.copy(protein, f'{model_dir}/')
        # copy in the extra data needed by the later steps
        shutil.copy(f'{source}/_Xqmin_tmp.in', f'{model_dir}/')
        shutil.copy(f'{source}/_11_run_tleap.sh', f'{model_dir}/')
        shutil.copy(f'{source}/_21_run-mm_meta.sh', f'{model_dir}/')
        shutil.copy(f'{source}/{protein}', f'{model_dir}/')
        shutil.copy(f'{source}/ligand.prepi', f'{model_dir}/')

        # Best-effort cleanup of outputs from a previous run.  `rm` on a
        # missing file only yields a non-zero exit status, which
        # subprocess.run(shell=True) reports without raising, so the
        # former bare try/excepts around these calls were dead code.
        subprocess.run(f'rm {model_dir}/emin2*', shell=True)
        subprocess.run(f'rm {model_dir}/complex.inpcrd', shell=True)
        subprocess.run(f'rm {model_dir}/complex.prmtop', shell=True)

        # convert traj_position_ligand_{count}.pdbqt to pdb -> singularity
        Client.load('/home/petrahrozkova/Stažené/caverdock_1.1.sif')
        # NOTE(review): the '*' in -f is not shell-expanded here; the
        # conversion script presumably handles the pattern — confirm.
        Client.execute(['/opt/mgltools-1.5.6/bin/pythonsh',
                        '/opt/mgltools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24/pdbqt_to_pdb.py',
                        '-f', os.getcwd() + '/trajectories/model_' + str(count) + '/position_ligand_*.pdbqt',
                        '-o', os.getcwd() + '/trajectories/model_' + str(count) + '/ligand.pdb'])

        # Rename the '<0>' residue emitted by the converter to TIP.
        subprocess.call(f'sed -i \'s/<0> d/TIP d/g\' {model_dir}/ligand.pdb',
                        shell=True)  # ligand_for_complex.pdb

        # Drop the first (header) line of the converted ligand.
        subprocess.call(f'tail -n +2 "{model_dir}/ligand.pdb" > '
                        f'"{model_dir}/ligand_for_complex.pdb"', shell=True)

        # Naively merge ligand and protein into complex.pdb.
        subprocess.call(  # remove last line in file with END. IMPORTANT!
            f'head -n -1 {model_dir}/{protein} > {model_dir}/complex.pdb | '
            f'echo TER >> {model_dir}/complex.pdb',
            shell=True)
        subprocess.call(f'cat {model_dir}/ligand_for_complex.pdb'
                        f' >> {model_dir}/complex.pdb ',
                        shell=True)
        subprocess.call(f'echo END >> {model_dir}/complex.pdb', shell=True)
Пример #5
0
def call(container, command, cwd='', paths=None, remove=True):
    """Run ``command`` inside ``container``, yielding stripped output lines.

    Parameters
    ----------
    container : str
        Image path/URI understood by Client.load.
    command : str
        Command line to run; split with shlex.
    cwd : str
        Working directory inside the container (``--pwd``).
    paths : dict or None
        Mount points as {"/path/outside": "/path/incontainer"}.
        (Was a mutable default ``{}`` — shared across calls; fixed.)
    remove : bool
        Unused; kept for interface compatibility.

    Yields
    ------
    str
        One stripped line of container output at a time.
    """
    # load container
    Client.load(container)

    # setup mount point paths
    volumes = []
    if paths:
        for outside, inside in paths.items():
            volumes.append(outside + ':' + inside)

    # setup command as a list
    command_list = shlex.split(command)

    # run the container
    output = Client.execute(command_list,
                            bind=volumes,
                            options=['--pwd', cwd, '--cleanenv'],
                            stream=True)

    try:
        # stream the output
        for line in output:
            yield line.strip()
    # catch singularity errors from non-zero exit
    # TODO figure out how to read them
    except subprocess.CalledProcessError:
        pass
Пример #6
0
def extract_content(image_path, member_name, return_hash=False):
    '''Extract the content of one member from an image using ``cat``.

    If return_hash is True, an md5 hex digest of the content is returned
    instead of the bytes. Returns None when the member cannot be read
    (execution error or empty content, e.g. insufficient permissions).
    '''
    # Normalise "./member" to "/member" so cat gets an absolute path.
    if member_name.startswith('./'):
        member_name = member_name.replace('.', '', 1)

    try:
        content = Client.execute(image_path, 'cat %s' % (member_name))
    except Exception:  # narrowed from a bare except: execution failed
        return None

    # Client.execute may return str; hash and length-check on bytes.
    # (The former extra bytes(content) re-wrap was a no-op and is gone.)
    if not isinstance(content, bytes):
        content = content.encode('utf-8')

    # If permissions don't allow read, the content comes back empty.
    if len(content) == 0:
        return None
    if return_hash:
        hashy = hashlib.md5()
        hashy.update(content)
        return hashy.hexdigest()
    return content
    def exec(self, process: ProcessContainer, args):
        """Execute a process

        Parameters
        ----------
        process
            Metadata of the process
        args
            list of arguments

        """
        container_type = process.container()['type']
        if container_type not in ('singularity', 'docker'):
            raise RunnerExecError("The process " + process.name +
                                  " is not compatible with Singularity")

        image_uri = replace_env_variables(process, process.container()['uri'])
        if container_type == 'docker':
            image_uri = 'docker://' + image_uri

        self.notify_message('run singularity container: ' + image_uri)
        self.notify_message('args:' + ' '.join(args))
        # Stream container output both to the notifier and to stdout.
        for line in Client.execute(image_uri, args):
            self.notify_message(line)
            print(line)
Пример #8
0
def get_nvidia_devices(use_docker=True):
    """
    Returns a Dict[index, UUID] of all NVIDIA devices available to docker

    Arguments:
        use_docker: whether or not to use a docker container to run nvidia-smi. if not, use singularity

    Raises docker.errors.ContainerError if GPUs are unreachable,
           docker.errors.ImageNotFound if the CUDA image cannot be pulled
           docker.errors.APIError if another server error occurs
    """
    cuda_image = 'nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04'
    nvidia_command = 'nvidia-smi --query-gpu=index,uuid --format=csv,noheader'
    if use_docker:
        client.images.pull(cuda_image)
        raw = client.containers.run(
            cuda_image,
            nvidia_command,
            runtime=NVIDIA_RUNTIME,
            detach=False,
            stdout=True,
            remove=True,
        )
        gpus = raw.decode()
    else:
        # use the singularity runtime to run nvidia-smi
        img = Client.pull('docker://' + cuda_image, pull_folder='/tmp')
        result = Client.execute(img, nvidia_command, options=['--nv'])
        if result['return_code'] != 0:
            raise SingularityError
        gpus = result['message']
    # Newline-delimited "index, uuid" rows; the final split entry is empty.
    rows = gpus.split('\n')[:-1]
    logger.info("GPUs: " + str(rows))
    devices = {}
    for row in rows:
        fields = row.split(',')
        devices[fields[0].strip()] = fields[1].strip()
    return devices
Пример #9
0
def run_singularity_container_executable(uuid, container_image,
                                         executable_path, logs_path,
                                         args_dict):
    """Launch an executable within a Singularity container.

    Args:
        uuid: A string containing the uuid of the job being run.
        container_image: A string containing the name of the container
            to pull.
        executable_path: A string containing the path of the executable
            to execute within the container.
        logs_path: A string (or None) containing the path of the
            directory containing the relevant logs within the container.
        args_dict: A dictionary containing arguments and corresponding
            values.
    """
    # Singularity client (imported lazily so workers without it can load
    # this module).
    from spython.main import Client as client

    # Grab the latest version of the container (honouring any tag).
    singularity_image = client.pull(
        image=container_image,
        pull_folder=os.environ['WORKER_SINGULARITY_IMAGES_DIRECTORY'],
    )

    if logs_path is None:
        bind_option = None
    else:
        # The host-side path must exist before binding — the Singularity
        # library requires this (unlike the Docker library).
        host_path = os.path.join(os.environ['WORKER_LOGS_DIRECTORY'], uuid)
        os.makedirs(host_path, exist_ok=True)

        # host:guest bind specification for Singularity.
        bind_option = "%s:%s" % (host_path.rstrip('/'), logs_path.rstrip('/'))

    # Run the executable with the serialized arguments.
    client.execute(
        image=singularity_image,
        command=[executable_path, json.dumps(args_dict)],
        bind=bind_option,
    )

    # TODO give a real return value
    return "FINISHED"
Пример #10
0
 def run(self, cmd, bind_dir):
     """Run ``cmd`` in the container with ``bind_dir`` mounted at /data."""
     self._make_dir()
     self._pull_image()
     image_path = os.path.join(self.img_dir, self.img_name)
     client.load(image_path)
     return client.execute(cmd,
                           bind=bind_dir + ":/data",
                           options=['--pwd', '/data', '--cleanenv'],
                           stream=True)
Пример #11
0
def run_case(image_dir, image_name, testcase_dir):
    """Run the SU2 'Jones' test case in the Singularity image, printing output."""
    client.load(os.path.join(image_dir, image_name))
    stream = client.execute(
        ['mpirun', '-np', '6', '/SU2/bin/SU2_CFD', 'Jones.cfg'],
        bind=testcase_dir + ":/data:",
        options=['--pwd', '/data'],
        stream=True)
    for line in stream:
        print(line.strip('\n'))
Пример #12
0
 def run_command(self, command):
     """Dispatch ``command`` to the docker or singularity backend."""
     if self.mode == 'singularity':
         from spython.main import Client
         output = Client.execute(self.client_instance, command)
         # execute() returns a dict when it captured a return code.
         if isinstance(output, dict):
             output = output['message']
         return output
     elif self.mode == 'docker':
         outcome = self.docker_container.exec_run(command)
         return str(outcome.output)
 def run(self, cmd, cores, cfg, log):
     """Run an SU2 binary under mpirun inside the container, logging output."""
     self._pull_image()
     client.load(os.path.join(self.image_dir, self.image_name))
     full_cmd = ['mpirun', '-np', str(cores), '/SU2/bin/' + cmd, cfg.fname]
     binds = ','.join([self.case_dir + ":/data",
                       self.mesh_dir + ":/mesh"])
     stream = client.execute(
         full_cmd,
         bind=binds,
         options=['--pwd', '/data', '--cleanenv'],
         stream=True)
     # Small buffer so progress reaches the log file promptly.
     with open(log.fname, 'w', buffering=10) as logfile:
         for chunk in stream:
             logfile.write(chunk)
def remove_ligand_from_emin(protein, verbose, configfile):
    """Strip the ligand from ``protein`` and convert the result to pdbqt.

    Writes protein.pdb containing every line of ``protein`` that does
    not mention the configured ligand name, then runs prepare_receptor
    (inside Singularity or as a local binary, depending on
    ``configfile``) to produce protein.pdbqt. Exits the process with
    status 1 if protein.pdbqt was not created.
    """
    #  by default this is the computed emin5.pdb
    with open(protein) as oldfile, open('protein.pdb', 'w+') as newfile:
        for line in oldfile:
            # Drop every line that mentions the ligand name.
            if not configfile["LIGAND"]["name"] in line:
                newfile.write(line)
    # convert pdb to pdbqt
    prepare_receptor = ''
    if int(configfile["SINGULARITY"]["value"]) == 1:
        Client.load(configfile["SINGULARITY"]["singularity"])
        # Returns the tool's output; used below for error reporting.
        prepare_receptor = Client.execute([
            configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"], '-r',
            'protein.pdb'
        ])
        if verbose:
            print(
                f'{configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb'
            )
        if not os.path.isfile('protein.pdbqt'):
            logging.error(
                f'Cannot run prepare_receptor. Try: \n {configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb \n Message: \n {prepare_receptor}'
            )
            if verbose:
                print(
                    f'Cannot run prepare_receptor. Try: \n {configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb \n {prepare_receptor}'
                )
            sys.exit(1)

    else:
        subprocess.call(
            f'{configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb',
            shell=True)
        logging.info(
            f'Run prepare_receptor. Message: \n {configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb '
        )
        if verbose:
            print(
                f'{configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb'
            )
            # NOTE(review): prepare_receptor is always '' in this branch
            # (subprocess.call output is not captured) — confirm intent.
            print(f'Message{prepare_receptor}')
        if not os.path.isfile('protein.pdbqt'):
            logging.error(
                f'Cannot run prepare_receptor. Try: \n {configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb \n'
            )
            if verbose:
                print(
                    f'Cannot run prepare_receptor. Try: \n {configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb'
                )
            sys.exit(1)
    check_exist_file('protein.pdbqt')
Пример #15
0
def test_AFNI_libraries():
    """AFNI helpers in the CI image must not report missing shared libraries."""
    SINGULARITY_IMAGE_PATH = '/home/circleci/project/C-PAC-CI.simg'
    if not os.path.exists(SINGULARITY_IMAGE_PATH):
        # Fall back to any .simg/.sif image three directories up.
        try:
            SINGULARITY_IMAGE_PATH = [
                d for d in os.listdir(str(Path(__file__).parent.parent.parent))
                if (d.endswith('.simg') or d.endswith('.sif'))
            ][0]
        # Narrowed from a bare except: IndexError when no image matches,
        # OSError when the directory cannot be listed. Chain the cause.
        except (IndexError, OSError) as lookup_error:
            raise Exception(
                "Singularity image not in expected location.") from lookup_error
    if os.path.exists(SINGULARITY_IMAGE_PATH):
        afni_libraries = Client.execute(
            Client.instance(SINGULARITY_IMAGE_PATH),
            ['./dev/circleci_data/AFNI_libraries.sh'])
        # On failure, show only the offending "not found" lines.
        assert "not found" not in afni_libraries, '\n'.join([
            line.strip() for line in afni_libraries.split('\n')
            if "not found" in line
        ])
Пример #16
0
 def _try_to_stream(self, args, stream_command='run'):
     """Yield output lines from Client.run/Client.execute, swallowing non-zero exits."""
     self._bindings_as_option()
     try:
         if stream_command == 'run':
             stream = Client.run(Client.instance(self.image),
                                 args=args,
                                 options=self.options,
                                 stream=True,
                                 return_result=True)
         elif stream_command == 'execute':
             stream = Client.execute(self.image,
                                     command=args['command'].split(' '),
                                     options=self.options,
                                     stream=True,
                                     quiet=False)
         else:
             # Unknown sub-command: yield nothing, as before.
             return
         yield from stream
     except CalledProcessError:  # pragma: no cover
         return
Пример #17
0
def call(container, command, cwd='', paths=None, remove=True):
    """Run ``command`` in a docker:// image via Singularity; return its output.

    Parameters
    ----------
    container : str
        Docker image name; 'docker://' is prepended before loading.
    command : str
        Command line to run. NOTE(review): split with str.split, so
        quoted arguments are not respected — consider shlex if needed.
    cwd : str
        Working directory inside the container (``--pwd``).
    paths : dict or None
        Mount points as {"/path/outside": "/path/incontainer"}.
        (Was a mutable default ``{}`` — shared across calls; fixed.)
    remove : bool
        Unused; kept for interface compatibility.

    Returns
    -------
    The container output, as returned by Client.execute.
    """
    # load container
    container = 'docker://' + container
    Client.load(container)

    # setup mount point paths
    volumes = []
    if paths:
        for outside, inside in paths.items():
            volumes.append(outside + ':' + inside)

    # setup command as a list
    command_list = command.split()

    # run the container
    output = Client.execute(command_list,
                            bind=volumes,
                            writable=True,
                            options=['--pwd', cwd])
    # once container is finished return output as a string
    return output
def run_cd_energy_profile(tunnel, traj, configfile, verbose):
    """Compute the energy profile along ``tunnel`` with cd-energyprofile.

    Runs either inside the configured Singularity image or via a local
    binary (per ``configfile``), writing the result to ./energy.dat.
    Exits the process with status 1 on failure or when energy.dat was
    not produced.
    """
    file = ''
    try:
        if int(configfile["SINGULARITY"]["value"]) == 1:
            Client.load(str(configfile['SINGULARITY']['singularity']))
        if verbose:
            print(
                f'{configfile["CD-ENERGYPROFILE"]["path_cd-energyprofile"]} -d {os.getcwd()}/{str(tunnel)} -t {str(traj)} -s {str(configfile["CPU"]["cpu"])}'
            )
        if int(configfile["SINGULARITY"]["value"]) == 1:
            # Singularity path: capture the tool output and persist it.
            file = Client.execute([
                'cd-energyprofile', '-d',
                os.getcwd() + '/' + str(tunnel), '-t',
                str(traj), '-s',
                str(configfile['CPU']['cpu'])
            ])
            with open(f'{os.getcwd()}/energy.dat', 'w') as file_energy_dat:
                file_energy_dat.write(str(file))
        else:
            # Local path: let the shell redirect output into energy.dat.
            subprocess.call(
                f'{configfile["CD-ENERGYPROFILE"]["path_cd-energyprofile"]} -d {os.getcwd()}/{str(tunnel)} -t {str(traj)} -s {str(configfile["CPU"]["cpu"])} > energy.dat',
                shell=True)
        if verbose:
            print(f'Message from cd-energyprofile: \n {file}')
        logging.info(f'Message from cd-energyprofieL: \n {file}')

    # Narrowed from a bare except so SystemExit/KeyboardInterrupt
    # propagate instead of being reported as a tool failure.
    except Exception:
        if verbose:
            print('cd-energyprofile returncode is not 0. Check logfile.')
        logging.error('cd-energyprofile returncode is not 0.')
        sys.exit(1)
    if not os.path.exists('energy.dat'):
        logging.error('Cannot make energy.dat')
        if verbose:
            print('Energy.dat not exists')
        sys.exit(1)
def make_separate_directory(file_all, protein, source, configfile):
    """Build ./trajectories/model_<i> directories, one per ligand pose.

    For every pose string in ``file_all`` the pdbqt text is written out,
    the protein is copied in, stale outputs are deleted, the pose is
    converted to PDB (via Singularity or a local tool, depending on
    ``configfile``), amber inputs are prepared with antechamber /
    pdb4amber, and a combined complex.pdb is assembled with shell
    utilities.

    Parameters
    ----------
    file_all : iterable of str
        Contents of the ligand pose files, one string per model.
    protein : str
        Filename of the protein PDB file (also expected under ``source``).
    source : str
        Directory holding auxiliary input files.
    configfile : mapping
        Parsed configuration (INI-style sections) with tool paths.
    """
    # Start from a clean ./trajectories tree when removal succeeds;
    # otherwise fall back to checking whether it already exists.
    try:
        shutil.rmtree('trajectories')
        isdir = False
    except:
        isdir = os.path.isdir('trajectories')
    if isdir == False:
        print('Trajectories do not exist')
        os.mkdir('trajectories')
    else:
        print('exist')

    for count, file in enumerate(file_all, start=0):
        ismod = os.path.isdir(f'./trajectories/model_{count}')
        if ismod == False:
            os.mkdir(f'./trajectories/model_{count}')
        with open(
                f'./trajectories/model_{count}/position_ligand_{count}.pdbqt',
                'w') as file_traj:
            file_traj.write(file)
        shutil.copy(protein, f'./trajectories/model_{count}/')
        # copy in the extra data needed by the later steps
        #shutil.copy(f'{source}/_11_run_tleap', f'./trajectories/model_{count}/')
        #shutil.copy(f'{source}/_21_run_prepare_sander', f'./trajectories/model_{count}/')
        shutil.copy(f'{source}/{protein}', f'./trajectories/model_{count}/')

        # Best-effort cleanup of previous outputs. NOTE(review):
        # subprocess.run(shell=True) does not raise on a non-zero exit,
        # so these try/excepts appear to be dead code — confirm.
        try:
            subprocess.run(f'rm ./trajectories/model_{count}/emin*',
                           shell=True)
        except:
            pass
        try:
            subprocess.run(f'rm ./trajectories/model_{count}/complex.inpcrd',
                           shell=True)
        except:
            pass
        try:
            subprocess.run(f'rm ./trajectories/model_{count}/complex.prmtop',
                           shell=True)
        except:
            pass
        try:
            subprocess.run(f'rm ./trajectories/model_{count}/ligand.prepi',
                           shell=True)
        except:
            pass
        # convert traj_position_ligand_{count}.pdbqt to pdb -> singularity
        if int(configfile["SINGULARITY"]["value"]) == 1:
            Client.load('/home/petrahrozkova/Stažené/caverdock_1.1.sif')
            # NOTE(review): the '*' in -f is not shell-expanded here;
            # the conversion script presumably handles the pattern.
            Client.execute([
                '/opt/mgltools-1.5.6/bin/pythonsh',
                '/opt/mgltools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24/pdbqt_to_pdb.py',
                '-f',
                os.getcwd() + '/trajectories/model_' + str(count) +
                '/position_ligand_*.pdbqt', '-o',
                os.getcwd() + '/trajectories/model_' + str(count) +
                '/ligand_H.pdb'
            ])
        else:
            subprocess.call(
                f'{configfile["PDBQT_TO_PDB"]["path_pdbqt_to_pdb"]} -f {os.getcwd()}/trajectories/model_{str(count)}/position_ligand_{str(count)}.pdbqt '
                f'-o {os.getcwd()}/trajectories/model_{str(count)}/ligand_H.pdb',
                shell=True)
        check_exist_file(
            f'{os.getcwd()}/trajectories/model_{str(count)}/ligand_H.pdb')
        # Rename the '<0>' residue emitted by the converter to the
        # configured ligand name.
        subprocess.call(
            f'sed -i \'s/<0> d/{configfile["LIGAND"]["name"]} d/g\' ./trajectories/model_{count}/ligand_H.pdb',
            shell=True)  #ligand_for_complex.pdb

        #subprocess.call(f'pdb4amber -i ./trajectories/model_{count}/ligand_H.pdb -o ./trajectories/model_{count}/ligand.pdb --nohyd', shell = True)
        check_exist_file(
            f'{os.getcwd()}/trajectories/model_{str(count)}/ligand_H.pdb')

        #subprocess.call(f'antechamber -i {ligand} -fi pdbqt -o ligand.pdb -if pdb', shell = True)
        # Generate the amber prepi file for the ligand.
        subprocess.call(
            f'antechamber -i ./trajectories/model_{count}/ligand_H.pdb -fi pdb -o ./trajectories/model_{count}/ligand.prepi -fo prepi',
            shell=True)
        check_exist_file(
            f'{os.getcwd()}/trajectories/model_{str(count)}/ligand.prepi')

        subprocess.call(
            f'cat \"./trajectories/model_{count}/ligand_H.pdb\" > '
            f'\"./trajectories/model_{count}/ligand_for_complex.pdb\"',
            shell=True)

        # split (ugly) ligand and protein into complex.pdb
        subprocess.call(
            f'pdb4amber -i ./trajectories/model_{count}/{protein} -o ./trajectories/model_{count}/protein_w_H.pdb --nohyd',
            shell=True)

        subprocess.call(  #remove last line in file with END. IMPORTANT!
            f'head -n -1 ./trajectories/model_{count}/protein_w_H.pdb > ./trajectories/model_{count}/complex.pdb | '
            f'echo TER >> ./trajectories/model_{count}/complex.pdb',
            shell=True)
        subprocess.call(
            f'cat ./trajectories/model_{count}/ligand_for_complex.pdb'
            f' >> ./trajectories/model_{count}/complex.pdb ',
            #f'| echo TER >> ./trajectories/model_{count}/complex.pdb',
            shell=True)
        subprocess.call(
            f'echo END >> ./trajectories/model_{count}/complex.pdb',
            shell=True)
        check_exist_file(
            f'{os.getcwd()}/trajectories/model_{str(count)}/complex.pdb')
        print(f'{os.getcwd()}')

        #subprocess.call(f'pdb4amber -i ./trajectories/model_{count}/complex_H.pdb -o ./trajectories/model_{count}/complex.pdb --nohyd', shell = True)
        check_exist_file(
            f'{os.getcwd()}/trajectories/model_{str(count)}/complex.pdb')
Пример #20
0
def test_execute_with_return_code(docker_container):
    """return_result=True wraps stdout and the exit status in a dict."""
    outcome = Client.execute(docker_container[1], "ls /", return_result=True)
    print(outcome)
    assert "tmp\nusr\nvar" in outcome["message"]
    assert outcome["return_code"] == 0
Пример #21
0
def test_execute(docker_container):
    """Plain execute returns the command's stdout as a string."""
    listing = Client.execute(docker_container[1], "ls /")
    print(listing)
    assert "tmp\nusr\nvar" in listing
Пример #22
0
def run_singularity_container_command(
    uuid,
    container_image,
    command_to_run,
    logs_path,
    results_path,
    env_vars_list,
    args_dict,
):
    """Launch an executable within a Singularity container.

    Args:
        uuid: A string containing the uuid of the job being run.
        container_image: A string containing the name of the container
            to pull.
        command_to_run: A string containing the command to run.
        logs_path: A string (or None) containing the path of the
            directory containing the relevant logs within the container.
        results_path: A string (or None) containing the path of the
            directory containing any output files from the container.
        env_vars_list: A list of strings containing the environment
            variable names for the worker to consume from its
            environment.
        args_dict: A dictionary containing arguments and corresponding
            values.

    Raises:
        KeyError: An environment variable specified was not available in
            the worker's environment.
        SingularityPullFailure: The Singularity pull could not complete
            with the specified timeout and number of retries.
    """
    # Import Singularity library
    from spython.main import Client as client

    # Pull the specified container. This pull in the latest version of
    # the container (with the specified tag if provided).
    timeout = int(os.environ["SINGULARITY_PULL_TIMEOUT"])
    num_retries = int(os.environ["SINGULARITY_PULL_RETRIES"])

    # Put a timeout on the client pull method
    client.pull = timeout_decorator.timeout(timeout,
                                            timeout_exception=StopIteration)(
                                                client.pull)

    # NOTE(review): if SINGULARITY_PULL_RETRIES is 0 this loop never runs
    # and singularity_image is unbound when used below — confirm the env
    # var is always >= 1.
    for retry in range(num_retries):
        try:
            singularity_image = client.pull(
                image=container_image,
                pull_folder=os.environ["WORKER_SINGULARITY_IMAGES_DIRECTORY"],
                name_by_commit=True,
            )

            # Pull succeeded; stop retrying.
            break
        except StopIteration:
            # If this is the last retry, raise an exception to indicate
            # a failed job
            if retry == num_retries - 1:
                raise SingularityPullFailure(
                    ("Could not pull {image_url} within "
                     "{timeout} seconds after {num_retries} retries.").format(
                         image_url=container_image,
                         timeout=timeout,
                         num_retries=num_retries,
                     ))

    # Find out where to put the logs
    if logs_path is None:
        bind_option = []
    else:
        # Create the host logs path. This is required by the Singularity
        # library (though not the Docker library)
        host_logs_path = os.path.join(os.environ["WORKER_LOGS_DIRECTORY"],
                                      uuid)

        create_local_directory(host_logs_path)

        # Build the bind option to pass on to Singularity
        bind_option = [
            host_logs_path.rstrip("/") + ":" + logs_path.rstrip("/")
        ]

    # Find out where to put the results
    if results_path is not None:
        # Create the host results path
        host_results_path = os.path.join(
            os.environ["WORKER_RESULTS_DIRECTORY"], uuid)

        create_local_directory(host_results_path)

        # Build the bind option to pass on to Singularity
        bind_option += [
            host_results_path.rstrip("/") + ":" + results_path.rstrip("/")
        ]

    # Check for required environment variables. Note that by default
    # Singularity containers have access to their outside environment
    # variables, so we don't need to pass them along explicitly like we
    # need to for a Docker container.
    try:
        # Test to see that all keys are defined
        {key: os.environ[key] for key in env_vars_list}
    except KeyError as e:
        raise KeyError(
            "Environment variable %s not present in the worker's environment!"
            % e)

    # Pass along the job's UUID
    os.environ["JOB_UUID"] = uuid

    # Compose the command to run
    command = shlex.split(command_to_run)

    if args_dict:
        command += [json.dumps(args_dict)]

    # Run the executable
    iter_ = client.execute(image=singularity_image,
                           command=command,
                           bind=bind_option,
                           stream=True)

    # Okay, here's some magic. The issue is that without stream=True in
    # the above call, there's no way of determining the return code of
    # the above operation, and so no way of knowing whether it failed.
    # However, with stream=True, it'll raise a
    # subprocess.CalledProcessError exception for any non-zero return
    # code. Great! But before we can get that exception triggered we
    # need to iterate through all of the command's stdout, which is what
    # the below (seemingly useless) loop does.
    for _ in iter_:
        pass
Пример #23
0
    def run_command(self, model: Model, command_str,
                    mounts=None, environment=None,
                    stdin=None, stdout=sys.stdout, stderr=sys.stderr,
                    raise_errors=True):
        """Run ``command_str`` inside the model's Singularity image.

        Bind-mounts the (host, guest, mode) triples in ``mounts``,
        optionally feeds ``stdin`` to the command through a bound temp
        file, exports ``environment`` as SINGULARITYENV_* variables and
        streams container stdout to ``stdout``.

        NOTE(review): ``stderr`` is accepted but unused — container
        stderr is discarded via the appended '2>/dev/null'.
        """
        model = cast(SingularityModel, model)
        if mounts is None:
            mounts = []
        if environment is None:
            environment = {}

        # Support custom checkpoint loading
        if model.checkpoint is not None:
            host_checkpoint_path = Path(model.checkpoint).absolute()

            # Mount given checkpoint read-only within the guest
            guest_checkpoint_path = "/opt/lmzoo_checkpoint"
            mounts.append((host_checkpoint_path, guest_checkpoint_path, "ro"))

            # Update relevant environment variable
            environment["LMZOO_CHECKPOINT_PATH"] = guest_checkpoint_path

        # Render mounts as host:guest:mode bind specifications.
        binds = ["%s:%s:%s" % (host, guest, mode)
                for host, guest, mode in mounts]

        nv = False # TODO

        command = command_str.split(" ")

        if stdin is not None:
            # Copy the caller's stdin into a temp file, bind it into the
            # guest and pipe it to the command from inside the container.
            stdin_f = NamedTemporaryFile("w")
            stdin_f.write(stdin.read())
            stdin_f.flush()

            binds.append("%s:/host_stdin:ro" % stdin_f.name)
            command = ["sh", "-c", 'cat /host_stdin | %s' % " ".join(command)]

        # TODO no separate stderr support :( manually reroute stderr for now
        command.append("2>/dev/null")

        # Prepare environment variables for export
        environment = {"SINGULARITYENV_%s" % key: value
                       for key, value in environment.items()}

        try:
            with modified_environ(**environment):
                result = Client.execute(image=model.reference, command=command,
                                        bind=binds, stream=True)

                # Iterating the stream forwards output and surfaces the
                # container's exit status as CalledProcessError.
                for line in result:
                    stdout.write(line)
        except CalledProcessError as e:
            if raise_errors:
                if e.returncode == STATUS_CODES["unsupported_feature"]:
                    feature = command_str.split(" ")[0]
                    raise errors.UnsupportedFeatureError(feature=feature,
                                                         model=str(model))
                else:
                    raise

        if stdin is not None:
            stdin_f.close()

        # NOTE(review): the returned stream generator has already been
        # consumed by the loop above — confirm callers only use it as a
        # truthy handle, not for further iteration.
        return result
Пример #24
0
def test_execute(docker_container):
    """A plain command run in the container should yield its stdout text."""
    image = docker_container[1]
    output = Client.execute(image, "ls /")
    print(output)
    assert "tmp\nusr\nvar" in output
Example #25
0
def test_execute_with_return_code(docker_container):
    """With return_result=True, execute returns a dict holding message and code."""
    image = docker_container[1]
    outcome = Client.execute(image, "ls /", return_result=True)
    print(outcome)
    assert outcome["return_code"] == 0
    assert "tmp\nusr\nvar" in outcome["message"]
def run_caverdock(ligand, tunnel, configfile, verbose):
    """Prepare a CaverDock configuration and run the docking calculation.

    Depending on ``configfile["SINGULARITY"]["value"]`` the two steps
    (cd-prepareconf, then mpirun/caverdock) are executed either inside a
    Singularity image via spython's ``Client`` or directly on the host
    through ``subprocess.call`` with a shell string.

    Parameters
    ----------
    ligand : path-like
        Ligand structure file; passed through ``str()``.
    tunnel : path-like
        Tunnel definition file; passed through ``str()``.
    configfile : mapping
        Nested configuration, e.g. ``configfile["CPU"]["cpu"]``.
        # presumably parsed from an INI-style file — TODO confirm schema
    verbose : bool
        When true, echo progress and diagnostics to stdout in addition
        to the log.

    Side effects: writes ``caverdock.conf`` and the CaverDock results
    ``<name>-lb.pdbqt`` / ``<name>-ub.pdbqt`` in the current working
    directory, expects ``protein.pdbqt`` to already exist there, and
    terminates the whole process with ``sys.exit(1)`` on failure.
    """
    # IN: protein, ligand, tunnel
    # OUT: pdbqt file with lb and ub
    prepare_conf = ''

    # --- Step 1: generate caverdock.conf with cd-prepareconf ---
    if int(configfile["SINGULARITY"]["value"]) == 1:
        # Run inside the Singularity image loaded through spython.
        singularity = Client.load(str(
            configfile["SINGULARITY"]["singularity"]))
        logging.info(
            f'Singularity for caverdock: {configfile["SINGULARITY"]["singularity"]} \n'
        )
        logging.info(f'Message from singularity: \n {singularity}')
        if verbose:
            print(
                f'Singularity for caverdock: {configfile["SINGULARITY"]["singularity"]} \n'
            )
            print(f'Message from singularity: \n {singularity} \n')
        prepare_conf = Client.execute([
            configfile["CD-PREPARECONF"]["path_cd-prepareconf"], '-r',
            'protein.pdbqt', '-l',
            str(ligand), '-t',
            str(tunnel)
        ])
        # Persist the generated configuration for the caverdock run below.
        with open('caverdock.conf', 'w+') as file_conf:
            file_conf.write(prepare_conf)
        # NOTE(review): the file was just created by open(..., 'w+') above,
        # so this existence check can only fail on unusual I/O conditions.
        if not os.path.isfile('caverdock.conf'):
            logging.error(
                f'cd-prepareconf -r protein.pdbqt -l  {ligand} -t {tunnel} > caverdock.conf'
            )
            logging.error(f'Message from cd-prepareconf: \n {prepare_conf}')
            if verbose:
                print(
                    f'ERROR: {configfile["CD-PREPARECONF"]["path_cd-prepareconf"]} -r protein.pdbqt -l  {ligand} -t {tunnel} > caverdock.conf'
                )
                print(f'ERROR: Message from cd-prepareconf: \n {prepare_conf}')
            sys.exit(1)

    else:
        # No Singularity: invoke cd-prepareconf directly, redirecting its
        # stdout into caverdock.conf through the shell.
        subprocess.call(
            f'{configfile["CD-PREPARECONF"]["path_cd-prepareconf"]} -r protein.pdbqt -l  {ligand} -t {tunnel} > caverdock.conf',
            shell=True)
        logging.info(
            f'cd-prepareconf -r protein.pdbqt -l  {ligand} -t {tunnel} > caverdock.conf'
        )
        #logging.info(f'Message from cd-prepareconf: \n {prepare_conf}')
        if verbose:
            print(
                f'{configfile["CD-PREPARECONF"]["path_cd-prepareconf"]} -r protein.pdbqt -l  {ligand} -t {tunnel} > caverdock.conf'
            )
            #print(f'Message from cd-prepareconf: \n {prepare_conf}')

        if not os.path.isfile('caverdock.conf'):
            logging.error(
                f'cd-prepareconf -r protein.pdbqt -l  {ligand} -t {tunnel} > caverdock.conf'
            )
            #logging.error(f'Message from cd-prepareconf: \n {prepare_conf}')
            if verbose:
                print(
                    f'ERROR: {configfile["CD-PREPARECONF"]["path_cd-prepareconf"]} -r protein.pdbqt -l  {ligand} -t {tunnel} > caverdock.conf'
                )
                #print(f'ERROR: Message from cd-prepareconf: \n {prepare_conf}')
            sys.exit(1)

    # --- Step 2: run caverdock under MPI using the generated config ---
    mpirun = ''
    if int(configfile["SINGULARITY"]["value"]) == 1:
        # Run inside the Singularity image loaded earlier by Client.load.
        mpirun = Client.execute([
            'mpirun', '-np', configfile["CPU"]["cpu"], 'caverdock', '--config',
            'caverdock.conf', '--out',
            str(configfile["RESULT_CD"]["name"])
        ])

    else:
        # Host execution uses the MPICH mpirun wrapper and the configured
        # caverdock binary path instead of the names baked into the image.
        subprocess.call(
            f'/usr/bin/mpirun.mpich -np {str(configfile["CPU"]["cpu"])} {configfile["CAVERDOCK"]["path_caverdock"]}  --config caverdock.conf --out {str(configfile["RESULT_CD"]["name"])}',
            shell=True)

    logging.info(
        f'mpirun -np {str(configfile["CPU"]["cpu"])} caverdock  --config caverdock.conf --out {str(configfile["RESULT_CD"]["name"])}'
    )
    logging.info(f'Message from mpirun: \n {mpirun}')
    if verbose:
        print(
            f'mpirun -np {str(configfile["CPU"]["cpu"])} caverdock  --config caverdock.conf --out {str(configfile["RESULT_CD"]["name"])}'
        )
        print(f'Message from mpirun: \n {mpirun}')

    # NOTE(review): with 'or' under 'not', this only fails when BOTH the
    # lower-bound and upper-bound output files are missing — confirm whether
    # 'and' (fail if either is missing) was intended.
    if not (os.path.isfile(f'{configfile["RESULT_CD"]["name"]}-lb.pdbqt')
            or os.path.isfile(f'{configfile["RESULT_CD"]["name"]}-ub.pdbqt')):
        print(f'{configfile["RESULT_CD"]["name"]}-lb.pdbqt')
        logging.error(
            f'mpirun -np {str(configfile["CPU"]["cpu"])} caverdock  --config caverdock.conf --out {str(configfile["RESULT_CD"]["name"])}'
        )
        logging.error(f'Message from mpirun: \n {mpirun}')
        if verbose:
            print(
                f'ERROR: mpirun -np {str(configfile["CPU"]["cpu"])} caverdock  --config caverdock.conf --out {str(configfile["RESULT_CD"]["name"])}'
            )
            print(f'ERROR: Message from mpirun: \n {mpirun}')
        sys.exit(1)