def get_nvidia_devices(use_docker=True):
    """
    Returns a Dict[index, UUID] of all NVIDIA devices available to docker

    Arguments:
        use_docker: whether or not to use a docker container to run nvidia-smi.
                    If not, use singularity

    Raises docker.errors.ContainerError if GPUs are unreachable,
           docker.errors.ImageNotFound if the CUDA image cannot be pulled
           docker.errors.APIError if another server error occurs
    """
    cuda_image = 'nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04'
    nvidia_command = 'nvidia-smi --query-gpu=index,uuid --format=csv,noheader'
    if use_docker:
        client.images.pull(cuda_image)
        output = client.containers.run(
            cuda_image,
            nvidia_command,
            runtime=NVIDIA_RUNTIME,
            detach=False,
            stdout=True,
            remove=True,
        )
        gpus = output.decode()
    else:
        # use the singularity runtime to run nvidia-smi
        img = Client.pull('docker://' + cuda_image, pull_folder='/tmp')
        output = Client.execute(img, nvidia_command, options=['--nv'])
        if output['return_code'] != 0:
            raise SingularityError
        gpus = output['message']

    # Get newline delimited gpu-index, gpu-uuid list
    logger.info("GPUs: " + str(gpus.split('\n')[:-1]))
    return {
        gpu.split(',')[0].strip(): gpu.split(',')[1].strip()
        for gpu in gpus.split('\n')[:-1]
    }
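
# A minimal usage sketch for get_nvidia_devices() above, assuming the
# module-level names it relies on (a docker `client`, the spython `Client`,
# `NVIDIA_RUNTIME`, `logger`, and `SingularityError`) are defined elsewhere
# in the module.
gpu_map = get_nvidia_devices(use_docker=False)   # map GPU index -> UUID via Singularity
for index, uuid in gpu_map.items():
    print(f"GPU {index}: {uuid}")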
def call(container, command, cwd='', paths={}, remove=True):
    # load container
    Client.load(container)

    # set up mount point paths: {"/path/outside": "/path/incontainer"}
    volumes = []
    if paths:
        for key in paths.keys():
            volumes.append(key + ':' + paths[key])

    # set up command as a list
    command_list = shlex.split(command)

    # run the container
    output = Client.execute(command_list,
                            bind=volumes,
                            options=['--pwd', cwd, '--cleanenv'],
                            stream=True)
    try:
        # stream the output
        for line in output:
            yield line.strip()
    # catch singularity errors from non-zero exit
    # TODO figure out how to read them
    except subprocess.CalledProcessError:
        pass
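
# A hedged usage sketch for the call() generator above, assuming `my_image.sif`
# exists locally and the module-level imports (Client, shlex, subprocess) are
# in place; the host and container paths are illustrative placeholders.
for line in call('my_image.sif',
                 'python --version',
                 cwd='/data',
                 paths={'/host/input': '/data'}):
    print(line)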
def run_singularity_container_executable(container_image, executable_path,
                                         args_dict):
    """Launch an executable within a Singularity container.

    Args:
        container_image: A string containing the name of the container
            to pull.
        executable_path: A string containing the path of the executable
            to execute within the container.
        args_dict: A dictionary containing arguments and corresponding
            values.
    """
    # Import Singularity library
    from spython.main import Client as client

    # Pull the specified container. This pulls the latest version of
    # the container (with the specified tag if provided).
    singularity_image = client.pull(container_image)

    # Run the executable with the arguments
    # TODO how do we get logs?
    client.run(
        singularity_image,
        [executable_path, json.dumps(args_dict)],
    )

    # TODO give a real return value
    return "FINISHED"
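
# Illustrative call to run_singularity_container_executable() above; the image
# name, executable path, and arguments are hypothetical placeholders, and
# `json` is assumed to be imported at module level for the function to work.
status = run_singularity_container_executable(
    container_image='docker://python:3.9-slim',
    executable_path='/app/analyze.py',      # hypothetical script inside the image
    args_dict={'verbose': True},
)
print(status)  # "FINISHED"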
def __init__(self, **kwargs):
    self.platform = Platform_Meta('Singularity', 'Ⓢ')
    image = kwargs.get("image")
    tag = kwargs.get("tag")
    pwd = os.getcwd()
    if kwargs.get("working_dir") is not None:
        pwd = kwargs["working_dir"]
        os.chdir(pwd)
    print(f"Loading {self.platform.symbol} {self.platform.name}")
    if image and isinstance(image, str) and os.path.exists(image):
        self.image = image
    elif tag and isinstance(tag, str):  # pragma: no cover
        self.image = Client.pull(f"docker://{image}:{tag}", pull_folder=pwd)
    else:  # pragma: no cover
        try:
            self.image = Client.pull("shub://FCP-INDI/C-PAC", pull_folder=pwd)
        except Exception:
            try:
                self.image = Client.pull("docker://fcpindi/c-pac:latest",
                                         pull_folder=pwd)
            except Exception:
                raise OSError("Could not connect to Singularity")
    self.volumes = {}
    self.options = list(chain.from_iterable(
        kwargs["container_options"])) if bool(
            kwargs.get("container_options")) else []
    self._set_bindings(**kwargs)
def test_execute_with_called_process_error(
    capsys, docker_container, return_code, tmp_path
):
    tmp_file = os.path.join(tmp_path, "CalledProcessError.sh")
    # "This is stdout" to stdout, "This is stderr" to stderr
    script = f"""#!/bin/bash
echo "This is stdout"
>&2 echo "This is stderr"
{"exit 1" if return_code else ""}
"""
    write_file(tmp_file, script)
    if return_code:
        with pytest.raises(CalledProcessError):
            for line in Client.execute(
                docker_container[1], f"/bin/sh {tmp_file}", stream=True
            ):
                print(line, "")
    else:
        for line in Client.execute(
            docker_container[1], f"/bin/sh {tmp_file}", stream=True
        ):
            print(line, "")
    captured = capsys.readouterr()
    assert "stdout" in captured.out
    if return_code:
        assert "stderr" in captured.err
    else:
        assert "stderr" not in captured.err
def singularity_pull(self, image):
    """Pulls a Docker or Singularity image from a hub.
    """
    log.info('{}[{}] singularity pull {}'.format(
        self.msg_prefix, self.action['name'], image)
    )
    if not self.dry_run:
        sclient.pull(image, name=self.image_name)
def make_separate_directory(file_all, protein, source):
    # shutil.rmtree('trajectories')
    isdir = os.path.isdir('trajectories')
    if isdir == False:
        print('Trajectories do not exist')
        os.mkdir('trajectories')
    else:
        print('exist')

    for count, file in enumerate(file_all, start=0):
        ismod = os.path.isdir(f'./trajectories/model_{count}')
        if ismod == False:
            os.mkdir(f'./trajectories/model_{count}')
        with open(f'./trajectories/model_{count}/position_ligand_{count}.pdbqt', 'w') as file_traj:
            file_traj.write(file)
        shutil.copy(protein, f'./trajectories/model_{count}/')
        # copy the additional data that are needed
        shutil.copy(f'{source}/_Xqmin_tmp.in', f'./trajectories/model_{count}/')
        shutil.copy(f'{source}/_11_run_tleap.sh', f'./trajectories/model_{count}/')
        shutil.copy(f'{source}/_21_run-mm_meta.sh', f'./trajectories/model_{count}/')
        shutil.copy(f'{source}/{protein}', f'./trajectories/model_{count}/')
        shutil.copy(f'{source}/ligand.prepi', f'./trajectories/model_{count}/')
        try:
            subprocess.run(f'rm ./trajectories/model_{count}/emin2*', shell=True)
        except:
            pass
        try:
            subprocess.run(f'rm ./trajectories/model_{count}/complex.inpcrd', shell=True)
        except:
            pass
        try:
            subprocess.run(f'rm ./trajectories/model_{count}/complex.prmtop', shell=True)
        except:
            pass

        # convert traj_position_ligand_{count}.pdbqt to pdb -> singularity
        Client.load('/home/petrahrozkova/Stažené/caverdock_1.1.sif')
        Client.execute(['/opt/mgltools-1.5.6/bin/pythonsh',
                        '/opt/mgltools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24/pdbqt_to_pdb.py',
                        '-f', os.getcwd() + '/trajectories/model_' + str(count) + '/position_ligand_*.pdbqt',
                        '-o', os.getcwd() + '/trajectories/model_' + str(count) + '/ligand.pdb'])
        subprocess.call(f'sed -i \'s/<0> d/TIP d/g\' ./trajectories/model_{count}/ligand.pdb', shell=True)

        # ligand_for_complex.pdb
        subprocess.call(f'tail -n +2 "./trajectories/model_{count}/ligand.pdb" > '
                        f'"./trajectories/model_{count}/ligand_for_complex.pdb"', shell=True)

        # join the plain ligand and protein into complex.pdb
        subprocess.call(  # remove last line in file with END. IMPORTANT!
            f'head -n -1 ./trajectories/model_{count}/{protein} > ./trajectories/model_{count}/complex.pdb | '
            f'echo TER >> ./trajectories/model_{count}/complex.pdb',
            shell=True)
        subprocess.call(f'cat ./trajectories/model_{count}/ligand_for_complex.pdb'
                        f' >> ./trajectories/model_{count}/complex.pdb ',
                        # f'| echo TER >> ./trajectories/model_{count}/complex.pdb',
                        shell=True)
        subprocess.call(f'echo END >> ./trajectories/model_{count}/complex.pdb', shell=True)
def download_images(image_dir, image_url):
    image_name = 'su2_containers_fork_dev.sif'
    try:
        os.mkdir(image_dir)
    except FileExistsError:
        pass
    client.pull(image=image_url, name=image_name, pull_folder=image_dir)
    return image_name
def singularity_build(self, path):
    """Builds an image from a recipefile.
    """
    recipefile_path = os.path.join(path, 'Singularity')
    log.info('{}[{}] singularity build {} {}'.format(
        self.msg_prefix, self.action['name'], self.image_name,
        recipefile_path))
    if not self.dry_run:
        sclient.build(recipefile_path, self.image_name)
def run_case(image_dir, image_name, testcase_dir):
    client.load(os.path.join(image_dir, image_name))
    output = client.execute(
        ['mpirun', '-np', '6', '/SU2/bin/SU2_CFD', 'Jones.cfg'],
        bind=testcase_dir + ":/data",
        options=['--pwd', '/data'],
        stream=True)
    for val in output:
        print(val.strip('\n'))
def run(self, cmd, bind_dir):
    self._make_dir()
    self._pull_image()
    client.load(os.path.join(self.img_dir, self.img_name))
    output = client.execute(cmd,
                            bind=bind_dir + ":/data",
                            options=['--pwd', '/data', '--cleanenv'],
                            stream=True)
    return output
def __init__(self, mode, container_image, volumes, extra_kwargs):
    assert mode in ('docker', 'singularity')
    self.mode = mode

    if mode == 'docker':
        import docker
        client = docker.from_env()
        if extra_kwargs.get('requires_gpu', False):
            extra_kwargs.pop('requires_gpu')
            extra_kwargs["device_requests"] = [
                docker.types.DeviceRequest(count=-1, capabilities=[['gpu']])
            ]
        # check if the image is already present locally
        repo_tags = []
        for image in client.images.list():
            repo_tags.extend(image.attrs['RepoTags'])
        if container_image not in repo_tags:
            client.images.pull(container_image)
        self.docker_container = client.containers.create(container_image,
                                                         tty=True,
                                                         volumes=volumes,
                                                         **extra_kwargs)
    elif mode == 'singularity':
        from spython.main import Client
        # load local image file if it exists, otherwise search dockerhub
        if Path(container_image).exists():
            self.singularity_image = container_image
        else:
            self.singularity_image = Client.pull(
                f'docker://{container_image}')
        if not Path(self.singularity_image).exists():
            raise FileNotFoundError(
                f'Unable to locate container image {container_image}')

        # bind options
        singularity_bind = ','.join([
            f'{volume_src}:{volume["bind"]}'
            for volume_src, volume in volumes.items()
        ])
        options = ['--bind', singularity_bind]

        # gpu options
        if extra_kwargs.get('requires_gpu', False):
            # only nvidia at the moment
            options += ['--nv']

        self.client_instance = Client.instance(self.singularity_image,
                                               start=False,
                                               options=options)
def test_instance_class():
    instance = Client.instance("docker://ubuntu", start=False)
    assert instance.get_uri() == "instance://" + instance.name
    assert instance.name != ""

    name = "coolName"
    instance = Client.instance("docker://busybox:1.30.1", start=False, name=name)
    assert instance.get_uri() == "instance://" + instance.name
    assert instance.name == name
def test_pull_and_run(tmp_path):
    image = Client.pull("shub://vsoch/singularity-images",
                        pull_folder=str(tmp_path))
    print(image)
    assert os.path.exists(image)

    ext = 'sif' if Client.version_info().major >= 3 else 'simg'
    assert image == str(tmp_path / ('singularity-images.' + ext))

    result = Client.run(image)
    print(result)
    assert 'You say please, but all I see is pizza..' in result
def test_instance_class():
    instance = Client.instance('docker://ubuntu', start=False)
    assert instance.get_uri() == 'instance://' + instance.name
    assert instance.name != ''

    name = 'coolName'
    instance = Client.instance('docker://busybox:1.30.1', start=False, name=name)
    assert instance.get_uri() == 'instance://' + instance.name
    assert instance.name == name
def singularity_pull(self, image):
    """Pulls a Docker or Singularity image from a hub.
    """
    if not self.skip_pull:
        log.info('{}[{}] singularity pull {}'.format(
            self.msg_prefix, self.action['name'], image))
        if not self.dry_run:
            sclient.pull(image, name=self.image_name)
    else:
        if not self.singularity_exists():
            log.fail('The required singularity image {} was not found '
                     'locally.'.format(self.image_name))
def run(self, cmd, cores, cfg, log):
    self._pull_image()
    client.load(os.path.join(self.image_dir, self.image_name))
    totalcmd = ['mpirun', '-np', str(cores), '/SU2/bin/' + cmd, cfg.fname]
    output = client.execute(
        totalcmd,
        bind=','.join([self.case_dir + ":/data", self.mesh_dir + ":/mesh"]),
        options=['--pwd', '/data', '--cleanenv'],
        stream=True)
    with open(log.fname, 'w', buffering=10) as lfile:
        for line in output:
            lfile.write(line)
def singularity_start(self):
    """Starts a singularity instance based on the image.
    """
    env_vars = self.action.get('env', {})

    for s in self.action.get('secrets', []):
        env_vars.update({s: os.environ[s]})

    for e, v in self.env.items():
        env_vars.update({e: v})

    env_vars.update({'HOME': os.environ['HOME']})

    # sets the env variables
    for k, v in env_vars.items():
        sclient.setenv(k, v)

    args = self.action.get('args', None)
    runs = self.action.get('runs', None)
    ecode = None

    bind_list = [self.workspace, os.environ['HOME']]

    if runs:
        info = '{}[{}] singularity exec {} {}'.format(
            self.msg_prefix, self.action['name'], self.image_name, runs)
        commands = runs
        start = sclient.execute
    else:
        info = '{}[{}] singularity run {} {}'.format(
            self.msg_prefix, self.action['name'], self.image_name, args)
        commands = args
        start = sclient.run

    log.info(info)
    if not self.dry_run:
        output = start(self.image_name, commands, contain=True,
                       bind=bind_list, stream=True)
        try:
            for line in output:
                log.action_info(line)
            ecode = 0
        except subprocess.CalledProcessError as ex:
            ecode = ex.returncode
    else:
        ecode = 0

    return ecode
def remove_ligand_from_emin(protein, verbose, configfile):
    # by default this is now the computed emin5.pdb
    with open(protein) as oldfile, open('protein.pdb', 'w+') as newfile:
        for line in oldfile:
            if not configfile["LIGAND"]["name"] in line:
                newfile.write(line)

    # convert pdb to pdbqt
    prepare_receptor = ''
    if int(configfile["SINGULARITY"]["value"]) == 1:
        Client.load(configfile["SINGULARITY"]["singularity"])
        prepare_receptor = Client.execute([
            configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"],
            '-r', 'protein.pdb'
        ])
        if verbose:
            print(
                f'{configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb'
            )
        if not os.path.isfile('protein.pdbqt'):
            logging.error(
                f'Cannot run prepare_receptor. Try: \n {configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb \n Message: \n {prepare_receptor}'
            )
            if verbose:
                print(
                    f'Cannot run prepare_receptor. Try: \n {configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb \n {prepare_receptor}'
                )
            sys.exit(1)
    else:
        subprocess.call(
            f'{configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb',
            shell=True)
        logging.info(
            f'Run prepare_receptor. Message: \n {configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb '
        )
        if verbose:
            print(
                f'{configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb'
            )
            print(f'Message{prepare_receptor}')
        if not os.path.isfile('protein.pdbqt'):
            logging.error(
                f'Cannot run prepare_receptor. Try: \n {configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb \n'
            )
            if verbose:
                print(
                    f'Cannot run prepare_receptor. Try: \n {configfile["PREPARE_RECEPTOR"]["path_prepare_receptor"]} -r protein.pdb'
                )
            sys.exit(1)

    check_exist_file('protein.pdbqt')
def extract_apps(image, app_names):
    ''' extract app will extract metadata for one or more apps

        Parameters
        ==========
        image: the absolute path to the image
        app_names: the names of the apps under /scif/apps
    '''
    apps = dict()

    if isinstance(app_names, tuple):
        app_names = list(app_names)
    if not isinstance(app_names, list):
        app_names = [app_names]
    if len(app_names) == 0:
        return apps

    for app_name in app_names:
        metadata = dict()

        # Inspect: labels, env, runscript, tests, help
        try:
            inspection = json.loads(Client.inspect(image, app=app_name))
            del inspection['data']['attributes']['deffile']
            metadata['inspect'] = inspection
        # If illegal characters prevent load, not much we can do
        except:
            pass

        apps[app_name] = metadata
    return apps
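
# A minimal sketch of calling extract_apps() above; the image path and app
# names are placeholders, and the function's module-level imports (json, the
# spython Client) are assumed to be present.
app_metadata = extract_apps('/tmp/analysis.simg', ['samtools', 'bwa'])
for name, meta in app_metadata.items():
    print(name, list(meta.get('inspect', {}).keys()))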
def export_to_targz(image, tmpdir=None):
    """export a Singularity image to a .tar.gz file. If run within a docker
       image, you should set via_build to false (as sudo will work under
       privileged). Outside of Docker as regular user, via_build works better.

       Parameters
       ==========
       image: the full path to the Singularity image
       tmpdir: a temporary directory to export to.

    """
    print("Exporting %s to targz..." % image)

    if tmpdir is None:
        tmpdir = tempfile.mkdtemp()

    # We will build into this directory (sandbox) to export without sudo
    export_dir = get_temporary_name(tmpdir, "singularity-clair")
    targz = "%s.gz" % export_dir

    sandbox = Client.build(image, export_dir, sandbox=True, sudo=False)

    # Write the tarfile
    with tarfile.open(targz, "w:gz") as tar:
        tar.add(sandbox, arcname="/")

    shutil.rmtree(sandbox)
    if os.path.exists(targz):
        return targz
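
# Hypothetical usage of export_to_targz() above: export a local image to a
# compressed archive under a caller-supplied scratch directory. The paths are
# placeholders, and the helper get_temporary_name() plus the module-level
# imports (tarfile, tempfile, shutil, os, Client) are assumed to exist.
archive = export_to_targz('/tmp/analysis.simg', tmpdir='/tmp/scratch')
if archive is not None:
    print("Exported archive at %s" % archive)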
def exec(self, process: ProcessContainer, args):
    """Execute a process

    Parameters
    ----------
    process
        Metadata of the process
    args
        list of arguments

    """
    # print('container type = ', process.container()['type'])
    if (process.container()['type'] != 'singularity'
            and process.container()['type'] != 'docker'):
        raise RunnerExecError("The process " + process.name +
                              " is not compatible with Singularity")

    image_uri = replace_env_variables(process, process.container()['uri'])
    if process.container()['type'] == 'docker':
        image_uri = 'docker://' + image_uri

    self.notify_message('run singularity container: ' + image_uri)
    self.notify_message('args:' + ' '.join(args))
    # print("run singularity container:", image_uri)
    # print('args:', args)
    puller = Client.execute(image_uri, args)
    for line in puller:
        self.notify_message(line)
        print(line)
def action_build(builder={}):
    '''build a container from a generated recipe.
    '''
    # Post indicates browsing the tree, with an additional path to parse
    if request.method == "POST":

        uri = request.form.get('uri')
        recipe = request.form.get('recipe').replace('\r', '')
        app.logger.info('BUILD: %s' % uri)

        # Clean up old recipes
        cleanup('/tmp', "^Singularity.*")
        recipe = write_temp_recipe(recipe)

        if recipe is not None:

            # When the whiteout bug is fixed with Singularity...
            image, builder = Client.build(recipe=recipe,
                                          robot_name=True,
                                          sudo=False,
                                          stream=True)
            app.logger.info('build %s' % image)
            builder = itertools.chain(builder, [image])

    return Response(builder, mimetype='text/plain')
def extract_content(image_path, member_name, return_hash=False):
    '''extract_content will extract content from an image using cat.
       If return_hash is True, a hash sum is returned instead
    '''
    if member_name.startswith('./'):
        member_name = member_name.replace('.', '', 1)
    if return_hash:
        hashy = hashlib.md5()
    try:
        content = Client.execute(image_path, 'cat %s' % (member_name))
    except:
        return None
    if not isinstance(content, bytes):
        content = content.encode('utf-8')
        content = bytes(content)
    # If permissions don't allow read, return None
    if len(content) == 0:
        return None
    if return_hash:
        hashy.update(content)
        return hashy.hexdigest()
    return content
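
# Illustrative calls to extract_content() above; the image path and member
# names are placeholders. The function returns the file bytes, an md5 hex
# digest when return_hash=True, or None when the file is unreadable.
runscript = extract_content('/tmp/analysis.simg', './singularity')
runscript_md5 = extract_content('/tmp/analysis.simg', './singularity',
                                return_hash=True)
print(runscript_md5)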
def action_pull():
    '''the fetch view to perform the pull, and return a response
    '''
    # Ensure uri appears once
    container = request.form.get('uri')   # ubuntu:latest
    uri = request.form.get('endpoint')    # docker://
    container = '%s%s' % (uri, container.replace(uri, ''))

    app.logger.info("PULL for %s" % container)

    # If nvidia is used, use sregistry client
    if 'nvidia' in uri:
        nvidia = app.sregistry._get_setting('SREGISTRY_NVIDIA_TOKEN')

        # Make sure we use the right client
        os.environ['SREGISTRY_CLIENT'] = uri.replace('://', '')
        os.environ.putenv('SREGISTRY_CLIENT', uri.replace('://', ''))

        from sregistry.main import get_client
        client = get_client(image=container)
        app.logger.info("Using client %s" % client.client_name)

        try:
            puller = client.pull(container, force=True)
        except:
            puller = '''ERROR: manifest unknown, or pull error.
                        Use docker-compose logs web to see issue!'''

    else:
        # We will stream the response back!
        image, puller = Client.pull(container, stream=True, pull_folder='/tmp')
        puller = itertools.chain(puller, [image])

    return Response(puller, mimetype='text/plain')
def test_build_from_docker(tmp_path):
    container = str(tmp_path / "container.sif")
    created_container = Client.build(
        "docker://busybox:1.30.1", image=container, sudo=False
    )
    assert created_container == container
    assert os.path.exists(created_container)
def singularity_exists(self):
    """Check whether an instance exists or not.
    """
    instances = Client.instances(quiet=self.quiet)
    for instance in instances:
        if self.pid in instance.name:
            return True
    return False
def run_singularity_container_executable(uuid, container_image,
                                         executable_path, logs_path,
                                         args_dict):
    """Launch an executable within a Singularity container.

    Args:
        uuid: A string containing the uuid of the job being run.
        container_image: A string containing the name of the container
            to pull.
        executable_path: A string containing the path of the executable
            to execute within the container.
        logs_path: A string (or None) containing the path of the
            directory containing the relevant logs within the container.
        args_dict: A dictionary containing arguments and corresponding
            values.
    """
    # Import Singularity library
    from spython.main import Client as client

    # Pull the specified container. This pulls the latest version of
    # the container (with the specified tag if provided).
    singularity_image = client.pull(
        image=container_image,
        pull_folder=os.environ['WORKER_SINGULARITY_IMAGES_DIRECTORY'],
    )

    # Run the executable with the arguments
    if logs_path is None:
        bind_option = None
    else:
        # Create the host path. This is required by the Singularity
        # library (though not the Docker library)
        host_path = os.path.join(os.environ['WORKER_LOGS_DIRECTORY'], uuid)
        os.makedirs(host_path, exist_ok=True)

        # Build the bind option to pass on to Singularity
        bind_option = host_path.rstrip('/') + ":" + logs_path.rstrip('/')

    client.execute(
        image=singularity_image,
        command=[executable_path, json.dumps(args_dict)],
        bind=bind_option,
    )

    # TODO give a real return value
    return "FINISHED"
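
# A hedged usage sketch for the fuller run_singularity_container_executable()
# above: the environment variables, uuid, image, paths, and arguments are all
# illustrative placeholders, and `os` and `json` are assumed to be imported at
# module level.
os.environ.setdefault('WORKER_SINGULARITY_IMAGES_DIRECTORY', '/tmp/images')
os.environ.setdefault('WORKER_LOGS_DIRECTORY', '/tmp/logs')
result = run_singularity_container_executable(
    uuid='123e4567-e89b-12d3-a456-426614174000',
    container_image='docker://python:3.9-slim',
    executable_path='/app/analyze.py',   # hypothetical script inside the image
    logs_path='/logs',
    args_dict={'verbose': True},
)
print(result)  # "FINISHED"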
def get_metadata(self, image_file, names=None):
    '''extract metadata using Singularity inspect, if the executable is found.
       If not, return a reasonable default (the parsed image name)

       Parameters
       ==========
       image_file: the full path to a Singularity image
       names: optional, an extracted or otherwise created dictionary of
              variables for the image, likely from utils.parse_image_name
    '''
    if names is None:
        names = {}

    metadata = {}

    # We can't return anything without image_file or names
    if image_file:
        if not os.path.exists(image_file):
            bot.error('Cannot find %s.' % image_file)
            return names or metadata

    # The user provided a file, but no names
    if not names:
        names = parse_image_name(remove_uri(image_file))

    # Look for the Singularity Executable
    singularity = which('singularity')['message']

    # Inspect the image, or return names only
    if os.path.exists(singularity) and image_file:
        from spython.main import Client as Singularity

        # Store the original quiet setting
        is_quiet = Singularity.quiet

        # We try and inspect, but not required (won't work within Docker)
        try:
            Singularity.quiet = True
            updates = Singularity.inspect(image=image_file)
        except:
            bot.warning('Inspect command not supported, metadata not included.')
            updates = None

        # Restore the original quiet setting
        Singularity.quiet = is_quiet

        # Try loading the metadata
        if updates is not None:
            try:
                updates = json.loads(updates)
                metadata.update(updates)
            except:
                pass

    metadata.update(names)
    return metadata
def get_instance(instanceDict):
    try:
        t_ins = Client.instance(image=instanceDict["image"],
                                name=instanceDict["ins_name"],
                                options=instanceDict["options"])
        return t_ins
    except Exception as e:
        logging.error(e)
        return None
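
# Minimal illustrative dictionary for get_instance() above; the keys mirror
# the ones the function reads and the values are placeholders.
instance = get_instance({
    "image": "docker://ubuntu:20.04",
    "ins_name": "example_instance",
    "options": ["--bind", "/tmp:/data"],
})
if instance is not None:
    print(instance.get_uri())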
def test_AFNI_libraries():
    SINGULARITY_IMAGE_PATH = '/home/circleci/project/C-PAC-CI.simg'
    if not os.path.exists(SINGULARITY_IMAGE_PATH):
        try:
            SINGULARITY_IMAGE_PATH = [
                d for d in os.listdir(str(Path(__file__).parent.parent.parent))
                if (d.endswith('.simg') or d.endswith('.sif'))
            ][0]
        except:
            raise Exception("Singularity image not in expected location.")
    if os.path.exists(SINGULARITY_IMAGE_PATH):
        afni_libraries = Client.execute(
            Client.instance(SINGULARITY_IMAGE_PATH),
            ['./dev/circleci_data/AFNI_libraries.sh'])
        assert "not found" not in afni_libraries, '\n'.join([
            line.strip() for line in afni_libraries.split('\n')
            if "not found" in line
        ])
def singularity_run(image_path, path_bind, args_list, container_dir='/tmp',
                    return_results=True, use_ephemeral_space=False,
                    dry_run=False):
    '''
    A wrapper module for running singularity based containers

    :param image_path: Singularity image path
    :param path_bind: Path to bind to singularity /tmp dir
    :param args_list: List of args for singularity run
    :param container_dir: Path inside the container to bind to, default /tmp
    :param return_results: Return singularity run results, default True
    :param use_ephemeral_space: Toggle for using ephemeral space for temp dir, default False
    :param dry_run: Return the singularity command without run, default False
    :returns: A response from container run and a string containing singularity command line
    '''
    try:
        check_file_path(image_path)
        check_file_path(path_bind)
        temp_dir = get_temp_dir(use_ephemeral_space=use_ephemeral_space)
        res = None
        temp_image_path = \
            os.path.join(
                temp_dir,
                os.path.basename(image_path))
        copy_local_file(image_path, temp_image_path)        # copy image to tmp dir
        if not isinstance(args_list, list) or \
           len(args_list) == 0:
            raise ValueError(
                'No args provided for singularity run')     # safemode
        args = ' '.join(args_list)                          # flatten args
        singularity_run_cmd = \
            'singularity run {0} --bind {1}:{2} {3}'.\
            format(
                temp_image_path,
                path_bind,
                container_dir,
                args)
        if dry_run:
            return res, singularity_run_cmd
        else:
            res = \
                Client.run(
                    image=temp_image_path,
                    bind='{0}:{1}'.format(path_bind, container_dir),
                    args=args,
                    return_result=return_results)
            remove_dir(temp_dir)                            # remove copied image after run
            return res, singularity_run_cmd
    except Exception as e:
        raise ValueError(
            'Failed to run image {0}, error: {1}'.
            format(image_path, e))
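
# A dry-run sketch of singularity_run() above: with dry_run=True the function
# only returns the command line it would execute, so no container is launched.
# Paths and arguments are placeholders; the helpers it calls (check_file_path,
# get_temp_dir, copy_local_file, remove_dir) are assumed to be importable from
# the same package.
_, cmd = singularity_run(
    image_path='/tmp/analysis.simg',
    path_bind='/home/user/run_data',
    args_list=['--input', '/tmp/sample.csv'],
    dry_run=True)
print(cmd)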
    read_file,
    write_file
)

from spython.main import Client

import unittest
import tempfile
import shutil
import json
import os

print("######################################################## test_reproduce")

# Pull images for all tests
tmpdir = tempfile.mkdtemp()
image1 = Client.pull('docker://ubuntu:14.04', pull_folder=tmpdir)
image2 = Client.pull('docker://busybox:1', pull_folder=tmpdir)


class TestReproduce(unittest.TestCase):

    def setUp(self):
        self.pwd = get_installdir()
        self.tmpdir = tmpdir
        self.image1 = image1
        self.image2 = image2

    def tearDown(self):
        pass

    def test_get_image_hashes(self):
        from singularity.analysis.reproduce import get_image_hashes, get_image_hash
def run_build(build_dir, params, verbose=True):
    '''run_build takes a build directory and params dictionary, and does the following:

      - downloads repo to a temporary directory
      - changes branch or commit, if needed
      - creates and bootstraps singularity image from Singularity file
      - returns a dictionary with: image (path), metadata (dict)

    The following must be included in params: spec_file, repo_url, branch, commit
    '''

    # Download the repository
    download_repo(repo_url=params['repo_url'],
                  destination=build_dir)

    os.chdir(build_dir)

    if params['branch'] is not None:
        bot.info('Checking out branch %s' % params['branch'])
        os.system('git checkout %s' % (params['branch']))
    else:
        params['branch'] = "master"

    # Set the debug level
    Client.debug = params['debug']

    # Commit
    if params['commit'] not in [None, '']:
        bot.info('Checking out commit %s' % params['commit'])
        os.system('git checkout %s .' % (params['commit']))

    # From here on out commit is used as a unique id, if we don't have one, we use current
    else:
        params['commit'] = os.popen('git log -n 1 --pretty=format:"%H"').read()
        bot.warning("commit not specified, setting to current %s" % params['commit'])

    # Dump some params for the builder, in case it fails after this
    passing_params = "/tmp/params.pkl"
    pickle.dump(params, open(passing_params, 'wb'))

    # Now look for spec file
    if os.path.exists(params['spec_file']):
        bot.info("Found spec file %s in repository" % params['spec_file'])

        # If the user has a symbolic link
        if os.path.islink(params['spec_file']):
            bot.info("%s is a symbolic link." % params['spec_file'])
            params['spec_file'] = os.path.realpath(params['spec_file'])

        # START TIMING
        start_time = datetime.now()

        # Secure Build
        image = Client.build(recipe=params['spec_file'],
                             build_folder=build_dir,
                             isolated=True)

        # Save hash for metadata (also is image name)
        version = get_image_file_hash(image)
        params['version'] = version
        pickle.dump(params, open(passing_params, 'wb'))

        # Rename image to be hash
        finished_image = "%s/%s.simg" % (os.path.dirname(image), version)
        image = shutil.move(image, finished_image)

        final_time = (datetime.now() - start_time).seconds
        bot.info("Final time of build %s seconds." % final_time)

        # Did the container build successfully?
        test_result = test_container(image)
        if test_result['return_code'] != 0:
            bot.error("Image failed to build, cancelling.")
            sys.exit(1)

        # Get singularity version
        singularity_version = Client.version()

        Client.debug = False
        inspect = Client.inspect(image)  # this is a string
        Client.debug = params['debug']

        # Get information on apps
        Client.debug = False
        app_names = Client.apps(image)
        Client.debug = params['debug']
        apps = extract_apps(image, app_names)

        metrics = {'build_time_seconds': final_time,
                   'singularity_version': singularity_version,
                   'singularity_python_version': singularity_python_version,
                   'inspect': inspect,
                   'version': version,
                   'apps': apps}

        output = {'image': image,
                  'metadata': metrics,
                  'params': params}

        return output

    else:
        # Tell the user what is actually there
        present_files = glob("*")
        bot.error("Build file %s not found in repository" % params['spec_file'])
        bot.info("Found files are %s" % "\n".join(present_files))

        # Params have been exported, will be found by log
        sys.exit(1)