def test_write_read_files(self):
    '''Exercise singularity.utils write_file, read_file and write_json.'''
    import json
    from singularity.utils import read_file, write_file, write_json

    # write_file: round-trip a small string through a fresh temp path
    print("Testing utils.write_file...")
    target = tempfile.mkstemp()[1]
    os.remove(target)
    write_file(target, "hello!")
    self.assertTrue(os.path.exists(target))

    # read_file: first line should match what was written
    print("Testing utils.read_file...")
    self.assertEqual("hello!", read_file(target)[0])

    print("Testing utils.write_json...")

    # Case 1: an unserializable payload (contains a set) must raise TypeError
    print("...Case 1: Providing bad json")
    unserializable = {"Wakkawakkawakka'}": [{True}, "2", 3]}
    target = tempfile.mkstemp()[1]
    os.remove(target)
    with self.assertRaises(TypeError) as cm:
        write_json(unserializable, target)

    # Case 2: a serializable payload lands on disk as valid json
    print("...Case 2: Providing good json")
    serializable = {"Wakkawakkawakka": [True, "2", 3]}
    target = tempfile.mkstemp()[1]
    os.remove(target)
    write_json(serializable, target)
    with open(target, 'r') as handle:
        loaded = json.loads(handle.read())
    self.assertTrue(isinstance(loaded, dict))
    self.assertTrue("Wakkawakkawakka" in loaded)
def zip_up(file_list,zip_name,output_folder=None):
    '''zip_up will compress a set of named contents into a package (.zip).
    :param file_list: dict mapping archive member names to content (the
        original docstring said "list", but the code iterates .items()).
        The special key "files" (any case) maps to a path or list of
        paths that are moved into the archive (the source files are
        removed!). Other values may be a list (newline-joined), a dict
        (serialized to json), bytes (decoded as utf-8), or a string.
    :param zip_name: the name of the zipfile to return.
    :param output_folder: the output folder to create the zip in. If not
        specified, a temporary folder will be given (and not cleaned up,
        since it holds the returned archive).
    :returns: full path to the created zip archive.
    '''
    tmpdir = tempfile.mkdtemp()

    # Make a new archive
    output_zip = "%s/%s" %(tmpdir,zip_name)
    zf = zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED, allowZip64=True)

    # Write files to zip, depending on type
    for filename,content in file_list.items():

        bot.debug("Adding %s to package..." %filename)

        # If it's the files list, move files into the archive
        if filename.lower() == "files":
            if not isinstance(content,list):
                content = [content]
            for copyfile in content:
                zf.write(copyfile,os.path.basename(copyfile))
                os.remove(copyfile)

        # Otherwise serialize the content to a staging file first
        else:
            output_file = "%s/%s" %(tmpdir, filename)

            # If it's a list, write to new file, and save
            if isinstance(content,list):
                write_file(output_file,"\n".join(content))

            # If it's a dict, save to json
            elif isinstance(content,dict):
                write_json(content,output_file)

            # If bytes, need to decode
            elif isinstance(content,bytes):
                write_file(output_file,content.decode('utf-8'))

            # String or other
            else:
                output_file = write_file(output_file,content)

            if os.path.exists(output_file):
                zf.write(output_file,filename)
                os.remove(output_file)

    # Close the zip file
    zf.close()

    # Copy out of the temporary folder if a destination was requested
    if output_folder is not None:
        shutil.copyfile(output_zip,"%s/%s"%(output_folder,zip_name))
        shutil.rmtree(tmpdir)
        output_zip = "%s/%s"%(output_folder,zip_name)

    return output_zip
def get_boutiques_json(name,version,inputs,command=None,description=None,output_file=None,
                       schema_version='0.2-snapshot',docker_image=None,docker_index=None):
    '''get_boutiques_json returns a Boutiques data structure (json) to describe
    meta data for a singularity container. The function is not singularity
    specific and can be used for other cases.
    :param name: the name of the container / object to export
    :param version: the version of the container, should be some hash/uid for the container
    :param inputs: should be a dictionary with keys flags, required, default, type,
        choices, description, see note below.
    :param command: the command to use on command line for the object / container,
        if not provided, will use name
    :param description: a description of the container / object to export
    :param output_file: if provided, the spec is also written to this path as json
    :param schema_version: Boutiques schema version tag
    :param docker_image: Name of the Docker image for the object [OPTIONAL]
    :param docker_index: Docker index where the Docker image is stored
        (e.g. http://index.docker.io) [OPTIONAL]

    ::note inputs

       A list of {'flags':[...], 'name':'', 'required':True/False, 'default': ..,
       'type':bool, 'choices':OPTIONAL, 'description':'This is...'}
       see singularity.runscript.get_parameters for parsing different
       executable files for input args
    '''
    if command is None:
        command = name

    # Meta data for spec
    json_spec = {'name': name}
    json_spec['command-line'] = "%s " %(command)
    json_spec['tool-version'] = version
    json_spec['schema-version'] = schema_version

    # Description
    if description is None:
        json_spec['description'] = "%s is a singularity container that can be integrated into a workflow." %(name)
    else:
        json_spec['description'] = "%s is a singularity container that can be integrated into a workflow: %s" %(name,description)

    # Inputs and outputs
    json_spec['inputs'] = []
    json_spec['outputs'] = []

    # Containers (not useful, we aren't using docker)
    # BUG FIX: these previously assigned to tool_desc, an undefined name,
    # raising NameError whenever docker_image/docker_index was provided
    if docker_image:
        json_spec['docker-image'] = docker_image
    if docker_index:
        json_spec['docker-index'] = docker_index

    # Add inputs
    for arg in inputs:
        new_input = get_boutiques_input(arg)
        json_spec['inputs'].append(new_input)
        json_spec['command-line'] += "%s " %(new_input['command-line-key'])

    # Currently, we don't know outputs - they are TBD using singularity-hub
    # not clear if we can determine from container, or generating on cluster

    # Writes JSON string to file
    if output_file is not None:
        write_json(json_spec,output_file)

    return json_spec
# NOTE(review): this excerpt begins mid-statement -- the leading ")" closes a
# call that started before this chunk; it is not a standalone script.
)

# get_levels, assess_differences, write_json, base, replication and
# replicates are defined earlier in this script (not visible here)
levels = get_levels()

# Let's assess what files are identical across pairs of images in different sets
# Singularity Hub (replicate): meaning same base os, different build host
# These should be equal for base, environment, runscript, replicate, but
# not identical.
os.chdir(replication)
image_files = glob('*.img')
diffs = assess_differences(image_files[0],image_files[1],levels=levels)
print("SINGULARITY HUB REPLICATES")
print(diffs)
write_json(diffs,'%s/diff_hub_replicates_pair.json' %base)

# Local Replicate: if we produce an equivalent image at a different time, we might have
# variance in package directories (anything involving variable with mirrors, etc)
# these images should also be assessed as equivalent on the level of runscript,
# environment, base, replicate, labels, but not identical. They should be MORE
# identical than the Singularity Hub replicate by way of being produced on the same host.
os.chdir(replicates)
image_files = glob('*.img')
diffs = assess_differences(image_files[0],image_files[1],levels=levels)
print("LOCAL REPLICATES")
print(diffs)
write_json(diffs,'%s/diff_local_replicates_pair.json' %base)

# Identical: all files are the same
def zip_up(file_list, zip_name, output_folder=None):
    '''zip_up will compress a set of named contents into a package (.zip).
    :param file_list: dict mapping archive member names to content (the
        original docstring said "list", but the code iterates .items()).
        The special key "files" (any case) maps to a path or list of
        paths that are moved into the archive (the source files are
        removed!). Other values may be a list (newline-joined), a dict
        (serialized to json), bytes (decoded as utf-8), or a string.
    :param zip_name: the name of the zipfile to return.
    :param output_folder: the output folder to create the zip in. If not
        specified, a temporary folder will be given (and not cleaned up,
        since it holds the returned archive).
    :returns: full path to the created zip archive.
    '''
    tmpdir = tempfile.mkdtemp()

    # Make a new archive
    output_zip = "%s/%s" % (tmpdir, zip_name)
    zf = zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED, allowZip64=True)

    # Write files to zip, depending on type
    for filename, content in file_list.items():

        bot.debug("Adding %s to package..." % filename)

        # If it's the files list, move files into the archive
        if filename.lower() == "files":
            if not isinstance(content, list):
                content = [content]
            for copyfile in content:
                zf.write(copyfile, os.path.basename(copyfile))
                os.remove(copyfile)

        # Otherwise serialize the content to a staging file first
        else:
            output_file = "%s/%s" % (tmpdir, filename)

            # If it's a list, write to new file, and save
            if isinstance(content, list):
                write_file(output_file, "\n".join(content))

            # If it's a dict, save to json
            elif isinstance(content, dict):
                write_json(content, output_file)

            # If bytes, need to decode
            elif isinstance(content, bytes):
                write_file(output_file, content.decode('utf-8'))

            # String or other
            else:
                output_file = write_file(output_file, content)

            if os.path.exists(output_file):
                zf.write(output_file, filename)
                os.remove(output_file)

    # Close the zip file
    zf.close()

    # Copy out of the temporary folder if a destination was requested
    if output_folder is not None:
        shutil.copyfile(output_zip, "%s/%s" % (output_folder, zip_name))
        shutil.rmtree(tmpdir)
        output_zip = "%s/%s" % (output_folder, zip_name)

    return output_zip
# Stage the Dockerfile text under <base>/<entity>/text/<repo_name>
# NOTE(review): base, entity, repo_name, content and dockerfiles are
# defined earlier in this script (not visible here)
entity_folder = "%s/%s" % (base, entity)
text_folder = "%s/text" % entity_folder
if not os.path.exists(entity_folder):
    os.mkdir(entity_folder)
    os.mkdir(text_folder)
repo_folder = "%s/%s" % (text_folder, repo_name)
if not os.path.exists(repo_folder):
    os.mkdir(repo_folder)

# Replace the raw Dockerfile with a Dockerfile.txt copy of its content
# NOTE(review): os.remove raises FileNotFoundError if Dockerfile is absent --
# confirm an earlier step always writes it
dockerfile = "%s/Dockerfile" % (repo_folder)
os.remove(dockerfile)
dockerfile = "%s/Dockerfile.txt" % (repo_folder)
with open(dockerfile, 'w') as filey:
    filey.writelines(content)

# Now let's use Singularity client to generate a manifest (metadata) for each
# NOTE(review): hard-coded developer path -- this is a one-off analysis script
os.chdir(
    '/home/vanessa/Documents/Dropbox/Code/singularity/singularity/libexec/python'
)
from singularity.utils import write_json
from docker.api import *

for name, content in dockerfiles.items():
    entity, repo_name = name.split('/')
    repo_folder = "%s/%s/text/%s" % (base, entity, repo_name)
    client = DockerApiConnection(image=name)
    # fetch both schema versions of the registry manifest, stored side by side
    manifestv1 = client.get_manifest(old_version=True)
    manifest = client.get_manifest()
    manifests = {'v1': manifestv1, 'v2': manifest}
    metadata_file = "%s/Dockerfile.json" % (repo_folder)
    write_json(manifests, metadata_file)
def docker2singularity(self,docker_image,output_dir=None):
    '''docker2singularity will convert a docker image to singularity by
    exporting a running container's filesystem into a freshly created
    singularity image, then copying in docker metadata (singularity.json),
    a generated runscript (entrypoint preferred over cmd), and the docker
    environment, and finally fixing up in-container permissions.
    :param docker_image: the name of the docker image, doesn't have to be on local machine (eg, ubuntu:latest)
    :param output_dir: folder to place the new image in; the image is named
        <image>-<created-date>.img
    :returns: path to the new singularity image, or None if docker is not installed
    '''
    if check_install('docker') == True:
        sudo = True

        # Start a detached container that idles, so it can be inspected/exported
        cmd = ['docker','run','-d',docker_image,'tail','-f','/dev/null']
        runningid = self.run_command(cmd,sudo=sudo)
        # sha256:d59bdb51bb5c4fb7b2c8d90ae445e0720c169c553bcf553f67cb9dd208a4ec15
        # Take the first 12 characters to get id of container
        container_id=runningid[0:12]

        # Network address, if needed
        cmd = ['docker','inspect','--format="{{.NetworkSettings.IPAddress}}"',container_id]
        network_addr = self.run_command(cmd,sudo=sudo)

        # Get image name
        cmd = ['docker','inspect','--format="{{.Config.Image}}"',container_id]
        image_name = self.run_command(cmd,sudo=sudo)

        # Get creation date (first 10 chars of the ISO timestamp, i.e. YYYY-MM-DD)
        cmd = ['docker','inspect','--format="{{.Created}}"',docker_image]
        creation_date = self.run_command(cmd,sudo=sudo)[0:10]

        # Estimate image size
        cmd = ['docker','inspect','--format="{{.Size}}"',docker_image]
        size = int(self.run_command(cmd,sudo=sudo))
        # convert size in MB (it seems too small for singularity containers ...?). Add 1MB and round up (minimum).
        size=int(round(size/1000000.0)+1.0)
        # adding half of the container size seems to work (do not know why exactly...?)
        # I think it would be Ok by adding 1/3 of the size.
        # NOTE(review): under python3 size/2 yields a float here -- confirm
        # self.create accepts a non-integer size
        size=size+size/2

        # Create the image
        tmpdir = tempfile.mkdtemp()
        new_container_name = "%s-%s.img" %(image_name,creation_date)
        json_file = "%s/singularity.json" %(tmpdir)
        runscript = "%s/singularity" %(tmpdir)
        if output_dir != None:
            new_container_name = "%s/%s" %(output_dir,new_container_name)
        self.create(image_path=new_container_name,size=size)

        # export running docker container
        # NOTE(review): the '|' element only pipes if run_command joins the
        # args through a shell -- a plain exec would pass '|' literally; verify
        cmd = ['docker','export',container_id,'|','sudo','singularity','import',new_container_name]
        self.run_command(cmd,sudo=sudo,suppress=True)

        # Add stuff about the container to it
        cmd = ['docker','inspect', container_id]
        json_data = json.loads(self.run_command(cmd,sudo=sudo))
        write_json(json_data,json_file)
        #TODO: we might be able to extract boutique / make package here

        # Add singularity.json to container
        cmd = ['singularity','copy',new_container_name,json_file,'/']
        self.run_command(cmd,sudo=sudo)

        # Sentinel values docker prints when Cmd/Entrypoint are unset
        invalid_commands = ["","null","none"]

        # Bootstrap the image to set up scripts for environemnt setup
        self.run_command(['singularity','bootstrap',new_container_name],sudo=sudo)
        self.run_command(['chmod','a+rw','-R',tmpdir],sudo=sudo)

        # Runscript
        command = self.run_command(['docker','inspect','--format="{{json .Config.Cmd}}"',docker_image],sudo=sudo)
        if command not in invalid_commands:
            command = "/bin/sh -c %s" %(command)
            # Remove quotes, commas, and braces
            command = re.sub('"|[[]|[]]',"",command)

        # Get the entrypoint
        entrypoint = self.run_command(['docker','inspect','--format="{{json .Config.Entrypoint}}"',docker_image],sudo=sudo)
        if entrypoint not in invalid_commands:
            entrypoint = "/bin/sh -c %s" %(entrypoint)
            # Remove quotes, commas, and braces
            entrypoint = re.sub('"|[[]|[]]',"",entrypoint)

        # Write the runscript: entrypoint takes precedence over cmd
        runscript = "%s/singularity" %(tmpdir)
        runscript_file = open(runscript,'w')
        runscript_file.writelines('#!/bin/sh\n')
        if entrypoint not in invalid_commands:
            runscript_file.writelines('%s $@' %(entrypoint))
        else:
            if command not in invalid_commands:
                runscript_file.writelines('%s $@' %(command))
        runscript_file.close()
        self.run_command(['chmod','+x',runscript],sudo=sudo)
        self.run_command(['singularity','copy',new_container_name,runscript,'/'],sudo=sudo)

        # Set up the environment: capture the image's env via /usr/bin/env
        docker_environment = "%s/docker_environment" %(tmpdir)
        environment = self.run_command(['docker','run','--rm','--entrypoint="/usr/bin/env"',docker_image],sudo=sudo).split('\n')
        # don't include HOME and HOSTNAME - they mess with local config
        environment = [x for x in environment if not re.search("^HOME",x) and not re.search("^HOSTNAME",x)]
        write_file(docker_environment,"\n".join(environment))

        # Write to container, and source it from /environment at runtime
        self.run_command(["chmod","u+x",docker_environment],sudo=sudo)
        self.run_command(['singularity','copy',new_container_name,docker_environment,'/'],sudo=sudo)
        self.run_command(['singularity','exec','--writable',new_container_name,'/bin/sh -c ',docker_environment,'/'],sudo=sudo)
        cmd = """singularity exec --writable %s /bin/sh -c "echo '. /docker_environment' >> /environment" """ %(new_container_name)
        self.run_command([cmd.replace('\n','')],sudo=sudo,suppress=True)
        shutil.rmtree(tmpdir)

        # Permissions - make sure any user can execute everything in container
        cmd = """singularity exec --writable --contain %s /bin/sh -c "find /* -maxdepth 0 -not -path '/dev*' -not -path '/proc*' -not -path '/sys*' -exec chmod a+r -R '{}' \;" """ %(new_container_name)
        self.run_command([cmd.replace('\n','')],sudo=sudo,suppress=True)
        cmd = """singularity exec --writable --contain %s /bin/sh -c "find / -executable -perm -u+x,o-x -not -path '/dev*' -not -path '/proc*' -not -path '/sys*' -exec chmod a+x '{}' \;" """ %(new_container_name)
        self.run_command([cmd.replace('\n','')],sudo=sudo,suppress=True)

        print("Stopping container... please wait!")
        self.run_command(['docker','stop',container_id],sudo=sudo)
        return new_container_name

    else:
        return None
def docker2singularity(self,docker_image,output_dir=None):
    '''docker2singularity will convert a docker image to singularity
    (variant: writes working files next to the image instead of into a
    temp dir, merges the container's /etc/group with the host's, and has
    the runscript generation commented out).
    :param docker_image: the name of the docker image, doesn't have to be on local machine (eg, ubuntu:latest)
    :param output_dir: optional folder for the image and working files
    :returns: path to the new singularity image, or None if docker is not installed
    '''
    if check_install('docker') == True:
        sudo = True

        # Start a detached container that idles, so it can be inspected/exported
        cmd = ['docker','run','-d',docker_image,'tail','-f','/dev/null']
        runningid = self.run_command(cmd,sudo=sudo)
        # sha256:d59bdb51bb5c4fb7b2c8d90ae445e0720c169c553bcf553f67cb9dd208a4ec15
        # Take the first 12 characters to get id of container
        container_id=runningid[0:12]

        # Network address, if needed
        cmd = ['docker','inspect','--format="{{.NetworkSettings.IPAddress}}"',container_id]
        network_addr = self.run_command(cmd,sudo=sudo)

        # Get image name
        cmd = ['docker','inspect','--format="{{.Config.Image}}"',container_id]
        image_name = self.run_command(cmd,sudo=sudo)

        # Get creation date (first 10 chars of the ISO timestamp, i.e. YYYY-MM-DD)
        cmd = ['docker','inspect','--format="{{.Created}}"',docker_image]
        creation_date = self.run_command(cmd,sudo=sudo)[0:10]

        # Estimate image size
        cmd = ['docker','inspect','--format="{{.Size}}"',docker_image]
        size = int(self.run_command(cmd,sudo=sudo))
        # convert size in MB (it seems too small for singularity containers ...?). Add 1MB and round up (minimum).
        size=int(round(size/1000000.0)+1.0)
        # adding half of the container size seems to work (do not know why exactly...?)
        # I think it would be Ok by adding 1/3 of the size.
        # NOTE(review): under python3 size/2 yields a float -- confirm
        # self.create accepts a non-integer size
        size=size+size/2

        # Create the image
        new_container_name = "%s-%s.img" %(image_name,creation_date)
        json_file = "singularity.json"
        runscript = "singularity"
        if output_dir != None:
            new_container_name = "%s/%s" %(output_dir,new_container_name)
            json_file = "%s/%s" %(output_dir,json_file)
            runscript = "%s/%s" %(output_dir,runscript)
        self.create(image_path=new_container_name,size=size)

        # export running docker container
        # NOTE(review): the '|' element only pipes if run_command joins the
        # args through a shell -- a plain exec would pass '|' literally; verify
        cmd = ['docker','export',container_id,'|','sudo','singularity','import',new_container_name]
        self.run_command(cmd,sudo=sudo,suppress=True)

        # Add stuff about the container to it
        cmd = ['docker','inspect', container_id]
        json_data = json.loads(self.run_command(cmd,sudo=sudo))
        write_json(json_data,json_file)
        #TODO: we might be able to extract boutique / make package here

        # Add singularity.json to container
        cmd = ['singularity','copy',new_container_name,json_file,'/']
        self.run_command(cmd,sudo=sudo)

        # Merge the /etc/group file, and add to container
        # NOTE(review): 'uniq -u' keeps only lines appearing exactly once, so
        # entries present in BOTH host and container are dropped -- confirm
        # that is the intended merge semantics
        self.run_command(['docker','cp',"%s:/etc/group" %(container_id),'grouphost'],sudo=sudo)
        self.run_command(['sort','/etc/group','grouphost','|','uniq','-u','>','group'],sudo=sudo)
        self.run_command(['singularity','copy',new_container_name,'group','/etc/group'],sudo=sudo)
        os.remove(json_file)
        os.remove('grouphost')
        os.remove('group')

        # Runscript! Currently disabling - having some bugs with this
        #cmd = ['docker','inspect',"--format='{{json .Config.Cmd}}'",docker_image]
        #command = self.run_command(cmd,sudo=sudo)
        #command = command.replace('["',"").replace('"]',"")
        #if command != "none":
        #    write_file(runscript,command)
        #    cmd = ['chmod','+x',runscript]
        #    self.run_command(cmd,sudo=sudo)
        #    cmd = ['singularity','copy',new_container_name,runscript,'/']
        #    self.run_command(cmd,sudo=sudo)
        #    os.remove(runscript)

        print("Stopping container... please wait!")
        cmd = ['docker','stop',container_id]
        self.run_command(cmd,sudo=sudo)
        return new_container_name

    else:
        return None
# Stage the Dockerfile text under <base>/<entity>/text/<repo_name>
# NOTE(review): entity_folder, base, entity, repo_name, content and
# dockerfiles are defined earlier in this script (not visible here)
text_folder = "%s/text" %entity_folder
if not os.path.exists(entity_folder):
    os.mkdir(entity_folder)
    os.mkdir(text_folder)
repo_folder = "%s/%s" %(text_folder,repo_name)
if not os.path.exists(repo_folder):
    os.mkdir(repo_folder)

# Replace the raw Dockerfile with a Dockerfile.txt copy of its content
# NOTE(review): os.remove raises FileNotFoundError if Dockerfile is absent --
# confirm an earlier step always writes it
dockerfile = "%s/Dockerfile" %(repo_folder)
os.remove(dockerfile)
dockerfile = "%s/Dockerfile.txt" %(repo_folder)
with open(dockerfile,'w') as filey:
    filey.writelines(content)

# Now let's use Singularity client to generate a manifest (metadata) for each
# NOTE(review): hard-coded developer path -- this is a one-off analysis script
os.chdir('/home/vanessa/Documents/Dropbox/Code/singularity/singularity/libexec/python')
from singularity.utils import write_json
from docker.api import *

for name,content in dockerfiles.items():
    entity,repo_name = name.split('/')
    repo_folder = "%s/%s/text/%s" %(base,entity,repo_name)
    client = DockerApiConnection(image=name)
    # fetch both schema versions of the registry manifest, stored side by side
    manifestv1 = client.get_manifest(old_version=True)
    manifest = client.get_manifest()
    manifests = {'v1':manifestv1, 'v2':manifest}
    metadata_file = "%s/Dockerfile.json" %(repo_folder)
    write_json(manifests,metadata_file)