def _update_secrets(self, base=None):
    '''update secrets will update/get the base for the server, along
       with the bucket name, defaulting to sregistry.

       Reads (and persists) the S3 base endpoint, AWS credentials, and the
       signature version from settings/environment, then initializes the
       bucket handles.

       Parameters
       ==========
       base: unused fallback; self.base is resolved from SREGISTRY_S3_BASE.
    '''
    # We are required to have a base, either from environment or terminal
    self.base = self._get_and_update_setting('SREGISTRY_S3_BASE', self.base)
    self._id = self._get_and_update_setting('AWS_ACCESS_KEY_ID')
    self._key = self._get_and_update_setting('AWS_SECRET_ACCESS_KEY')

    if not self._id or not self._key:
        bot.warning(
            "Accessing the bucket anonymously. Consider defining AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY if access fails."
        )

    # Get the desired S3 signature. Default is the current "s3v4" signature.
    # If specified, user can request "s3" (v2 old) signature
    self._signature = self._get_and_update_setting('SREGISTRY_S3_SIGNATURE')

    # Anything other than an explicit "s3" (v2) request falls back to s3v4
    # (the previous if/else re-assigned 's3' to itself in the first branch)
    self._signature = 's3' if self._signature == 's3' else 's3v4'

    # Define self.bucket_name, self.s3, then self.bucket
    self.get_bucket_name()
    self.get_resource()
    self.get_bucket()
def get_ipaddress(self, name, retries=3, delay=3):
    '''get the ip_address of an inserted instance. Will try three times
       with delay to give the instance time to start up.

       Parameters
       ==========
       name: the name of the instance to get the ip address for.
       retries: the number of retries before giving up
       delay: the delay between retry

       Note from @vsoch: this function is pretty nasty.
    '''
    for _attempt in range(retries):

        # Refresh the instance listing on every attempt
        listing = self._get_instances()

        # Walk instances -> network interfaces -> access configs
        for record in listing['items']:
            if record['name'] != name:
                continue
            for iface in record['networkInterfaces']:
                if iface['name'] != 'nic0':
                    continue
                for config in iface['accessConfigs']:
                    if config['name'] == 'External NAT' and 'natIP' in config:
                        return config['natIP']

        sleep(delay)

    bot.warning('Did not find IP address, check Cloud Console!')
def get_uri(image):
    '''get the uri for an image, if within acceptable

       Returns the lowercase, dash-normalized uri prefix of the image
       string, or None when the prefix is missing or not recognized.

       Parameters
       ==========
       image: the image uri, in the format <uri>://<registry>/<namespace>:<tag>
    '''
    # Ensure we have a string
    image = image or ''

    # Find uri prefix, including ://
    regexp = re.compile('^.+://')
    uri = regexp.match(image)
    if uri is not None:
        uri = uri.group().lower().replace('_', '-').replace('://', '')

    # Bug fix: a missing comma previously fused 'google-drive' 'hub' into
    # one string, so neither uri was accepted.
    accepted_uris = [
        'aws', 'docker', 'dropbox', 'gitlab', 'globus',
        'google-build', 'google-storage', 'google-drive',
        'hub', 'nvidia', 'registry', 's3', 'swift'
    ]

    # Allow for Singularity compatability
    if uri == "shub":
        uri = "hub"

    if uri not in accepted_uris:
        bot.warning('%s is not a recognized uri.' % uri)
        uri = None

    return uri
def download(url, file_name, headers=None, show_progress=True):
    '''stream to a temporary file, rename on successful completion

       Parameters
       ==========
       file_name: the file name to stream to
       url: the url to stream from
       headers: additional headers to add
    '''
    # Stage the download under a temporary name beside the target
    fd, tmp_file = tempfile.mkstemp(prefix=("%s.tmp." % file_name))
    os.close(fd)

    if DISABLE_SSL_CHECK is True:
        bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::')
    verify = not DISABLE_SSL_CHECK

    # Does the url being requested exist?
    head_status = requests.head(url, verify=verify).status_code
    if head_status not in [200, 401]:
        bot.error("Invalid url or permissions %s" % url)
    else:
        response = stream(url, headers=headers, stream_to=tmp_file)
        if isinstance(response, HTTPError):
            bot.exit("Error downloading %s, exiting." % url)
        shutil.move(tmp_file, file_name)

    return file_name
def get_ipaddress(self, name, retries=3, delay=3):
    """get the ip_address of an inserted instance. Will try three times
       with delay to give the instance time to start up.

       Parameters
       ==========
       name: the name of the instance to get the ip address for.
       retries: the number of retries before giving up
       delay: the delay between retry

       Note from @vsoch: this function is pretty nasty.
    """
    for _attempt in range(retries):

        # Refresh the instance listing on every attempt
        listing = self._get_instances()

        # Walk instances -> network interfaces -> access configs
        for record in listing["items"]:
            if record["name"] != name:
                continue
            for iface in record["networkInterfaces"]:
                if iface["name"] != "nic0":
                    continue
                for config in iface["accessConfigs"]:
                    if config["name"] == "External NAT" and "natIP" in config:
                        return config["natIP"]

        sleep(delay)

    bot.warning("Did not find IP address, check Cloud Console!")
def main(args, parser, extra):
    """Entry point for the build command: resolve a client and dispatch
       the build to the matching backend implementation.
    """
    from sregistry.main import get_client

    # A container uri (--name) is required
    if args.name is None:
        bot.exit("You must add the --name of a container uri to build.")

    cli = get_client(image=args.name, quiet=args.quiet)
    cli.announce(args.command)

    # If the client doesn't have the command, exit
    if not hasattr(cli, "build"):
        bot.exit("build is not implemented for %s. Why don't you add it?" % cli.client_name)

    # Dispatch on the resolved client name
    if cli.client_name == "google-build":
        response = run_google_build(cli, args)
    elif cli.client_name == "google-storage":
        # Currently allows for google_build
        response = run_compute_build(cli, args)
    else:
        # Singularity Registry Server uses build with a recipe
        bot.warning("No URI specified, assuming Singularity Registry with Builder")
        response = run_registry_build(cli, args, extra)

    # If the client wants to preview, the config is returned
    if args.preview:
        print(json.dumps(response, indent=4, sort_keys=True))
def get_singularity_version(singularity_version=None):
    '''get_singularity_version will determine the singularity version for a
       build first, an environmental variable is looked at, followed by
       using the system version.

       Parameters
       ==========
       singularity_version: if not defined, look for in environment. If still
       not find, try finding via executing --version to Singularity. Only return
       None if not set in environment or installed.
    '''
    if singularity_version is None:
        singularity_version = os.environ.get("SINGULARITY_VERSION")

    # Fall back to asking the installed binary
    if singularity_version is None:
        try:
            cmd = ['singularity', '--version']
            output = run_command(cmd)
            if isinstance(output['message'], bytes):
                output['message'] = output['message'].decode('utf-8')
            singularity_version = output['message'].strip('\n')
            bot.info("Singularity %s being used." % singularity_version)

        # Bug fix: a bare except also swallowed SystemExit/KeyboardInterrupt;
        # catch Exception so those still propagate.
        except Exception:
            singularity_version = None
            bot.warning("Singularity version not found, so it's likely not installed.")

    return singularity_version
def get_template(name):
    """return a default template for some function in sregistry
       If there is no template, None is returned.

       Parameters
       ==========
       name: the name of the template to retrieve
    """
    key = name.lower()

    # Registry of known default templates
    templates = {
        "tarinfo": {
            "gid": 0,
            "uid": 0,
            "uname": "root",
            "gname": "root",
            "mode": 493,
        }
    }

    if key not in templates:
        bot.warning("Cannot find template %s" % (key))
        return None

    bot.debug("Found template for %s" % (key))
    return templates[key]
def get_metadata(self, image_file, names=None):
    '''extract metadata using Singularity inspect, if the executable is found.
       If not, return a reasonable default (the parsed image name)

       Parameters
       ==========
       image_file: the full path to a Singularity image
       names: optional, an extracted or otherwise created dictionary of variables
       for the image, likely from utils.parse_image_name

       Returns
       =======
       metadata: dict of inspect output (when available) merged with names
    '''
    if names is None:
        names = {}
    metadata = {}

    # We can't return anything without image_file or names
    if image_file:
        if not os.path.exists(image_file):
            bot.error('Cannot find %s.' % image_file)
            return names or metadata

    # The user provided a file, but no names
    if not names:
        names = parse_image_name(remove_uri(image_file))

    # Look for the Singularity Executable
    # NOTE(review): assumes which() returns {'message': <path-or-empty>} —
    # an empty string simply fails the exists() check below
    singularity = which('singularity')['message']

    # Inspect the image, or return names only
    if os.path.exists(singularity) and image_file:
        from spython.main import Client as Singularity

        # Store the original quiet setting
        is_quiet = Singularity.quiet

        # We try and inspect, but not required (wont work within Docker)
        try:
            Singularity.quiet = True
            updates = Singularity.inspect(image=image_file)
        except:
            bot.warning('Inspect command not supported, metadata not included.')
            updates = None

        # Restore the original quiet setting
        Singularity.quiet = is_quiet

        # Try loading the metadata; best-effort, a non-json payload is ignored
        if updates is not None:
            try:
                updates = json.loads(updates)
                metadata.update(updates)
            except:
                pass

    # names take precedence over any inspected values with the same keys
    metadata.update(names)
    return metadata
def cp(self, move_to, image_name=None, container=None, command="copy"):
    '''_cp is the shared function between mv (move) and rename, and performs
       the move, and returns the updated container

       Parameters
       ==========
       image_name: an image_uri to look up a container in the database
       container: the container object to move (must have a container.image)
       move_to: the full path to move it to
       command: label used in log messages ("copy", "move", or "rename")

       Exits (sys.exit(1)) on any validation or move failure.
    '''
    # At least one way to identify the container is required
    if container is None and image_name is None:
        bot.error('A container or image_name must be provided to %s' % command)
        sys.exit(1)

    # If a container isn't provided, look for it from image_uri
    if container is None:
        container = self.get(image_name, quiet=True)

    # A remote-only record has no local image file
    image = container.image or ''

    if os.path.exists(image):
        filedir = os.path.dirname(move_to)

        # If the two are the same, doesn't make sense
        if move_to == image:
            bot.warning('%s is already the name.' % image)
            sys.exit(1)

        # Ensure directory exists
        if not os.path.exists(filedir):
            bot.error('%s does not exist. Ensure exists first.' % filedir)
            sys.exit(1)

        # Ensure writable for user
        if not os.access(filedir, os.W_OK):
            bot.error('%s is not writable' % filedir)
            sys.exit(1)

        original = os.path.basename(image)
        try:
            # Move the file, then record the new path in the database
            shutil.move(image, move_to)
            container.image = move_to
            self.session.commit()
            bot.info('[%s] %s => %s' % (command, original, move_to))
            return container
        except:
            bot.error('Cannot %s %s to %s' % (command, original, move_to))
            sys.exit(1)

    # Reached only when no local image file exists
    bot.warning('''This operation is not permitted on a remote image. Please pull %s and then %s to the appropriate location.''' % (container.uri, command))
def create_metadata_tar(self, destination=None, metadata_folder=".singularity.d"):
    '''create a metadata tar (runscript and environment) to add to the
       downloaded image. This function uses all functions in this section
       to obtain key--> values from the manifest config, and write
       to a .tar.gz

       Parameters
       ==========
       metadata_folder: the metadata folder in the singularity image.
                        default is .singularity.d
    '''
    # Collected tarinfo entries to bundle
    files = []

    # Environment, skipped when missing or empty
    environ = self._extract_env()
    if environ not in [None, ""]:
        bot.verbose3('Adding Docker environment to metadata tar')
        entry = get_template('tarinfo')
        entry['name'] = './%s/env/10-docker.sh' % (metadata_folder)
        entry['content'] = environ
        files.append(entry)

    # Labels, serialized to json
    labels = self._extract_labels()
    if labels is not None:
        labels = print_json(labels)
        bot.verbose3('Adding Docker labels to metadata tar')
        entry = get_template('tarinfo')
        entry['name'] = "./%s/labels.json" % metadata_folder
        entry['content'] = labels
        files.append(entry)

    # Runscript
    runscript = self._extract_runscript()
    if runscript is not None:
        bot.verbose3('Adding Docker runscript to metadata tar')
        entry = get_template('tarinfo')
        entry['name'] = "./%s/runscript" % metadata_folder
        entry['content'] = runscript
        files.append(entry)

    # Nothing extracted means no tar file is produced
    if not files:
        bot.warning("No metadata will be included.")
        return None

    dest = self._get_download_cache(destination, subfolder='metadata')
    return create_tar(files, dest)
def verify(self):
    '''verify will return a True or False to determine to verify the
       requests call or not. If False, we should the user a warning message,
       as this should not be done in production!
    '''
    from sregistry.defaults import DISABLE_SSL_CHECK

    # Warn loudly whenever certificate checking is turned off
    if DISABLE_SSL_CHECK is True:
        bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::')

    return not DISABLE_SSL_CHECK
def stream(url, headers, stream_to=None, retry=True):
    """stream is a get that will stream to file_name. Since this is a worker
       task, it differs from the client provided version in that it requires
       headers.

       Parameters
       ==========
       url: the url to issue the GET against
       headers: headers dictionary for the request (required here)
       stream_to: the file path to write downloaded chunks to
       retry: if True, allow one token refresh + retry on a 401 response

       Returns stream_to on success; exits via bot.exit on any other status.
    """
    bot.debug("GET %s" % url)

    if DISABLE_SSL_CHECK is True:
        bot.warning("Verify of certificates disabled! ::TESTING USE ONLY::")

    # Ensure headers are present, update if not
    response = requests.get(url, headers=headers, verify=not DISABLE_SSL_CHECK, stream=True)

    # Deal with token if necessary: refresh once, then recurse with retry=False
    if response.status_code == 401 and retry is True:
        headers = update_token(response, headers)
        return stream(url, headers, stream_to, retry=False)

    if response.status_code == 200:

        # Keep user updated with Progress Bar (only when length is known)
        content_size = None
        if "Content-Length" in response.headers:
            progress = 0
            content_size = int(response.headers["Content-Length"])
            bot.show_progress(progress, content_size, length=35)

        # Download in 1 MiB chunks
        chunk_size = 1 << 20
        with open(stream_to, "wb") as filey:
            for chunk in response.iter_content(chunk_size=chunk_size):
                filey.write(chunk)
                if content_size is not None:
                    progress += chunk_size
                    bot.show_progress(
                        iteration=progress,
                        total=content_size,
                        length=35,
                        carriage_return=False,
                    )

        # Newline to finish download
        sys.stdout.write("\n")
        return stream_to

    bot.exit("Problem with stream, response %s" % response.status_code)
def stream(url, headers, stream_to=None, retry=True):
    '''stream is a get that will stream to file_name. Since this is a worker
       task, it differs from the client provided version in that it requires
       headers.

       Parameters
       ==========
       url: the url to issue the GET against
       headers: headers dictionary for the request (required here)
       stream_to: the file path to write downloaded chunks to
       retry: if True, allow one token refresh + retry on a 401/403 response
    '''
    bot.debug("GET %s" % url)

    if DISABLE_SSL_CHECK is True:
        bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::')

    # Ensure headers are present, update if not
    response = requests.get(url, headers=headers, verify=not DISABLE_SSL_CHECK, stream=True)

    # If we get permissions error, one more try with updated token.
    # Bug fix: honor the retry flag here — previously a persistently failing
    # token refresh recursed without bound.
    if response.status_code in [401, 403] and retry is True:
        headers = update_token(headers)
        return stream(url, headers, stream_to, retry=False)

    # Successful Response
    elif response.status_code == 200:

        # Keep user updated with Progress Bar (only when length is known)
        content_size = None
        if 'Content-Length' in response.headers:
            progress = 0
            content_size = int(response.headers['Content-Length'])
            bot.show_progress(progress, content_size, length=35)

        # Download in 1 MiB chunks
        chunk_size = 1 << 20
        with open(stream_to, 'wb') as filey:
            for chunk in response.iter_content(chunk_size=chunk_size):
                filey.write(chunk)
                if content_size is not None:
                    progress += chunk_size
                    bot.show_progress(iteration=progress,
                                      total=content_size,
                                      length=35,
                                      carriage_return=False)

        # Newline to finish download
        sys.stdout.write('\n')
        return stream_to

    bot.error("Problem with stream, response %s" % (response.status_code))
    sys.exit(1)
def cp(self, move_to, image_name=None, container=None, command="copy"):
    """_cp is the shared function between mv (move) and rename, and performs
       the move, and returns the updated container

       Parameters
       ==========
       image_name: an image_uri to look up a container in the database
       container: the container object to move (must have a container.image)
       move_to: the full path to move it to
       command: label used in log messages ("copy", "move", or "rename")

       Exits via bot.exit on any validation or move failure.
    """
    # At least one way to identify the container is required
    if not container and not image_name:
        bot.exit("A container or image_name must be provided to %s" % command)

    # If a container isn't provided, look for it from image_uri
    if not container:
        container = self.get(image_name, quiet=True)

    # A remote-only record has no local image file
    image = container.image or ""

    if os.path.exists(image):
        filedir = os.path.dirname(move_to)

        # If the two are the same, doesn't make sense
        if move_to == image:
            bot.exit("%s is already the name." % image)

        # Ensure directory exists
        if not os.path.exists(filedir):
            bot.exit("%s does not exist. Ensure exists first." % filedir)

        # Ensure writable for user
        if not os.access(filedir, os.W_OK):
            bot.exit("%s is not writable" % filedir)

        original = os.path.basename(image)
        try:
            # Move the file, then record the new path in the database
            shutil.move(image, move_to)
            container.image = move_to
            self.session.commit()
            bot.info("[%s] %s => %s" % (command, original, move_to))
            return container
        except:
            bot.exit("Cannot %s %s to %s" % (command, original, move_to))

    # Reached only when no local image file exists
    bot.warning("""Not found! Please pull %s and then %s to the appropriate location.""" % (container.uri, command))
def get_uri(image, validate=True):
    """get the uri for an image, if within acceptable

       Parameters
       ==========
       image: the image uri, in the format <uri>://<registry>/<namespace>:<tag>
       validate: if True, check if uri is in list of supported (default True)

       Returns the normalized uri prefix, or None when missing/unrecognized.
    """
    # Ensure we have a string
    image = image or ""

    # Find uri prefix, including ://
    regexp = re.compile("^.+://")
    match = regexp.match(image)

    uri = None
    if match is not None:
        uri = match.group().lower().replace("_", "-").replace("://", "")

        # Allow for Singularity compatability.
        # Bug fix: this check previously ran with uri == None (no prefix),
        # raising TypeError on "shub" in None.
        if "shub" in uri:
            uri = "hub"

    accepted_uris = [
        "aws",
        "docker",
        "http",
        "https",  # Must be allowed for pull
        "dropbox",
        "gitlab",
        "globus",
        "google-build",
        "google-storage",
        "google-drive",
        "hub",
        "nvidia",
        "registry",
        "s3",
        "swift",
    ]

    # None is never in accepted_uris, so a missing prefix also warns here
    if validate is True and uri not in accepted_uris:
        bot.warning("%s is not a recognized uri." % uri)
        uri = None

    return uri
def search_all(self, quiet=False):
    """a "show all" search that doesn't require a query

       Lists every object in the S3 bucket as [key, date, size] rows.

       Parameters
       ==========
       quiet: if quiet is True, we only are using the function to return
              rows of results.

       Exits (sys.exit(1)) when the bucket holds no objects.
    """
    results = []

    for obj in self.bucket.objects.all():
        # Fetch the full object to access its metadata
        subsrc = obj.Object()

        # Metadata bug will capitalize all fields, workaround is to lowercase
        # https://github.com/boto/boto3/issues/1709
        try:
            metadata = dict((k.lower(), v) for k, v in subsrc.metadata.items())
        except botocore.exceptions.ClientError as e:
            # Skip objects whose metadata cannot be read
            bot.warning("Could not get metadata for {}: {}".format(
                subsrc.key, str(e)))
            continue

        size = ""

        # MM-DD-YYYY
        datestr = "%s-%s-%s" % (
            obj.last_modified.month,
            obj.last_modified.day,
            obj.last_modified.year,
        )

        # Size is only available when the upload recorded a sizemb field
        if "sizemb" in metadata:
            size = "%sMB" % metadata["sizemb"]

        results.append([obj.key, datestr, size])

    if len(results) == 0:
        bot.info("No container collections found.")
        sys.exit(1)

    if not quiet:
        bot.info("Containers")
        bot.table(results)

    return results
def get_build_template(name="singularity-cloudbuild-local.json"):
    """get default build template.

       Parameters
       ==========
       name: singularity-cloudbuild-local.json (default) that will build a
             container interactively, waiting for the build to finish.
             singularity-cloudbuild-git.json build a recipe from a GitHub repository.
    """
    base = get_installdir()

    # Resolve the template name to its full path under the install tree
    name = "%s/main/templates/build/%s" % (base, name)

    if not os.path.exists(name):
        bot.warning("Template %s not found." % name)
        return None

    bot.debug("Found template %s" % name)
    return read_json(name)
def _update_secrets(self):
    """update secrets will take a secrets credential file
       either located at .sregistry or the environment variable
       SREGISTRY_CLIENT_SECRETS and update the current client secrets
       as well as the associated API base. For the case of using Docker Hub,
       if we find a .docker secrets file, we update from there.
    """
    # If the user has defined secrets, use them
    credentials = self._get_setting("SREGISTRY_DOCKERHUB_SECRETS")

    # First try for SINGULARITY exported, then try sregistry
    username = self._get_setting("SINGULARITY_DOCKER_USERNAME")
    password = self._get_setting("SINGULARITY_DOCKER_PASSWORD")
    username = self._get_setting("SREGISTRY_DOCKERHUB_USERNAME", username)
    password = self._get_setting("SREGISTRY_DOCKERHUB_PASSWORD", password)

    # Option 1: the user exports username and password
    auth = None
    if username is not None and password is not None:
        auth = basic_auth_header(username, password)
        self.headers.update(auth)

    # Option 2: look in .docker config file
    if credentials is not None and auth is None:
        if os.path.exists(credentials):
            credentials = read_json(credentials)

            # Find a matching auth in .docker config
            if "auths" in credentials:
                for auths, params in credentials["auths"].items():
                    if self._base in auths:
                        if "auth" in params:
                            auth = "Basic %s" % params["auth"]
                            self.headers["Authorization"] = auth

            # Also update headers
            if "HttpHeaders" in credentials:
                for key, value in credentials["HttpHeaders"].items():
                    self.headers[key] = value

        else:
            # Bug fix: the %s placeholder previously had no argument,
            # so the literal "%s" was printed instead of the path
            bot.warning("Credentials file set to %s, but does not exist." % credentials)
def rename(self, image_name, path):
    '''rename performs a move, but ensures the path is maintained in storage

       Parameters
       ==========
       image_name: the image name (uri) to rename to.
       path: the name to rename (basename is taken)
    '''
    container = self.get(image_name, quiet=True)

    if container is not None:
        if container.image is not None:

            # The original directory for the container stays the same
            dirname = os.path.dirname(container.image)

            # But we derive a new filename and uri
            names = parse_image_name(remove_uri(path))
            storage = os.path.join(self.storage, os.path.dirname(names['storage']))

            # This is the collection folder.
            # Bug fix: makedirs handles a nested collection path, where
            # mkdir would raise FileNotFoundError.
            if not os.path.exists(storage):
                os.makedirs(storage)

            # Here we get the new full path, rename the container file
            fullpath = os.path.abspath(os.path.join(dirname, names['storage']))
            container = self.cp(move_to=fullpath,
                                container=container,
                                command="rename")

            # On successful rename of file, update the uri
            if container is not None:
                container.uri = names['uri']
                self.session.commit()
                return container

    bot.warning('%s not found' % (image_name))
def download(url, file_name, headers=None, show_progress=True):
    """stream to a temporary file, rename on successful completion

       Parameters
       ==========
       file_name: the file name to stream to
       url: the url to stream from
       headers: additional headers to add
    """
    # Stage the download under a temporary name beside the target,
    # then rename into place once the stream completes
    handle, staging = tempfile.mkstemp(prefix=("%s.tmp." % file_name))
    os.close(handle)

    if DISABLE_SSL_CHECK is True:
        bot.warning("Verify of certificates disabled! ::TESTING USE ONLY::")

    stream(url, headers=headers, stream_to=staging)
    shutil.move(staging, file_name)
    return file_name
def get_build_template(name=None, manager="apt"):
    """get a particular build template, by default we return templates
       that are based on package managers.

       Parameters
       ==========
       name: the full path of the template file to use.
       manager: the package manager to use in the template (yum or apt)
    """
    base = get_installdir()

    # Without an explicit name, use the package-manager builder script
    if name is None:
        name = "%s/main/templates/build/singularity-builder-%s.sh" % (base, manager)

    if not os.path.exists(name):
        bot.warning("Template %s not found." % name)
        return None

    bot.debug("Found template %s" % name)
    return "".join(read_file(name))
def rm(self, image_name):
    '''Delete an image record and file from the database.

       Parameters
       ==========
       image_name: the image uri to look up and remove

       Returns the removed image path, or None when the record is missing
       or its file is already gone from the file system.
    '''
    container = self.get(image_name)

    # Bug fix: previously the final log line ran only when container was
    # None, referencing undefined variables (NameError). Return early instead.
    if container is None:
        return None

    name = container.uri or container.get_uri()
    image = container.image

    # Remove the database record first
    self.session.delete(container)
    self.session.commit()

    if image is not None:
        if os.path.exists(image):
            os.remove(image)
        else:
            bot.warning(
                "image file {} does not exist on the file system!".format(
                    image))
            return None

    bot.info("[rm] %s" % name)
    return image
def mv(self, image_name, path):
    '''Move an image from it's current location to a new path.
       Removing the image from organized storage is not the recommended approach
       however is still a function wanted by some.

       Parameters
       ==========
       image_name: the parsed image name.
       path: the location to move the image to
    '''
    container = self.get(image_name, quiet=True)

    if container is not None:
        name = container.uri or container.get_uri()
        image = container.image or ''

        # Only continue if image file exists
        if os.path.exists(image):

            if os.path.isdir(path):
                # Destination is a directory: keep the image's filename
                target_dir = os.path.abspath(path)
                target_name = os.path.basename(image)
            else:
                # Destination is a file: use its name and parent directory
                target_dir = os.path.dirname(path)
                target_name = os.path.basename(path)

            # If directory is empty, assume $PWD
            if target_dir == '':
                target_dir = os.getcwd()

            # Copy to the fullpath from the storage
            destination = os.path.abspath(os.path.join(target_dir, target_name))
            return self.cp(move_to=destination,
                           container=container,
                           command="move")

    bot.warning('%s not found' % (image_name))
def rename(self, image_name, path):
    '''rename performs a move, but ensures the path is maintained in storage

       Parameters
       ==========
       image_name: the image name (uri) to rename to.
       path: the name to rename (basename is taken)

       Returns the updated container on success, None otherwise.
    '''
    container = self.get(image_name, quiet=True)

    if container:
        if container.image:

            # Derive a new filename and url in storage
            names = parse_image_name(remove_uri(path), version=container.version)
            storage = self._get_storage_name(names)
            dirname = os.path.dirname(storage)

            # This is the collection folder
            if not os.path.exists(dirname):
                os.makedirs(dirname)

            # Move the file; cp returns None on failure
            container = self.cp(move_to=storage,
                                container=container,
                                command="rename")

            # On successful rename of file, update the uri
            if container is not None:

                # Create the collection if doesn't exist
                collection = self.get_or_create_collection(names['collection'])
                self.session.commit()

                # Then update the container
                container = update_container_metadata(container, collection, names)
                self.session.commit()
                return container

    # Reached when the record is missing, has no file, or cp failed
    bot.warning('%s not found' % image_name)
def rename(self, image_name, path):
    '''rename performs a move, but ensures the path is maintained in storage

       Parameters
       ==========
       image_name: the parsed image name.
       path: the name to rename (basename is taken)
    '''
    container = self.get(image_name, quiet=True)

    # Nothing to do without a record that has a local image file
    if container is None or container.image is None:
        bot.warning('%s not found' % (image_name))
        return None

    # Keep the container in its current directory, swapping only the basename
    current_dir = os.path.dirname(container.image)
    target = os.path.abspath(
        os.path.join(current_dir, os.path.basename(path)))

    return self.cp(move_to=target, container=container, command="rename")
def get_endpoints(self, query=None):
    '''use a transfer client to get endpoints. If a search term is included,
       we use it to search a scope of "all" in addition to personal and shared
       endpoints. Endpoints are organized by type (my-endpoints, shared-with-me,
       optionally all) and then id.

       Parameters
       ==========
       query: an endpoint search term to add to a scope "all" search. If not
              defined, no searches are done with "all"
    '''
    self.endpoints = {}

    # A transfer client is required before we can search
    if not hasattr(self, 'transfer_client'):
        self._init_transfer_client()

    # We assume the user wants to always see owned and shared
    scopes = {'my-endpoints': None, 'shared-with-me': None}

    # If the user provides query, add to search
    if query is not None:
        scopes.update({'all': query})

    for scope, term in scopes.items():
        found = {}
        for ep in self.transfer_client.endpoint_search(term, filter_scope=scope):
            data = ep.__dict__['_data']
            found[data['id']] = data
        self.endpoints[scope] = found

    # Alert the user not possible without personal lookup
    if len(self.endpoints['my-endpoints']) == 0:
        bot.warning('No personal endpoint found for local transfer.')
        bot.warning('https://www.globus.org/globus-connect-personal')

    return self.endpoints
def push(self, path, name, tag=None):
    '''push an image to Singularity Registry

       Parameters
       ==========
       path: the local path of the image file to upload
       name: the image uri (collection/name) to push to
       tag: optional tag; parsed from the name when not provided
    '''
    path = os.path.abspath(path)
    image = os.path.basename(path)
    bot.debug("PUSH %s" % path)

    if not os.path.exists(path):
        bot.error('%s does not exist.' % path)
        sys.exit(1)

    # Interaction with a registry requires secrets
    self.require_secrets()

    # Extract the metadata
    names = parse_image_name(remove_uri(name), tag=tag)
    metadata = self.get_metadata(path, names=names)

    # Try to add the size (in MB, via a 20-bit shift of the byte count);
    # best-effort, missing keys simply skip the label
    try:
        image_size = os.path.getsize(path) >> 20
        if metadata['data']['attributes']['labels'] is None:
            metadata['data']['attributes']['labels'] = {'SREGISTRY_SIZE_MB': image_size}
        else:
            metadata['data']['attributes']['labels']['SREGISTRY_SIZE_MB'] = image_size
    except:
        bot.warning("Cannot load metadata to add calculated size.")
        pass

    # Record the bootstrap source when a definition file is present
    if "deffile" in metadata['data']['attributes']:
        if metadata['data']['attributes']['deffile'] is not None:
            fromimage = parse_header(metadata['data']['attributes']['deffile'],
                                     header="from",
                                     remove_header=True)
            metadata['data']['attributes']['labels']['SREGISTRY_FROM'] = fromimage
            bot.debug("%s was built from a definition file." % image)

    # Prepare push request with multipart encoder
    url = '%s/push/' % self.base
    upload_to = os.path.basename(names['storage'])

    # Authorize the push, then stream the file with an upload progress bar
    SREGISTRY_EVENT = self.authorize(request_type="push", names=names)
    encoder = MultipartEncoder(fields={'collection': names['collection'],
                                       'name': names['image'],
                                       'metadata': json.dumps(metadata),
                                       'tag': names['tag'],
                                       'datafile': (upload_to, open(path, 'rb'), 'text/plain')})

    progress_callback = create_callback(encoder)
    monitor = MultipartEncoderMonitor(encoder, progress_callback)
    headers = {'Content-Type': monitor.content_type,
               'Authorization': SREGISTRY_EVENT}

    try:
        r = requests.post(url, data=monitor, headers=headers)
        message = self._read_response(r)
        print('\n[Return status {0} {1}]'.format(r.status_code, message))
    except KeyboardInterrupt:
        print('\nUpload cancelled.')
def get_client(image=None, quiet=False, **kwargs):
    '''get the correct client depending on the driver of interest. The
       selected client can be chosen based on the environment variable
       SREGISTRY_CLIENT, and later changed based on the image uri parsed
       If there is no preference, the default is to load the singularity
       hub client.

       Parameters
       ==========
       image: if provided, we derive the correct client based on the uri
       of an image. If not provided, we default to environment, then hub.
       quiet: if True, suppress most output about the client (e.g. speak)

       Returns an instantiated client with database functions attached.
    '''
    from sregistry.defaults import SREGISTRY_CLIENT

    # Give the user a warning:
    if not check_install():
        bot.warning('Singularity is not installed, function might be limited.')

    # If an image is provided, use to determine client
    client_name = get_uri(image)
    if client_name is not None:
        SREGISTRY_CLIENT = client_name

    # If no obvious credential provided, we can use SREGISTRY_CLIENT
    if SREGISTRY_CLIENT == 'aws':
        from .aws import Client
    elif SREGISTRY_CLIENT == 'docker':
        from .docker import Client
    elif SREGISTRY_CLIENT == 'dropbox':
        from .dropbox import Client
    elif SREGISTRY_CLIENT == 'gitlab':
        from .gitlab import Client
    elif SREGISTRY_CLIENT == 'globus':
        from .globus import Client
    elif SREGISTRY_CLIENT == 'nvidia':
        from .nvidia import Client
    elif SREGISTRY_CLIENT == 'hub':
        from .hub import Client
    elif SREGISTRY_CLIENT == 'google-drive':
        from .google_drive import Client
    # google-compute intentionally shares the google-storage client
    elif SREGISTRY_CLIENT == 'google-compute':
        from .google_storage import Client
    elif SREGISTRY_CLIENT == 'google-storage':
        from .google_storage import Client
    elif SREGISTRY_CLIENT == 'google-build':
        from .google_build import Client
    elif SREGISTRY_CLIENT == 'registry':
        from .registry import Client
    elif SREGISTRY_CLIENT == 's3':
        from .s3 import Client
    elif SREGISTRY_CLIENT == 'swift':
        from .swift import Client
    else:
        from .hub import Client

    Client.client_name = SREGISTRY_CLIENT
    Client.quiet = quiet

    # Create credentials cache, if it doesn't exist
    Client._credential_cache = get_credential_cache()

    # Add the database, if wanted
    # NOTE(review): SREGISTRY_DATABASE is presumably imported at module
    # level from sregistry.defaults — confirm against the file header
    if SREGISTRY_DATABASE is not None:

        # These are global functions used across modules
        from sregistry.database import (
            init_db, add, cp, get, mv, rm, images,
            inspect, rename, get_container,
            get_collection, get_or_create_collection
        )

        # Actions
        Client._init_db = init_db
        Client.add = add
        Client.cp = cp
        Client.get = get
        Client.inspect = inspect
        Client.mv = mv
        Client.rename = rename
        Client.rm = rm
        Client.images = images

        # Collections
        Client.get_or_create_collection = get_or_create_collection
        Client.get_container = get_container
        Client.get_collection = get_collection

    # If no database, import dummy functions that return the equivalent
    else:
        from sregistry.database import (add, init_db)
        Client.add = add
        Client._init_db = init_db

    # Initialize the database
    cli = Client()
    if hasattr(Client, '_init_db'):
        cli._init_db(SREGISTRY_DATABASE)
    return cli
######################### # Database and Storage ######################### # Database folder, inside where we put storage and credentials folder _database = os.path.join(USERHOME, ".singularity") SREGISTRY_DATABASE = None SREGISTRY_STORAGE = None SREGISTRY_BASE = None # If sqlalchemy isn't installed, user doesn't have support for database try: from sqlalchemy import or_ except ImportError: bot.warning('Database disabled. Install sqlalchemy for full functionality') DISABLE_DATABASE = True # If the user didn't disable caching or the database if not DISABLE_CACHE and DISABLE_DATABASE is False: # First priority goes to database path set in environment, # and if it's not set, default to home folder SREGISTRY_BASE = getenv("SREGISTRY_DATABASE", _database) # Storage defaults to a subfolder of the database, shub _storage = os.path.join(_database, "shub") SREGISTRY_STORAGE = getenv("SREGISTRY_STORAGE", _storage) SREGISTRY_DATABASE = "%s/sregistry.db" % SREGISTRY_BASE #########################