def _build(self, parsed_args): """Build container """ if self.build: try: self._dockerpy_test() tag = self._repo_tag() dockerfile = self._dockerfile() print_stderr('Building {}'.format(tag)) start_time = milliseconds() # TODO - build_args, labels, nocache, quiet, forcerm result = self.dockerpy.images.build(path=self.working_dir, tag=tag, dockerfile=dockerfile, pull=self.pull, rm=True) print_stderr('Finished ({} msec)'.format(milliseconds() - start_time)) for log_line in result[1]: txt = log_line.get('stream', '').strip() if txt is not None and txt != '': self.messages.append( ('build', log_line.get('stream', None))) return True except Exception as err: if self.ignore_errors: self.messages.append(('push', str(err))) return False else: raise
def _push(self, parsed_args): """Push built container """ if self.push: try: self._dockerpy_test() tag = self._repo_tag() print_stderr('Pushing {}'.format(tag)) start_time = milliseconds() # TODO - auth_config for log_line in self.dockerpy.images.push(tag, stream=True, decode=True): text_line = log_line.get('status', '').strip() if text_line not in ('Preparing', 'Waiting', 'Layer already exists', 'Pushing', 'Pushed', 'denied'): self.messages.append(('push', text_line)) print_stderr('Finished ({} msec)'.format(milliseconds() - start_time)) return True except Exception as err: if self.ignore_errors: self.messages.append(('push', str(err))) return False else: raise
def _backup(self, parsed_args): """Backup existing deployed assets """ if self.backup: dep_sys = self.document['deploymentSystem'] dep_path = self.document['deploymentPath'] backup_dep_path = dep_path + '.' + str(seconds()) print_stderr('Backing up agave://{}/{}'.format(dep_sys, dep_path)) start_time = milliseconds() self.messages.append( ('backup', 'src: agave://{}/{}'.format(dep_sys, dep_path))) self.messages.append( ('backup', 'dst: agave://{}/{}'.format(dep_sys, backup_dep_path))) try: manage.move(dep_path, system_id=dep_sys, destination=backup_dep_path, agave=self.tapis_client) print_stderr('Finished ({} msec)'.format(milliseconds() - start_time)) return True except Exception as exc: self.messages.append(('backup', str(exc))) print_stderr('Failed ({} msec)'.format(milliseconds() - start_time)) return False return True
def take_action(self, parsed_args):
    """Delete the file or folder named by an agave:// URI.

    Returns a (headers, data) tuple for the output formatter. Structured
    formatters (json/yaml) receive full per-file detail; tabular
    formatters receive counts only. Failures are recorded as skipped
    entries rather than raised.
    """
    parsed_args = self.preprocess_args(parsed_args)
    self.requests_client.setup(API_NAME, SERVICE_VERSION)
    self.update_payload(parsed_args)
    storage_system, file_path = self.parse_url(parsed_args.agave_uri)
    headers = ['deleted', 'skipped', 'warnings', 'elapsed_msec']
    deleted = []
    skipped = []
    warnings = []
    elapsed = 0
    try:
        start_time = milliseconds()
        delete(file_path, storage_system, agave=self.tapis_client)
        deleted.append(file_path)
        elapsed = milliseconds() - start_time
        if parsed_args.progress:
            print_stderr('Deleted {0}'.format(parsed_args.agave_uri))
    except Exception as exc:
        # Record the failure and continue rather than aborting
        skipped.append(file_path)
        warnings.append(exc)
    if parsed_args.formatter in ('json', 'yaml'):
        data = [deleted, skipped, [str(w) for w in warnings], elapsed]
    else:
        data = [len(deleted), len(skipped), len(warnings), elapsed]
    return (tuple(headers), tuple(data))
def _upload(self, parsed_args): """Upload asset bundle """ if self.upload: dep_sys = self.document['deploymentSystem'] dep_path = self.document['deploymentPath'] dep_path_parent = os.path.dirname(dep_path) dep_path_temp = os.path.join(dep_path_parent, self._bundle()) print_stderr('Uploading app bundle to agave://{}/{}'.format( dep_sys, dep_path)) start_time = milliseconds() try: # TODO - incorporate working directory manage.makedirs(dep_path_parent, system_id=dep_sys, permissive=True, agave=self.tapis_client) manage.delete(dep_path, system_id=dep_sys, permissive=True, agave=self.tapis_client) uploaded, skipped, errors, ul_bytes, ec_download = upload.upload( self._bundle(), system_id=dep_sys, destination=dep_path_parent, progress=True, agave=self.tapis_client) manage.move(dep_path_temp, system_id=dep_sys, destination=dep_path, agave=self.tapis_client) # Rename dep_path_parent/bundle to dep_path print_stderr('Finished ({} msec)'.format(milliseconds() - start_time)) for u in uploaded: self.messages.append(('upload', u)) for e in errors: self.messages.append(('upload', e)) return True except Exception as exc: self.messages.append(('upload', str(exc))) print_stderr('Failed ({} msec)'.format(milliseconds() - start_time)) return False return True
def take_action(self, parsed_args):
    """Interactively (or from flags) build a Tapis job definition.

    Resolves the app and its execution system, reconciles queue and
    resource requests against system limits, collects notification,
    archiving, input, and parameter settings, then writes the resulting
    job JSON to stdout or to --output and exits.

    Raises ValueError for an unknown queue, a malformed run time, a
    non-URI input value, or an un-castable parameter value.
    """
    import copy  # local import: only needed to clone the job template
    parsed_args = self.preprocess_args(parsed_args)
    self.requests_client.setup(API_NAME, SERVICE_VERSION)
    app_id = AppIdentifier.get_identifier(self, parsed_args)
    interactive = parsed_args.interactive
    app_def = self.tapis_client.apps.get(appId=app_id)
    exc_def = self.tapis_client.systems.get(
        systemId=app_def.get('executionSystem'))
    # Interpret parsed_args in light of the app and exec system definitions
    # 1. allow --queue to over-ride app['defaultQueue']
    exc_queue_names = [q['name'] for q in exc_def['queues']]
    queue_name = getattr(parsed_args, 'queue_name', None)
    if queue_name is None:
        queue_name = app_def.get('defaultQueue', None)
    # Get queue details for app execution system
    #
    # 1. Select either named queue -or- default queue
    # 2. ValueError if queue named and not found
    sys_queue = None
    if queue_name is not None:
        for q in exc_def['queues']:
            if q['name'] == queue_name:
                sys_queue = q
                break
    else:
        for q in exc_def['queues']:
            if q['default'] is True:
                sys_queue = q
                queue_name = sys_queue['name']
                break
    if sys_queue is None:
        raise ValueError(
            'Job queue "{0}" does not exist on system "{1}"'.format(
                queue_name, exc_def['id']))
    # TODO - Rewire so that we can check the queue name after prompting
    if interactive:
        print('Job configuration')
        print('-----------------')
        queue_name = prompt('Queue ({0})'.format(
            '|'.join(exc_queue_names)),
                            queue_name,
                            allow_empty=False)
    # Continue interpreting parsed_args
    #
    # Normally, we could just chain the getattr on parsed_args
    # and the successive chained get() to the app definition and
    # preferred system queue, but Tapis apps will actually return
    # a 'null' value for app.PROPERTY, which translates
    # to a value of None for job.PROPERTY.
    mem_per_node = getattr(parsed_args, 'memory_per_node',
                           app_def.get('defaultMemoryPerNode', None))
    if mem_per_node is None:
        mem_per_node = sys_queue['maxMemoryPerNode']
    if isinstance(mem_per_node, int):
        # Tapis expects a string with units, not a bare integer
        mem_per_node = str(mem_per_node) + 'GB'
    cpu_per_node = getattr(parsed_args, 'cpu_per_node',
                           app_def.get('defaultProcessorsPerNode', None))
    if cpu_per_node is None:
        cpu_per_node = sys_queue['maxProcessorsPerNode']
    node_count = getattr(parsed_args, 'node_count',
                         app_def.get('defaultNodeCount', None))
    if node_count is None:
        # There is no default node count in a system queue definition
        node_count = 1
    if interactive:
        node_count = int(prompt('Nodes', node_count, allow_empty=False))
    # TODO - Validate that max_run_time is LTE sys_queue.maxRequestedTime
    max_run_time = getattr(parsed_args, 'max_run_time',
                           app_def.get('defaultMaxRunTime', None))
    if max_run_time is None:
        max_run_time = DEFAULT_JOB_RUNTIME
    if interactive:
        max_run_time = prompt('Run Time (max {0})'.format(
            sys_queue['maxRequestedTime']),
                              max_run_time,
                              allow_empty=False)
    # validate max_run_time
    if not re.search('[0-9][0-9]:[0-9][0-9]:[0-9][0-9]', max_run_time):
        raise ValueError(
            '{0} is not a valid job duration. Format must be HH:MM:SS'.
            format(max_run_time))
    # Safen provided job name or synthesize one if not provided
    job_name = getattr(parsed_args, 'job_name', None)
    if job_name is not None:
        job_name = slugify(job_name, separator='_')
    else:
        job_name = '{0}-job-{1}'.format(app_def['name'], milliseconds())
    if interactive:
        job_name = prompt('Job Name', job_name, allow_empty=False)
    # Build out the job definition
    # BUGFIX: deep-copy the template; assigning it directly aliased the
    # module-level dict, so notifications appended here accumulated
    # across invocations and leaked into later jobs
    job = copy.deepcopy(JOB_TEMPLATE)
    # Populate notifications config
    notify_job = not (parsed_args.no_notifications)
    if interactive:
        notify_job = prompt_boolean('Send status notifications', notify_job)
    if notify_job is True:
        # Best-effort: fall back silently if no email/URI can be resolved
        try:
            if parsed_args.notifications_uri is not None:
                nuri = parsed_args.notifications_uri
            else:
                nuri = self.tapis_client.profiles.get()['email']
            if interactive:
                nuri = prompt('Status notifications URI',
                              nuri,
                              allow_empty=False)
            notification = {'event': '*', 'persistent': True, 'url': nuri}
            job['notifications'].append(notification)
        except Exception:
            pass
    # Populate archiving config
    archive_job = not (parsed_args.no_archive)
    if interactive:
        archive_job = prompt_boolean('Archive job outputs', archive_job)
    job['archive'] = archive_job
    if archive_job:
        aui = getattr(parsed_args, 'archive_uri', None)
        if interactive:
            aui = prompt('Archive destination (Agave URI or leave empty)',
                         aui,
                         allow_empty=True)
        if aui == '':
            aui = None
        if aui is not None:
            # BUGFIX: parse the resolved value (which may have been
            # entered at the prompt), not parsed_args.archive_uri, which
            # may be missing or stale
            asys, apath = parse_uri(aui)
            job['archiveSystem'] = asys
            job['archivePath'] = apath
    # Populate name and resource requirements
    job['name'] = job_name
    job['appId'] = app_id
    job['batchQueue'] = queue_name
    job['maxRunTime'] = max_run_time
    job['nodeCount'] = node_count
    job['processorsPerNode'] = cpu_per_node
    job['memoryPerNode'] = mem_per_node
    # Populate Inputs
    if interactive:
        print('Inputs')
        print('------')
    for inp in app_def.get('inputs', {}):
        if inp['value']['visible']:
            if inp['value']['required'] or parsed_args.all_fields is True:
                job['inputs'][inp['id']] = inp['value'].get('default', '')
                if interactive:
                    inp_label = inp['details']['label']
                    if inp_label is None or inp_label == '':
                        inp_label = inp['id']
                    resp = prompt(inp_label,
                                  job['inputs'][inp['id']],
                                  allow_empty=False)
                    # Validate URI
                    if re.search('^(agave://|http://|https://|ftp://)',
                                 resp):
                        job['inputs'][inp['id']] = resp
                    else:
                        raise ValueError(
                            'Input value {0} must be a URI'.format(resp))
    # Populate Parameters
    #
    # The behavior implemented here is different than the original bash
    # jobs-template in that we make no attempt to fake values for
    # parameters that don't have a default value
    if interactive:
        print('Parameters')
        print('----------')
    for prm in app_def.get('parameters', {}):
        if prm['value']['visible']:
            if prm['value']['required'] or parsed_args.all_fields is True:
                job['parameters'][prm['id']] = prm['value'].get(
                    'default', '')
                if job['parameters'][prm['id']] is None:
                    job['parameters'][prm['id']] = ''
                if interactive:
                    prm_label = prm['details']['label']
                    if prm_label is None or prm_label == '':
                        prm_label = prm['id']
                    # Typecast and validate response
                    resp = prompt(prm_label,
                                  job['parameters'][prm['id']],
                                  allow_empty=False)
                    try:
                        if prm['value']['type'] in ('string',
                                                    'enumeration'):
                            resp = str(resp)
                        # BUGFIX: ('number') is a plain string, so the
                        # membership test was a substring check; a
                        # one-element tuple gives exact matching
                        elif prm['value']['type'] in ('number',):
                            resp = num(resp)
                        elif prm['value']['type'] in ('bool', 'flag'):
                            resp = parse_boolean(resp)
                    except Exception:
                        raise ValueError(
                            'Unable to typecast {0} to type {1}'.format(
                                resp, prm['value']['type']))
                    job['parameters'][prm['id']] = resp
    # Raw output
    outfile_dest = parsed_args.output
    if interactive:
        outfile_dest = prompt('Output destination',
                              outfile_dest,
                              allow_empty=True)
    if outfile_dest == '':
        json.dump(job, fp=sys.stdout, indent=2)
    else:
        # BUGFIX: close the output file (the handle was previously leaked)
        with open(outfile_dest, 'w') as of:
            json.dump(job, fp=of, indent=2)
    sys.exit(0)
def _upload(self, parsed_args): """Upload asset bundle """ if self.upload: dep_sys = self.document['deploymentSystem'] dep_path = self.document['deploymentPath'] dep_path_parent = os.path.dirname(dep_path) # need the bundle basename for the upload/move workflow to work bundle_basename = os.path.basename(os.path.normpath( self._bundle())) # add date to make tmpdir unique from bundle and deploymentPath dep_path_temp = os.path.join(dep_path_parent, bundle_basename) \ + datetime.now().strftime("-%Y-%m-%d") print_stderr( 'Uploading app asset directory "{0}" to agave://{1}/{2}'. format(self._bundle(), dep_sys, dep_path)) start_time = milliseconds() try: # First, check existence of bundle. No point in taking other action # if it does not exist if not os.path.exists(self._bundle()): raise FileNotFoundError( 'Unable to locate asset directory "{}"'.format( self._bundle())) try: # need relative destination here because # agavepy permissions check will fail on '/' # for public systems manage.makedirs(os.path.basename(dep_path_temp), system_id=dep_sys, permissive=True, destination=dep_path_parent, agave=self.tapis_client) # clear out destination directory manage.delete(dep_path, system_id=dep_sys, permissive=True, agave=self.tapis_client) except Exception as err: self.messages.append(('upload', str(err))) # upload bundle to tmp dir uploaded, skipped, errors, ul_bytes, ec_download = upload.upload( self._bundle(), system_id=dep_sys, destination=dep_path_temp, progress=True, agave=self.tapis_client) # move tmp dir bundle to the destination dir manage.move(os.path.join(dep_path_temp, bundle_basename), system_id=dep_sys, destination=dep_path, agave=self.tapis_client) # delete tmp dir manage.delete(dep_path_temp, system_id=dep_sys, permissive=True, agave=self.tapis_client) print_stderr('Finished ({} msec)'.format(milliseconds() - start_time)) for u in uploaded: self.messages.append(('upload', u)) for e in errors: self.messages.append(('upload', e)) if len(errors) > 0: if 
self.ignore_errors is False: raise Exception('Upload failures: {}'.format( errors.join(';'))) return True except Exception as exc: if self.ignore_errors: self.messages.append(('upload', str(exc))) print_stderr('Failed ({} msec)'.format(milliseconds() - start_time)) return False else: raise return True