def get_pattern(template_dir):
    pattern_choices = []
    print ''
    print fill('The following common ' + BOLD() + 'execution patterns' + ENDC() + ' are currently available for your programming language:')

    pattern_choices.append('basic')
    print ' ' + BOLD() + 'basic' + ENDC()
    print fill('Your app will run on a single machine from beginning to end.', initial_indent=' ', subsequent_indent=' ')

    if os.path.isdir(os.path.join(template_dir, 'parallelized')):
        pattern_choices.append('parallelized')
        print ' ' + BOLD() + 'parallelized' + ENDC()
        print fill('Your app will subdivide a large chunk of work into multiple pieces that can be processed in parallel and independently of each other, followed by a final stage that will merge and process the results as necessary.', initial_indent=' ', subsequent_indent=' ')

    if os.path.isdir(os.path.join(template_dir, 'scatter-process-gather')):
        pattern_choices.append('scatter-process-gather')
        print ' ' + BOLD() + 'scatter-process-gather' + ENDC()
        print fill('Similar to ' + BOLD() + 'parallelized' + ENDC() + ' but with the addition of a "scatter" entry point. This allows you to break out the execution for splitting up the input, or you can call a separate app/applet to perform the splitting.', initial_indent=' ', subsequent_indent=' ')

    if len(pattern_choices) == 1:
        print 'Automatically using the execution pattern "basic"'
        return 'basic'

    use_completer(Completer(pattern_choices))
    pattern = prompt_for_var('Execution pattern', 'basic', choices=pattern_choices)
    use_completer()
    return pattern
def JOB_STATES(state):
    if state == 'failed':
        return BOLD() + RED() + state + ENDC()
    elif state == 'done':
        return BOLD() + GREEN() + state + ENDC()
    elif state in ['running', 'in_progress']:
        return GREEN() + state + ENDC()
    elif state == 'partially_failed':
        return RED() + state + ENDC()
    else:
        return YELLOW() + state + ENDC()
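# Illustrative usage sketch (not part of the original module): JOB_STATES wraps a
# job state string in the ANSI color helpers (BOLD, RED, GREEN, YELLOW, ENDC) used
# throughout this file, so it can be embedded directly in terminal output.
#
#     for state in ('done', 'failed', 'running', 'terminated'):
#         print JOB_STATES(state)   # state name wrapped in color escape codes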
def get_metadata(api_version):
    print ''
    print fill('The ' + BOLD() + 'title' + ENDC() + ', if provided, is what is shown as the name of your app on the website. It can be any valid UTF-8 string.')
    title = prompt_for_var('Title', '')

    print ''
    print fill('The ' + BOLD() + 'summary' + ENDC() + ' of your app is a short phrase or one-line description of what your app does. It can be any UTF-8 human-readable string.')
    summary = prompt_for_var('Summary', '')

    print ''
    print fill('The ' + BOLD() + 'description' + ENDC() + ' of your app is a longer piece of text describing your app. It can be any UTF-8 human-readable string, and it will be interpreted using Markdown (see http://daringfireball.net/projects/markdown/syntax/ for more details).')
    description = prompt_for_var('Description', '')

    return title, summary, description
def get_version(default=None):
    if default is None:
        default = '0.0.1'
    print ''
    print fill('You can publish multiple versions of your app, and the ' + BOLD() + 'version' + ENDC() + ' of your app is a string with which to tag a particular version. We encourage the use of Semantic Versioning for labeling your apps (see http://semver.org/ for more details).')
    version = prompt_for_var('Version', default)
    return version
def get_ls_l_desc(desc, include_folder=False, include_project=False):
    if 'state' in desc:
        state_len = len(desc['state'])
        if desc['state'] != 'closed':
            state_str = YELLOW() + desc['state'] + ENDC()
        else:
            state_str = GREEN() + desc['state'] + ENDC()
    else:
        state_str = ''
        state_len = 0

    name_str = ''
    if include_folder:
        name_str += desc['folder'] + ('/' if desc['folder'] != '/' else '')
    name_str += desc['name']

    if desc['class'] in ['applet', 'workflow'] or \
       (desc['class'] == 'record' and 'pipeline' in desc['types']):
        name_str = BOLD() + GREEN() + name_str + ENDC()

    size_str = ''
    if 'size' in desc and desc['class'] == 'file':
        size_str = get_size_str(desc['size'])
    elif 'length' in desc:
        size_str = str(desc['length']) + ' rows'
    size_padding = ' ' * (max(0, 8 - len(size_str)))

    return (state_str + DELIMITER(' ' * (8 - state_len)) +
            render_short_timestamp(desc['modified']) + DELIMITER(' ') +
            size_str + DELIMITER(size_padding + ' ') +
            name_str + DELIMITER(' (') +
            ((desc['project'] + DELIMITER(':')) if include_project else '') +
            desc['id'] + DELIMITER(')'))
def get_ls_desc(desc, print_id=False):
    addendum = ' : ' + desc['id'] if print_id is True else ''
    if desc['class'] in ['applet', 'workflow'] or \
       (desc['class'] == 'record' and 'pipeline' in desc['types']):
        return BOLD() + GREEN() + desc['name'] + ENDC() + addendum
    else:
        return desc['name'] + addendum
def get_find_jobs_string(jobdesc, has_children, single_result=False, show_outputs=True):
    '''
    :param jobdesc: hash of job describe output
    :param has_children: whether the job has subjobs to be printed
    :param single_result: whether the job is displayed as a single result or as part of a job tree
    '''
    is_origin_job = jobdesc['parentJob'] is None or single_result
    result = ("* " if is_origin_job and get_delimiter() is None else "")
    canonical_job_name = jobdesc['executableName'] + ":" + jobdesc['function']
    job_name = jobdesc.get('name', '<no name>')
    result += BOLD() + BLUE() + job_name + ENDC()
    if job_name != canonical_job_name and job_name + ":main" != canonical_job_name:
        result += ' (' + canonical_job_name + ')'
    result += DELIMITER(' (') + JOB_STATES(jobdesc['state']) + DELIMITER(') ') + jobdesc['id']
    result += DELIMITER('\n' + (u'│ ' if is_origin_job and has_children else (" " if is_origin_job else "")))
    result += jobdesc['launchedBy'][5:] + DELIMITER(' ')
    result += render_short_timestamp(jobdesc['created'])

    if jobdesc['state'] in ['done', 'failed', 'terminated', 'waiting_on_output']:
        # TODO: Remove this check once all jobs are migrated to have these values
        if 'stoppedRunning' in jobdesc and 'startedRunning' in jobdesc:
            result += " (runtime {r})".format(r=str(datetime.timedelta(seconds=int(jobdesc['stoppedRunning'] - jobdesc['startedRunning']) / 1000)))
    elif jobdesc['state'] == 'running':
        result += " (running for {rt})".format(rt=datetime.timedelta(seconds=int(time.time() - jobdesc['startedRunning'] / 1000)))

    if show_outputs:
        prefix = DELIMITER('\n' + (u'│ ' if is_origin_job and has_children else (" " if is_origin_job else "")))
        if jobdesc.get("output") is not None:
            result += job_output_to_str(jobdesc['output'], prefix=prefix)
        elif jobdesc['state'] == 'failed' and 'failureReason' in jobdesc:
            result += prefix + BOLD() + jobdesc['failureReason'] + ENDC() + ": " + \
                fill(jobdesc.get('failureMessage', ''), subsequent_indent=prefix.lstrip('\n'))

    return result
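# Illustrative only (not from the original source): a minimal shape of the job
# describe hash this formatter reads. All field values below are made up.
#
#     example_jobdesc = {
#         'id': 'job-xxxx',
#         'name': 'My analysis',
#         'executableName': 'my_app',
#         'function': 'main',
#         'parentJob': None,                 # None marks an origin job
#         'state': 'done',
#         'launchedBy': 'user-alice',        # the 'user-' prefix is stripped for display
#         'created': 1700000000000,          # milliseconds since epoch
#         'startedRunning': 1700000001000,
#         'stoppedRunning': 1700000061000,
#         'output': {'result': 42},
#     }
#     print get_find_jobs_string(example_jobdesc, has_children=False, single_result=True)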
def get_language():
    #language_choices = language_options.keys()
    language_choices = ["Python", "C++", "bash"]

    use_completer(Completer(language_choices))
    print ''
    print fill('You can write your app in any ' + BOLD() + 'programming language' + ENDC() + ', but we provide templates for the following supported languages' + ENDC() + ": " + ', '.join(language_choices))
    language = prompt_for_var('Programming language', 'Python', choices=language_choices)
    use_completer()
    return language
def get_parallelized_io(file_input_names, gtable_input_names, gtable_output_names):
    input_field = ''
    output_field = ''

    if len(file_input_names) > 0 or len(gtable_input_names) > 0:
        print ''
        print fill('Your app template can be initialized to split and process a ' + BOLD() + 'GTable' + ENDC() + ' input. The following of your input fields are eligible for this template pattern:')
        print ' ' + '\n '.join(gtable_input_names)
        use_completer(Completer(gtable_input_names))
        input_field = prompt_for_var('Input field to process (press ENTER to skip)', '',
                                     choices=file_input_names + gtable_input_names)
        use_completer()

    if input_field != '' and len(gtable_output_names) > 0:
        print ''
        print fill('Your app template can be initialized to build a ' + BOLD() + 'GTable' + ENDC() + ' in parallel for your output. The following of your output fields are eligible for this template pattern:')
        print ' ' + '\n '.join(gtable_output_names)
        use_completer(Completer(gtable_output_names))
        output_field = prompt_for_var('Output gtable to build in parallel (press ENTER to skip)', '',
                                      choices=gtable_output_names)

    return input_field, output_field
def print_intro(api_version):
    print DNANEXUS_LOGO() + ' App Wizard, API v' + api_version
    print ''

    print BOLD() + 'Basic Metadata' + ENDC()
    print ''
    print fill('''Please enter basic metadata fields that will be used to describe your app. Optional fields are denoted by options with square brackets. At the end of this wizard, the files necessary for building your app will be generated from the answers you provide.''')
    print ''
def get_name(default=None):
    print fill('The ' + BOLD() + 'name' + ENDC() + ' of your app must be unique on the DNAnexus platform. After creating your app for the first time, you will be able to publish new versions using the same app name. App names are restricted to alphanumeric characters (a-z, A-Z, 0-9), and the characters ".", "_", and "-".')
    name_pattern = re.compile('^[a-zA-Z0-9._-]+$')
    while True:
        name = prompt_for_var('App Name', default)
        if name_pattern.match(name) is None:
            print fill('The name of your app must match /^[a-zA-Z0-9._-]+$/')
        else:
            if os.path.exists(name):
                if os.path.isdir(name):
                    remove_dir = prompt_for_yn('The directory %s already exists. Would you like to remove all of its contents and create a new directory in its place?' % name)
                    if remove_dir:
                        shutil.rmtree(name)
                        print fill('Replacing all contents of directory %s...' % name)
                    else:
                        print ''
                        continue
                else:
                    print fill('A file named %s already exists. Please choose another name or rename your file.' % name)
                    continue
            break
    return name
def main(**kwargs):
    """
    Entry point for dx-app-wizard.
    Note that this function is not meant to be used as a subroutine in your program.
    """
    manifest = []

    print_intro(API_VERSION)

    if args.json_file is not None:
        with open(args.json_file, 'r') as json_file:
            app_json = json.loads(json_file.read())
        # Re-confirm the name
        name = get_name(default=args.name or app_json.get('name'))
        app_json['name'] = name
        version = get_version(default=app_json.get('version'))
        app_json['version'] = version
        try:
            os.mkdir(app_json['name'])
        except:
            sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % app_json['name']) + '\n')
            sys.exit(1)
    else:
        ##################
        # BASIC METADATA #
        ##################
        name = get_name(default=args.name)

        try:
            os.mkdir(name)
        except:
            sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % name) + '\n')
            sys.exit(1)

        title, summary = get_metadata(API_VERSION)

        version = get_version()

        app_json = OrderedDict()
        app_json["name"] = name
        app_json["title"] = title or name
        app_json['summary'] = summary or name
        app_json["dxapi"] = API_VERSION
        app_json["version"] = version

    ############
    # IO SPECS #
    ############
    class_completer = Completer(['int', 'float', 'string', 'boolean', 'hash',
                                 'array:int', 'array:float', 'array:string', 'array:boolean',
                                 'record', 'file', 'applet',
                                 'array:record', 'array:file', 'array:applet'])
    bool_completer = Completer(['true', 'false'])

    print('')
    print(BOLD() + 'Input Specification' + ENDC())
    print('')

    input_spec = True
    input_names = []
    printed_classes = False

    if input_spec:
        app_json['inputSpec'] = []
        print(fill('You will now be prompted for each input parameter to your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))

        while True:
            print('')
            ordinal = get_ordinal_str(len(app_json['inputSpec']) + 1)
            input_name = prompt_for_var(ordinal + ' input name (<ENTER> to finish)', allow_empty=True)
            if input_name == '':
                break
            if input_name in input_names:
                print(fill('Error: Cannot use the same input parameter name twice. Please choose again.'))
                continue
            if not IO_NAME_PATTERN.match(input_name):
                print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
                continue
            input_names.append(input_name)

            input_label = prompt_for_var('Label (optional human-readable name)', '')

            use_completer(class_completer)
            if not printed_classes:
                print('Your input parameter must be of one of the following classes:')
                print('''applet         array:file     array:record   file           int
array:applet   array:float    array:string   float          record
array:boolean  array:int      boolean        hash           string
''')
                printed_classes = True

            while True:
                input_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
                if input_class in class_completer.choices:
                    break
                else:
                    print(fill('Not a recognized class; please choose again.'))
            use_completer()

            optional = prompt_for_yn('This is an optional parameter')

            default_val = None
            if optional and input_class in ['int', 'float', 'string', 'boolean']:
                default_val = prompt_for_yn('A default value should be provided')
                if default_val:
                    while True:
                        if input_class == 'boolean':
                            use_completer(bool_completer)
                            default_val = prompt_for_var(' Default value', choices=['true', 'false'])
                            use_completer()
                        elif input_class == 'string':
                            default_val = prompt_for_var(' Default value', allow_empty=True)
                        else:
                            default_val = prompt_for_var(' Default value')

                        try:
                            if input_class == 'boolean':
                                default_val = (default_val == 'true')
                            elif input_class == 'int':
                                default_val = int(default_val)
                            elif input_class == 'float':
                                default_val = float(default_val)
                            break
                        except:
                            print('Not a valid default value for the given class ' + input_class)
                else:
                    default_val = None

            # Fill in the input parameter's JSON
            parameter_json = OrderedDict()
            parameter_json["name"] = input_name
            if input_label != '':
                parameter_json['label'] = input_label
            parameter_json["class"] = input_class
            parameter_json["optional"] = optional
            if default_val is not None:
                parameter_json['default'] = default_val

            # Fill in patterns and blank help string
            if input_class == 'file' or input_class == 'array:file':
                parameter_json["patterns"] = ["*"]
            parameter_json["help"] = ""

            app_json['inputSpec'].append(parameter_json)

    print('')
    print(BOLD() + 'Output Specification' + ENDC())
    print('')

    output_spec = True
    output_names = []
    if output_spec:
        app_json['outputSpec'] = []
        print(fill('You will now be prompted for each output parameter of your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))

        while True:
            print('')
            ordinal = get_ordinal_str(len(app_json['outputSpec']) + 1)
            output_name = prompt_for_var(ordinal + ' output name (<ENTER> to finish)', allow_empty=True)
            if output_name == '':
                break
            if output_name in output_names:
                print(fill('Error: Cannot use the same output parameter name twice. Please choose again.'))
                continue
            if not IO_NAME_PATTERN.match(output_name):
                print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
                continue
            output_names.append(output_name)

            output_label = prompt_for_var('Label (optional human-readable name)', '')

            use_completer(class_completer)
            if not printed_classes:
                print('Your output parameter must be of one of the following classes:')
                print('''applet         array:file     array:record   file           int
array:applet   array:float    array:string   float          record
array:boolean  array:int      boolean        hash           string''')
                printed_classes = True

            while True:
                output_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
                if output_class in class_completer.choices:
                    break
                else:
                    print(fill('Not a recognized class; please choose again.'))
            use_completer()

            # Fill in the output parameter's JSON
            parameter_json = OrderedDict()
            parameter_json["name"] = output_name
            if output_label != '':
                parameter_json['label'] = output_label
            parameter_json["class"] = output_class

            # Fill in patterns and blank help string
            if output_class == 'file' or output_class == 'array:file':
                parameter_json["patterns"] = ["*"]
            parameter_json["help"] = ""

            app_json['outputSpec'].append(parameter_json)

    required_file_input_names = []
    optional_file_input_names = []
    required_file_array_input_names = []
    optional_file_array_input_names = []
    file_output_names = []

    if 'inputSpec' in app_json:
        for param in app_json['inputSpec']:
            may_be_missing = param['optional'] and "default" not in param
            if param['class'] == 'file':
                param_list = optional_file_input_names if may_be_missing else required_file_input_names
            elif param['class'] == 'array:file':
                param_list = optional_file_array_input_names if may_be_missing else required_file_array_input_names
            else:
                param_list = None
            if param_list is not None:
                param_list.append(param['name'])

    if 'outputSpec' in app_json:
        file_output_names = [param['name'] for param in app_json['outputSpec'] if param['class'] == 'file']

    ##################
    # TIMEOUT POLICY #
    ##################
    print('')
    print(BOLD() + 'Timeout Policy' + ENDC())
    app_json["runSpec"] = OrderedDict({})
    app_json['runSpec'].setdefault('timeoutPolicy', {})

    timeout, timeout_units = get_timeout(default=app_json['runSpec']['timeoutPolicy'].get('*'))

    app_json['runSpec']['timeoutPolicy'].setdefault('*', {})
    app_json['runSpec']['timeoutPolicy']['*'].setdefault(timeout_units, timeout)

    ########################
    # LANGUAGE AND PATTERN #
    ########################
    print('')
    print(BOLD() + 'Template Options' + ENDC())

    # Prompt for the programming language if not specified on the command line
    language = args.language if args.language is not None else get_language()

    interpreter = language_options[language].get_interpreter()
    app_json["runSpec"]["interpreter"] = interpreter

    # Prompt for the execution pattern only if the one given in args.template is
    # not available for the chosen programming language
    template_dir = os.path.join(os.path.dirname(dxpy.__file__), 'templating', 'templates',
                                language_options[language].get_path())
    if not os.path.isdir(os.path.join(template_dir, args.template)):
        print(fill('The execution pattern "' + args.template + '" is not available for your programming language.'))
        pattern = get_pattern(template_dir)
    else:
        pattern = args.template
    template_dir = os.path.join(template_dir, pattern)

    with open(os.path.join(template_dir, 'dxapp.json'), 'r') as template_app_json_file:
        file_text = fill_in_name_and_ver(template_app_json_file.read(), name, version)
        template_app_json = json.loads(file_text)
        for key in template_app_json['runSpec']:
            app_json['runSpec'][key] = template_app_json['runSpec'][key]

    if (language == args.language) and (pattern == args.template):
        print('All template options are supplied in the arguments.')
    ##########################
    # APP ACCESS PERMISSIONS #
    ##########################
    print('')
    print(BOLD('Access Permissions'))
    print(fill('''If you request these extra permissions for your app, users will see this fact when launching your app, and certain other restrictions will apply. For more information, see ''' + BOLD('https://documentation.dnanexus.com/developer/apps/app-permissions') + '.'))

    print('')
    print(fill(UNDERLINE('Access to the Internet') + ' (other than accessing the DNAnexus API).'))
    if prompt_for_yn("Will this app need access to the Internet?", default=False):
        app_json.setdefault('access', {})
        app_json['access']['network'] = ['*']
        print(fill('App has full access to the Internet. To narrow access to specific sites, edit the ' + UNDERLINE('access.network') + ' field of dxapp.json once we generate the app.'))

    print('')
    print(fill(UNDERLINE('Direct access to the parent project') + '''. This is not needed if your app specifies outputs, which will be copied into the project after it's done running.'''))
    if prompt_for_yn("Will this app need access to the parent project?", default=False):
        app_json.setdefault('access', {})
        app_json['access']['project'] = 'CONTRIBUTE'
        print(fill('App has CONTRIBUTE access to the parent project. To change the access level or request access to ' +
                   'other projects, edit the ' + UNDERLINE('access.project') + ' and ' + UNDERLINE('access.allProjects') +
                   ' fields of dxapp.json once we generate the app.'))

    #######################
    # SYSTEM REQUIREMENTS #
    #######################
    print('')
    print(BOLD('System Requirements'))
    print('')
    print(BOLD('Common AWS instance types:'))
    print(format_table(InstanceTypesCompleter.aws_preferred_instance_types.values(),
                       column_names=list(InstanceTypesCompleter.instance_types.values())[0]._fields))
    print(BOLD('Common Azure instance types:'))
    print(format_table(InstanceTypesCompleter.azure_preferred_instance_types.values(),
                       column_names=list(InstanceTypesCompleter.instance_types.values())[0]._fields))
    print(fill(BOLD('Default instance type:') + ' The instance type you select here will apply to all entry points in ' +
               'your app unless you override it. See ' +
               BOLD('https://documentation.dnanexus.com/developer/api/running-analyses/instance-types') +
               ' for more information.'))
    use_completer(InstanceTypesCompleter())
    instance_type = prompt_for_var('Choose an instance type for your app',
                                   default=InstanceTypesCompleter.default_instance_type.Name,
                                   choices=list(InstanceTypesCompleter.instance_types))

    target_region = DEFAULT_REGION_AWS
    if instance_type in InstanceTypesCompleter.azure_preferred_instance_types.keys():
        target_region = DEFAULT_REGION_AZURE

    app_json['regionalOptions'] = OrderedDict({})
    app_json['regionalOptions'][target_region] = OrderedDict({})
    app_json['regionalOptions'][target_region].setdefault('systemRequirements', {})
    app_json['regionalOptions'][target_region]['systemRequirements'].setdefault('*', {})
    app_json['regionalOptions'][target_region]['systemRequirements']['*']['instanceType'] = instance_type

    ######################
    # HARDCODED DEFAULTS #
    ######################
    app_json['runSpec']['distribution'] = 'Ubuntu'
    app_json['runSpec']['release'] = '20.04'
    app_json['runSpec']['version'] = "0"

    #################
    # WRITING FILES #
    #################
    print('')
    print(BOLD() + '*** Generating ' + DNANEXUS_LOGO() + BOLD() + ' App Template... ***' + ENDC())

    with open(os.path.join(name, 'dxapp.json'), 'w') as prog_file:
        prog_file.write(clean(json.dumps(app_json, indent=2)) + '\n')
    manifest.append(os.path.join(name, 'dxapp.json'))

    print('')
    print(fill('''Your app specification has been written to the dxapp.json file. You can specify more app options by editing this file directly (see https://documentation.dnanexus.com/developer for complete documentation).''' +
               (''' Note that without an input and output specification, your app can only be built as an APPLET on the system. To publish it to the DNAnexus community, you must first specify your inputs and outputs.''' if not ('inputSpec' in app_json and 'outputSpec' in app_json) else "")))
    print('')

    for subdir in 'src', 'test', 'resources':
        try:
            os.mkdir(os.path.join(name, subdir))
            manifest.append(os.path.join(name, subdir, ''))
        except:
            sys.stderr.write("Unable to create subdirectory %s/%s\n" % (name, subdir))
            sys.exit(1)

    entry_points = ['main']
    if pattern == 'parallelized':
        entry_points = ['main', 'process', 'postprocess']
    elif pattern == 'scatter-process-gather':
        entry_points = ['main', 'scatter', 'map', 'process', 'postprocess']

    manifest += create_files_from_templates(template_dir, app_json, language,
                                            required_file_input_names, optional_file_input_names,
                                            required_file_array_input_names, optional_file_array_input_names,
                                            file_output_names, pattern,
                                            description='<!-- Insert a description of your app here -->',
                                            entry_points=entry_points)

    print("Created files:")
    for filename in sorted(manifest):
        print("\t", filename)
    print("\n" + fill('''App directory created! See https://documentation.dnanexus.com/developer for tutorials on how to modify these files, or run "dx build {n}" or "dx build --create-app {n}" while logged in with dx.'''.format(n=name)) + "\n")
    print(fill('''Running the DNAnexus build utility will create an executable on the DNAnexus platform. Any files found in the ''' + BOLD() + 'resources' + ENDC() + ''' directory will be uploaded so that they will be present in the root directory when the executable is run.'''))
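# Illustrative only (not from the original source): a rough sketch of the dxapp.json
# that main() writes for a simple one-input/one-output app. The keys mirror the
# assignments above; concrete values (app name, dxapi, interpreter, timeout, region,
# instance type) are placeholders, shown here as a Python dict.
#
#     example_dxapp_json = {
#         "name": "my_app",
#         "title": "my_app",
#         "summary": "my_app",
#         "dxapi": "1.0.0",                      # written from API_VERSION
#         "version": "0.0.1",
#         "inputSpec": [{"name": "reads", "class": "file", "optional": False,
#                        "patterns": ["*"], "help": ""}],
#         "outputSpec": [{"name": "report", "class": "file",
#                         "patterns": ["*"], "help": ""}],
#         "runSpec": {"timeoutPolicy": {"*": {"hours": 48}},
#                     "interpreter": "python3",  # from the chosen language template
#                     "distribution": "Ubuntu", "release": "20.04", "version": "0"},
#         "regionalOptions": {"aws:us-east-1": {"systemRequirements":
#                             {"*": {"instanceType": "mem1_ssd1_v2_x4"}}}},
#     }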
def col_head(i):
    if column_specs is not None:
        return BOLD() + type_colormap[column_specs[i]['type']] + column_names[i] + ENDC()
    else:
        return BOLD() + WHITE() + column_names[i] + ENDC()
def get_input_single(param_desc):
    in_class = param_desc['class']
    typespec = param_desc.get('type', None)
    print '\nInput: ' + fill(UNDERLINE() + param_desc.get('label', param_desc['name']) + ENDC() + ' (' + param_desc['name'] + ')')
    print 'Class: ' + param_desc['class']
    if 'type' in param_desc:
        print 'Type(s): ' + parse_typespec(param_desc['type'])
    if 'suggestions' in param_desc:
        print format_choices_or_suggestions('Suggestions:', param_desc['suggestions'], param_desc['class'],
                                            initial_indent='', subsequent_indent=' ')
    if 'choices' in param_desc:
        print format_choices_or_suggestions('Choices:', param_desc['choices'], param_desc['class'],
                                            initial_indent='', subsequent_indent=' ')
    print

    prompt = "Enter {_class} {value} ({hint}'" + WHITE() + BOLD() + '?' + ENDC() + "' for more options)"
    hint = ''
    if in_class in dx_data_classes:
        hint = '<TAB> twice for compatible ' + in_class + 's in current directory, '
    elif 'suggestions' in param_desc:
        hint = '<TAB> twice for suggestions, '
    elif 'choices' in param_desc:
        hint = '<TAB> twice for choices, '
    prompt = prompt.format(_class=in_class,
                           value='ID or path' if in_class in dx_data_classes else 'value',
                           hint=hint)
    print fill(prompt)

    try:
        import readline
        if in_class in dx_data_classes:
            from dxpy.utils.completer import DXPathCompleter
            readline.set_completer(DXPathCompleter(classes=[in_class], typespec=typespec).complete)
        elif in_class == 'boolean':
            from dxpy.utils.completer import ListCompleter
            readline.set_completer(ListCompleter(completions=['true', 'false']).complete)
        elif 'suggestions' in param_desc:
            from dxpy.utils.completer import ListCompleter
            readline.set_completer(ListCompleter(completions=map(str, param_desc['suggestions'])).complete)
        elif 'choices' in param_desc:
            from dxpy.utils.completer import ListCompleter
            readline.set_completer(ListCompleter(completions=map(str, param_desc['choices'])).complete)
        else:
            from dxpy.utils.completer import NoneCompleter
            readline.set_completer(NoneCompleter().complete)
    except:
        pass

    try:
        while True:
            prompt = param_desc['name'] + ': '
            user_input = raw_input(prompt)
            if in_class == 'string':
                if user_input == '':
                    user_input = []
                else:
                    user_input = [user_input]
            else:
                user_input = shlex.split(user_input)
            while user_input == ["?"]:
                user_input = interactive_help(in_class, param_desc, prompt)
            if len(user_input) > 1:
                print fill('Error: more than one argument given. Please quote your entire input or escape your whitespace with a backslash \'\\\'.')
                continue
            elif len(user_input) == 0:
                user_input = ['']
            try:
                value = parse_input_or_jbor(in_class, user_input[0])
            except ValueError as details:
                print fill('Error occurred when parsing for class ' + in_class + ': ' + unicode(details))
                continue
            except TypeError as details:
                print fill('Error occurred when parsing for class ' + in_class + ': ' + unicode(details))
                continue
            if 'choices' in param_desc and value not in param_desc['choices']:
                print fill(RED() + BOLD() + 'Warning:' + ENDC() + ' value "' + unicode(value) + '" for input ' +
                           WHITE() + BOLD() + param_desc['name'] + ENDC() +
                           ' is not in the list of choices for that input')
            return value
    except EOFError:
        raise Exception('')
    except KeyboardInterrupt:
        raise Exception('')
def get_input_array(param_desc):
    in_class = param_desc['class']
    if in_class.startswith("array:"):
        in_class = in_class[6:]
    typespec = param_desc.get('type', None)
    input_array = []
    print '\nInput: ' + fill(UNDERLINE() + param_desc.get('label', param_desc['name']) + ENDC() + ' (' + param_desc['name'] + ')')
    print 'Class: ' + param_desc['class']
    if 'type' in param_desc:
        print 'Type(s): ' + parse_typespec(param_desc['type'])
    print

    prompt = "Enter {_class} values, one at a time (^D or <ENTER> to finish, {hint}'" + WHITE() + BOLD() + '?' + ENDC() + "' for more options)"
    hint = ''
    if in_class in dx_data_classes:
        hint = '<TAB> twice for compatible ' + in_class + 's in current directory, '
    elif 'suggestions' in param_desc:
        hint = '<TAB> twice for suggestions, '
    elif 'choices' in param_desc:
        hint = '<TAB> twice for choices, '
    prompt = prompt.format(_class=in_class, hint=hint)
    print fill(prompt)

    try:
        import readline
        if in_class in dx_data_classes:
            from dxpy.utils.completer import DXPathCompleter
            readline.set_completer(DXPathCompleter(classes=[in_class], typespec=typespec).complete)
        elif in_class == 'boolean':
            from dxpy.utils.completer import ListCompleter
            readline.set_completer(ListCompleter(completions=['true', 'false']).complete)
        elif 'suggestions' in param_desc:
            from dxpy.utils.completer import ListCompleter
            readline.set_completer(ListCompleter(completions=map(str, param_desc['suggestions'])).complete)
        elif 'choices' in param_desc:
            from dxpy.utils.completer import ListCompleter
            readline.set_completer(ListCompleter(completions=map(str, param_desc['choices'])).complete)
        else:
            from dxpy.utils.completer import NoneCompleter
            readline.set_completer(NoneCompleter().complete)
    except:
        pass

    try:
        while True:
            prompt = param_desc['name'] + '[' + str(len(input_array)) + "]: "
            user_input = raw_input(prompt)
            if in_class == 'string':
                if user_input == '':
                    user_input = []
                else:
                    user_input = [user_input]
            else:
                user_input = shlex.split(user_input)
            while user_input == ['?']:
                user_input = interactive_help(in_class, param_desc, prompt)
            if len(user_input) > 1:
                print fill('Error: more than one argument given. Please quote your entire input or escape your whitespace with a backslash \'\\\'.')
                continue
            elif len(user_input) == 0:
                return input_array
            try:
                input_array.append(parse_input_or_jbor(in_class, user_input[0]))
            except ValueError as details:
                print fill('Error occurred when parsing for class ' + in_class + ': ' + unicode(details))
                continue
            except TypeError as details:
                print fill('Error occurred when parsing for class ' + in_class + ': ' + unicode(details))
                continue
    except EOFError:
        return input_array
def run_one_entry_point(job_id, function, input_hash, run_spec, depends_on, name=None):
    '''
    :param job_id: job ID of the local job to run
    :type job_id: string
    :param function: function to run
    :type function: string
    :param input_hash: input for the job (may include job-based object references)
    :type input_hash: dict
    :param run_spec: run specification from the dxapp.json of the app
    :type run_spec: dict

    Runs the specified entry point and retrieves the job's output, updating
    job_outputs.json (in $DX_TEST_JOB_HOMEDIRS) appropriately.
    '''
    print('======')

    job_homedir = os.path.join(environ['DX_TEST_JOB_HOMEDIRS'], job_id)

    job_env = environ.copy()
    job_env['HOME'] = os.path.join(environ['DX_TEST_JOB_HOMEDIRS'], job_id)

    all_job_outputs_path = os.path.join(environ['DX_TEST_JOB_HOMEDIRS'], 'job_outputs.json')

    with open(all_job_outputs_path, 'r') as fd:
        all_job_outputs = json.load(fd, object_pairs_hook=collections.OrderedDict)

    if isinstance(name, basestring):
        name += ' (' + job_id + ':' + function + ')'
    else:
        name = job_id + ':' + function
    job_name = BLUE() + BOLD() + name + ENDC()
    print(job_name)

    # Resolve local job-based object references
    try:
        resolve_job_references(input_hash, all_job_outputs)
    except Exception as e:
        exit_with_error(job_name + ' ' + JOB_STATES('failed') + ' when resolving input:\n' + fill(str(e)))

    # Get list of non-closed data objects in the input that appear as
    # DNAnexus links; append to depends_on
    if depends_on is None:
        depends_on = []
    get_implicit_depends_on(input_hash, depends_on)

    try:
        wait_for_depends_on(depends_on, all_job_outputs)
    except Exception as e:
        exit_with_error(job_name + ' ' + JOB_STATES('failed') + ' when processing depends_on:\n' + fill(str(e)))

    # Save job input to job_input.json
    with open(os.path.join(job_homedir, 'job_input.json'), 'wb') as fd:
        json.dump(input_hash, fd, indent=4)
        fd.write(b'\n')

    print(job_output_to_str(input_hash, title=(BOLD() + 'Input: ' + ENDC()),
                            title_len=len("Input: ")).lstrip())

    if run_spec['interpreter'] == 'bash':
        # Save job input to env vars
        env_path = os.path.join(job_homedir, 'environment')
        with open(env_path, 'w') as fd:
            job_input_file = os.path.join(job_homedir, 'job_input.json')
            var_defs_hash = file_load_utils.gen_bash_vars(job_input_file, job_homedir=job_homedir)
            for key, val in var_defs_hash.iteritems():
                fd.write("{}={}\n".format(key, val))

    print(BOLD() + 'Logs:' + ENDC())
    start_time = datetime.datetime.now()
    if run_spec['interpreter'] == 'bash':
        script = '''
          cd {homedir};
          . {env_path};
          . {code_path};
          if [[ $(type -t {function}) == "function" ]];
          then {function};
          else echo "$0: Global scope execution complete. Not invoking entry point function {function} because it was not found" 1>&2;
          fi'''.format(homedir=pipes.quote(job_homedir),
                       env_path=pipes.quote(os.path.join(job_env['HOME'], 'environment')),
                       code_path=pipes.quote(environ['DX_TEST_CODE_PATH']),
                       function=function)
        invocation_args = ['bash', '-c', '-e'] + (['-x'] if environ.get('DX_TEST_X_FLAG') else []) + [script]
    elif run_spec['interpreter'] == 'python2.7':
        script = '''#!/usr/bin/env python
import os
os.chdir({homedir})

{code}

import dxpy, json
if dxpy.utils.exec_utils.RUN_COUNT == 0:
    dxpy.run()
'''.format(homedir=repr(job_homedir),
           code=run_spec['code'])

        job_env['DX_TEST_FUNCTION'] = function
        invocation_args = ['python', '-c', script]

    if USING_PYTHON2:
        invocation_args = [arg.encode(sys.stdout.encoding) for arg in invocation_args]
        env = {k: v.encode(sys.stdout.encoding) for k, v in job_env.items()}
    else:
        env = job_env

    fn_process = subprocess.Popen(invocation_args, env=env)
    fn_process.communicate()
    end_time = datetime.datetime.now()

    if fn_process.returncode != 0:
        exit_with_error(job_name + ' ' + JOB_STATES('failed') + ', exited with error code ' +
                        str(fn_process.returncode) + ' after ' + str(end_time - start_time))

    # Now updating job output aggregation file with job's output
    job_output_path = os.path.join(job_env['HOME'], 'job_output.json')
    if os.path.exists(job_output_path):
        try:
            with open(job_output_path, 'r') as fd:
                job_output = json.load(fd, object_pairs_hook=collections.OrderedDict)
        except Exception as e:
            exit_with_error('Error: Could not load output of ' + job_name + ':\n' + fill(str(e.__class__) + ': ' + str(e)))
    else:
        job_output = {}

    print(job_name + ' -> ' + GREEN() + 'finished running' + ENDC() + ' after ' + str(end_time - start_time))
    print(job_output_to_str(job_output, title=(BOLD() + "Output: " + ENDC()),
                            title_len=len("Output: ")).lstrip())

    with open(os.path.join(environ['DX_TEST_JOB_HOMEDIRS'], 'job_outputs.json'), 'r') as fd:
        all_job_outputs = json.load(fd, object_pairs_hook=collections.OrderedDict)
    all_job_outputs[job_id] = job_output

    # Before dumping, see if any new jbors should be resolved now
    for other_job_id in all_job_outputs:
        if all_job_outputs[other_job_id] is None:
            # Skip if job is not done yet (true for ancestor jobs)
            continue
        resolve_job_references(all_job_outputs[other_job_id], all_job_outputs, should_resolve=False)

    with open(os.path.join(environ['DX_TEST_JOB_HOMEDIRS'], 'job_outputs.json'), 'wb') as fd:
        json.dump(all_job_outputs, fd, indent=4)
        fd.write(b'\n')
parser.add_argument("--json", help=argparse.SUPPRESS, action="store_true", dest="json")
parser.add_argument("--no-json", help=argparse.SUPPRESS, action="store_false", dest="json")
parser.add_argument("--extra-args",
                    help="Arguments (in JSON format) to pass to the /applet/new API method, overriding all other settings")
parser.add_argument("--run",
                    help="Run the app or applet after building it (options following this are passed to " + BOLD("dx run") + ")",
                    nargs=argparse.REMAINDER)


class DXSyntaxError(Exception):
    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message


def _get_timestamp_version_suffix(version):
    if "+" in version:
        return ".build." + datetime.today().strftime('%Y%m%d.%H%M')
    else:
app_options.set_defaults(update=True)
app_options.add_argument("--update", help=argparse.SUPPRESS, action="store_true", dest="update")
app_options.add_argument("--no-update", help="Never update an existing unpublished app in place.",
                         action="store_false", dest="update")

# --[no-]dx-toolkit-autodep
parser.set_defaults(dx_toolkit_autodep="stable")
parser.add_argument("--dx-toolkit-legacy-git-autodep", help=argparse.SUPPRESS, action="store_const",
                    dest="dx_toolkit_autodep", const="git")
parser.add_argument("--dx-toolkit-stable-autodep", help=argparse.SUPPRESS, action="store_const",
                    dest="dx_toolkit_autodep", const="stable")
parser.add_argument("--dx-toolkit-beta-autodep", help=argparse.SUPPRESS, action="store_const",
                    dest="dx_toolkit_autodep", const="beta")  # deprecated
parser.add_argument("--dx-toolkit-unstable-autodep", help=argparse.SUPPRESS, action="store_const",
                    dest="dx_toolkit_autodep", const="unstable")  # deprecated
parser.add_argument("--dx-toolkit-autodep", help=argparse.SUPPRESS, action="store_const",
                    dest="dx_toolkit_autodep", const="stable")
parser.add_argument("--no-dx-toolkit-autodep",
                    help="Do not auto-insert the dx-toolkit dependency (default is to add it if it would otherwise be absent from the runSpec)",
                    action="store_false", dest="dx_toolkit_autodep")

# --[no-]parallel-build
parser.set_defaults(parallel_build=True)
parser.add_argument("--parallel-build", help=argparse.SUPPRESS, action="store_true", dest="parallel_build")
parser.add_argument("--no-parallel-build",
                    help="Build with " + BOLD("make") + " instead of " + BOLD("make -jN") + ".",
                    action="store_false", dest="parallel_build")

app_options.set_defaults(use_temp_build_project=True)
app_options.add_argument("--no-temp-build-project",
                         help="When building an app, build its applet in the current project instead of a temporary project",
                         action="store_false", dest="use_temp_build_project")

# --yes
app_options.add_argument('-y', '--yes', dest='confirm',
                         help='Do not ask for confirmation for potentially dangerous operations',
                         action='store_false')

# --[no-]json (undocumented): dumps the JSON describe of the app or
# applet that was created. Useful for tests.
parser.set_defaults(json=False)
parser.add_argument("--json", help=argparse.SUPPRESS, action="store_true", dest="json")
parser.add_argument("--no-json", help=argparse.SUPPRESS, action="store_false", dest="json")

parser.add_argument("--extra-args",
                    help="Arguments (in JSON format) to pass to the /applet/new API method, overriding all other settings")