def validate_fastqc_config_file(strict):
    '''
    Validate the FastQC config file of a run.
    '''

    # initialize the control variable and the error list
    OK = True
    error_list = []

    # initialize the variable used when a value is not found
    not_found = '***NOTFOUND***'.upper()

    # get the option dictionary
    try:
        fastqc_option_dict = xlib.get_option_dict(get_fastqc_config_file())
    except:
        error_list.append('*** ERROR: The syntax is WRONG.')
        OK = False
    else:

        # get the sections list
        sections_list = []
        for section in fastqc_option_dict.keys():
            sections_list.append(section)
        sections_list.sort()

        # check section "identification"
        if 'identification' not in sections_list:
            error_list.append('*** ERROR: the section "identification" is not found.')
            OK = False
        else:

            # check section "identification" - key "experiment_id"
            experiment_id = fastqc_option_dict.get('identification', {}).get('experiment_id', not_found)
            if experiment_id == not_found:
                error_list.append('*** ERROR: the key "experiment_id" is not found in the section "identification".')
                OK = False

            # check section "identification" - key "read_dataset_id"
            read_dataset_id = fastqc_option_dict.get('identification', {}).get('read_dataset_id', not_found)
            if read_dataset_id == not_found:
                error_list.append('*** ERROR: the key "read_dataset_id" is not found in the section "identification".')
                OK = False

        # check section "FastQC parameters"
        if 'FastQC parameters' not in sections_list:
            error_list.append('*** ERROR: the section "FastQC parameters" is not found.')
            OK = False
        else:

            # check section "FastQC parameters" - key "threads"
            threads = fastqc_option_dict.get('FastQC parameters', {}).get('threads', not_found)
            if threads == not_found:
                error_list.append('*** ERROR: the key "threads" is not found in the section "FastQC parameters".')
                OK = False
            else:
                try:
                    if int(threads) < 1:
                        error_list.append('*** ERROR: the key "threads" in the section "FastQC parameters" must be an integer value greater than or equal to 1.')
                        OK = False
                except:
                    error_list.append('*** ERROR: the key "threads" in the section "FastQC parameters" must be an integer value greater than or equal to 1.')
                    OK = False

        # check section "file-1"
        if 'file-1' not in sections_list:
            error_list.append('*** ERROR: the section "file-1" is not found.')
            OK = False

        # check all sections "file-n"
        for section in sections_list:
            if section not in ['identification', 'FastQC parameters']:

                # verify that the section identification is like file-n
                if not re.match('^file-[0-9]+$', section):
                    error_list.append('*** ERROR: the section "{0}" has a wrong identification.'.format(section))
                    OK = False
                else:

                    # check section "file-n" - key "file_name"
                    file_name = fastqc_option_dict.get(section, {}).get('file_name', not_found)
                    if file_name == not_found:
                        error_list.append('*** ERROR: the key "file_name" is not found in the section "{0}".'.format(section))
                        OK = False
                    elif not xlib.is_valid_path(file_name, 'linux'):
                        error_list.append('*** ERROR: the file {0} in the key "file_name" of the section "{1}" has a non-valid file name.'.format(file_name, section))
                        OK = False

    # warn that the config file is not valid if there are any errors
    if not OK:
        error_list.append('\nThe {0} config file is not valid. Please correct this file or recreate it.'.format(xlib.get_fastqc_name()))

    # return the control variable and the error list
    return (OK, error_list)
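# Illustrative sketch of the INI-style config file the validator above expects, as read
# by xlib.get_option_dict(); the section and key names come from the checks above, while
# the concrete values are hypothetical.
#
#   [identification]
#   experiment_id = exp001
#   read_dataset_id = reads-200101-120000
#
#   [FastQC parameters]
#   threads = 2
#
#   [file-1]
#   file_name = ./reads/demo-reads-1.fastq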
def build_fastqc_process_script(cluster_name, current_run_dir):
    '''
    Build the current FastQC process script.
    '''

    # initialize the control variable and the error list
    OK = True
    error_list = []

    # get the FastQC option dictionary
    fastqc_option_dict = xlib.get_option_dict(get_fastqc_config_file())

    # get the options
    experiment_id = fastqc_option_dict['identification']['experiment_id']
    read_dataset_id = fastqc_option_dict['identification']['read_dataset_id']
    threads = fastqc_option_dict['FastQC parameters']['threads']

    # get the sections list
    sections_list = []
    for section in fastqc_option_dict.keys():
        sections_list.append(section)
    sections_list.sort()

    # build the file name list
    file_name_list = []
    for section in sections_list:
        # if the section identification is like file-n
        if re.match('^file-[0-9]+$', section):
            file_name = fastqc_option_dict[section]['file_name']
            file_name_list.append(file_name)

    # write the FastQC process script
    try:
        if not os.path.exists(os.path.dirname(get_fastqc_process_script())):
            os.makedirs(os.path.dirname(get_fastqc_process_script()))
        with open(get_fastqc_process_script(), mode='w', encoding='utf8', newline='\n') as file_id:
            file_id.write('{0}\n'.format('#!/bin/bash'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('FASTQC_PATH={0}/{1}/envs/{2}/bin'.format(xlib.get_cluster_app_dir(), xlib.get_miniconda3_name(), xlib.get_fastqc_bioconda_code())))
            file_id.write('{0}\n'.format('PATH=$FASTQC_PATH:$PATH'))
            file_id.write('{0}\n'.format('SEP="#########################################"'))
            file_id.write('{0}\n'.format('cd {0}/{1}/bin'.format(xlib.get_cluster_app_dir(), xlib.get_miniconda3_name())))
            file_id.write('{0}\n'.format('source activate {0}'.format(xlib.get_fastqc_bioconda_code())))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function init'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format(' INIT_DATETIME=`date --utc +%s`'))
            file_id.write('{0}\n'.format(' FORMATTED_INIT_DATETIME=`date --date="@$INIT_DATETIME" "+%Y-%m-%d %H:%M:%S"`'))
            file_id.write('{0}\n'.format(' echo "$SEP"'))
            file_id.write('{0}\n'.format(' echo "Script started in node $HOSTNAME of cluster {0} at $FORMATTED_INIT_DATETIME UTC."'.format(cluster_name)))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function run_fastqc_process'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format(' cd {0}'.format(current_run_dir)))
            file_id.write('{0}\n'.format(' echo "$SEP"'))
            file_id.write('{0}\n'.format(' fastqc --version'))
            for file_name in file_name_list:
                file_id.write('{0}\n'.format(' echo "$SEP"'))
                file_id.write('{0}\n'.format(' /usr/bin/time \\'))
                file_id.write('{0}\n'.format(' --format="$SEP\\nElapsed real time (s): %e\\nCPU time in kernel mode (s): %S\\nCPU time in user mode (s): %U\\nPercentage of CPU: %P\\nMaximum resident set size(Kb): %M\\nAverage total memory use (Kb):%K" \\'))
                file_id.write('{0}\n'.format(' fastqc \\'))
                file_id.write('{0}\n'.format(' {0} \\'.format(xlib.get_cluster_read_file(experiment_id, read_dataset_id, file_name))))
                file_id.write('{0}\n'.format(' --threads={0} \\'.format(threads)))
                file_id.write('{0}\n'.format(' --outdir={0}'.format(current_run_dir)))
                file_id.write('{0}\n'.format(' RC=$?'))
                file_id.write('{0}\n'.format(' if [ $RC -ne 0 ]; then manage_error fastqc $RC; fi'))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function end'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format(' END_DATETIME=`date --utc +%s`'))
            file_id.write('{0}\n'.format(' FORMATTED_END_DATETIME=`date --date="@$END_DATETIME" "+%Y-%m-%d %H:%M:%S"`'))
            file_id.write('{0}\n'.format(' calculate_duration'))
            file_id.write('{0}\n'.format(' echo "$SEP"'))
            file_id.write('{0}\n'.format(' echo "Script ended OK at $FORMATTED_END_DATETIME UTC with a run duration of $DURATION s ($FORMATTED_DURATION)."'))
            file_id.write('{0}\n'.format(' echo "$SEP"'))
            file_id.write('{0}\n'.format(' RECIPIENT={0}'.format(xconfiguration.get_contact_data())))
            file_id.write('{0}\n'.format(' SUBJECT="{0}: {1} process"'.format(xlib.get_project_name(), xlib.get_fastqc_name())))
            file_id.write('{0}\n'.format(' MESSAGE="The {0} process in node $HOSTNAME of cluster {1} ended OK at $FORMATTED_END_DATETIME with a run duration of $DURATION s ($FORMATTED_DURATION). Please review its log.<br/><br/>Regards,<br/>GI Genetica, Fisiologia e Historia Forestal<br/>Dpto. Sistemas y Recursos Naturales<br/>ETSI Montes, Forestal y del Medio Natural<br/>Universidad Politecnica de Madrid<br/>https://github.com/ggfhf/"'.format(xlib.get_fastqc_name(), cluster_name)))
            file_id.write('{0}\n'.format(' mail --append "Content-type: text/html;" --subject "$SUBJECT" "$RECIPIENT" <<< "$MESSAGE"'))
            file_id.write('{0}\n'.format(' exit 0'))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function manage_error'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format(' END_DATETIME=`date --utc +%s`'))
            file_id.write('{0}\n'.format(' FORMATTED_END_DATETIME=`date --date="@$END_DATETIME" "+%Y-%m-%d %H:%M:%S"`'))
            file_id.write('{0}\n'.format(' calculate_duration'))
            file_id.write('{0}\n'.format(' echo "$SEP"'))
            file_id.write('{0}\n'.format(' echo "ERROR: $1 returned error $2"'))
            file_id.write('{0}\n'.format(' echo "Script ended WRONG at $FORMATTED_END_DATETIME UTC with a run duration of $DURATION s ($FORMATTED_DURATION)."'))
            file_id.write('{0}\n'.format(' echo "$SEP"'))
            file_id.write('{0}\n'.format(' RECIPIENT={0}'.format(xconfiguration.get_contact_data())))
            file_id.write('{0}\n'.format(' SUBJECT="{0}: {1} process"'.format(xlib.get_project_name(), xlib.get_fastqc_name())))
            file_id.write('{0}\n'.format(' MESSAGE="The {0} process in node $HOSTNAME of cluster {1} ended WRONG at $FORMATTED_END_DATETIME with a run duration of $DURATION s ($FORMATTED_DURATION). Please review its log.<br/><br/>Regards,<br/>GI Genetica, Fisiologia e Historia Forestal<br/>Dpto. Sistemas y Recursos Naturales<br/>ETSI Montes, Forestal y del Medio Natural<br/>Universidad Politecnica de Madrid<br/>https://github.com/ggfhf/"'.format(xlib.get_fastqc_name(), cluster_name)))
            file_id.write('{0}\n'.format(' mail --append "Content-type: text/html;" --subject "$SUBJECT" "$RECIPIENT" <<< "$MESSAGE"'))
            file_id.write('{0}\n'.format(' exit 3'))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function calculate_duration'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format(' DURATION=`expr $END_DATETIME - $INIT_DATETIME`'))
            file_id.write('{0}\n'.format(' HH=`expr $DURATION / 3600`'))
            file_id.write('{0}\n'.format(' MM=`expr $DURATION % 3600 / 60`'))
            file_id.write('{0}\n'.format(' SS=`expr $DURATION % 60`'))
            file_id.write('{0}\n'.format(' FORMATTED_DURATION=`printf "%03d:%02d:%02d\\n" $HH $MM $SS`'))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('init'))
            file_id.write('{0}\n'.format('run_fastqc_process'))
            file_id.write('{0}\n'.format('end'))
    except:
        error_list.append('*** ERROR: The file {0} cannot be created.'.format(get_fastqc_process_script()))
        OK = False

    # return the control variable and the error list
    return (OK, error_list)
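# For orientation, the bash script written above has roughly this shape, with one timed
# fastqc invocation per file in the config; the paths and the cluster name below are
# placeholders, not literal output.
#
#   #!/bin/bash
#   FASTQC_PATH=<app_dir>/Miniconda3/envs/<fastqc_env>/bin
#   PATH=$FASTQC_PATH:$PATH
#   source activate <fastqc_env>
#   ...
#   /usr/bin/time \
#   --format="..." \
#   fastqc \
#   <read_file> \
#   --threads=<threads> \
#   --outdir=<current_run_dir>
#   RC=$?
#   if [ $RC -ne 0 ]; then manage_error fastqc $RC; fi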
def run_fastqc_process(cluster_name, log, function=None):
    '''
    Run a FastQC process.
    '''

    # initialize the control variable
    OK = True

    # get the FastQC option dictionary
    fastqc_option_dict = xlib.get_option_dict(get_fastqc_config_file())

    # get the experiment identification
    experiment_id = fastqc_option_dict['identification']['experiment_id']

    # warn that the log window must not be closed
    if not isinstance(log, xlib.DevStdOut):
        log.write('This process might take several minutes. Do not close this window, please wait!\n')

    # validate the FastQC config file
    log.write('{0}\n'.format(xlib.get_separator()))
    log.write('Validating the {0} config file ...\n'.format(xlib.get_fastqc_name()))
    (OK, error_list) = validate_fastqc_config_file(strict=True)
    if OK:
        log.write('The config file is OK.\n')
    else:
        log.write('*** ERROR: The config file is not valid.\n')
        log.write('Please correct this file or recreate the config files.\n')

    # create the SSH client connection
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Connecting the SSH client ...\n')
        (OK, error_list, ssh_client) = xssh.create_ssh_client_connection(cluster_name, 'master')
        if OK:
            log.write('The SSH client is connected.\n')
        else:
            for error in error_list:
                log.write('{0}\n'.format(error))

    # create the SSH transport connection
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Connecting the SSH transport ...\n')
        (OK, error_list, ssh_transport) = xssh.create_ssh_transport_connection(cluster_name, 'master')
        if OK:
            log.write('The SSH transport is connected.\n')
        else:
            for error in error_list:
                log.write('{0}\n'.format(error))

    # create the SFTP client
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Connecting the SFTP client ...\n')
        sftp_client = xssh.create_sftp_client(ssh_transport)
        log.write('The SFTP client is connected.\n')

    # warn that the requirements are being verified
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Verifying process requirements ...\n')

    # verify that the master is running
    if OK:
        (master_state_code, master_state_name) = xec2.get_node_state(cluster_name, 'master')
        if master_state_code != 16:
            log.write('*** ERROR: The cluster {0} is not running. Its state is {1} ({2}).\n'.format(cluster_name, master_state_code, master_state_name))
            OK = False

    # verify that FastQC is set up
    if OK:
        (OK, error_list, is_setup) = xbioinfoapp.is_setup_bioconda_package(xlib.get_fastqc_bioconda_code(), cluster_name, True, ssh_client)
        if OK:
            if not is_setup:
                log.write('*** ERROR: {0} is not set up.\n'.format(xlib.get_fastqc_name()))
                OK = False
        else:
            log.write('*** ERROR: The verification of {0} setup could not be performed.\n'.format(xlib.get_fastqc_name()))

    # warn that the requirements are OK
    if OK:
        log.write('Process requirements are OK.\n')

    # determine the run directory in the cluster
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Determining the run directory in the cluster ...\n')
        current_run_dir = xlib.get_cluster_current_run_dir(experiment_id, xlib.get_fastqc_code())
        command = 'mkdir --parents {0}'.format(current_run_dir)
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            log.write('The directory path is {0}.\n'.format(current_run_dir))
        else:
            log.write('*** ERROR: Wrong command ---> {0}\n'.format(command))

    # build the FastQC process script
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Building the process script {0} ...\n'.format(get_fastqc_process_script()))
        (OK, error_list) = build_fastqc_process_script(cluster_name, current_run_dir)
        if OK:
            log.write('The file is built.\n')
        if not OK:
            log.write('*** ERROR: The file could not be built.\n')

    # upload the FastQC process script to the cluster
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Uploading the process script {0} to the directory {1} of the master ...\n'.format(get_fastqc_process_script(), current_run_dir))
        cluster_path = '{0}/{1}'.format(current_run_dir, os.path.basename(get_fastqc_process_script()))
        (OK, error_list) = xssh.put_file(sftp_client, get_fastqc_process_script(), cluster_path)
        if OK:
            log.write('The file is uploaded.\n')
        else:
            for error in error_list:
                log.write('{0}\n'.format(error))

    # set the run permission of the FastQC process script in the cluster
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Setting the run permission of {0}/{1} ...\n'.format(current_run_dir, os.path.basename(get_fastqc_process_script())))
        command = 'chmod u+x {0}/{1}'.format(current_run_dir, os.path.basename(get_fastqc_process_script()))
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            log.write('The run permission is set.\n')
        else:
            log.write('*** ERROR: Wrong command ---> {0}\n'.format(command))

    # build the FastQC process starter
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Building the process starter {0} ...\n'.format(get_fastqc_process_starter()))
        (OK, error_list) = build_fastqc_process_starter(current_run_dir)
        if OK:
            log.write('The file is built.\n')
        if not OK:
            log.write('*** ERROR: The file could not be built.\n')

    # upload the FastQC process starter to the cluster
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Uploading the process starter {0} to the directory {1} of the master ...\n'.format(get_fastqc_process_starter(), current_run_dir))
        cluster_path = '{0}/{1}'.format(current_run_dir, os.path.basename(get_fastqc_process_starter()))
        (OK, error_list) = xssh.put_file(sftp_client, get_fastqc_process_starter(), cluster_path)
        if OK:
            log.write('The file is uploaded.\n')
        else:
            for error in error_list:
                log.write('{0}\n'.format(error))

    # set the run permission of the FastQC process starter in the cluster
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Setting the run permission of {0}/{1} ...\n'.format(current_run_dir, os.path.basename(get_fastqc_process_starter())))
        command = 'chmod u+x {0}/{1}'.format(current_run_dir, os.path.basename(get_fastqc_process_starter()))
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            log.write('The run permission is set.\n')
        else:
            log.write('*** ERROR: Wrong command ---> {0}\n'.format(command))

    # submit the FastQC process
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Submitting the process script {0}/{1} ...\n'.format(current_run_dir, os.path.basename(get_fastqc_process_starter())))
        sge_env = xcluster.get_sge_env()
        command = '{0}; qsub -V -b n -cwd {1}/{2}'.format(sge_env, current_run_dir, os.path.basename(get_fastqc_process_starter()))
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            for line in stdout:
                log.write('{0}\n'.format(line))
        else:
            log.write('*** ERROR: Wrong command ---> {0}\n'.format(command))

    # close the SSH transport connection
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Closing the SSH transport connection ...\n')
        xssh.close_ssh_transport_connection(ssh_transport)
        log.write('The connection is closed.\n')

    # close the SSH client connection
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Closing the SSH client connection ...\n')
        xssh.close_ssh_client_connection(ssh_client)
        log.write('The connection is closed.\n')

    # warn that the log window can be closed
    if not isinstance(log, xlib.DevStdOut):
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('You can close this window now.\n')

    # execute the final function
    if function is not None:
        function()

    # return the control variable
    return OK
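# Usage sketch, assuming a running cluster named 'test-cluster' (hypothetical); any
# object with a write(str) method can serve as the log argument:
#
#   import sys
#   OK = run_fastqc_process('test-cluster', sys.stdout)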
def get_result_dataset_dict(cluster_name, experiment_id, status, passed_connection, ssh_client):
    '''
    Get a dictionary with the result datasets of an experiment in the cluster.
    '''

    # initialize the control variable and the error list
    OK = True
    error_list = []

    # get the result directory in the cluster
    cluster_result_dir = xlib.get_cluster_result_dir()

    # initialize the dictionary of the result datasets
    result_dataset_dict = {}

    # create the SSH client connection
    if not passed_connection:
        (OK, error_list, ssh_client) = xssh.create_ssh_client_connection(cluster_name, 'master')

    # verify the result directory is created
    if OK:
        command = '[ -d {0} ] && echo RC=0 || echo RC=1'.format(cluster_result_dir)
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if stdout[len(stdout) - 1] != 'RC=0':
            error_list.append('*** ERROR: There is not any volume mounted in the result directory.\n')
            error_list.append('You must link a volume in the mounting point {0} for the template {1}.\n'.format(cluster_result_dir, cluster_name))
            OK = False

    # get the dictionary of the result datasets
    if OK:
        if status == 'uncompressed':
            command = 'cd {0}/{1}; for list in `ls`; do ls -ld $list | grep -v ^- > /dev/null && echo $list; done;'.format(cluster_result_dir, experiment_id)
        elif status == 'compressed':
            command = 'cd {0}/{1}; for list in `ls`; do ls -ld $list | grep -v ^d > /dev/null && echo $list; done;'.format(cluster_result_dir, experiment_id)
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            if status == 'uncompressed':
                input_pattern = '{0}-(.+)-(.+)'
                output_pattern = '{0} ({1} {2})'
            elif status == 'compressed':
                input_pattern = '{0}-(.+)-(.+).tar.gz'
                output_pattern = '{0} ({1} {2}) [compressed]'

            # (code, name) pairs of the applications whose result dataset identifications are recognized, checked in this order
            app_code_name_list = [
                (xlib.get_cd_hit_est_code(), xlib.get_cd_hit_est_name()),
                (xlib.get_fastqc_code(), xlib.get_fastqc_name()),
                (xlib.get_gzip_code(), xlib.get_gzip_name()),
                (xlib.get_insilico_read_normalization_code(), xlib.get_insilico_read_normalization_name()),
                (xlib.get_quast_code(), xlib.get_quast_name()),
                (xlib.get_ref_eval_code(), xlib.get_ref_eval_name()),
                (xlib.get_rnaquast_code(), xlib.get_rnaquast_name()),
                (xlib.get_rsem_eval_code(), xlib.get_rsem_eval_name()),
                (xlib.get_soapdenovotrans_code(), xlib.get_soapdenovotrans_name()),
                (xlib.get_star_code(), xlib.get_star_name()),
                (xlib.get_transabyss_code(), xlib.get_transabyss_name()),
                (xlib.get_transcript_filter_code(), xlib.get_transcript_filter_name()),
                (xlib.get_transcriptome_blastx_code(), xlib.get_transcriptome_blastx_name()),
                (xlib.get_transrate_code(), xlib.get_transrate_name()),
                (xlib.get_trimmomatic_code(), xlib.get_trimmomatic_name()),
                (xlib.get_trinity_code(), xlib.get_trinity_name()),
            ]

            for line in stdout:
                line = line.rstrip('\n')
                if line != 'lost+found':
                    result_dataset_id = line
                    # by default, the name is the identification itself; when the identification
                    # starts with a known application code, rebuild the name from the code, date and time
                    result_dataset_name = result_dataset_id
                    for (app_code, app_name) in app_code_name_list:
                        if result_dataset_id.startswith(app_code + '-'):
                            mo = re.match(input_pattern.format(app_code), result_dataset_id)
                            date = mo.group(1)
                            time = mo.group(2)
                            result_dataset_name = output_pattern.format(app_name, date, time)
                            break
                    result_dataset_dict[result_dataset_id] = {'result_dataset_id': result_dataset_id, 'result_dataset_name': result_dataset_name}

    # close the SSH client connection
    if OK and not passed_connection:
        xssh.close_ssh_client_connection(ssh_client)

    # return the control variable, the error list and the dictionary of the result datasets
    return (OK, error_list, result_dataset_dict)
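# Shape of one entry of the returned dictionary (sketch; the identification
# 'fastqc-200101-120000' is hypothetical, and the name assumes xlib.get_fastqc_code()
# and xlib.get_fastqc_name() return 'fastqc' and 'FastQC'):
#
#   result_dataset_dict['fastqc-200101-120000'] == {
#       'result_dataset_id': 'fastqc-200101-120000',
#       'result_dataset_name': 'FastQC (200101 120000)'
#   }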
def form_list_cluster_experiment_processes():
    '''
    List the processes of an experiment in the cluster.
    '''

    # initialize the control variable
    OK = True

    # print the header
    clib.clear_screen()
    clib.print_headers_with_environment('Logs - List experiment processes in the cluster')

    # get the cluster name
    print(xlib.get_separator())
    if xec2.get_running_cluster_list(volume_creator_included=False) != []:
        cluster_name = cinputs.input_cluster_name(volume_creator_included=False, help=True)
    else:
        print('WARNING: There is not any running cluster.')
        OK = False

    # create the SSH client connection
    if OK:
        (OK, error_list, ssh_client) = xssh.create_ssh_client_connection(cluster_name, 'master')
        for error in error_list:
            print(error)

    # get the experiment identification
    if OK:
        experiment_id = cinputs.input_experiment_id(ssh_client, help=True)
        if experiment_id == '':
            print('WARNING: The cluster {0} does not have experiment data.'.format(cluster_name))
            OK = False

    # get the result dataset list of the experiment
    if OK:
        command = 'cd {0}/{1}; for list in `ls`; do ls -ld $list | grep -v ^- > /dev/null && echo $list; done;'.format(xlib.get_cluster_result_dir(), experiment_id)
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            result_dataset_id_list = []
            for line in stdout:
                line = line.rstrip('\n')
                if line != 'lost+found':
                    result_dataset_id_list.append(line)

    # print the result dataset identification list of the experiment
    if OK:
        print(xlib.get_separator())
        if result_dataset_id_list == []:
            print('*** WARNING: There is not any result dataset of the experiment {0}.'.format(experiment_id))
        else:
            result_dataset_id_list.sort()

            # set data width
            result_dataset_width = 25
            bioinfo_app_width = 25

            # set line template
            line_template = '{0:' + str(result_dataset_width) + '} {1:' + str(bioinfo_app_width) + '}'

            # print header
            print(line_template.format('Result dataset', 'Bioinfo app / Utility'))
            print(line_template.format('=' * result_dataset_width, '=' * bioinfo_app_width))

            # (code, name) pairs of the applications whose result dataset identifications are recognized, checked in this order
            app_code_name_list = [
                (xlib.get_bedtools_code(), xlib.get_bedtools_name()),
                (xlib.get_blastplus_code(), xlib.get_blastplus_name()),
                (xlib.get_bowtie2_code(), xlib.get_bowtie2_name()),
                (xlib.get_busco_code(), xlib.get_busco_name()),
                (xlib.get_cd_hit_code(), xlib.get_cd_hit_name()),
                (xlib.get_cd_hit_est_code(), xlib.get_cd_hit_est_name()),
                (xlib.get_detonate_code(), xlib.get_detonate_name()),
                (xlib.get_emboss_code(), xlib.get_emboss_name()),
                (xlib.get_fastqc_code(), xlib.get_fastqc_name()),
                (xlib.get_gmap_code(), xlib.get_gmap_name()),
                (xlib.get_gmap_gsnap_code(), xlib.get_gmap_gsnap_name()),
                (xlib.get_gzip_code(), xlib.get_gzip_name()),
                (xlib.get_insilico_read_normalization_code(), xlib.get_insilico_read_normalization_name()),
                (xlib.get_miniconda3_code(), xlib.get_miniconda3_name()),
                (xlib.get_ngshelper_code(), xlib.get_ngshelper_name()),
                (xlib.get_quast_code(), xlib.get_quast_name()),
                (xlib.get_r_code(), xlib.get_r_name()),
                (xlib.get_ref_eval_code(), xlib.get_ref_eval_name()),
                (xlib.get_rnaquast_code(), xlib.get_rnaquast_name()),
                (xlib.get_rsem_code(), xlib.get_rsem_name()),
                (xlib.get_rsem_eval_code(), xlib.get_rsem_eval_name()),
                (xlib.get_samtools_code(), xlib.get_samtools_name()),
                (xlib.get_soapdenovotrans_code(), xlib.get_soapdenovotrans_name()),
                (xlib.get_star_code(), xlib.get_star_name()),
                (xlib.get_transabyss_code(), xlib.get_transabyss_name()),
                (xlib.get_transcript_filter_code(), xlib.get_transcript_filter_name()),
                (xlib.get_transcriptome_blastx_code(), xlib.get_transcriptome_blastx_name()),
                (xlib.get_transrate_code(), xlib.get_transrate_name()),
                (xlib.get_trimmomatic_code(), xlib.get_trimmomatic_name()),
                (xlib.get_trinity_code(), xlib.get_trinity_name()),
            ]

            # print detail lines
            for result_dataset_id in result_dataset_id_list:
                bioinfo_app_name = 'xxx'
                for (app_code, app_name) in app_code_name_list:
                    if result_dataset_id.startswith(app_code + '-'):
                        bioinfo_app_name = app_name
                        break
                print(line_template.format(result_dataset_id, bioinfo_app_name))

    # close the SSH client connection
    if OK:
        xssh.close_ssh_client_connection(ssh_client)

    # show continuation message
    print(xlib.get_separator())
    input('Press [Intro] to continue ...')
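# Example of the listing printed above (sketch; the dataset identifications are
# hypothetical and the application names assume the values returned by xlib):
#
#   Result dataset            Bioinfo app / Utility
#   ========================= =========================
#   fastqc-200101-120000      FastQC
#   trimmomatic-200102-093000 Trimmomatic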
def execute(self, event=None):
    '''
    Execute the listing of the result logs in the cluster.
    '''

    # validate the inputs
    OK = self.validate_inputs()
    if not OK:
        message = 'Some input values are not OK.'
        tkinter.messagebox.showerror('{0} - {1}'.format(xlib.get_project_name(), self.head), message)

    # get the run dictionary of the experiment
    if OK:
        # -- command = 'ls {0}/{1}'.format(xlib.get_cluster_result_dir(), self.wrapper_experiment_id.get())
        command = 'cd {0}/{1}; for list in `ls`; do ls -ld $list | grep -v ^- > /dev/null && echo $list; done;'.format(xlib.get_cluster_result_dir(), self.wrapper_experiment_id.get())
        (OK, stdout, stderr) = xssh.execute_cluster_command(self.ssh_client, command)
        if OK:
            result_dataset_dict = {}

            # (code, name) pairs of the applications whose result dataset identifications are recognized, checked in this order
            app_code_name_list = [
                (xlib.get_bedtools_code(), xlib.get_bedtools_name()),
                (xlib.get_blastplus_code(), xlib.get_blastplus_name()),
                (xlib.get_bowtie2_code(), xlib.get_bowtie2_name()),
                (xlib.get_busco_code(), xlib.get_busco_name()),
                (xlib.get_cd_hit_code(), xlib.get_cd_hit_name()),
                (xlib.get_cd_hit_est_code(), xlib.get_cd_hit_est_name()),
                (xlib.get_detonate_code(), xlib.get_detonate_name()),
                (xlib.get_emboss_code(), xlib.get_emboss_name()),
                (xlib.get_fastqc_code(), xlib.get_fastqc_name()),
                (xlib.get_gmap_code(), xlib.get_gmap_name()),
                (xlib.get_gmap_gsnap_code(), xlib.get_gmap_gsnap_name()),
                (xlib.get_gzip_code(), xlib.get_gzip_name()),
                (xlib.get_insilico_read_normalization_code(), xlib.get_insilico_read_normalization_name()),
                (xlib.get_miniconda3_code(), xlib.get_miniconda3_name()),
                (xlib.get_ngshelper_code(), xlib.get_ngshelper_name()),
                (xlib.get_quast_code(), xlib.get_quast_name()),
                (xlib.get_r_code(), xlib.get_r_name()),
                (xlib.get_ref_eval_code(), xlib.get_ref_eval_name()),
                (xlib.get_rnaquast_code(), xlib.get_rnaquast_name()),
                (xlib.get_rsem_code(), xlib.get_rsem_name()),
                (xlib.get_rsem_eval_code(), xlib.get_rsem_eval_name()),
                (xlib.get_samtools_code(), xlib.get_samtools_name()),
                (xlib.get_soapdenovotrans_code(), xlib.get_soapdenovotrans_name()),
                (xlib.get_star_code(), xlib.get_star_name()),
                (xlib.get_transabyss_code(), xlib.get_transabyss_name()),
                (xlib.get_transcript_filter_code(), xlib.get_transcript_filter_name()),
                (xlib.get_transcriptome_blastx_code(), xlib.get_transcriptome_blastx_name()),
                (xlib.get_transrate_code(), xlib.get_transrate_name()),
                (xlib.get_trimmomatic_code(), xlib.get_trimmomatic_name()),
                (xlib.get_trinity_code(), xlib.get_trinity_name()),
            ]

            for line in stdout:
                line = line.rstrip('\n')
                if line != 'lost+found':
                    result_dataset_id = line
                    try:
                        pattern = r'^(.+)\-(.+)\-(.+)$'
                        mo = re.search(pattern, result_dataset_id)
                        bioinfo_app_code = mo.group(1).strip()
                        yymmdd = mo.group(2)
                        hhmmss = mo.group(3)
                        date = '20{0}-{1}-{2}'.format(yymmdd[:2], yymmdd[2:4], yymmdd[4:])
                        time = '{0}:{1}:{2}'.format(hhmmss[:2], hhmmss[2:4], hhmmss[4:])
                    except:
                        bioinfo_app_code = 'xxx'
                        date = '0000-00-00'
                        time = '00:00:00'
                    bioinfo_app_name = 'xxx'
                    for (app_code, app_name) in app_code_name_list:
                        if result_dataset_id.startswith(app_code + '-'):
                            bioinfo_app_name = app_name
                            break
                    result_dataset_dict[result_dataset_id] = {'experiment_id': self.wrapper_experiment_id.get(), 'result_dataset_id': result_dataset_id, 'bioinfo_app': bioinfo_app_name, 'date': date, 'time': time}

    # verify if there is any result dataset
    if OK:
        if result_dataset_dict == {}:
            message = 'There is not any run.'
            tkinter.messagebox.showwarning('{0} - {1}'.format(xlib.get_project_name(), self.head), message)

    # build the data list
    if OK:
        data_list = ['experiment_id', 'result_dataset_id', 'bioinfo_app', 'date', 'time']

    # build the data dictionary
    if OK:
        data_dict = {}
        data_dict['experiment_id'] = {'text': 'Experiment id. / Process', 'width': 200, 'aligment': 'left'}
        data_dict['result_dataset_id'] = {'text': 'Result dataset', 'width': 200, 'aligment': 'left'}
        data_dict['bioinfo_app'] = {'text': 'Bioinfo app / Utility', 'width': 200, 'aligment': 'left'}
        data_dict['date'] = {'text': 'Date', 'width': 80, 'aligment': 'right'}
        data_dict['time'] = {'text': 'Time', 'width': 80, 'aligment': 'right'}

    # create the dialog Table to show the result datasets
    if OK:
        dialog_table = gdialogs.DialogTable(self, 'Experiment runs in {0}/{1}'.format(xlib.get_cluster_result_dir(), self.wrapper_experiment_id.get()), 400, 900, data_list, data_dict, result_dataset_dict, 'view_result_logs', [self.wrapper_cluster_name.get()])
        self.wait_window(dialog_table)

    # close the form
    if OK:
        self.close()
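# Sketch of how one result dataset identification is decomposed above (the identification
# 'fastqc-200513-104236' is hypothetical and assumes the FastQC code returned by
# xlib.get_fastqc_code() is 'fastqc'):
#
#   result_dataset_id = 'fastqc-200513-104236'
#   bioinfo_app_code  = 'fastqc'
#   date              = '2020-05-13'
#   time              = '10:42:36'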
def form_list_cluster_experiment_processes():
    '''
    List the processes of an experiment in the cluster.
    '''

    # initialize the control variable
    OK = True

    # print the header
    clib.clear_screen()
    clib.print_headers_with_environment('Logs - List experiment processes in the cluster')

    # get the cluster name
    print(xlib.get_separator())
    if xec2.get_running_cluster_list(only_environment_cluster=True, volume_creator_included=False) != []:
        cluster_name = cinputs.input_cluster_name(volume_creator_included=False, help=True)
    else:
        print('WARNING: There is not any running cluster.')
        OK = False

    # create the SSH client connection
    if OK:
        (OK, error_list, ssh_client) = xssh.create_ssh_client_connection(cluster_name)
        for error in error_list:
            print(error)

    # get the experiment identification
    if OK:
        experiment_id = cinputs.input_experiment_id(ssh_client, help=True)
        if experiment_id == '':
            print(f'WARNING: The cluster {cluster_name} does not have experiment data.')
            OK = False

    # get the result dataset list of the experiment
    if OK:
        command = f'cd {xlib.get_cluster_result_dir()}/{experiment_id}; for list in `ls`; do ls -ld $list | grep -v ^- > /dev/null && echo $list; done;'
        (OK, stdout, _) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            result_dataset_id_list = []
            for line in stdout:
                line = line.rstrip('\n')
                if line != 'lost+found':
                    result_dataset_id_list.append(line)

    # print the result dataset identification list of the experiment
    if OK:
        print(xlib.get_separator())
        if result_dataset_id_list == []:
            print(f'*** WARNING: There is not any result dataset of the experiment {experiment_id}.')
        else:
            result_dataset_id_list.sort()

            # set data width
            result_dataset_width = 30
            bioinfo_app_width = 25

            # set line
            line = '{0:' + str(result_dataset_width) + '} {1:' + str(bioinfo_app_width) + '}'

            # print header
            print(line.format('Result dataset', 'Bioinfo app / Utility'))
            print(line.format('=' * result_dataset_width, '=' * bioinfo_app_width))

            # (code, name) pairs of the applications whose result dataset identifications are recognized, checked in this order
            app_code_name_list = [
                (xlib.get_bedtools_code(), xlib.get_bedtools_name()),
                (xlib.get_blastplus_code(), xlib.get_blastplus_name()),
                (xlib.get_bcftools_code(), xlib.get_bcftools_name()),
                (xlib.get_bowtie2_code(), xlib.get_bowtie2_name()),
                (xlib.get_busco_code(), xlib.get_busco_name()),
                (xlib.get_cd_hit_code(), xlib.get_cd_hit_name()),
                (xlib.get_cd_hit_est_code(), xlib.get_cd_hit_est_name()),
                (xlib.get_cuffdiff_code(), xlib.get_cuffdiff_name()),
                (xlib.get_cufflinks_code(), xlib.get_cufflinks_name()),
                (xlib.get_cufflinks_cuffmerge_code(), xlib.get_cufflinks_cuffmerge_name()),
                (xlib.get_cuffnorm_code(), xlib.get_cuffnorm_name()),
                (xlib.get_cuffquant_code(), xlib.get_cuffquant_name()),
                (xlib.get_cutadapt_code(), xlib.get_cutadapt_name()),
                (xlib.get_ddradseq_simulation_code(), xlib.get_ddradseq_simulation_name()),
                (xlib.get_ddradseqtools_code(), xlib.get_ddradseqtools_name()),
                (xlib.get_detonate_code(), xlib.get_detonate_name()),
                (xlib.get_diamond_code(), xlib.get_diamond_name()),
                (xlib.get_emboss_code(), xlib.get_emboss_name()),
                (xlib.get_entrez_direct_code(), xlib.get_entrez_direct_name()),
                (xlib.get_express_code(), xlib.get_express_name()),
                (xlib.get_fastqc_code(), xlib.get_fastqc_name()),
                (xlib.get_ggtrinity_code(), xlib.get_ggtrinity_name()),
                (xlib.get_gmap_gsnap_code(), xlib.get_gmap_gsnap_name()),
                (xlib.get_gmap_code(), xlib.get_gmap_name()),
                (xlib.get_gsnap_code(), xlib.get_gsnap_name()),
                (xlib.get_gzip_code(), xlib.get_gzip_name()),
                (xlib.get_hisat2_code(), xlib.get_hisat2_name()),
                (xlib.get_htseq_code(), xlib.get_htseq_name()),
                (xlib.get_htseq_count_code(), xlib.get_htseq_count_name()),
                (xlib.get_insilico_read_normalization_code(), xlib.get_insilico_read_normalization_name()),
                (xlib.get_ipyrad_code(), xlib.get_ipyrad_name()),
                (xlib.get_kallisto_code(), xlib.get_kallisto_name()),
                (xlib.get_miniconda3_code(), xlib.get_miniconda3_name()),
                (xlib.get_ngshelper_code(), xlib.get_ngshelper_name()),
                (xlib.get_quast_code(), xlib.get_quast_name()),
                (xlib.get_r_code(), xlib.get_r_name()),
                (xlib.get_raddesigner_code(), xlib.get_raddesigner_name()),
                (xlib.get_ref_eval_code(), xlib.get_ref_eval_name()),
                (xlib.get_rnaquast_code(), xlib.get_rnaquast_name()),
                (xlib.get_rsem_code(), xlib.get_rsem_name()),
                (xlib.get_rsem_eval_code(), xlib.get_rsem_eval_name()),
                (xlib.get_rsitesearch_code(), xlib.get_rsitesearch_name()),
                (xlib.get_samtools_code(), xlib.get_samtools_name()),
                (xlib.get_soapdenovo2_code(), xlib.get_soapdenovo2_name()),
                (xlib.get_soapdenovotrans_code(), xlib.get_soapdenovotrans_name()),
                (xlib.get_star_code(), xlib.get_star_name()),
                (xlib.get_starcode_code(), xlib.get_starcode_name()),
                (xlib.get_toa_code(), xlib.get_toa_name()),
                (xlib.get_toa_process_download_basic_data_code(), xlib.get_toa_process_download_basic_data_name()),
                (xlib.get_toa_process_download_dicots_04_code(), xlib.get_toa_process_download_dicots_04_name()),
                (xlib.get_toa_process_download_gene_code(), xlib.get_toa_process_download_gene_name()),
                (xlib.get_toa_process_download_go_code(), xlib.get_toa_process_download_go_name()),
                (xlib.get_toa_process_download_gymno_01_code(), xlib.get_toa_process_download_gymno_01_name()),
                (xlib.get_toa_process_download_interpro_code(), xlib.get_toa_process_download_interpro_name()),
                (xlib.get_toa_process_download_monocots_04_code(), xlib.get_toa_process_download_monocots_04_name()),
                (xlib.get_toa_process_download_taxonomy_code(), xlib.get_toa_process_download_taxonomy_name()),
                (xlib.get_toa_process_gilist_viridiplantae_nucleotide_gi_code(), xlib.get_toa_process_gilist_viridiplantae_nucleotide_gi_name()),
                (xlib.get_toa_process_gilist_viridiplantae_protein_gi_code(), xlib.get_toa_process_gilist_viridiplantae_protein_gi_name()),
                (xlib.get_toa_process_load_basic_data_code(), xlib.get_toa_process_load_basic_data_name()),
                (xlib.get_toa_process_load_dicots_04_code(), xlib.get_toa_process_load_dicots_04_name()),
                (xlib.get_toa_process_load_gene_code(), xlib.get_toa_process_load_gene_name()),
                (xlib.get_toa_process_load_go_code(), xlib.get_toa_process_load_go_name()),
                (xlib.get_toa_process_load_gymno_01_code(), xlib.get_toa_process_load_gymno_01_name()),
                (xlib.get_toa_process_load_interpro_code(), xlib.get_toa_process_load_interpro_name()),
                (xlib.get_toa_process_load_monocots_04_code(), xlib.get_toa_process_load_monocots_04_name()),
                (xlib.get_toa_process_merge_annotations_code(), xlib.get_toa_process_merge_annotations_name()),
                (xlib.get_toa_process_nr_blastplus_db_code(), xlib.get_toa_process_nr_blastplus_db_name()),
                (xlib.get_toa_process_nr_diamond_db_code(), xlib.get_toa_process_nr_diamond_db_name()),
                (xlib.get_toa_process_nt_blastplus_db_code(), xlib.get_toa_process_nt_blastplus_db_name()),
                (xlib.get_toa_process_pipeline_aminoacid_code(), xlib.get_toa_process_pipeline_aminoacid_name()),
                (xlib.get_toa_process_pipeline_nucleotide_code(), xlib.get_toa_process_pipeline_nucleotide_name()),
                (xlib.get_toa_process_proteome_dicots_04_code(), xlib.get_toa_process_proteome_dicots_04_name()),
                (xlib.get_toa_process_proteome_gymno_01_code(), xlib.get_toa_process_proteome_gymno_01_name()),
                (xlib.get_toa_process_proteome_monocots_04_code(), xlib.get_toa_process_proteome_monocots_04_name()),
                (xlib.get_toa_process_proteome_refseq_plant_code(), xlib.get_toa_process_proteome_refseq_plant_name()),
                (xlib.get_toa_process_rebuild_toa_database_code(), xlib.get_get_toa_process_rebuild_toa_database_name()),
                (xlib.get_toa_process_recreate_toa_database_code(), xlib.get_get_toa_process_recreate_toa_database_name()),
                (xlib.get_tophat_code(), xlib.get_tophat_name()),
                (xlib.get_transabyss_code(), xlib.get_transabyss_name()),
                (xlib.get_transcript_filter_code(), xlib.get_transcript_filter_name()),
                (xlib.get_transcriptome_blastx_code(), xlib.get_transcriptome_blastx_name()),
                (xlib.get_transdecoder_code(), xlib.get_transdecoder_name()),
                (xlib.get_transrate_code(), xlib.get_transrate_name()),
                (xlib.get_trimmomatic_code(), xlib.get_trimmomatic_name()),
                (xlib.get_trinity_code(), xlib.get_trinity_name()),
                (xlib.get_variant_calling_code(), xlib.get_variant_calling_name()),
                (xlib.get_vcftools_code(), xlib.get_vcftools_name()),
                (xlib.get_vcftools_perl_libraries_code(), xlib.get_vcftools_perl_libraries_name()),
                (xlib.get_vsearch_code(), xlib.get_vsearch_name()),
            ]

            # print detail lines
            for result_dataset_id in result_dataset_id_list:
                bioinfo_app_name = 'xxx'
                for (app_code, app_name) in app_code_name_list:
                    if result_dataset_id.startswith(app_code + '-'):
                        bioinfo_app_name = app_name
                        break
                print(line.format(result_dataset_id, bioinfo_app_name))

    # close the SSH client connection
    if OK:
        xssh.close_ssh_client_connection(ssh_client)

    # show continuation message
    print(xlib.get_separator())
    input('Press [Intro] to continue ...')
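# Example of the prefix matching used above (sketch; it assumes xlib.get_fastqc_code()
# returns 'fastqc' and xlib.get_fastqc_name() returns 'FastQC'):
#
#   'fastqc-200513-104236'   -> printed with the name 'FastQC'
#   'unknown-200513-104236'  -> printed with the placeholder name 'xxx'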