def load(self, host="localhost", port=6499):
    """Create all tuner pages and add each one as a tab.

    Args:
        host: hostname of the property/telemetry server each page connects to.
        port: server port (default 6499).
    """
    # print(x) with a single argument behaves identically under Python 2 and 3.
    print("Tuner.load " + str(port))
    # (attribute name, page class, tab label) — one entry per page, in tab
    # order.  Replaces six copy-pasted stanzas with one loop.
    pages = [
        ("calibrate", Calibrate, "Calibrate"),
        ("preflight", Preflight, "Preflight"),
        ("launch", Launch, "Launch"),
        ("circle", Circle, "Circle"),
        ("chirp", Chirp, "Chirp"),
        ("land", Land, "Land"),
    ]
    for attr, page_class, label in pages:
        page = page_class(changefunc=self.onChange, host=host, port=port)
        setattr(self, attr, page)  # keep self.calibrate, self.launch, ... for callers
        self.tabs.addTab(page.get_widget(), label)
def launches(update, context):
    """Telegram handler: reply with a formatted list of the next 5 launches.

    Fetches upcoming launches from rocketlaunch.live and sends one
    HTML-formatted message to the chat that triggered the command.

    Args:
        update: incoming telegram update (used for the chat id).
        context: handler context carrying the bot instance.
    """
    # BUG FIX: removed a stray dead `pass` that preceded the body.
    response = requests.get('https://fdo.rocketlaunch.live/json/launches/next/5')
    n_results = 5
    api_json = json.loads(response.text)['result']
    txts = []
    livestream = ""
    for i in range(n_results):
        entry = api_json[i]
        location = entry['pad']['location']
        padlocation = str(location['statename']) + ', ' + str(location['country'])
        print(str(location['statename']))
        if str(location['statename']) == "None":
            # No state/province known — show country only.
            padlocation = str(location['country'])
        print(padlocation)
        launch_date = str(entry['win_open'])
        if launch_date == "None":
            # No confirmed launch window: build MM-DD-YYYY from the estimate.
            # zfill(2) replaces the manual "prefix '0' when < 10" logic.
            if str(entry['est_date']['month']) != "None":
                month = str(entry['est_date']['month']).zfill(2)
                if str(entry['est_date']['day']) != "None":
                    day = str(entry['est_date']['day']).zfill(2)
                else:
                    day = "??"
                launch_date = month + '-' + day + '-' + str(entry['est_date']['year'])
        launch_item = Launch(str(entry['name']),
                             str(entry['provider']['name']),
                             str(entry['vehicle']['name']),
                             str(entry['pad']['location']['name']),
                             padlocation,
                             'nodescforlist',
                             launch_date,
                             livestream)
        txts.append(rocket + ' /' + str(i + 1) + ' - <i>' + str(entry['name'])
                    + '</i> ' + rocket + '\n' + launch_item.getFormattedText())
    # join() instead of repeated += (avoids quadratic concatenation).
    endtext = "".join(txts)
    context.bot.send_message(chat_id=update.effective_chat.id,
                             text=endtext,
                             disable_web_page_preview=True,
                             parse_mode=ParseMode.HTML)
def get_args(self):
    '''Parse the input arguments.

    Extends the generic Launch arguments with DNase-specific options
    (--read_length, --umi) and returns the parsed namespace.
    '''
    ap = Launch.get_args(self, parse=False)
    # BUG FIX: argparse applies type= BEFORE the choices/default check, so the
    # int-converted value could never match the old string choices (and the
    # string default '100' bypassed validation).  Use int choices/default.
    ap.add_argument('-rl', '--read_length',
                    help='The length of reads.',
                    type=int,
                    choices=[32, 36, 40, 50, 58, 72, 76, 100],
                    default=100,
                    required=False)
    ap.add_argument('--umi',
                    help='Treat fastqs as having UMI flags embedded.',
                    action='store_true',
                    required=False)
    # NOTE: Could override get_args() to have this non-generic control message
    #ap.add_argument('-c', '--control',
    #                help='The control bam for peak calling.',
    #                required=False)
    return ap.parse_args()
def pipeline_specific_vars(self, args, verbose=False): '''Adds pipeline specific variables to a dict, for use building the workflow.''' #args.pe = True # This is necessary to ensure templating does what it must. psv = Launch.pipeline_specific_vars(self, args) # Could be multiple annotations supported per genome psv['annotation'] = args.annotation if psv['genome'] != self.GENOME_DEFAULT and psv[ 'annotation'] == self.ANNO_DEFAULT: psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']] if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]: print psv['genome'] + " has no " + psv[ 'annotation'] + " annotation." sys.exit(1) # Some specific settings psv['assay_type'] = "rampage" if self.exp["assay_term_name"] == "CAGE": psv['assay_type'] = "cage" psv['nthreads'] = 8 if not self.template: psv['control'] = args.control if psv['paired_end'] and psv['assay_type'] == "cage": print "ERROR: CAGE is always expected to be single-end but mapping says otherwise." sys.exit(1) elif not psv['paired_end'] and psv['assay_type'] == "rampage": print "Rampage is always expected to be paired-end but mapping says otherwise." sys.exit(1) if not psv["stranded"]: print "Detected unstranded library" # run will either be for combined or single rep. 
if not self.combined_reps: run = psv['reps'][ 'a'] # If not combined then run will be for the first (only) replicate else: run = psv # If annotation is not default, then add it to title if psv['annotation'] != self.ANNO_DEFAULTS[psv['genome']]: psv['title'] += ', ' + psv['annotation'] psv['name'] += '_' + psv['annotation'] if self.exp["assay_term_name"] == "CAGE": psv['name'] = psv['assay_type'] + psv['name'][4:] psv['title'] = "CAGE" + psv['title'][7:] # Must override results location because of annotation psv['resultsLoc'] = self.umbrella_folder(args.folder,self.FOLDER_DEFAULT,self.proj_name,psv['exp_type'], \ psv['genome'],psv['annotation']) psv['resultsFolder'] = psv['resultsLoc'] if not self.template: psv['resultsFolder'] += psv['experiment'] + '/' self.update_rep_result_folders(psv) if verbose: print "Pipeline Specific Vars:" print json.dumps(psv, indent=4) return psv
def pipeline_specific_vars(self, args, verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    DNase flavor: sets mapping/sampling knobs, derives pe/se from the
    replicates, honors --umi, and enables trimming for Crawford-lab
    single-end fastqs.  Returns the psv dict.
    '''
    psv = Launch.pipeline_specific_vars(self, args)
    # Some specific settings
    psv['nthreads'] = 8
    psv['map_thresh'] = 10
    psv['sample_size'] = 15000000
    psv['read_length'] = args.read_length
    psv['pe_or_se'] = "pe"
    for ltr in sorted(psv['reps'].keys()):
        rep = psv['reps'][ltr]
        # Any single-ended replicate forces the whole run to "se".
        if not rep['paired_end']:
            psv['pe_or_se'] = "se"
        # An "undetected" barcode carries no information — drop it.
        if rep['paired_end'] and 'barcode' in rep and rep['barcode'] == "undetected":
            del rep['barcode']
    if args.umi:
        psv['umi'] = "yes"
        psv['upper_limit'] = 0
    # Crawford fastqs require trimming
    psv["trim_len"] = 0
    if not self.template and not psv['paired_end'] and "crawford" in psv['lab']:
        print "Detected that fastqs will be trimmed to 20"
        psv["trim_len"] = 20
    self.multi_rep = True  # For DNase, a single tech_rep moves on to merge/filter.
    self.combined_reps = True
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv, indent=4)
    return psv
def load(self, host="localhost", port=6499):
    """Create all tuner pages and add each one as a tab.

    Args:
        host: hostname of the property/telemetry server each page connects to.
        port: server port (default 6499).
    """
    # print(x) with a single argument behaves identically under Python 2 and 3.
    print("Tuner.load " + str(port))
    # (attribute name, page class, tab label) — one entry per page, in tab
    # order.  Replaces six copy-pasted stanzas with one loop.
    pages = [
        ("recalibrate", Recalibrate, "Recalibrate"),
        ("preflight", Preflight, "Preflight"),
        ("launch", Launch, "Launch"),
        ("circle", Circle, "Circle"),
        ("chirp", Chirp, "Chirp"),
        ("land", Land, "Land"),
    ]
    for attr, page_class, label in pages:
        page = page_class(changefunc=self.onChange, host=host, port=port)
        setattr(self, attr, page)  # keep self.recalibrate, self.launch, ... for callers
        self.tabs.addTab(page.get_widget(), label)
def __init__(self, root):
    """Parse the package rooted at *root*.

    Loads the manifest (package.xml) and CMakeLists, walks the package
    structure, and collects source code, launch files, plugin configs,
    setup.py, generators, dynamic-reconfigure cfgs and misc files.

    Args:
        root: filesystem path to the package root directory.
    """
    self.root = root
    self.manifest = PackageXML(self.root + '/package.xml')
    self.name = self.manifest.name
    self.cmake = parse_file(self.root + '/CMakeLists.txt')
    package_structure = get_package_structure(root)
    self.source_code = SourceCode(package_structure['source'], self.name)
    self.source_code.setup_tags(self.cmake)
    # Comprehensions instead of append-loops — same iteration, clearer intent.
    self.launches = [Launch(rel_fn, file_path)
                     for rel_fn, file_path in package_structure['launch'].iteritems()]
    self.plugin_configs = [PluginXML(rel_fn, file_path)
                           for rel_fn, file_path in package_structure['plugin_config'].iteritems()]
    self.setup_py = None
    if 'setup.py' in package_structure['key']:
        self.setup_py = SetupPy(self.name, package_structure['key']['setup.py'])
    # Group generators by their type attribute.
    self.generators = collections.defaultdict(list)
    for rel_fn, path in package_structure['generators'].iteritems():
        gen = ROSGenerator(rel_fn, path)
        self.generators[gen.type].append(gen)
    self.dynamic_reconfigs = package_structure['cfg'].keys()
    self.misc_files = package_structure[None].keys()
def pipeline_specific_vars(self,args,verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    DNase flavor: sets mapping/sampling knobs, derives pe/se from the
    replicates, honors --umi, and enables trimming for Crawford-lab
    single-end fastqs.  Returns the psv dict.
    '''
    psv = Launch.pipeline_specific_vars(self,args)
    # Some specific settings
    psv['nthreads'] = 8
    psv['map_thresh'] = 10
    psv['sample_size'] = 15000000
    psv['read_length'] = args.read_length
    psv['pe_or_se'] = "pe"
    for ltr in sorted( psv['reps'].keys() ):
        rep = psv['reps'][ltr]
        # Any single-ended replicate forces the whole run to "se".
        if not rep['paired_end']:
            psv['pe_or_se'] = "se"
        # An "undetected" barcode carries no information — drop it.
        if rep['paired_end'] and 'barcode' in rep and rep['barcode'] == "undetected":
            del rep['barcode']
    if args.umi:
        psv['umi'] = "yes"
        psv['upper_limit'] = 0
    # Crawford fastqs require trimming
    psv["trim_len"] = 0
    if not self.template and not psv['paired_end'] and "crawford" in psv['lab']:
        print "Detected that fastqs will be trimmed to 20"
        psv["trim_len"] = 20
    self.multi_rep = True  # For DNase, a single tech_rep moves on to merge/filter.
    self.combined_reps = True
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv,indent=4)
    return psv
def pipeline_specific_vars(self,args,verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    Long-RNA flavor: resolves the annotation for the genome, fixes thread
    count and random seed, optionally keeps the TopHat steps, and points
    results at an annotation-specific folder.  Exits on invalid annotation.
    Returns the psv dict.
    '''
    psv = Launch.pipeline_specific_vars(self,args)
    # Could be multiple annotations supported per genome
    psv['annotation'] = args.annotation
    # Non-default genomes fall back to their own default annotation.
    if psv['genome'] != self.GENOME_DEFAULT and psv['annotation'] == self.ANNO_DEFAULT:
        psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']]
    if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]:
        print psv['genome']+" has no "+psv['annotation']+" annotation."
        sys.exit(1)
    # Some specific settings
    psv['nthreads'] = 8
    psv['rnd_seed'] = 12345
    # If annotation is not default, then add it to title
    if psv['annotation'] != self.ANNO_DEFAULTS[psv['genome']]:
        psv['title'] += ', ' + psv['annotation']
        psv['name'] += '_' + psv['annotation']
    self.no_tophat = args.no_tophat
    if not self.no_tophat:
        # Keeping TopHat means no steps get pruned from the pipeline.
        self.PRUNE_STEPS = []
    # Must override results location because of annotation
    psv['resultsLoc'] = dxencode.umbrella_folder(args.folder,self.FOLDER_DEFAULT,self.proj_name,psv['exp_type'], \
                                                 psv['genome'],psv['annotation'])
    psv['resultsFolder'] = psv['resultsLoc'] + psv['experiment'] + '/'
    self.update_rep_result_folders(psv)
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv,indent=4)
    return psv
def test20_execute_feedback(self):
    """execute() should report success for a progressing task writing feedback."""
    self.ctl = Control(db='sqlite.db')
    self.ctl.initialize()
    self.ctl.reserve()
    # Collect the launcher options in one place before constructing it.
    launch_opts = dict(db='sqlite.db', feedpath='/tmp', taskid=1,
                       info1='INFO1', info2='INFO2', info3='INFO3',
                       timeout=10, debug=True)
    self.lnc = Launch(**launch_opts)
    command = ("tests/testprog.py --scenario progressing "
               "--feedback /tmp/feedback_1.log --delay 0.002 --debug")
    result = self.lnc.execute(command=command)
    print("test20 result={}".format(result))
    self.assertTrue(result)
def pipeline_specific_vars(self, args, verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    Long-RNA flavor: resolves the annotation, sets thread count and random
    seed, derives the library read_strand orientation, optionally keeps
    TopHat steps, and points results at an annotation-specific folder.
    Exits on invalid annotation.  Returns the psv dict.
    '''
    psv = Launch.pipeline_specific_vars(self, args)
    # Could be multiple annotations supported per genome
    psv['annotation'] = args.annotation
    # Non-default genomes fall back to their own default annotation.
    if psv['genome'] != self.GENOME_DEFAULT and psv['annotation'] == self.ANNO_DEFAULT:
        psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']]
    if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]:
        print psv['genome'] + " has no " + psv['annotation'] + " annotation."
        sys.exit(1)
    # Some specific settings
    psv['nthreads'] = 8
    psv['rnd_seed'] = 12345
    # If paired-end then read_strand might vary TruSeq or ScriptSeq, but only for quant-rsem
    psv["read_strand"] = "unstranded"  # SE experiments are all unstranded
    if psv["paired_end"]:
        psv["read_strand"] = "reverse"  # Usual ENCODE LRNA experiments are rd1-/rd2+ (AKA reverse)
        if not psv["stranded"]:
            psv["read_strand"] = "unstranded"
            print "Detected unstranded library"
        elif psv.get('ScriptSeq', False):
            # file.replicate.library.document contains "/documents/F17c31e10-1542-42c6-8b4c-3afff95564cf%2F"
            psv["read_strand"] = "ScriptSeq"  # "ScriptSeq" experiments are rd1+/rd2- (AKA forward)
            print "Detected ScriptSeq"
    else:  # SE
        if psv["stranded"]:
            psv["read_strand"] = psv.get("strand_direction", "unstranded")
        else:
            psv["read_strand"] = "unstranded"
    if psv["stranded"]:
        print "Strand orientation is '%s'" % (psv["read_strand"])
    # print "Detected special cases"
    # If annotation is not default, then add it to title
    if psv['annotation'] != self.ANNO_DEFAULTS[psv['genome']]:
        psv['title'] += ', ' + psv['annotation']
        psv['name'] += '_' + psv['annotation']
    self.no_tophat = True
    if args.tophat_also:
        self.no_tophat = False
        self.PRUNE_STEPS = []  # This blocks pruning... keeping tophat
    # Must override results location because of annotation
    psv['resultsLoc'] = self.umbrella_folder(args.folder, self.FOLDER_DEFAULT, self.proj_name,
                                             psv['exp_type'], psv['genome'], psv['annotation'])
    psv['resultsFolder'] = psv['resultsLoc']
    if not self.template:
        psv['resultsFolder'] += psv['experiment'] + '/'
    self.update_rep_result_folders(psv)
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv, indent=4)
    return psv
def pipeline_specific_vars(self, args, verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    Small-RNA flavor: resolves the annotation, enforces single-end mapping,
    picks a per-replicate clipping model, and points results at an
    annotation-specific folder.  Exits on invalid annotation or paired-end
    data.  Returns the psv dict.
    '''
    psv = Launch.pipeline_specific_vars(self, args)
    # Now add pipline specific variables and tests
    # Could be multiple annotations supported per genome
    psv['annotation'] = args.annotation
    # Non-default genomes fall back to their own default annotation.
    if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']] if False else (psv['genome'] != self.GENOME_DEFAULT and psv['annotation'] == self.ANNO_DEFAULT):
        psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']]
    if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]:
        print psv['genome'] + " has no " + psv['annotation'] + " annotation."
        sys.exit(1)
    # Paired ends?
    if psv['paired_end']:
        print "Small-RNA is always expected to be single-end but mapping says otherwise."
        #print json.dumps(psv,indent=4,sort_keys=True)
        sys.exit(1)
    # Some specific settings
    psv['nthreads'] = 8
    # By replicate:
    for ltr in psv['reps'].keys():
        if len(ltr) != 1:  # only simple reps
            continue
        rep = psv['reps'][ltr]
        rep["clipping_model"] = "ENCODE3"  # Default
        if "a_tailing" in rep:
            rep["clipping_model"] = "A_Tailing_" + rep["a_tailing"]
        print "%s detected for %s" % (rep["clipping_model"], rep["rep_tech"])
    # If annotation is not default, then add it to title
    if psv['annotation'] != self.ANNO_DEFAULTS[psv['genome']]:
        psv['title'] += ', ' + psv['annotation']
        psv['name'] += '_' + psv['annotation']
    # Must override results location because of annotation
    genome = psv['genome']
    if self.no_refs:  # (no_refs is only True when templating)
        genome = None  # If templating with no refs then this will hide genome and annotation
    # NOTE(review): the local 'genome' computed above is never used — the call
    # below passes psv['genome'] directly, so the no_refs hiding appears to be
    # a no-op; confirm intent before changing.
    psv['resultsLoc'] = self.umbrella_folder(args.folder, self.FOLDER_DEFAULT, self.proj_name,
                                             psv['exp_type'], psv['genome'], psv['annotation'])
    psv['resultsFolder'] = psv['resultsLoc']
    if not self.template:
        psv['resultsFolder'] += psv['experiment'] + '/'
    self.update_rep_result_folders(psv)
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv, indent=4)
    return psv
def get_args(self):
    '''Parse the input arguments.

    Uses the generic Launch argument set unchanged; parsing is deferred
    (parse=False) so subclass-specific arguments could be inserted before
    the final parse_args() call.
    '''
    ap = Launch.get_args(self,parse=False)
    # NOTE: Could override get_args() to have this non-generic control message
    #ap.add_argument('-c', '--control',
    #                help='The control bam for peak calling.',
    #                required=False)
    return ap.parse_args()
def get_args(self):
    '''Parse the input arguments.

    Adds the -a/--annotation option on top of the generic Launch arguments.
    '''
    arg_parser = Launch.get_args(self, parse=False)
    # Same help text as before, built with % formatting instead of + concat.
    annotation_help = "Label of annotation (default: '%s')" % self.ANNO_DEFAULT
    arg_parser.add_argument('-a', '--annotation',
                            help=annotation_help,
                            choices=[self.ANNO_DEFAULT, 'M2', 'M3', 'M4'],
                            default=self.ANNO_DEFAULT,
                            required=False)
    return arg_parser.parse_args()
def pipeline_specific_vars(self,args,verbose=False): '''Adds pipeline specific variables to a dict, for use building the workflow.''' #args.pe = True # This is necessary to ensure templating does what it must. psv = Launch.pipeline_specific_vars(self,args) # Could be multiple annotations supported per genome psv['annotation'] = args.annotation if psv['genome'] != self.GENOME_DEFAULT and psv['annotation'] == self.ANNO_DEFAULT: psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']] if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]: print psv['genome']+" has no "+psv['annotation']+" annotation." sys.exit(1) # Some specific settings psv['assay_type'] = "rampage" if self.exp["assay_term_name"] == "CAGE": psv['assay_type'] = "cage" psv['nthreads'] = 8 if not self.template: psv['control'] = args.control if psv['paired_end'] and psv['assay_type'] == "cage": print "ERROR: CAGE is always expected to be single-end but mapping says otherwise." sys.exit(1) elif not psv['paired_end'] and psv['assay_type'] == "rampage": print "Rampage is always expected to be paired-end but mapping says otherwise." sys.exit(1) # run will either be for combined or single rep. 
if not self.combined_reps: run = psv['reps']['a'] # If not combined then run will be for the first (only) replicate else: run = psv # If annotation is not default, then add it to title if psv['annotation'] != self.ANNO_DEFAULTS[psv['genome']]: psv['title'] += ', ' + psv['annotation'] psv['name'] += '_' + psv['annotation'] if self.exp["assay_term_name"] == "CAGE": psv['name'] = psv['assay_type'] + psv['name'][4:] psv['title'] = "CAGE" + psv['title'][7:] # Must override results location because of annotation psv['resultsLoc'] = self.umbrella_folder(args.folder,self.FOLDER_DEFAULT,self.proj_name,psv['exp_type'], \ psv['genome'],psv['annotation']) psv['resultsFolder'] = psv['resultsLoc'] if not self.template: psv['resultsFolder'] += psv['experiment'] + '/' self.update_rep_result_folders(psv) if verbose: print "Pipeline Specific Vars:" print json.dumps(psv,indent=4) return psv
def pipeline_specific_vars(self,args,verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    Rampage flavor (combined/single-rep aware): resolves the annotation,
    enforces paired-end mapping, records the control, builds workflow
    name/title labels, and sets per-replicate results folders.  Exits on
    invalid annotation or single-end data.  Returns the psv dict.
    '''
    psv = Launch.pipeline_specific_vars(self,args)
    # Could be multiple annotations supported per genome
    psv['annotation'] = args.annotation
    # Non-default genomes fall back to their own default annotation.
    if psv['genome'] != self.GENOME_DEFAULT and psv['annotation'] == self.ANNO_DEFAULT:
        psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']]
    if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]:
        print psv['genome']+" has no "+psv['annotation']+" annotation."
        sys.exit(1)
    if not psv['paired_end']:
        print "Rampage is always expected to be paired-end but mapping says otherwise."
        sys.exit(1)
    # Some specific settings
    psv['nthreads'] = 8
    psv['control'] = args.control
    # run will either be for combined or single rep.
    if not psv['combined']:
        run = psv['reps']['a']  # If not combined then run will be for the first (only) replicate
    else:
        run = psv
    # workflow labeling
    psv['description'] = "The ENCODE Rampage RNA pipeline for long RNAs"
    run['name'] = "rampage_"+psv['genome']
    if psv['genome'] == 'mm10':
        run['name'] += psv['annotation']
    # Gender tag distinguishes reference builds.
    if psv['gender'] == 'female':
        run['name'] += "XX"
    else:
        run['name'] += "XY"
    run['title'] = "Rampage RNA " + psv['experiment'] + " - " + run['rep_tech']
    run['name'] += "_"+psv['experiment']+"_" + run['rep_tech']
    if not psv['combined']:
        run['title'] += " [library '"+run['library_id']+"']"
    run['title'] += " on " + psv['genome']+" - "+psv['gender']
    # Must override results location because of annotation
    psv['resultsLoc'] = dxencode.umbrella_folder(args.folder,self.FOLDER_DEFAULT,self.proj_name,psv['exp_type'], \
                                                 psv['genome'],psv['annotation'])
    psv['resultsFolder'] = psv['resultsLoc'] + psv['experiment'] + '/'
    # Per-replicate results folders (rep 'b' only exists for combined runs).
    psv['reps']['a']['resultsFolder'] = psv['resultsLoc'] + psv['experiment'] + '/' + \
                                        psv['reps']['a']['rep_tech'] + '/'
    if psv['combined']:
        psv['reps']['b']['resultsFolder'] = psv['resultsLoc'] + psv['experiment'] + '/' + \
                                            psv['reps']['b']['rep_tech'] + '/'
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv,indent=4)
    return psv
def pipeline_specific_vars(self,args,verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    Small-RNA flavor: resolves the annotation, enforces single-end mapping,
    picks a per-replicate clipping model, and points results at an
    annotation-specific folder.  Exits on invalid annotation or paired-end
    data.  Returns the psv dict.
    '''
    psv = Launch.pipeline_specific_vars(self,args)
    # Now add pipline specific variables and tests
    # Could be multiple annotations supported per genome
    psv['annotation'] = args.annotation
    # Non-default genomes fall back to their own default annotation.
    if psv['genome'] != self.GENOME_DEFAULT and psv['annotation'] == self.ANNO_DEFAULT:
        psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']]
    if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]:
        print psv['genome']+" has no "+psv['annotation']+" annotation."
        sys.exit(1)
    # Paired ends?
    if psv['paired_end']:
        print "Small-RNA is always expected to be single-end but mapping says otherwise."
        #print json.dumps(psv,indent=4,sort_keys=True)
        sys.exit(1)
    # Some specific settings
    psv['nthreads'] = 8
    # By replicate:
    for ltr in psv['reps'].keys():
        if len(ltr) != 1:  # only simple reps
            continue
        rep = psv['reps'][ltr]
        rep["clipping_model"] = "ENCODE3"  # Default
        if "a_tailing" in rep:
            rep["clipping_model"] = "A_Tailing_" + rep["a_tailing"]
        print "%s detected for %s" % (rep["clipping_model"],rep["rep_tech"])
    # If annotation is not default, then add it to title
    if psv['annotation'] != self.ANNO_DEFAULTS[psv['genome']]:
        psv['title'] += ', ' + psv['annotation']
        psv['name'] += '_' + psv['annotation']
    # Must override results location because of annotation
    genome = psv['genome']
    if self.no_refs:  # (no_refs is only True when templating)
        genome = None  # If templating with no refs then this will hide genome and annotation
    # NOTE(review): the local 'genome' computed above is never used — the call
    # below passes psv['genome'] directly, so the no_refs hiding appears to be
    # a no-op; confirm intent before changing.
    psv['resultsLoc'] = self.umbrella_folder(args.folder,self.FOLDER_DEFAULT,self.proj_name,psv['exp_type'], \
                                             psv['genome'],psv['annotation'])
    psv['resultsFolder'] = psv['resultsLoc']
    if not self.template:
        psv['resultsFolder'] += psv['experiment'] + '/'
    self.update_rep_result_folders(psv)
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv,indent=4)
    return psv
def process_launch(self, launch_obj, conf, testItems):
    """Build a Launch object from a raw launch document.

    Optional fields fall back to defaults via dict.get(), replacing the
    repeated `x[k] if k in x else default` lookups (idiom; single lookup
    per key).

    Args:
        launch_obj: mapping with at least _id, projectRef, name, start_time.
        conf: configuration object passed through to Launch.
        testItems: test items passed through to Launch.
    Returns:
        A Launch instance.
    """
    return Launch(
        str(launch_obj["_id"]),
        launch_obj["projectRef"],
        launch_obj["name"],
        launch_obj.get("description", ""),
        conf,
        launch_obj.get("statistics", {}),
        launch_obj["start_time"],
        launch_obj.get("end_time"),       # default None, as before
        launch_obj.get("last_modified"),  # default None, as before
        launch_obj.get("number", 0),
        testItems)
def get_args(self):
    '''Parse the input arguments.

    Adds the -a/--annotation option on top of the generic Launch arguments.
    '''
    parser = Launch.get_args(self, parse=False)
    annotation_labels = [self.ANNO_DEFAULT, 'M2', 'M3', 'M4']
    parser.add_argument('-a', '--annotation',
                        help="Label of annotation (default: '%s')" % self.ANNO_DEFAULT,
                        choices=annotation_labels,
                        default=self.ANNO_DEFAULT,
                        required=False)
    return parser.parse_args()
def pipeline_specific_vars(self, args, verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    Long-RNA flavor: resolves the annotation, sets thread count and random
    seed, classifies the library as TruSeq/ScriptSeq/single-end for
    quant-rsem, optionally keeps TopHat steps, and points results at an
    annotation-specific folder.  Exits on invalid annotation.  Returns the
    psv dict.
    '''
    psv = Launch.pipeline_specific_vars(self, args)
    # Could be multiple annotations supported per genome
    psv['annotation'] = args.annotation
    # Non-default genomes fall back to their own default annotation.
    if psv['genome'] != self.GENOME_DEFAULT and psv['annotation'] == self.ANNO_DEFAULT:
        psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']]
    if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]:
        print psv['genome'] + " has no " + psv['annotation'] + " annotation."
        sys.exit(1)
    # Some specific settings
    psv['nthreads'] = 8
    psv['rnd_seed'] = 12345
    # Override paired-end with TruSeq or ScriptSeq, but only for quant-rsem
    psv["paired_type"] = "true"
    if not psv["paired_end"]:
        psv["paired_type"] = "false"
    else:
        # file.replicate.library.document contains "/documents/F17c31e10-1542-42c6-8b4c-3afff95564cf%2F"
        if psv.get('ScriptSeq', False):
            psv["paired_type"] = "ScriptSeq"
            print "Detected ScriptSeq"
        else:
            psv["paired_type"] = "TruSeq"
    # If annotation is not default, then add it to title
    if psv['annotation'] != self.ANNO_DEFAULTS[psv['genome']]:
        psv['title'] += ', ' + psv['annotation']
        psv['name'] += '_' + psv['annotation']
    self.no_tophat = args.no_tophat
    if not self.no_tophat:
        # Keeping TopHat means no steps get pruned from the pipeline.
        self.PRUNE_STEPS = []
    # Must override results location because of annotation
    psv['resultsLoc'] = self.umbrella_folder(args.folder,self.FOLDER_DEFAULT,self.proj_name,psv['exp_type'], \
                                             psv['genome'],psv['annotation'])
    psv['resultsFolder'] = psv['resultsLoc']
    if not self.template:
        psv['resultsFolder'] += psv['experiment'] + '/'
    self.update_rep_result_folders(psv)
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv, indent=4)
    return psv
def pipeline_specific_vars(self,args,verbose=False): '''Adds pipeline specific variables to a dict, for use building the workflow.''' psv = Launch.pipeline_specific_vars(self,args) # Some specific settings psv['nthreads'] = 8 psv['min_insert'] = 0 psv['max_insert'] = 500 if verbose: print "Pipeline Specific Vars:" print json.dumps(psv,indent=4) return psv
def get_args(self):
    """Parse the input arguments.

    Adds the -a/--annotation option on top of the generic Launch arguments.
    """
    parser = Launch.get_args(self, parse=False)
    annotation_choices = [self.ANNO_DEFAULT, "M2", "M3", "M4"]
    parser.add_argument(
        "-a",
        "--annotation",
        help="Label of annotation (default: '%s')" % self.ANNO_DEFAULT,
        choices=annotation_choices,
        default=self.ANNO_DEFAULT,
        required=False,
    )
    return parser.parse_args()
def pipeline_specific_vars(self, args, verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    Long-RNA flavor: resolves the annotation, sets thread count and random
    seed, derives the library read_strand orientation, optionally keeps
    TopHat steps, and points results at an annotation-specific folder.
    Exits on invalid annotation.  Returns the psv dict.
    '''
    psv = Launch.pipeline_specific_vars(self, args)
    # Could be multiple annotations supported per genome
    psv['annotation'] = args.annotation
    # Non-default genomes fall back to their own default annotation.
    if psv['genome'] != self.GENOME_DEFAULT and psv['annotation'] == self.ANNO_DEFAULT:
        psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']]
    if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]:
        print psv['genome']+" has no "+psv['annotation']+" annotation."
        sys.exit(1)
    # Some specific settings
    psv['nthreads'] = 8
    psv['rnd_seed'] = 12345
    # If paired-end then read_strand might vary TruSeq or ScriptSeq, but only for quant-rsem
    psv["read_strand"] = "unstranded"  # SE experiments are all unstranded
    if psv["paired_end"]:
        psv["read_strand"] = "reverse"  # Usual ENCODE LRNA experiments are rd1-/rd2+ (AKA reverse)
        if not psv["stranded"]:
            psv["read_strand"] = "unstranded"
            print "Detected unstranded library"
        elif psv.get('ScriptSeq', False):
            # file.replicate.library.document contains "/documents/F17c31e10-1542-42c6-8b4c-3afff95564cf%2F"
            psv["read_strand"] = "ScriptSeq"  # "ScriptSeq" experiments are rd1+/rd2- (AKA forward)
            print "Detected ScriptSeq"
    # print "Detected special cases"
    # If annotation is not default, then add it to title
    if psv['annotation'] != self.ANNO_DEFAULTS[psv['genome']]:
        psv['title'] += ', ' + psv['annotation']
        psv['name'] += '_' + psv['annotation']
    self.no_tophat = True
    if args.tophat_also:
        self.no_tophat = False
        self.PRUNE_STEPS = []  # This blocks pruning... keeping tophat
    # Must override results location because of annotation
    psv['resultsLoc'] = self.umbrella_folder(args.folder, self.FOLDER_DEFAULT, self.proj_name,
                                             psv['exp_type'], psv['genome'], psv['annotation'])
    psv['resultsFolder'] = psv['resultsLoc']
    if not self.template:
        psv['resultsFolder'] += psv['experiment'] + '/'
    self.update_rep_result_folders(psv)
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv, indent=4)
    return psv
def get_args(self):
    '''Parse the input arguments.

    Adds the -a/--annotation option on top of the generic Launch arguments.
    '''
    parser = Launch.get_args(self, parse=False)
    # NOTE: Could override get_args() to have this non-generic control message
    #parser.add_argument('-c', '--control',
    #                    help='The control bam for peak calling.',
    #                    required=False)
    parser.add_argument('-a', '--annotation',
                        help="Label of annotation (default: '%s')" % self.ANNO_DEFAULT,
                        choices=[self.ANNO_DEFAULT, 'M2', 'M3', 'M4'],
                        default=self.ANNO_DEFAULT,
                        required=False)
    return parser.parse_args()
def get_args(self):
    '''Parse the input arguments.

    Adds -a/--annotation and --no_tophat on top of the generic Launch
    arguments.
    '''
    parser = Launch.get_args(self, parse=False)
    parser.add_argument('-a', '--annotation',
                        help="Label of annotation (default: '%s')" % self.ANNO_DEFAULT,
                        choices=[self.ANNO_DEFAULT, 'M2', 'M3', 'M4'],
                        default=self.ANNO_DEFAULT,
                        required=False)
    parser.add_argument('--no_tophat',
                        help='Do not include TopHat steps in pipeline (default: include TopHat steps).',
                        action='store_true',
                        required=False)
    return parser.parse_args()
def pipeline_specific_vars(self, args, verbose=False):
    """Adds pipeline specific variables to a dict, for use building the workflow.

    Small-RNA flavor: resolves the annotation, enforces single-end mapping,
    and points results at an annotation-specific folder.  Exits on invalid
    annotation or paired-end data.  Returns the psv dict.
    """
    psv = Launch.pipeline_specific_vars(self, args)
    # Now add pipline specific variables and tests
    # Could be multiple annotations supported per genome
    psv["annotation"] = args.annotation
    # Non-default genomes fall back to their own default annotation.
    if psv["genome"] != self.GENOME_DEFAULT and psv["annotation"] == self.ANNO_DEFAULT:
        psv["annotation"] = self.ANNO_DEFAULTS[psv["genome"]]
    if psv["annotation"] not in self.ANNO_ALLOWED[psv["genome"]]:
        print psv["genome"] + " has no " + psv["annotation"] + " annotation."
        sys.exit(1)
    # Paired ends?
    if psv["paired_end"]:
        print "Small-RNA is always expected to be single-end but mapping says otherwise."
        # print json.dumps(psv,indent=4,sort_keys=True)
        sys.exit(1)
    # Some specific settings
    psv["nthreads"] = 8
    # If annotation is not default, then add it to title
    if psv["annotation"] != self.ANNO_DEFAULTS[psv["genome"]]:
        psv["title"] += ", " + psv["annotation"]
        psv["name"] += "_" + psv["annotation"]
    # Must override results location because of annotation
    genome = psv["genome"]
    if self.no_refs:  # (no_refs is only True when templating)
        genome = None  # If templating with no refs then this will hide genome and annotation
    # NOTE(review): the local 'genome' computed above is never used — the call
    # below passes psv["genome"] directly, so the no_refs hiding appears to be
    # a no-op; confirm intent before changing.
    psv["resultsLoc"] = self.umbrella_folder(
        args.folder, self.FOLDER_DEFAULT, self.proj_name, psv["exp_type"], psv["genome"], psv["annotation"]
    )
    psv["resultsFolder"] = psv["resultsLoc"]
    if not self.template:
        psv["resultsFolder"] += psv["experiment"] + "/"
    self.update_rep_result_folders(psv)
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv, indent=4)
    return psv
def get_args(self):
    '''Parse the input arguments.

    Adds -a/--annotation and --no_tophat on top of the generic Launch
    arguments.
    '''
    parser = Launch.get_args(self, parse=False)
    annotation_opts = dict(
        help="Label of annotation (default: '%s')" % self.ANNO_DEFAULT,
        choices=[self.ANNO_DEFAULT, 'M2', 'M3', 'M4'],
        default=self.ANNO_DEFAULT,
        required=False)
    parser.add_argument('-a', '--annotation', **annotation_opts)
    parser.add_argument('--no_tophat',
                        action='store_true',
                        required=False,
                        help='Do not include TopHat steps in pipeline (default: include TopHat steps).')
    return parser.parse_args()
def get_args(self):
    '''Parse the input arguments.

    Extends the generic Launch arguments with DNase-specific options
    (--read_length, --umi) and returns the parsed namespace.
    '''
    ap = Launch.get_args(self,parse=False)
    # BUG FIX: argparse applies type= BEFORE the choices/default check, so the
    # int-converted value could never match the old string choices (and the
    # string default '100' bypassed validation).  Use int choices/default.
    ap.add_argument('-rl', '--read_length',
                    help='The length of reads.',
                    type=int,
                    choices=[32, 36, 40, 50, 58, 72, 76, 100],
                    default=100,
                    required=False)
    ap.add_argument('--umi',
                    help='Treat fastqs as having UMI flags embedded.',
                    action='store_true',
                    required=False)
    # NOTE: Could override get_args() to have this non-generic control message
    #ap.add_argument('-c', '--control',
    #                help='The control bam for peak calling.',
    #                required=False)
    return ap.parse_args()
def pipeline_specific_vars(self, args, verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    Long-RNA flavor: resolves the annotation, fixes thread count and random
    seed, optionally keeps TopHat steps, and points results at an
    annotation-specific folder.  Exits on invalid annotation.  Returns the
    psv dict.
    '''
    psv = Launch.pipeline_specific_vars(self, args)
    # Could be multiple annotations supported per genome
    psv['annotation'] = args.annotation
    # Non-default genomes fall back to their own default annotation.
    if psv['genome'] != self.GENOME_DEFAULT and psv['annotation'] == self.ANNO_DEFAULT:
        psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']]
    if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]:
        print psv['genome'] + " has no " + psv['annotation'] + " annotation."
        sys.exit(1)
    # Some specific settings
    psv['nthreads'] = 8
    psv['rnd_seed'] = 12345
    # If annotation is not default, then add it to title
    if psv['annotation'] != self.ANNO_DEFAULTS[psv['genome']]:
        psv['title'] += ', ' + psv['annotation']
        psv['name'] += '_' + psv['annotation']
    self.no_tophat = args.no_tophat
    if not self.no_tophat:
        # Keeping TopHat means no steps get pruned from the pipeline.
        self.PRUNE_STEPS = []
    # Must override results location because of annotation
    psv['resultsLoc'] = self.umbrella_folder(args.folder,self.FOLDER_DEFAULT,self.proj_name,psv['exp_type'], \
                                             psv['genome'],psv['annotation'])
    psv['resultsFolder'] = psv['resultsLoc']
    if not self.template:
        psv['resultsFolder'] += psv['experiment'] + '/'
    self.update_rep_result_folders(psv)
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv, indent=4)
    return psv
def pipeline_specific_vars(self,args,verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    Extends the generic Launch psv dict with annotation selection and
    annotation-aware results folders.  Exits on an unsupported
    genome/annotation combination or on paired-end input (this pipeline
    is single-end only).
    '''
    psv = Launch.pipeline_specific_vars(self,args)
    # Now add pipline specific variables and tests
    # Could be multiple annotations supported per genome:
    # a non-default genome falls back to its own default annotation.
    psv['annotation'] = args.annotation
    if psv['genome'] != self.GENOME_DEFAULT and psv['annotation'] == self.ANNO_DEFAULT:
        psv['annotation'] = self.ANNO_DEFAULTS[psv['genome']]
    if psv['annotation'] not in self.ANNO_ALLOWED[psv['genome']]:
        print psv['genome']+" has no "+psv['annotation']+" annotation."
        sys.exit(1)
    # Paired ends?  Single-end is the only supported layout here.
    if psv['paired_end']:
        print "Small-RNA is always expected to be single-end but mapping says otherwise."
        #print json.dumps(psv,indent=4,sort_keys=True)
        sys.exit(1)
    # Some specific settings
    psv['nthreads'] = 8
    # If annotation is not default, then add it to title and name so runs are distinguishable.
    if psv['annotation'] != self.ANNO_DEFAULTS[psv['genome']]:
        psv['title'] += ', ' + psv['annotation']
        psv['name'] += '_' + psv['annotation']
    # Must override results location because of annotation
    psv['resultsLoc'] = dxencode.umbrella_folder(args.folder,self.FOLDER_DEFAULT,self.proj_name,psv['exp_type'], \
                                                 psv['genome'],psv['annotation'])
    psv['resultsFolder'] = psv['resultsLoc'] + psv['experiment'] + '/'
    self.update_rep_result_folders(psv)
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv,indent=4)
    return psv
def pipeline_specific_vars(self,args,verbose=False): '''Adds pipeline specific variables to a dict, for use building the workflow.''' psv = Launch.pipeline_specific_vars(self,args) # Now add pipline specific variables and tests # Paired ends? if psv['paired_end']: print "Small-RNA is always expected to be single-end but mapping says otherwise." #print json.dumps(psv,indent=4,sort_keys=True) sys.exit(1) # Some specific settings psv['nthreads'] = 8 # run will either be for combined or single rep. if self.combined_reps: print "Small-RNA-seq pipeline currently does not support combined-replicate processing." sys.exit(1) if verbose: print "Pipeline Specific Vars:" print json.dumps(psv,indent=4) return psv
def pipeline_specific_vars(self,args,verbose=False):
    '''Adds pipeline specific variables to a dict, for use building the workflow.

    Extends the generic Launch psv dict, then labels the (single-rep)
    run's title and name.  Exits on paired-end input or on a
    combined-replicate request, neither of which is supported.
    '''
    psv = Launch.pipeline_specific_vars(self,args)
    # Now add pipline specific variables and tests
    # Paired ends?  Single-end is the only supported layout here.
    if psv['paired_end']:
        print "Small-RNA is always expected to be single-end but mapping says otherwise."
        sys.exit(1)
    # Some specific settings
    psv['nthreads'] = 8
    # run will either be for combined or single rep.
    if not psv['combined']:
        run = psv['reps']['a']  # If not combined then run will be for the first (only) replicate
    else:
        # NOTE(review): this assignment is dead -- the combined path aborts immediately below.
        run = psv
        print "Small-RNA-seq pipeline currently does not support combined-replicate processing."
        sys.exit(1)
    # workflow labeling
    psv['description'] = "The ENCODE RNA Seq pipeline for short RNA"
    genderToken = "XY"
    if psv['gender'] == 'female':
        genderToken = "XX"
    run['title'] = "short RNA-seq " + psv['experiment'] + " - "+run['rep_tech'] + \
                   " (library '"+run['library_id']+"') on " + psv['genome'] + \
                   " - "+psv['gender']
    run['name'] = "srna_"+psv['genome']+genderToken+"_"+psv['experiment'] + "_"+run['rep_tech']
    if verbose:
        print "Pipeline Specific Vars:"
        print json.dumps(psv,indent=4)
    return psv
def __init__(self):
    '''Initialize by delegating to the base Launch constructor; this subclass adds no extra state.'''
    Launch.__init__(self)
def nextLaunch(n_results):
    '''Fetch the next launches and format entry number *n_results* (1..5).

    Returns a (formatted_html_text, provider_image_filename, livestream_url)
    tuple for the selected launch.
    NOTE(review): when n_results is out of range the function only prints a
    message and implicitly returns None -- callers that unpack the tuple
    would crash; confirm callers validate the range first.
    '''
    pass  # NOTE(review): dead leftover statement; the real body follows
    # Providers for which a local 'imgs/<name>.jpg' picture exists.
    providers_list = [
        "spacex", "ula", "nasa", "roscosmos", "jaxa", "china", "astra",
        "virgin", "rocketlab", "grumman"
    ]
    selected = ""
    # Always fetch the next 5 launches; n_results picks one of them below.
    j = requests.get('https://fdo.rocketlaunch.live/json/launches/next/5')
    #change this to select the right launch
    if n_results > 0 and n_results < 6:
        api_json = json.loads(j.text)['result']
        txts = []
        livestream = ""
        # Single-iteration loop: only index n_results-1 is processed.
        for i in range(n_results - 1, n_results):
            # "<state>, <country>", falling back to country alone when the
            # API reports no state (JSON null becomes "None" after str()).
            padlocation = str(
                api_json[i]['pad']['location']['statename']) + ', ' + str(
                    api_json[i]['pad']['location']['country'])
            print(str(api_json[i]['pad']['location']['statename']))
            if str(api_json[i]['pad']['location']['statename']) == "None":
                padlocation = str(api_json[i]['pad']['location']['country'])
            print(padlocation)
            launch_date = str(api_json[i]['win_open'])
            if launch_date == "None":
                # No confirmed window: build an MM-DD-YYYY string from the
                # estimated date, zero-padding month/day and using "??" for
                # an unknown day.
                if str(api_json[i]['est_date']['month']) != "None":
                    day = '0'
                    month = '0'
                    if int(api_json[i]['est_date']['month']) < 10:
                        month = month + str(api_json[i]['est_date']['month'])
                    else:
                        month = str(api_json[i]['est_date']['month'])
                    if str(api_json[i]['est_date']['day']) != "None":
                        if int(api_json[i]['est_date']['day']) < 10:
                            day = day + str(api_json[i]['est_date']['day'])
                        else:
                            day = str(api_json[i]['est_date']['day'])
                    else:
                        day = "??"
                    launch_date = month + '-' + day + '-' + str(
                        api_json[i]['est_date']['year'])
            # The livestream URL, when present, is embedded in 'quicktext'
            # after a "- " separator.
            if str(api_json[i]['quicktext']) != "None":
                parts = str(api_json[i]['quicktext']).split('- ')
                for x in parts:
                    if "https" in x:
                        livestream = x
                        print("Livestream link: " + livestream)
            l = Launch(str(api_json[i]['name']),
                       str(api_json[i]['provider']['name']),
                       str(api_json[i]['vehicle']['name']),
                       str(api_json[i]['pad']['location']['name']), padlocation,
                       str(api_json[i]['launch_description']), launch_date,
                       livestream)
            # 'rocket' is presumably a module-level emoji constant -- defined elsewhere; verify.
            txts.append(rocket + ' #' + str(i+1) + ' - <i>' + str(api_json[i]['name']) + '</i> ' + rocket \
                        + '\n' + l.getFormattedText())
            # Pick the provider image whose name appears in the provider string.
            for k in range(0, len(providers_list)):
                print("Lower data: " + str(api_json[i]['provider']['name']).lower())
                print("Item in list: " + providers_list[k])
                if providers_list[k] in str(
                        api_json[i]['provider']['name']).lower():
                    selected = providers_list[k] + '.jpg'
                    break
        return txts[0], selected, livestream
        #button_list = [
        #    InlineKeyboardButton("Watch Livestream (if available)", url=livestream)
        #]
        #reply_markup = InlineKeyboardMarkup(build_menu(button_list, n_cols=1))
        #if selected == "":
        #    context.bot.send_message(chat_id=update.effective_chat.id,
        #                             text=txts[0],
        #                             parse_mode=ParseMode.HTML)
        #else:
        #    context.bot.send_photo(chat_id=update.effective_chat.id,
        #                           photo=open('imgs/' + selected, 'rb'),
        #                           caption=txts[0],
        #                           parse_mode=ParseMode.HTML,
        #                           reply_markup=reply_markup)
    else:
        print("param not in range\nn_results: " + str(n_results) + "\n")
class Tuner(QtGui.QWidget):
    '''Top-level task-tuner window: one tab per task page plus a Save/Quit bar.'''

    def __init__(self, host="localhost", port=6499):
        super(Tuner, self).__init__()
        self.default_title = "Aura Tasks"
        #self.chirp = None
        #self.circle = None
        #self.land = None
        self.initUI()
        self.load(host=host, port=port)
        self.clean = True  # True while there are no unsaved changes

    def initUI(self):
        '''Build the static widget layout: tab area plus the 'File' button bar.'''
        self.setWindowTitle(self.default_title)
        layout = QtGui.QVBoxLayout()
        self.setLayout(layout)

        # Main work area
        self.tabs = QtGui.QTabWidget()
        layout.addWidget(self.tabs)

        #self.overview = Overview(changefunc=self.onChange)
        #self.tabs.addTab( self.overview.get_widget(), "Overview" );

        # 'File' button bar
        file_group = QtGui.QFrame()
        layout.addWidget(file_group)
        file_layout = QtGui.QHBoxLayout()
        file_group.setLayout(file_layout)
        save = QtGui.QPushButton('Save')
        save.clicked.connect(self.save)
        file_layout.addWidget(save)
        quit = QtGui.QPushButton('Quit')
        quit.clicked.connect(self.quit)
        file_layout.addWidget(quit)
        file_layout.addStretch(1)

        self.resize(800, 700)
        self.show()

    def load(self, host="localhost", port=6499):
        '''Create each task page (connected to host:port) and add it as a tab.'''
        print "Tuner.load " + str(port)

        # Calibrate page
        self.calibrate = Calibrate(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab(self.calibrate.get_widget(), "Calibrate")

        # Preflight page
        self.preflight = Preflight(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab(self.preflight.get_widget(), "Preflight")

        # Launch page
        self.launch = Launch(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab(self.launch.get_widget(), "Launch")

        # Circle hold page
        self.circle = Circle(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab(self.circle.get_widget(), "Circle")

        # Chirp page
        self.chirp = Chirp(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab(self.chirp.get_widget(), "Chirp")

        # Land page
        self.land = Land(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab(self.land.get_widget(), "Land")

    def save(self):
        # Placeholder: persisting the configuration is not implemented yet.
        print "called for save, but does nothing yet"

    def quit(self):
        QtCore.QCoreApplication.instance().quit()

    def onChange(self):
        '''Change callback handed to every page: mark the session dirty.'''
        #print "parent onChange() called!"
        #result = self.rebuildTabNames()
        #if result:
        #    self.rebuildWingLists()
        self.clean = False

    def isClean(self):
        return self.clean

    def setClean(self):
        self.clean = True
# GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import sys import cherrypy from launch import Launch """ if hasattr(sys, "frozen"): cwd = os.path.dirname(os.path.abspath(unicode(sys.executable, sys.getfilesystemencoding()))) else: cwd = os.path.dirname(os.path.abspath(__file__)) sys.path.append(cwd) """ if __name__ == '__main__': if os.name == "posix": launchyc = Launch() launchyc.handle_args() elif os.name == "nt": #importing here so that if the user isnt running windows #they wont get an import error from the win only libs from yamcatWin import LaunchWin launchyc = LaunchWin() launchyc.handle_args()
def __init__(self):
    '''Initialize the base Launch, then enable UMI auto-detection.'''
    Launch.__init__(self)
    self.detect_umi = True  # Only in DNase is there a umi setting buried in the fastq metadata.
class Tuner(QtGui.QWidget):
    '''Top-level task-tuner window: one tab per task page plus a Save/Quit bar.'''

    def __init__(self, host="localhost", port=6499):
        super(Tuner, self).__init__()
        self.default_title = "Aura Tasks"
        #self.chirp = None
        #self.circle = None
        #self.land = None
        self.initUI()
        self.load(host=host, port=port)
        self.clean = True  # True while there are no unsaved changes

    def initUI(self):
        '''Build the static widget layout: tab area plus the 'File' button bar.'''
        self.setWindowTitle( self.default_title )
        layout = QtGui.QVBoxLayout()
        self.setLayout(layout)

        # Main work area
        self.tabs = QtGui.QTabWidget()
        layout.addWidget( self.tabs )

        #self.overview = Overview(changefunc=self.onChange)
        #self.tabs.addTab( self.overview.get_widget(), "Overview" );

        # 'File' button bar
        file_group = QtGui.QFrame()
        layout.addWidget(file_group)
        file_layout = QtGui.QHBoxLayout()
        file_group.setLayout( file_layout )
        save = QtGui.QPushButton('Save')
        save.clicked.connect(self.save)
        file_layout.addWidget(save)
        quit = QtGui.QPushButton('Quit')
        quit.clicked.connect(self.quit)
        file_layout.addWidget(quit)
        file_layout.addStretch(1)

        self.resize(800, 700)
        self.show()

    def load(self, host="localhost", port=6499):
        '''Create each task page (connected to host:port) and add it as a tab.'''
        print "Tuner.load " + str(port)

        # Recalibrate page
        self.recalibrate = Recalibrate(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab( self.recalibrate.get_widget(), "Recalibrate" )

        # Preflight page
        self.preflight = Preflight(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab( self.preflight.get_widget(), "Preflight" )

        # Launch page
        self.launch = Launch(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab( self.launch.get_widget(), "Launch" )

        # Circle hold page
        self.circle = Circle(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab( self.circle.get_widget(), "Circle" )

        # Chirp page
        self.chirp = Chirp(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab( self.chirp.get_widget(), "Chirp" )

        # Land page
        self.land = Land(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab( self.land.get_widget(), "Land" )

    def save(self):
        # Placeholder: persisting the configuration is not implemented yet.
        print "called for save, but does nothing yet"

    def quit(self):
        # Signal the background data-fetcher thread to stop before exiting Qt.
        global data_fetcher_quit
        data_fetcher_quit = True
        QtCore.QCoreApplication.instance().quit()

    def onChange(self):
        '''Change callback handed to every page: mark the session dirty.'''
        #print "parent onChange() called!"
        #result = self.rebuildTabNames()
        #if result:
        #    self.rebuildWingLists()
        self.clean = False

    def isClean(self):
        return self.clean

    def setClean(self):
        self.clean = True
def test10_execute_nofeedback(self):
    '''Running the sleeping test program through Launch.execute should succeed.'''
    self.lnc = Launch(db='sqlite.db', debug=True)
    cmd = "tests/testprog.py --scenario sleeping --sleep 1 --debug"
    outcome = self.lnc.execute(command=cmd)
    print("test10 result={}".format(outcome))
    self.assertTrue(outcome)