def linkToStylesheet(self, _root=None):
    """Link the settings within this Settings to a stylesheet.

    _root is an internal parameter as this function is recursive.
    """
    if _root is None:
        # Build the root part of the pathname by walking up the parents
        # until the first widget ancestor is reached.
        components = []
        node = self
        while not node.parent.isWidget():
            components.insert(0, node.name)
            node = node.parent
        components = ['', 'StyleSheet', node.parent.typename] + components + ['']
        _root = '/'.join(components)

    # Walk every subsetting under this Settings object.
    for name, setting in self.setdict.iteritems():
        childpath = _root + name
        if isinstance(setting, Settings):
            # Nested Settings container: recurse with an extended root.
            setting.linkToStylesheet(_root=childpath + '/')
        else:
            ref = Reference(childpath)
            try:
                # Only adopt the reference if it actually resolves.
                ref.resolve(setting)
                setting.set(ref)
                setting.default = ref
            except Reference.ResolveException:
                pass
def _get_reference(self):
    """Return a Reference to this object suitable for binding."""
    # A single py_context address pointing at this object's namespace.
    address = Address(type='py_context', content=self.namespace)
    return Reference(
        class_name=self.__class__.__name__, addresses=[address])
def _get_reference(self):
    """Return a Reference to this object suitable for binding."""
    # Use the absolute form of the path so the address is location-independent.
    address = Address(
        type='pyfs_context', content=os.path.abspath(self.path))
    return Reference(
        class_name=self.__class__.__name__, addresses=[address])
def run(options): if options.input is None: sys.exit('\nInput RefSeq database file not specified.') if options.reference is None: sys.exit('\nReference genome file not specified.') # Print reference genome name print 'RefSeq transcript db: ' + options.input print 'Reference genome: ' + options.reference print '' # Initialize output file outfile = open(options.output, 'w') outfile.write( '#Created: ' + datetime.datetime.now().strftime("%d-%m-%Y") + '; Reference genome: ' + options.reference + '\n') outfile.write('#' + '\t'.join(['ID', 'VERSION', 'DIFFS']) + '\n') # Transcript database tdb = TranscriptDB(options.input) # Initialize Reference object reference = Reference(options.reference) sys.stdout.write('Processing transcripts ... ') sys.stdout.flush() # Iterate through transcripts counter = 0 for transcript in tdb.generator(): # Retrieve reference sequence corresponding to transcript reference_sequence = get_reference_sequence(transcript, reference) # Compare transcript sequence with reference sequence diffs = compare_sequences(reference_sequence, transcript.sequence, transcript.cdna_coding_start, transcript.cdna_coding_end) # Write results to output file outfile.write('\t'.join([transcript.id, transcript.version, diffs]) + '\n') counter += 1 print '- Done' # Close output files outfile.close() print '\nA total of {} transcripts have been processed.'.format(counter) # Print output file name print '\nOutput file: {}'.format(options.output)
def get_state_to_bind(self, obj, name, context):
    """Return the state of an object for binding.

    A Reference is returned only for File objects that do not live in the
    directory represented by the context; everything else yields None.
    """
    if not isinstance(obj, File):
        return None

    # File already inside the context's directory: nothing to bind.
    if obj.parent.path == context.path:
        return None

    # Out-of-directory file: bind a reference to its path instead.
    return Reference(
        class_name=obj.__class__.__name__,
        addresses=[Address(type='file', content=obj.path)])
def __init__(self, filepath):
    """Load the JSON database at *filepath* and build typed entity lists.

    Populates compounds, remedies, enzymes, transporter and drugs from the
    corresponding top-level arrays (each entry supplies 'name' and 'xrefs'),
    and wraps the 'publication' object in a Reference.
    """
    with io.open(filepath, 'r', encoding='utf-8') as f:
        self.raw = json.load(f)

    # Each entity class wraps a (name, xrefs) pair from the raw JSON.
    self.compounds = [
        Compound(x['name'], x['xrefs']) for x in self.raw['compounds']
    ]
    self.remedies = [
        Remedy(x['name'], x['xrefs']) for x in self.raw['remedies']
    ]
    self.enzymes = [
        Enzyme(x['name'], x['xrefs']) for x in self.raw['enzymes']
    ]
    self.transporter = [
        Transporter(x['name'], x['xrefs']) for x in self.raw['transporter']
    ]
    self.drugs = [Drug(x['name'], x['xrefs']) for x in self.raw['drugs']]

    publication = self.raw['publication']
    # dict.get replaces the manual membership test; a missing 'doi' maps
    # to None, exactly as before.
    self.publication = Reference(
        publication['pmid'], publication.get('doi'), publication['citation'])
def create(self):
    """Build the plot and wire up all interactive analysis tools.

    Creates the ppm scale and plot, then constructs, creates and draws the
    reference, peak-picking, integration and multiplet-analysis tools in
    that order (later tools receive the earlier ones), and finally draws
    the data line itself.
    """
    self.makePPMScale()
    self.newPlot()

    # One shared data source for the plot line and every tool below.
    self.dataSource = ColumnDataSource(
        data=dict(ppm=self.ppmScale, data=self.pdata))

    self.reference = Reference(self.logger, self.dataSource)
    self.reference.create()
    self.reference.draw(self.plot)

    # Peak picking depends on the reference created above.
    self.peakPicking = PeakPicking(self.logger, self.id, self.dic,
                                   self.udic, self.pdata, self.dataSource,
                                   self.reference)
    self.peakPicking.create()
    self.peakPicking.draw(self.plot)

    self.integration = Integration(self.logger, self.id, self.pdata,
                                   self.dataSource, self.reference)
    self.integration.create()
    self.integration.draw(self.plot)

    # Multiplet analysis consumes both peak picking and integration.
    self.multipletAnalysis = MultipletAnalysis(
        self.logger, self.id, self.dic, self.udic, self.pdata,
        self.dataSource, self.peakPicking, self.integration, self.reference)
    self.multipletAnalysis.create()
    self.multipletAnalysis.draw(self.plot)

    self.createMeasureJTool()

    self.plot.line('ppm', 'data', source=self.dataSource, line_width=2)
def __init__(self, name, source):
    """
    Constructor called either by PhysicsObject or Tree.
    Parse the source text block and collect all information on this object.
    """
    self.name = name
    self.includes = []
    self.constants = []
    self.enums = []
    self.objbranches = []
    self.branches = []
    self.references = []
    self.functions = []

    # Candidate parsers tried in priority order; each constructor raises
    # Definition.NoMatch when the line is not of its kind. The first one
    # that succeeds consumes the line into its target list.
    candidates = [
        (self.includes, lambda l: Include(l)),
        (self.enums, lambda l: Enum(l, source)),
        (self.constants, lambda l: Constant(l, source)),
        (self.branches, lambda l: RefBranch(l)),
        (self.branches, lambda l: RefVectorBranch(l)),
        (self.objbranches, lambda l: ObjBranch(l)),
        (self.branches, lambda l: Branch(l)),
        (self.branches, lambda l: GenericBranch(l)),
        (self.references, lambda l: Reference(l)),
        (self.functions, lambda l: Function(l, source)),
    ]

    while True:
        line = source.readline().strip()
        if line == '':
            # Blank line terminates the definition block.
            break

        for target, parse in candidates:
            try:
                target.append(parse(line))
                break
            except Definition.NoMatch:
                pass
        else:
            # No parser recognised the line: stop reading.
            break
def __getattr__(self, name):
    """Resolve any non-dunder attribute lookup as a new Reference.

    Dunder names are refused so Python protocol probes fail normally.
    """
    if name.startswith("__"):
        raise AttributeError
    return Reference(name)
def getReferences(self, json=False):
    """Extract the bibliographic references from the parsed document soup.

    Scans every <ref> element, pulling id, authors, year, article title,
    source, volume and page range into Reference objects. References
    without an id are skipped; other missing fields are logged and left
    unset. Returns the list of Reference objects, or a JSON conversion of
    the whole list when *json* is True.
    """
    references = self.soup.find_all('ref')
    referenceList = []
    for reference in references:
        extractedReference = Reference()
        try:
            # attrs is a dict, so a missing 'id' raises KeyError; the
            # original only caught AttributeError and would crash here.
            extractedReference.id = reference.attrs['id']
        except (AttributeError, KeyError):
            print('id not present')
            continue
        try:
            authors = reference.find_all('string-name')
            extractedReference.authors[:] = []
            for author in authors:
                surname = author.find('surname').get_text().encode("utf-8")
                givenName = author.find('given-names').get_text().encode(
                    "utf-8")
                authorObject = Author()
                authorObject.surname = surname
                authorObject.givenName = givenName
                extractedReference.authors.append(authorObject)
        except AttributeError:
            print('author not present')
        try:
            extractedReference.year = reference.find(
                'year').get_text().encode("utf-8")
        except AttributeError:
            print('year not present')
        try:
            # The last <article-title> is used; earlier ones may belong to
            # nested markup.
            extractedReference.articleTitle = reference.find_all(
                'article-title')[-1].get_text().encode("utf-8")
            print(extractedReference.articleTitle)
        except AttributeError:
            print('article title not present')
        try:
            extractedReference.source = reference.find(
                'source').get_text().encode("utf-8")
        except AttributeError:
            print('source not present')
        try:
            extractedReference.volume = reference.find(
                'volume').get_text().encode("utf-8")
        except AttributeError:
            print('volume not present')
        try:
            extractedReference.fromPage = reference.find(
                'fpage').get_text().encode("utf-8")
        except AttributeError:
            print('fromPage not present')
        try:
            extractedReference.toPage = reference.find(
                'lpage').get_text().encode("utf-8")
        except AttributeError:
            print('toPage not present')
        referenceList.append(extractedReference)

    if json is True:
        # The original also built a per-reference JSON list here that was
        # never used; that dead code has been removed.
        return self.convertToJson(referenceList)
    return referenceList
def main():
    """Merge the indel and snp VCF-style inputs against the reference,
    processing events in order until both files are exhausted.

    Reads animal id / left-right from the positional args, opens the
    reference, indel and snp files, then interleaves events by whichever
    comes first, finishing the leftover file at the end.
    """
    global lr, outputFile, opts
    opts, args = parseOptions()
    lr = args[1]
    animalId = args[0] + '_' + lr
    # presumably ids starting with 'M' denote males — TODO confirm with caller
    male = animalId[0] == 'M'
    # Open our files...
    ref = Reference(opts.reference + '_ordered.fa')
    indels = open(opts.indels)
    snps = open(opts.snps)
    dir = opts.dir
    if opts.output:
        outputFile = open(os.path.join(dir, opts.output), 'w')
    else:
        outputFile = sys.stdout
    # Prepare for processing the indels file
    # Skip the indels vcf header cruft
    iHeaders = skipHeaders(indels)
    iStrainTracker = StrainTracker(opts.control, iHeaders, 'I')
    Indel.setStrainTracker(iStrainTracker)
    # From here on out all lines in the indels file will be indel records.
    # Flag that we need to refresh the indel
    needIndel = True
    # Similarly set up the snps file
    sHeaders = skipHeaders(snps)
    sStrainTracker = StrainTracker(opts.control, sHeaders, 'S')
    Snp.setStrainTracker(sStrainTracker)
    # From here on out all lines in the snps file will be snp records.
    # Flag that we need to refresh the snp
    needSnp = True
    # Main processing loop...
    while True:
        if needIndel:
            indel = Indel(indels)
            if indel.chr == 'EOF':
                # We exhausted the indels file. We'll clean up the snps
                # after the loop
                break
            needIndel = False
        if needSnp:
            snp = Snp(snps)
            if snp.chr == 'EOF':
                # We exhausted the snps file. We'll clean up the indels
                # after the loop
                break
            needSnp = False
        # Now we have an indel and a snp. Process whichever is first.
        # This function will return True if it processed the indel,
        # False if it processed the snp.
        processedIndel = processNextEvent(ref, indel, snp, animalId)
        if processedIndel:
            needIndel = True
        else:
            needSnp = True
    # End of the main loop. We have exhausted one or the other input
    # file. Now clean up the remainder of the other file.
    if indel.chr == 'EOF':
        # Last parameter False indicates processing snps
        processRemaining(ref, snps, animalId, False)
    elif snp.chr == 'EOF':
        processRemaining(ref, indels, animalId, True)
    # That's about it!
    finishUp(ref, opts.reference, male, lr)
def main():
    """Align each read (and its reverse complement) against an indexed
    reference, writing the best hits under the distance threshold.

    The reference index is loaded from a cached dump when present,
    otherwise built and saved. Results are appended to args.out and the
    file is echoed back at the end.
    """
    # Get arguments
    args = argParse()
    print("- ARGS :")
    print(args, "\n")

    # Create the indexed reference.
    print("- REFERENCE :")
    reference = Reference()
    fileName = path.splitext(args.ref)[0]
    if path.exists(fileName + ".dumped.gz"):
        # A cached index exists: load it instead of re-indexing.
        reference.load(fileName)
        print("reference loaded from dumped.gz")
    else:
        sequence = getSequence(args.ref)
        reference.createIndex(sequence)
        reference.save(fileName)
        print("reference indexed and saved")
    print("DEBUG", reference.N)
    print("Reference : ", "\n", reference.text[:10], "...", "\n")

    # Open the reads stream and peek at the first read.
    print("- READS :")
    readStream = openReads(args.reads)
    readName, readContent = getNextRead(readStream)
    print("First read :", readName, readContent)
    print("RevCompl :", reverseCompl(readContent), "\n")

    outputStream = open(args.out, "w")
    try:
        print("- FINDING ALIGNEMENT :")
        with Timer() as total_time:
            while readContent != -1:
                print(readName, "(processing)")
                isRevCompl = False
                # Align the read and its reverse complement; keep the best.
                bestScore, bestPos = getBestSemiGlobalAlg(
                    readContent, reference, args.k, args.dmax)
                bestScoreRev, bestPosRev = getBestSemiGlobalAlg(
                    reverseCompl(readContent), reference, args.k, args.dmax)
                if bestScoreRev < bestScore:
                    bestScore, bestPos = bestScoreRev, bestPosRev
                    isRevCompl = True
                # A score equal to the read length means no alignment found.
                if bestScore != len(readContent):
                    if bestScore <= args.dmax:
                        appendResults(outputStream, readName, bestPos,
                                      isRevCompl, bestScore)
                readName, readContent = getNextRead(readStream)
        total_time.print('\nIt tooks {} secondes.', 5)
        print()
    finally:
        # Close both streams even if alignment fails part-way (the
        # original leaked them on error).
        readStream.close()
        outputStream.close()

    # Echo the results file back to the user.
    print("- RESULTS :")
    with open(args.out, "r") as resultFile:
        print(resultFile.read())
    print("exported in:", args.out)