def linkToStylesheet(self, _root=None):
    """Link the settings within this Settings to a stylesheet.

    _root is an internal parameter as this function is recursive."""

    # build up root part of pathname to reference
    if _root is None:
        path = []
        obj = self
        while not obj.parent.isWidget():
            path.insert(0, obj.name)
            obj = obj.parent
        path = ['', 'StyleSheet', obj.parent.typename] + path + ['']
        _root = '/'.join(path)

    # iterate over subsettings
    for name, setn in self.setdict.iteritems():
        thispath = _root + name
        if isinstance(setn, Settings):
            # call recursively if this is a Settings
            setn.linkToStylesheet(_root=thispath + '/')
        else:
            ref = Reference(thispath)
            try:
                # if the reference resolves, then set it
                ref.resolve(setn)
                setn.set(ref)
                setn.default = ref
            except Reference.ResolveException:
                pass
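# Hedged illustration (not from the source): the reference path that
# linkToStylesheet assembles. For a hypothetical widget of typename 'xy'
# with a settings subgroup 'Line' containing a setting 'color', the
# recursion above would produce:
path = ['', 'StyleSheet', 'xy', 'Line', '']
_root = '/'.join(path)      # '/StyleSheet/xy/Line/'
thispath = _root + 'color'  # '/StyleSheet/xy/Line/color'
print(thispath)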
def surv_data_2_ref(surv_data):
    # instantiate an empty reference
    ref = Reference()

    # add meta data to reference
    # at present just a timestamp
    from time import gmtime, strftime
    ref.set_meta({'timestamp': strftime("%Y-%m-%d %H:%M:%S", gmtime())})

    for channel_code in objectives_channel_codes:
        # add reference objective corresponding to each model objective
        if channel_code == 'prevalence':
            #debug_p('surv_data ' + str(surv_data['prevalence']))
            d_points = prevalence_surv_data_2_d_points(surv_data['prevalence'])
        else:
            msg = "Channel " + channel_code + " not implemented yet!\nSetting reference data to None."
            warn_p(msg)
            d_points = None

        #debug_p('adding objective ' + channel_code)
        #debug_p('num d_points ' + str(len(d_points)))
        ref.add_objective(channel_code, d_points)

    return ref
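# Hedged usage sketch (assumption): surv_data is a mapping keyed by channel
# code; only the 'prevalence' channel is converted above, via the helper
# prevalence_surv_data_2_d_points. The payload shape below is hypothetical.
# surv_data = {'prevalence': per_round_prevalence_observations}
# ref = surv_data_2_ref(surv_data)
# ref now carries a timestamp in its metadata and one objective per channel.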
def __init__(self, reference_tokens, negative_value=0.0):
    """
    @param reference_tokens the reference translation that hypotheses shall
           be scored against. Must be an iterable of tokens (any type).
    """
    Reference.__init__(self, reference_tokens)
    self.negative_value = negative_value
def __init__(self):
    Reference.__init__(self, 'German Wiktionary',
                       'http://de.wiktionary.org/w/api.php',
                       'wiktionary.pickle')
    # TODO(PM) Add a text interface to login
    # Open dictionary of noun categories
    with open('reference/kategorien.json', 'r') as store:
        self.category_dict = json.load(store)
def __init__(self, reference_tokens, n=4):
    """
    @param reference_tokens the reference translation that hypotheses shall
           be scored against. Must be an iterable of tokens (any type).
    @param n maximum n-gram order to consider.
    """
    Reference.__init__(self, reference_tokens)
    self.n = n
    # preprocess reference
    self._reference_length = len(self._reference_tokens)
    self._reference_ngrams = self._get_ngrams(self._reference_tokens, self.n)
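# Hedged sketch (assumption): _get_ngrams is called above but not shown here.
# A typical implementation collects counts of all n-grams up to order n; the
# standalone helper below illustrates the idea and works for both token lists
# and strings (for a string, the tuples are tuples of characters).
from collections import Counter

def get_ngrams(tokens, n):
    """Return a Counter of all n-grams of order 1..n as tuples."""
    counts = Counter()
    for order in range(1, n + 1):
        for i in range(len(tokens) - order + 1):
            counts[tuple(tokens[i:i + order])] += 1
    return counts

# get_ngrams(['a', 'b', 'a'], 2)
# -> Counter({('a',): 2, ('b',): 1, ('a', 'b'): 1, ('b', 'a'): 1})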
def _get_reference(self):
    """ Returns a reference to this object suitable for binding. """
    reference = Reference(
        class_name=self.__class__.__name__,
        addresses=[Address(type='py_context', content=self.namespace)])
    return reference
def decode(cls, dat):
    if len(dat) == 1:
        dat = dat[0]
    n = len(dat)
    if n != len(cls.fields):
        return None
    if n == 2:
        return list(dat)
    return Reference.decode(tuple(dat))
def _get_reference(self):
    """ Returns a reference to this object suitable for binding. """
    abspath = os.path.abspath(self.path)
    reference = Reference(
        class_name=self.__class__.__name__,
        addresses=[Address(type='pyfs_context', content=abspath)])
    return reference
def __init__(self, method, template):
    self.__method = method
    self.__iohelper = IOHelper()
    self.__iohelper.set_cache_directory(Benchmark.REFERENCES)
    self.__iohelper.set_output_directory(Benchmark.OUTPUT_DIR)
    if template:
        self.__iohelper.select_template(template)
    self.__annotator = Annotator(self.__iohelper)
    self.__analyzer = Analyzer(self.__annotator, self.__iohelper)
    self.__precision = None
    self.__recall = None
    self.__references = Reference.load_from_folder(Benchmark.REFERENCES)
def run(options):
    if options.input is None:
        sys.exit('\nInput RefSeq database file not specified.')
    if options.reference is None:
        sys.exit('\nReference genome file not specified.')

    # Print reference genome name
    print 'RefSeq transcript db: ' + options.input
    print 'Reference genome: ' + options.reference
    print ''

    # Initialize output file
    outfile = open(options.output, 'w')
    outfile.write('#Created: ' + datetime.datetime.now().strftime("%d-%m-%Y") +
                  '; Reference genome: ' + options.reference + '\n')
    outfile.write('#' + '\t'.join(['ID', 'VERSION', 'DIFFS']) + '\n')

    # Transcript database
    tdb = TranscriptDB(options.input)

    # Initialize Reference object
    reference = Reference(options.reference)

    sys.stdout.write('Processing transcripts ... ')
    sys.stdout.flush()

    # Iterate through transcripts
    counter = 0
    for transcript in tdb.generator():
        # Retrieve reference sequence corresponding to transcript
        reference_sequence = get_reference_sequence(transcript, reference)

        # Compare transcript sequence with reference sequence
        diffs = compare_sequences(reference_sequence, transcript.sequence,
                                  transcript.cdna_coding_start,
                                  transcript.cdna_coding_end)

        # Write results to output file
        outfile.write('\t'.join([transcript.id, transcript.version, diffs]) + '\n')
        counter += 1

    print '- Done'

    # Close output file
    outfile.close()

    print '\nA total of {} transcripts have been processed.'.format(counter)

    # Print output file name
    print '\nOutput file: {}'.format(options.output)
def __init__(self, reference_tokens, additional_flags=''):
    """
    Computes the TER of a sentence.

    :param reference_tokens: the reference translation that hypotheses
        shall be scored against. Must be an iterable of tokens (any type).
    :param additional_flags: additional TERCOM flags.
    """
    self.d = dict(os.environ.copy())
    self.d['LANG'] = 'C'
    self.extension = str(random.randint(0, 10000000))
    self.hyp_filename = "/tmp/" + self.extension + ".hyp"
    self.ref_filename = "/tmp/" + self.extension + ".ref"
    # spaces around additional_flags keep the flags from fusing with the
    # hypothesis filename or with the pipe
    self.ter_cmd = "bash " + TER_JAR + " -r " + self.ref_filename \
        + " -h " + self.hyp_filename + " " + additional_flags \
        + " | grep TER | awk '{print $3}'"
    self.clean_cmd = "rm -f " + self.ref_filename + " " + self.hyp_filename
    # Used to guarantee thread safety
    self.lock = threading.Lock()
    Reference.__init__(self, reference_tokens)
    self._gts_ter = ' '.join(reference_tokens) + '\t(sentence%d)\n' % 0
    with open(self.ref_filename, 'w') as f:
        f.write(self._gts_ter)
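# Hedged sketch (not from the source): one plausible companion method that
# scores a hypothesis with the fields set up above. The name
# score_hypothesis and the float parsing of TERCOM's output are assumptions.
import subprocess

def score_hypothesis(scorer, hyp_tokens):
    # serialize access: ter_cmd reads and writes fixed temporary files
    with scorer.lock:
        with open(scorer.hyp_filename, 'w') as f:
            # segment id must match the reference file: (sentence0)
            f.write(' '.join(hyp_tokens) + '\t(sentence0)\n')
        out = subprocess.check_output(scorer.ter_cmd, shell=True, env=scorer.d)
        return float(out.strip())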
def __init__(self, reference_tokens, n=6, beta=1):
    """
    @param reference_tokens the reference translation that hypotheses shall
           be scored against.
    @param n maximum character n-gram order to consider.
    @param beta algorithm parameter beta (interpolation weight, needs to
           be > 0).
    """
    if beta <= 0:
        raise ValueError("Value of beta needs to be larger than zero!")
    Reference.__init__(self, reference_tokens)
    self.n = n
    self.max_order = n
    self.beta_squared = beta ** 2

    # The paper specifies that whitespace is ignored, but for a training
    # objective, it's perhaps better to leave it in. According to the paper,
    # it makes no difference in practice for scoring.
    self._reference_string = " ".join(reference_tokens).strip()

    # Get n-grams from reference:
    self._reference_ngrams = self._get_ngrams(self._reference_string, self.n)
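# Hedged sketch (not from the source): how beta_squared typically enters a
# chrF-style score. Given character n-gram precision P and recall R, chrF is
# the F-beta measure; the helper name chrf_score and its inputs are
# hypothetical, not part of the class above.
def chrf_score(precision, recall, beta_squared):
    """F-beta combination of precision and recall, as used by chrF."""
    if precision == 0.0 and recall == 0.0:
        return 0.0
    return ((1.0 + beta_squared) * precision * recall
            / (beta_squared * precision + recall))

# chrf_score(0.5, 0.5, 1.0) -> 0.5; a larger beta weights recall more heavily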
def get_state_to_bind(self, obj, name, context):
    """ Returns the state of an object for binding. """
    state = None
    if isinstance(obj, File):
        # If the file is not actually in the directory represented by the
        # context then we create and bind a reference to it.
        if obj.parent.path != context.path:
            state = Reference(
                class_name=obj.__class__.__name__,
                addresses=[Address(type='file', content=obj.path)])
    return state
def create(self):
    """Create a VLArray for containing frames as variable-length
    sequences of t, width, pixels..."""
    f = Reference.create(self)
    if not f:
        return False
    self.outfile.create_vlarray(where=f,
                                name=self.handle,
                                atom=tables.UInt8Atom(shape=()),
                                title=self.name,
                                filters=cfilter,
                                createparents=True,
                                reference_class=self.__class__.__name__)
    self.path = self.folder + self.handle
    return True
def create(self):
    """Create the output array"""
    f = Reference.create(self)
    if not f:
        return False
    print 'creating vlarray', self.folder, self.handle, self.name
    self.outfile.create_vlarray(where=f,
                                name=self.handle,
                                atom=tables.UInt8Atom(shape=()),
                                title=self.name,
                                filters=cfilter,
                                createparents=True,
                                reference_class=self.__class__.__name__)
    print 'updating path'
    self.path = self.folder + self.handle
    print 'ref created', self.path
    return True
def create(self):
    """Create a Table instance configured for Log storage"""
    f = Reference.create(self)
    if not f:
        return False
    # print 'creating table', self.outfile, self.path, f, self.handle, self.name, self.outfile.get_path()
    self.outfile.create_table(where=f,
                              name=self.handle,
                              description=np.dtype(self.fields),
                              title=self.name,
                              filters=cfilter,
                              createparents=True,
                              reference_class=self.__class__.__name__)
    # print 'setting path'
    self.path = self.folder + self.handle
    # print 'done', self.path
    self.outfile.flush()
    return True
def interpolate(self, step=1, kind=1):
    """Array interpolation for summary synchronization."""
    vt = Reference.interpolate(self, step)
    if vt is False:
        return False
    # Value sequence
    # starting from the oldest time minus step
    oldi = self.get_time(vt[0] - step)
    # Check if we have enough points to interpolate
    if len(self) - oldi < 5:
        return False
    # If possible, go back one more point, for interpolation safety
    if oldi > 1:
        oldi -= 1
    # Decode values and separate time and value vectors
    dat = self[oldi:]
    # print 'Getting data', self.path, dat, vt
    dat = np.array(dat)
    dat = dat.transpose()
    # Build a linear spline using vt points as knots
    # f = LSQUnivariateSpline(dat[0], dat[1], vt, k=kind)
    # Do a linear fitting
    (slope, const), res, rank, sing, rcond = np.polyfit(
        dat[0], dat[1], kind, full=True)
    # Build a vectorized evaluator
    f = np.vectorize(lambda x: slope * x + const)
    while vt[0] < dat[0][0] and len(vt) > 1:
        vt = vt[1:]
    while vt[-1] > dat[0][-1] and len(vt) > 1:
        vt = vt[:-1]
    if len(vt) <= 1:
        return False
    try:
        # Interpolate the time series
        out = f(vt)
    except:
        print 'Array.interpolate', self.path, vt, dat
        raise
    # Encode in a (t, v) append-able list
    out = np.array([vt, out]).transpose()
    self.summary.commit(out)
    return True
def __init__(self, filepath):
    with io.open(filepath, 'r', encoding='utf-8') as f:
        self.raw = json.load(f)
    self.compounds = [
        Compound(x['name'], x['xrefs']) for x in self.raw['compounds']
    ]
    self.remedies = [
        Remedy(x['name'], x['xrefs']) for x in self.raw['remedies']
    ]
    self.enzymes = [
        Enzyme(x['name'], x['xrefs']) for x in self.raw['enzymes']
    ]
    self.transporter = [
        Transporter(x['name'], x['xrefs']) for x in self.raw['transporter']
    ]
    self.drugs = [Drug(x['name'], x['xrefs']) for x in self.raw['drugs']]
    publication = self.raw['publication']
    doi = publication['doi'] if 'doi' in publication else None
    self.publication = Reference(publication['pmid'], doi,
                                 publication['citation'])
def create(self, fields=False):
    """Create an EArray (enlargeable array) as data storage"""
    f = Reference.create(self)
    if not f:
        return False
    if fields:
        self.fields = fields
    self.outfile.create_table(where=f,
                              name=self.handle,
                              description=np.dtype(self.fields),
                              title=self.name,
                              filters=cfilter,
                              createparents=True,
                              reference_class=self.__class__.__name__)
    # print 'created', f, self.folder, self.handle
    self.path = self.folder + self.handle
    # print 'done', self.path
    self.outfile.flush()
    # Create the summary mirror
    if (not self.path.startswith('/summary')) and len(self.fields) == 2 \
            and self.with_summary:
        # print 'Creating summary', self.path
        self.summary = Array(
            self.outfile, '/summary' + self.folder, opt=self.opt)
    return True
def __getattr__(self, name):
    if name[0:2] != "__":
        return Reference(name)
    else:
        raise AttributeError
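# Hedged usage sketch: a self-contained restatement of the pattern above.
# The class names Ref and Namespace are hypothetical stand-ins; the point is
# that any non-dunder attribute access manufactures a Reference on the fly.
class Ref(object):
    def __init__(self, name):
        self.name = name

class Namespace(object):
    def __getattr__(self, name):
        # __getattr__ only fires when normal attribute lookup fails
        if name[0:2] != "__":
            return Ref(name)
        raise AttributeError(name)

ns = Namespace()
print(ns.width.name)  # -> width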
def getter(self):
    from reference import Reference
    return Reference.by_ids(self.lib.reference_lib.values())
def getReferences(self, json=False):
    references = self.soup.find_all('ref')
    referenceList = []
    for reference in references:
        extractedReference = Reference()
        try:
            # a missing 'id' attribute raises KeyError, not AttributeError
            extractedReference.id = reference.attrs['id']
        except (AttributeError, KeyError):
            print('id not present')
            continue
        try:
            authors = reference.find_all('string-name')
            extractedReference.authors[:] = []
            for author in authors:
                surname = author.find('surname').get_text().encode("utf-8")
                givenName = author.find('given-names').get_text().encode(
                    "utf-8")
                authorObject = Author()
                authorObject.surname = surname
                authorObject.givenName = givenName
                extractedReference.authors.append(authorObject)
        except AttributeError:
            print('author not present')
        try:
            extractedReference.year = reference.find(
                'year').get_text().encode("utf-8")
        except AttributeError:
            print('year not present')
        try:
            extractedReference.articleTitle = reference.find_all(
                'article-title')[-1].get_text().encode("utf-8")
            print(extractedReference.articleTitle)
        except AttributeError:
            print('article title not present')
        try:
            extractedReference.source = reference.find(
                'source').get_text().encode("utf-8")
        except AttributeError:
            print('source not present')
        try:
            extractedReference.volume = reference.find(
                'volume').get_text().encode("utf-8")
        except AttributeError:
            print('volume not present')
        try:
            extractedReference.fromPage = reference.find(
                'fpage').get_text().encode("utf-8")
        except AttributeError:
            print('fromPage not present')
        try:
            extractedReference.toPage = reference.find(
                'lpage').get_text().encode("utf-8")
        except AttributeError:
            print('toPage not present')
        referenceList.append(extractedReference)
    if json is True:
        return self.convertToJson(referenceList)
    return referenceList
def __init__(self, reference_tokens, beer_scorer):
    Reference.__init__(self, reference_tokens)
    # Construct reference string from tokens
    self._reference_string = " ".join(reference_tokens)
    self._beer_scorer = beer_scorer
def __init__(self, name, source):
    """
    Constructor called either by PhysicsObject or Tree.
    Parse the source text block and collect all information on this object.
    """

    self.name = name
    self.includes = []
    self.constants = []
    self.enums = []
    self.objbranches = []
    self.branches = []
    self.references = []
    self.functions = []

    while True:
        line = source.readline()
        line = line.strip()
        if line == '':
            break

        try:
            self.includes.append(Include(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.enums.append(Enum(line, source))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.constants.append(Constant(line, source))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.branches.append(RefBranch(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.branches.append(RefVectorBranch(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.objbranches.append(ObjBranch(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.branches.append(Branch(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.branches.append(GenericBranch(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.references.append(Reference(line))
            continue
        except Definition.NoMatch:
            pass

        try:
            self.functions.append(Function(line, source))
            continue
        except Definition.NoMatch:
            pass

        break
class Plot:

    WIDTH = 800
    HEIGHT = 600

    def __init__(self, logger, id, path, compound):
        self.logger = logger

        self.logger.info("Parsing experiment data")
        self.dic, _ = ng.bruker.read(path)
        _, self.pdata = ng.bruker.read_pdata("{}/pdata/1/".format(path))
        self.logger.info("Experiment data parsed successfully")

        self.compound = compound
        self.id = SpectrumDB.Create(id)  # SpectrumDB.Create(hashlib.sha256(self.pdata.tostring()).hexdigest())

    def createReferenceLayout(self):
        return column(
            row(column(self.reference.old), column(self.reference.new)),
            row(self.reference.button))

    def createPeakPickingLayout(self):
        return column(
            CustomRow(column(self.peakPicking.manual),
                      column(self.peakPicking.peak),
                      hide=True),
            row(self.peakPicking.dataTable),
            row(column(self.peakPicking.deselectButton),
                column(self.peakPicking.deleteButton)),
            row(self.peakPicking.chemicalShiftReportTitle),
            row(self.peakPicking.chemicalShiftReport))

    def createIntegrationLayout(self):
        return column(
            CustomRow(column(self.integration.manual), hide=True),
            row(self.integration.dataTable),
            row(column(self.integration.deselectButton),
                column(self.integration.deleteButton)))

    def createMultipletManagerLayout(self):
        return column(
            CustomRow(column(self.multipletAnalysis.manual), hide=True),
            row(self.multipletAnalysis.dataTable),
            row(self.multipletAnalysis.title),
            row(column(self.multipletAnalysis.classes),
                column(self.multipletAnalysis.integral),
                column(self.multipletAnalysis.j)),
            row(self.multipletAnalysis.delete),
            row(self.multipletAnalysis.reportTitle),
            row(self.multipletAnalysis.report))

    def createTabs(self, tabs):
        callback = CustomJS(args=dict(
            referenceTool=self.reference.tool,
            peakPickingManualTool=self.peakPicking.manualTool,
            peakByPeakTool=self.peakPicking.peakTool,
            integrationTool=self.integration.tool,
            multipletAnalysisTool=self.multipletAnalysis.tool),
            code="""
            switch(this.active) {
            case 0:
                referenceTool.active = true;
                break;
            case 1:
                if (!peakByPeakTool.active) {
                    peakPickingManualTool.active = true;
                }
                break;
            case 2:
                integrationTool.active = true;
                break;
            case 3:
                multipletAnalysisTool.active = true;
                break;
            }
        """)
        return Tabs(tabs=tabs, width=500, callback=callback, id="tabs")

    def draw(self):
        try:
            referenceLayout = self.createReferenceLayout()
            peakPickingLayout = self.createPeakPickingLayout()
            integrationLayout = self.createIntegrationLayout()
            multipletManagerLayout = self.createMultipletManagerLayout()

            referenceTab = Panel(child=referenceLayout, title="Reference")
            peakPickingTab = Panel(child=peakPickingLayout,
                                   title="Peak Picking")
            integrationTab = Panel(child=integrationLayout,
                                   title="Integration")
            multipletAnalysisTab = Panel(child=multipletManagerLayout,
                                         title="Multiplet Analysis")

            tabs = self.createTabs([
                referenceTab, peakPickingTab, integrationTab,
                multipletAnalysisTab
            ])

            curdoc().add_root(
                row(
                    column(
                        row(self.plot),
                        row(Div(text=self.compound, id="compoundContainer"))),
                    column(row(tabs))))
            curdoc().title = "NMR Analysis Tool - " + str(self.id)
        except NameError:
            print("Please create plot first")

    def create(self):
        self.makePPMScale()
        self.newPlot()

        self.dataSource = ColumnDataSource(
            data=dict(ppm=self.ppmScale, data=self.pdata))

        self.reference = Reference(self.logger, self.dataSource)
        self.reference.create()
        self.reference.draw(self.plot)

        self.peakPicking = PeakPicking(self.logger, self.id, self.dic,
                                       self.udic, self.pdata,
                                       self.dataSource, self.reference)
        self.peakPicking.create()
        self.peakPicking.draw(self.plot)

        self.integration = Integration(self.logger, self.id, self.pdata,
                                       self.dataSource, self.reference)
        self.integration.create()
        self.integration.draw(self.plot)

        self.multipletAnalysis = MultipletAnalysis(
            self.logger, self.id, self.dic, self.udic, self.pdata,
            self.dataSource, self.peakPicking, self.integration,
            self.reference)
        self.multipletAnalysis.create()
        self.multipletAnalysis.draw(self.plot)

        self.createMeasureJTool()

        self.plot.line('ppm', 'data', source=self.dataSource, line_width=2)

    # make ppm scale
    def makePPMScale(self):
        self.udic = ng.bruker.guess_udic(self.dic, self.pdata)
        uc = ng.fileiobase.uc_from_udic(self.udic)
        self.ppmScale = uc.ppm_scale()

    # create a new plot with a title and axis labels
    def newPlot(self):
        # Constants
        xr = Range1d(start=int(max(self.ppmScale) + 1),
                     end=int(min(self.ppmScale) - 1))

        self.plot = figure(x_axis_label='ppm',
                           x_range=xr,
                           toolbar=CustomToolbar(),
                           tools="pan,save,reset",
                           plot_width=self.WIDTH,
                           plot_height=self.HEIGHT)

        # Remove grid from plot
        self.plot.xgrid.grid_line_color = None
        self.plot.ygrid.grid_line_color = None

        horizontalBoxZoomTool = HorizontalBoxZoomTool()
        self.plot.add_tools(horizontalBoxZoomTool)
        self.plot.toolbar.active_drag = horizontalBoxZoomTool

        fixedWheelZoomTool = FixedWheelZoomTool(dimensions="height")
        self.plot.add_tools(fixedWheelZoomTool)
        self.plot.toolbar.active_scroll = fixedWheelZoomTool

        fixedZoomOutTool = FixedZoomOutTool(factor=0.4)
        self.plot.add_tools(fixedZoomOutTool)

        hoverTool = HoverTool(tooltips="($x, $y)")
        self.plot.add_tools(hoverTool)

    def createMeasureJTool(self):
        source = ColumnDataSource(data=dict(x=[], y=[]))
        label = Label(x=0, y=0, text="", text_color="#000000",
                      render_mode="css")
        self.plot.add_layout(label)

        measureJTool = MeasureJTool(label=label,
                                    frequency=getFrequency(self.udic),
                                    id="measureJTool")
        self.plot.add_tools(measureJTool)
def __init__(self, reference_tokens, meteor_scorer):
    Reference.__init__(self, reference_tokens)
    # Construct reference string from tokens
    self._reference_string = " ".join(reference_tokens)
    self._meteor_scorer = meteor_scorer
def main():
    # Get arguments
    args = argParse()
    print("- ARGS :")
    print(args, "\n")

    # create indexed reference
    print("- REFERENCE :")
    reference = Reference()
    # check if already indexed
    fileName = path.splitext(args.ref)[0]
    # retrieve from .gz
    if path.exists(fileName + ".dumped.gz"):
        reference.load(fileName)
        print("reference loaded from dumped.gz")
    # index sequence
    else:
        sequence = getSequence(args.ref)
        reference.createIndex(sequence)
        reference.save(fileName)
        print("reference indexed and saved")
    print("DEBUG", reference.N)
    print("Reference : ", "\n", reference.text[:10], "...", "\n")

    # open reads
    print("- READS :")
    readStream = openReads(args.reads)
    readName, readContent = getNextRead(readStream)
    print("First read :", readName, readContent)
    print("RevCompl :", reverseCompl(readContent), "\n")

    # output stream
    outputStream = open(args.out, "w")

    # Start the timer
    print("- FINDING ALIGNMENT :")
    with Timer() as total_time:
        while readContent != -1:
            print(readName, "(processing)")
            isRevCompl = False
            bestScore, bestPos = getBestSemiGlobalAlg(readContent, reference,
                                                      args.k, args.dmax)
            bestScoreRev, bestPosRev = getBestSemiGlobalAlg(
                reverseCompl(readContent), reference, args.k, args.dmax)
            # if the reverse search is better we take it
            if bestScoreRev < bestScore:
                bestScore, bestPos = bestScoreRev, bestPosRev
                isRevCompl = True
            # Found a result
            if bestScore != len(readContent):
                if bestScore <= args.dmax:
                    # with tabs
                    appendResults(outputStream, readName, bestPos,
                                  isRevCompl, bestScore)
            readName, readContent = getNextRead(readStream)
    total_time.print('\nIt took {} seconds.', 5)
    print()
    readStream.close()
    outputStream.close()

    # print out file
    print("- RESULTS :")
    f = open(args.out, "r")
    print(f.read())
    f.close()
    print("exported in:", args.out)
def main():
    global lr, outputFile, opts
    opts, args = parseOptions()
    lr = args[1]
    animalId = args[0] + '_' + lr
    male = animalId[0] == 'M'

    # Open our files...
    ref = Reference(opts.reference + '_ordered.fa')
    indels = open(opts.indels)
    snps = open(opts.snps)
    dir = opts.dir
    if opts.output:
        outputFile = open(os.path.join(dir, opts.output), 'w')
    else:
        outputFile = sys.stdout

    # Prepare for processing the indels file
    # Skip the indels vcf header cruft
    iHeaders = skipHeaders(indels)
    iStrainTracker = StrainTracker(opts.control, iHeaders, 'I')
    Indel.setStrainTracker(iStrainTracker)
    # From here on out all lines in the indels file will be indel records.
    # Flag that we need to refresh the indel
    needIndel = True

    # Similarly set up the snps file
    sHeaders = skipHeaders(snps)
    sStrainTracker = StrainTracker(opts.control, sHeaders, 'S')
    Snp.setStrainTracker(sStrainTracker)
    # From here on out all lines in the snps file will be snp records.
    # Flag that we need to refresh the snp
    needSnp = True

    # Main processing loop...
    while True:
        if needIndel:
            indel = Indel(indels)
            if indel.chr == 'EOF':
                # We exhausted the indels file. We'll clean up the snps
                # after the loop.
                break
            needIndel = False
        if needSnp:
            snp = Snp(snps)
            if snp.chr == 'EOF':
                # We exhausted the snps file. We'll clean up the indels
                # after the loop.
                break
            needSnp = False

        # Now we have an indel and a snp. Process whichever is first.
        # This function will return True if it processed the indel,
        # False if it processed the snp.
        processedIndel = processNextEvent(ref, indel, snp, animalId)
        if processedIndel:
            needIndel = True
        else:
            needSnp = True

    # End of the main loop. We have exhausted one or the other input
    # file. Now clean up the remainder of the other file.
    if indel.chr == 'EOF':
        # Last parameter False indicates processing snps
        processRemaining(ref, snps, animalId, False)
    elif snp.chr == 'EOF':
        processRemaining(ref, indels, animalId, True)

    # That's about it!
    finishUp(ref, opts.reference, male, lr)
def open(self, folder):
    """Open an existing Array in `folder` with its summary Array"""
    Reference.open(self, folder)
    # Open the summary mirror
    if not self.path.startswith('/summary') and len(self.fields) == 2:
        self.summary = Array(self.outfile, '/summary' + self.path)
def __init__(self, outfile, folder=False, opt=False, write_current=False,
             with_summary=True):
    self.with_summary = with_summary
    Reference.__init__(self, outfile, folder=folder, opt=opt,
                       write_current=write_current)