def process_dataframes(self, sheets, dataframes, crosslinks):
    '''Returns the dataframes generated for each sheet'''

    for sheet, dataframe in zip(sheets, dataframes):
        crosslink = base.Crosslink(sheet.linkname, sheet.linktype)
        labels_list = crosslinks[crosslink]

        lengths = {len(i.peptide) for i in labels_list}
        dimensions = max(lengths)
        dataframe.set_dimensions(lengths, dimensions)
        dataframe.set_version()

        linkages = self.getlinkages(labels_list)
        for labels, linkage in zip(labels_list, linkages):
            dataframe(labels, linkage)

def get_axiscorrelation(isotopes, pattern):
    '''Returns the axis-wise ratio correlations to the theoretical ratio'''

    base = next(isotopes)
    ratios = (i / base for i in isotopes)

    length = range(len(pattern) - 1)
    theoretical = [np.zeros(base.size) for i in length]
    constants = np.array([i / pattern[0] for i in pattern[1:]])
    for constant, array in zip(constants, theoretical):
        array.fill(constant)

    pearson = [np.nan_to_num(stats.pearsonr(x, y)[0])
               for x, y in zip(ratios, theoretical)]
    return np.mean(pearson)

def addratio(self, index, linkage, ratio_obj, ratioheaders):
    '''Adds the ratios from a given linkage to the dataframe'''

    # set the ratio data
    zipper = zip(ratio_obj.ratio, ratio_obj.error)
    length = len(ratio_obj.ratio) - 1
    for idx, (ratiovalue, errorvalue) in enumerate(zipper):
        if idx != ratio_obj.index and idx < length:
            header = ratioheaders[self.offset + idx]
        elif idx != ratio_obj.index and idx == length:
            header = ratioheaders[-1]
        else:
            continue

        # TODO: clean this up... the ratioobj should have this builtin
        try:
            ratiovalue = float(ratiovalue)
        except ValueError:
            pass
        try:
            errorvalue = float(errorvalue)
        except ValueError:
            pass

        ratiocolumn = self.getratiocolumn(linkage, header)
        self.loc[index, ratiocolumn] = ratiovalue

        errorcolumn = self.geterrorcolumn(linkage, header)
        self.loc[index, errorcolumn] = errorvalue

        counts = ratio_obj.getcounts(idx)
        countscolumn = self.getcountscolumn(linkage, header)
        self.loc[index, countscolumn] = counts

def getsortkey(self, rows):
    '''Returns a sortable key for the link rows'''

    names = self.row.data.getcolumn(rows, 'preferred')
    positions = (self.mincrosslinkpositions[i] for i in rows)
    return {i: (n, p) for i, n, p in zip(rows, names, positions)}

def get_anchor_weight(clusters):
    '''
    Weights all the anchor points using a nearest non-overlapping
    feature approach (1D). If the element overlaps, the value is 1.
    If the element does not overlap but is the nearest, then the
    value is 2. The returned weight is 1 / sqrt(value).
    '''

    # get the euclidean distances sorted
    distance_matrix = get_nonoverlapping_distances(clusters)
    sortedargs = np.argsort(distance_matrix)

    # weight each distance
    weight_matrix = np.zeros(sortedargs.shape)
    zipped = zip(sortedargs, distance_matrix)
    for row, (indexes, distances) in enumerate(zipped):
        counter = 2
        for index in indexes:
            if distances[index] == 0:
                weight_matrix[row][index] = 1
            else:
                weight_matrix[row][index] = counter
                counter += 1

    return np.prod((1 / np.sqrt(weight_matrix)), axis=0)

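# Hypothetical illustration (not part of the original module) of the weighting
# scheme described in get_anchor_weight above: overlapping features get a
# value of 1, the nearest non-overlapping feature gets 2, the next nearest 3,
# and the final weight is the column-wise product of 1 / sqrt(value). The
# matrix below is made-up example data.
def _demo_anchor_weight_transform():
    '''Sketch of the 1 / sqrt(value) transform used by get_anchor_weight'''
    import numpy as np

    # rows are reference features, columns are anchor points
    weight_matrix = np.array([
        [1.0, 2.0, 3.0],   # anchor 0 overlaps row 0; anchor 1 is its nearest
        [2.0, 1.0, 2.0],   # anchor 1 overlaps row 1
    ])
    # anchor 0 -> 1/sqrt(1) * 1/sqrt(2); anchor 2 -> 1/sqrt(3) * 1/sqrt(2)
    return np.prod(1 / np.sqrt(weight_matrix), axis=0)
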
def getzipped(self, row):
    '''Custom zipper which uses the labeledcrosslinks indexes'''

    data = self.matched[row]
    spreadsheets = data['spreadsheet']['labeled']
    crosslinks = data['labeledcrosslinks']
    return zip(spreadsheets, crosslinks)

def test_properties(self):
    '''Test protein object properties'''

    for length, mw, sequence in zip(LENGTHS, MOLECULAR_WEIGHTS, SEQUENCES):
        inst = protein.Protein(sequence, 'test')
        self.assertEquals(inst.length, length)
        self.assertAlmostEquals(inst.mw, mw, 5)

def __call__(self, crosslinkindex, crosslink, isotopedata):
    '''
    Returns each permutation of the isotope-labeled crosslinkers
    in a generator.
    '''

    isotopestates, experimental, theoretical = zip(*isotopedata)
    permutations = self.permutations(isotopestates)
    zipped = list(zip(theoretical, experimental))

    states = list(self.getstates(crosslink, permutations, zipped))
    if states:
        frozen = self.freezer(crosslinkindex, states)
        return IsotopeLabeledLink(crosslinkindex, isotopestates,
                                  frozen, states, self.row.index)

def setdeadendmass(self, ends):
    '''
    Sets the mass for each deadend modification on a crosslinker at
    each reactive site, allowing quick lookups.
    '''

    zipped = zip(ends.aminoacid, ends.deadend)
    self.deadendmass = {r: chemical.Molecule(d).mass for r, d in zipped}

def __call__(self, document, path, sheets, dataframes):
    '''On call'''

    writer = openoffice.OpenOfficeWriter(path)
    zipped = zip(sheets, dataframes)
    for index, (sheet, dataframe) in enumerate(zipped):
        writer.workbook.add_worksheet(index, sheet.title, dataframe)
    writer.workbook.save()

def setms1spreadsheet(spreadsheet, linkedscans):
    '''Sets the MS1 scan information to the spreadsheet'''

    scans = spreadsheet[(' ', 'Precursor Scan')]
    # (None, None) -> means precursor not found
    ms1scans, ms1rt = zip(*(linkedscans.get(i, (None, None)) for i in scans))
    spreadsheet[(' ', 'MS1 Scan')] = list(ms1scans)
    spreadsheet[(' ', 'MS1 RT')] = list(ms1rt)

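# Hypothetical sketch (example values, not from the original codebase) of the
# unzip idiom used in setms1spreadsheet: precursor scans missing from
# `linkedscans` fall back to (None, None) so the MS1 Scan and MS1 RT columns
# stay aligned with the precursor scan column.
def _demo_ms1_lookup():
    '''Sketch pairing precursor scans with (MS1 scan, MS1 RT) tuples'''
    linkedscans = {101: (95, 12.4), 205: (198, 25.1)}
    scans = [101, 150, 205]            # scan 150 has no linked MS1 scan
    ms1scans, ms1rt = zip(*(linkedscans.get(i, (None, None)) for i in scans))
    # ms1scans == (95, None, 198); ms1rt == (12.4, None, 25.1)
    return list(ms1scans), list(ms1rt)
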
def get_pointcorrelation(isotopes, pattern):
    '''Returns the Pearson correlation for each point, averaged'''

    zipped = zip(*isotopes)
    arrays = (np.array(i) for i in zipped)
    adjusted = (i / i.max() for i in arrays)
    corrs = (stats.pearsonr(i, pattern)[0] for i in adjusted)
    return np.mean([np.nan_to_num(i) for i in corrs])

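# Hypothetical usage sketch (example data only) for get_pointcorrelation:
# three isotope traces that follow a 4:2:1 abundance pattern at every
# retention-time point. Assumes numpy and scipy.stats are available as `np`
# and `stats` in this module, as the function above requires.
def _demo_pointcorrelation():
    '''Sketch of a perfectly pattern-matched pair of points'''
    light = [8.0, 4.0]      # two retention-time points per isotope state
    medium = [4.0, 2.0]
    heavy = [2.0, 1.0]
    # each per-point vector matches the 4:2:1 pattern after max-normalisation,
    # so the averaged Pearson correlation is 1.0
    return get_pointcorrelation([light, medium, heavy], [4.0, 2.0, 1.0])
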
def newtooltip(self):
    '''Initializes a new tooltip for the display'''

    amplitudes = []
    zipper = zip(self.parent().group, self.parent().labels.items)
    for child, legend in zipper:
        kwds = self.getkwds(child, legend)
        amplitudes.append(self.amplitude.format(**kwds))
    return ''.join(amplitudes)

def test_sequencing(self):
    '''Test sequencing peptide creation'''

    for sequence, (first, last) in zip(SEQUENCES, PEPTIDES):
        inst = protein.Protein(sequence, 'test')
        inst.sequencing_peptides('Trypsin')
        self.assertEquals(inst.peptides[0].sequence, first)
        self.assertEquals(inst.peptides[-1].sequence, last)

def process_dataframes(self, sheets, dataframes, crosslinks):
    '''Returns the dataframes generated for each sheet'''

    for sheet, dataframe in zip(sheets, dataframes):
        crosslink = base.Crosslink(sheet.linkname, sheet.linktype)
        labels_list = crosslinks[crosslink]
        linkages = self.getlinkages(labels_list)
        dataframe(labels_list, linkages)

def getmatched(self, indexes, modifications, modfreezer):
    '''Returns the unique matched data from the indexes'''

    zipped = zip(modifications, indexes)
    for modification, index in zipped:
        modification = modfreezer(modification)
        peptide, uniprotid = self.row.data.getrow(index, ('peptide', 'id'))
        yield frozenset((modification, peptide, uniprotid))

def yield_labels(labels, indexes, plotdata):
    '''Yields the legend for the labels view'''

    profile = labels.get_document().profile
    for index, plot in zip(indexes, plotdata):
        crosslink = labels[index]
        populations = crosslink.populations
        yield ' - '.join(profile.populations[i].header for i in populations)

def spectrum(self, scan, mzs, intensity):
    '''Ends and processes spectrum data and decodes peak lists.'''

    if self.get_storespectra(scan):
        arrays = self.get_decoded_scans(scan, mzs, intensity)
        if scan.getattr('spectrum_type') != 'centroided':
            arrays = peak_picking.centroid_scan(*arrays)

        for key, array in zip(ARRAYS, arrays):
            scan.create_array(key, obj=array)

def __set_amplitudes(self, spreadsheet, headers, used):
    '''Sets the integrated amplitude data for the transitions'''

    integrated = [i.integrate_data(used) for i in self]
    for header, integraldata in zip(headers, integrated):
        for key, attr in integraldata.iterfields():
            spreadsheet[(header, key)] = attr

    for key, attrname in xictools.SPECTRAL_ENUM:
        ratio = xictools.Ratios.fromintegrated(attrname, integrated)
        spreadsheet[(' ', 'Ratio ' + key)] = ratio.tostr()

def __append_charge(self, data, labeledcrosslink, index):
    '''Appends a precursor charge to the HDF5 attributes'''

    charge = data['matched']['precursor_z'][index]
    precursor_z = self.precursor_z
    if charge not in precursor_z:
        precursor_z.append(charge)
        precursor_z.sort()

    for label, transition in zip(labeledcrosslink.states, self):
        transition.set_charges(label, charge)

def getmass(self, modifications, populations, crosslink):
    '''Calculate the new mass of the crosslinked peptide'''

    crosslinker = self._getcrosslinker(populations[0])
    masser = masstools.CrosslinkedMass(self.row, crosslinker)

    peptides = self.row.data.getcolumn(crosslink.index, 'peptide')
    zipped = zip(peptides, modifications)
    formulas = (masstools.getpeptideformula(*i, engine=self.engine)
                for i in zipped)
    return masser.getpeptidemass(crosslink.ends, formulas, modifications)

def getmodifications(self, modification):
    '''Returns unique identifiers for the certain and uncertain mods'''

    certain = modification['certain']
    uncertain = self.getuncertain(modification)

    for char, dicts in zip(CHARS, [[certain], uncertain]):
        for modificationdict in dicts:
            for name, positions in modificationdict.items():
                # skip loop if not a standard modification
                if name not in self.isobaric:
                    yield self.getmodstring(name, positions, char)

def filterstrict(self):
    '''
    Removes all MS3 scans that have any matched peptide whose UniProt ID
    is not in the limited database.
    '''

    grouped = self.row.data.groupby(fields=['id'])
    for values in grouped.values():
        rows, ids = zip(*values)
        if any(i not in self.proteins.mapping['proteins'] for i in ids):
            self.deleterows += rows
    self.deleterows.sort()

def __call__(self, indexes):
    '''Constructs a new scan item'''

    indexer = self.indexer(indexes)
    data = list(zip(*self.row.data.getcolumn(indexer.filtered, COLUMNS)))
    if data:
        # precursor data, only need one item, not list of items
        # no data in the case of non-compatible crosslinker
        for index in [-2, -1]:
            data[index] = data[index][0]
        return Scan(*data, indexer=indexer)

def scan(self, scan, binarydata):
    '''
    End reading scan and process data. Converts retention time to the
    float format and decodes the peak lists to Python lists.
    '''

    if self.get_storespectra(scan):
        arrays = self.get_decoded_scans(scan, binarydata)
        if scan.getattr('spectrum_type') != 'centroided':
            arrays = peak_picking.centroid_scan(*arrays)

        for key, array in zip(ARRAYS, arrays):
            scan.create_array(key, array)

def ppm_crosslink(crosslink, indexes, plotdata, bounds):
    '''Returns the mass error at the crosslink level'''

    for index, plot in zip(indexes, plotdata):
        charge = crosslink[index]
        zipped = [i.get_ppm(bounds) for i in charge if i.checked]

        ppm = float("nan")
        if zipped:
            ppms, weights = zip(*zipped)
            if any(weights):
                ppm = np.average(ppms, weights=weights)

        yield ppm_patch(ppm, plot)

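# Hypothetical sketch (example values only) of the weighted mass-error
# average computed inside ppm_crosslink: each checked charge state
# contributes a (ppm, weight) pair, and all-zero weights fall back to NaN.
def _demo_weighted_ppm():
    '''Sketch of the weighted ppm average used per crosslink'''
    import numpy as np

    zipped = [(1.5, 2.0), (-0.5, 1.0)]     # (ppm, weight) per charge state
    ppms, weights = zip(*zipped)
    # (1.5 * 2.0 + -0.5 * 1.0) / 3.0 == 2.5 / 3.0 ~= 0.833
    return np.average(ppms, weights=weights)
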
def groupspectra(scan):
    '''
    Groups scans within a certain m/z window that were previously
    deconvoluted from the self.mzs array.
    '''

    mzs = defaultdict(list)
    intensities = defaultdict(list)
    for row, mz, intensity in zip(*scan):
        mzs[row].append(mz)
        intensities[row].append(intensity)
    return mzs, intensities

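# Hypothetical usage sketch (example data only) for groupspectra: `scan` is
# assumed to be a triple of parallel sequences (row indexes, m/z values,
# intensities), as implied by the zip(*scan) unpacking above. Assumes
# collections.defaultdict is imported in this module, as groupspectra requires.
def _demo_groupspectra():
    '''Sketch grouping four peaks into two deconvoluted rows'''
    rows = [0, 0, 1, 1]
    mz = [400.1, 400.6, 612.3, 612.8]
    intensity = [1.0e4, 8.0e3, 5.0e3, 4.0e3]
    # returns ({0: [400.1, 400.6], 1: [612.3, 612.8]},
    #          {0: [10000.0, 8000.0], 1: [5000.0, 4000.0]})
    return groupspectra((rows, mz, intensity))
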
def set_sums(self, labels, headers, key):
    '''Sets a row total for the summed amplitude and ratios'''

    row = {'Isotope': 'Sum'}
    used = labels.getusedcharges()
    integrated = [i.integrate_data(used) for i in labels]
    amplitudes = [op.attrgetter(key + '.value')(i) for i in integrated]
    for (amplitude, header) in zip(amplitudes, headers):
        row[header] = amplitude

    row['Ratio'] = xictools.Ratios.fromintegrated(key, integrated).tostr()
    index = self.get_last_index()
    self.loc[index] = row

def getamplitudes(self, isotopes, headers, row, key):
    '''Returns the function processed data for the spectral amplitude'''

    integrated = []
    noises = []
    for (isotope, header) in zip(isotopes, headers):
        noise = op.methodcaller(key)(isotope)
        if isotope.ischecked():
            row[header] = integral = noise
        else:
            row[header] = integral = float('nan')

        integrated.append(integral)
        noises.append(noise)
    return integrated, noises

def getlinkmodified(self, scan):
    '''
    Grabs the number of link-modified ends for a given peptide
    combination and cross-linker count. Indexes these mods locally
    to the peptide.
    '''

    residues = []
    for peptide, modification in zip(scan.peptide, scan.modifications):
        crosslinker_positions = self.getpositions(modification)
        for position in crosslinker_positions:
            residue = self.residuefinder(peptide, position)
            residues.append(residue)
    return Counter(residues)