Example #1
    def generate(self, input=None):
        # Pick peaks in the averaged spectrum and reduce the dataset to one
        # column per detected peak
        dso = DataSet(size=input.shape)
        dso.import_data(input)

        # Peak-picking parameters from the tool configuration
        threshold = self.config.get('peak_threshold')
        algorithm = self.algorithms[self.config.get('algorithm')]
        msep = (self.config.get('peak_separation'), )  # minimum separation per axis (1D here)

        # Average the spectra (mean across the first axis) and pick peaks on the result
        data_avg = np.mean(input.data, axis=0)

        # Pick peaks on the averaged spectrum; with est_params=True, cluster=False
        # and table=False, pick() returns peak locations, estimated lineshape
        # scales and amplitudes
        locations, scales, amps = ng.analysis.peakpick.pick(
            data_avg,
            threshold,
            msep=msep,
            algorithm=algorithm,
            est_params=True,
            cluster=False,
            table=False)

        n_locations = len(locations)

        # The reduced dataset keeps one column per detected peak
        new_shape = list(input.shape)
        new_shape[1] = n_locations

        # Look up the scale value (e.g. ppm position) at each picked location;
        # note this overwrites the lineshape scales returned by pick()
        scales = [dso.scales[1][l[0]] for l in locations]

        # Set the new scales and labels before cropping, so they are not lost in the crop
        dso.labels[1] = [str(l) for l in scales]
        dso.scales[1] = scales

        dso.crop(new_shape)

        # Copy the column at each peak location into the reduced dataset
        for n, l in enumerate(locations):
            dso.data[:, n] = input.data[:, l[0]]

        # FIXME: rather than copying the single column at each peak position,
        # take the maximum of each row within a region around each peak
        # (optionally sized from the estimated line widths, to tolerate peak
        # shift between spectra) and store that in column n

        return {'output': dso}
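
For reference, a minimal standalone sketch of the same nmrglue call pattern on a synthetic 1D spectrum. The threshold, separation and algorithm values below are illustrative assumptions, not this tool's configuration; the unpacking of pick() mirrors the call in the example above.

    # Illustrative sketch only: peak-pick an averaged spectrum with nmrglue
    # and keep one column per picked peak.
    import numpy as np
    import nmrglue as ng

    x = np.linspace(0, 10, 1000)
    shape = 1.0 / (1 + ((x - 3.0) / 0.05) ** 2) + 0.5 / (1 + ((x - 7.0) / 0.05) ** 2)
    data = shape + np.random.normal(0, 0.01, (8, x.size))  # 8 noisy spectra

    data_avg = np.mean(data, axis=0)  # average spectrum, as in the example

    # With est_params=True, cluster=False and table=False, pick() returns
    # (locations, scales, amps) -- the same unpacking used above.
    # pthres/msep/algorithm here are made-up example values.
    locations, scales, amps = ng.analysis.peakpick.pick(
        data_avg, pthres=0.2, msep=(10,), algorithm='thres',
        est_params=True, cluster=False, table=False)

    # Keep only the picked columns from the original (unaveraged) spectra
    picked = np.column_stack([data[:, loc[0]] for loc in locations])
    print(len(locations), picked.shape)
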
Example #2
    def normalise(self, dsi):
        # Apply the configured normalisation algorithm to a copy of the input dataset
        dso = DataSet(size=dsi.shape)
        dso.import_data(dsi)

        # Look up the normalisation function by name and apply it to the data
        dso.data = self.algorithms[self.config.get('algorithm')](dso.data)

        return dso
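
The algorithms dictionary used by normalise() is not shown in this example. Below is a hypothetical sketch of what such a mapping could contain, using simple row-wise normalisations; the names and implementations are illustrative assumptions, not the tool's actual code.

    # Illustrative sketch only: one possible algorithms mapping for normalise()
    import numpy as np

    def total_area(data):
        # Scale each spectrum (row) so its summed intensity is 1
        sums = data.sum(axis=1, keepdims=True)
        sums[sums == 0] = 1.0  # avoid division by zero for all-zero rows
        return data / sums

    def max_intensity(data):
        # Scale each spectrum so its largest absolute point is 1
        peaks = np.abs(data).max(axis=1, keepdims=True)
        peaks[peaks == 0] = 1.0
        return data / peaks

    algorithms = {'TSA': total_area, 'Max': max_intensity}

    data = np.random.rand(5, 100)
    normalised = algorithms['TSA'](data)
    print(normalised.sum(axis=1))  # each spectrum now sums to ~1.0
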
Example #3
    def generate(self, input=None):
        dsi = input
        ###### BINNING USING CONFIG
        # Generate bin values for range start_scale to end_scale
        # Calculate the number of bins at binsize across range
        dso = DataSet()
        dso.import_data(dsi)

        r = dsi.scales_r[1]  # (start, end) of the scale range on axis 1
        self._bin_size, self._bin_offset = self.config.get('bin_size'), self.config.get('bin_offset')

        # Bin edges across the scale range; n edges define n - 1 bins
        bins = np.arange(r[0] + self._bin_offset, r[1] + self._bin_offset, self._bin_size)
        number_of_bins = len(bins) - 1

        # Binning cannot increase the size of the data; if there are at least as many bins as points, return the data unchanged
        if number_of_bins >= len(dso.scales[1]):
            return {'output': dso, 'input': input}

        # Resize (lossy) to the new, smaller shape; every bin is refilled below
        new_shape = list(dso.data.shape)
        new_shape[1] = number_of_bins
        dso.crop(new_shape)


        # Bin each spectrum: the weighted histogram sums the intensities in each
        # bin, the unweighted histogram counts the contributing points, and their
        # ratio gives the per-bin mean intensity
        for n, d in enumerate(dsi.data):
            binned_data = np.histogram(dsi.scales[1], bins=bins, weights=d)
            binned_num = np.histogram(dsi.scales[1], bins=bins)
            dso.data[n, :] = binned_data[0] / binned_num[0]

        # The new scale for axis 1 is the left edge of each bin
        dso.scales[1] = [float(x) for x in binned_data[1][:-1]]

        # Remove any NaNs introduced by empty bins (zero-count division above)
        dso.remove_invalid_data()

        return {'output': dso, 'input': input}  # Pass back input for difference plot
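
The core of this example is the histogram trick for mean-per-bin binning. A self-contained sketch of the same idea on synthetic data follows; the scale range and bin size are illustrative values.

    # Illustrative sketch only: mean-per-bin binning with np.histogram
    import numpy as np

    scale = np.linspace(0.0, 10.0, 500)      # e.g. a ppm axis
    spectra = np.random.rand(4, scale.size)  # 4 synthetic spectra

    bin_size, bin_offset = 0.5, 0.0
    bins = np.arange(scale[0] + bin_offset, scale[-1] + bin_offset, bin_size)
    n_bins = len(bins) - 1

    counts, edges = np.histogram(scale, bins=bins)  # points contributing to each bin
    binned = np.empty((spectra.shape[0], n_bins))
    for n, row in enumerate(spectra):
        sums, _ = np.histogram(scale, bins=bins, weights=row)  # intensity sum per bin
        binned[n, :] = sums / counts                           # per-bin mean

    new_scale = edges[:-1]  # the left edge of each bin becomes the new scale value
    print(binned.shape, new_scale[:3])
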