Code Example #1
File: Pacton.py  Project: ivanalam/nplab
    def get_pixel_response_calibration_spectrum(self, debug=0):
        '''
        Scan over the spectrum in a region where you don't expect to see anything (i.e. the UV).
        Take spectra at each position and average them. This gives you the pixel response (strange, I know).
        Smooth the average using cvx and use this in further processing for background subtraction on the LHS,
        to make it easier to stitch spectra together.
        '''
        wavelengths = list(range(50, 80, 10))
        spectra = []
        for wl in wavelengths:
            self.acton.set_wavelength(wl,
                                      blocking=True,
                                      fast=True,
                                      debug=debug)
            spectrum, _ = self.pixis.get_spectrum()
            spectra.append(spectrum)

        spectra = np.array(spectra)
        calibration_spectrum = np.mean(spectra, axis=0)
        #subtract minimum value to remove edge effects
        ys = np.array(calibration_spectrum)

        ymin = np.min(ys)
        ys = ys - ymin
        #smooth to reduce fluctuations in the background
        smoothed, _, _ = convex_smooth(ys, 2)
        smoothed = np.array(smoothed) + ymin
        #replace the first 3 smoothed values with the mean of the first 3 raw values
        smoothed[0:3] = np.mean(calibration_spectrum[0:3])

        self.pixel_response = smoothed
        self.raw_pixel_response = calibration_spectrum
        return smoothed, calibration_spectrum
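The smoothed pixel response returned here is what later processing subtracts from measured frames before stitching. A minimal usage sketch, assuming numpy is imported as np as in the code above and a hypothetical instance `pacton` of this class; the subtraction step is an assumption about how the calibration is applied, not code from the project:

    # compute the calibration once and keep both the smoothed and raw versions
    pixel_response, raw_response = pacton.get_pixel_response_calibration_spectrum()

    # hypothetical application: remove the per-pixel background from a new frame
    spectrum, _ = pacton.pixis.get_spectrum()
    corrected = np.array(spectrum) - pixel_response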
Code Example #2
    def dummyRun(self,
                 initial_fit=None,
                 add_peaks=True,
                 allow_asymmetry=False,
                 minwidth=8,
                 maxwidth=30,
                 regions=20,
                 noise_factor=0.01,
                 min_peak_spacing=5,
                 comparison_thresh=0.05,
                 verbose=False):
        '''
        Handy for testing other scripts quickly.
        '''

        self.initial_bg_poly()
        self.minwidth = minwidth
        self.maxwidth = maxwidth
        #        if initial_fit is not None:
        #            self.peaks = initial_fit
        #            self.peaks_stack = np.reshape(self.peaks, [len(self.peaks)/3, 3])
        #            self.optimize_heights
        #            #self.optimize_centre_and_width()

        try:
            smoothed = sm.Run(self.signal)
        except Exception:
            # fall back to convex smoothing if the primary smoother fails
            smoothed = sm2.convex_smooth(self.spec, 25)[0]
        maxima = argrelextrema(smoothed, np.greater)[0]
        heights = smoothed[maxima]
        maxima = maxima[np.argsort(heights)[-5:]]
        heights = smoothed[maxima]

        centres = self.shifts[maxima]
        widths = np.ones(len(maxima)) * 17

        self.peaks_stack = np.transpose(np.stack([heights, centres, widths]))
        self.peaks = np.ravel(self.peaks_stack)

        self.optimize_heights()
        self.optimize_centre_and_width()
        print("I'm a dummy!")
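The peak seeding used above (the five tallest local maxima of the smoothed signal, each given a fixed initial width of 17) can be reproduced in isolation. A self-contained sketch on synthetic data, using only numpy and scipy; the variable names mirror the method above:

    import numpy as np
    from scipy.signal import argrelextrema

    shifts = np.linspace(0, 1000, 2000)
    smoothed = np.exp(-((shifts - 300) / 20) ** 2) + 0.5 * np.exp(-((shifts - 700) / 25) ** 2)

    maxima = argrelextrema(smoothed, np.greater)[0]      # indices of all local maxima
    maxima = maxima[np.argsort(smoothed[maxima])[-5:]]   # keep (up to) the five tallest
    heights = smoothed[maxima]
    centres = shifts[maxima]
    widths = np.ones(len(maxima)) * 17                   # fixed initial width guess

    peaks_stack = np.transpose(np.stack([heights, centres, widths]))
    peaks = np.ravel(peaks_stack)                        # flat [height, centre, width, ...]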
Code Example #3
def main(debug=0):
    #Measurements and Calibration files
    measurement_file = df.DataFile("measured_spectrum.hdf5", "r")
    reference_file = df.DataFile(
        "maxwell_room_light_spectrum_calibration.hdf5", "r")

    #Load reference within given range
    ref_xs, ref_ys = load_reference_data(reference_file,
                                         lower_wavelength=430,
                                         upper_wavelength=714)

    #Load measured data from file
    data = load_measured_data(measurement_file)

    center_wavelengths = [d[0] for d in data]
    #Load the raw counts from the measured data

    #Merge individual spectra (using median) and normalise to set minimum value to zero
    signal_spectrum, mapper_1 = median_spectrum(data, debug=1)
    signal_spectrum = zero_min(signal_spectrum)
    #handcrafted truncation to "valid range" where we can see peaks & that matches the reference spectrum
    signal_spectrum = signal_spectrum[1550:10570]
    #Apply smoothing to the measured signal - want to eliminate most false peaks
    signal_spectrum = SUREShrink(signal_spectrum)
    signal_spectrum, _, _ = convex_smooth(signal_spectrum, 1.0)

    #rescale the reference and fit a line to the wavelength range [nm]
    ref_xs, ref_ys, gradient, offset = rescale_reference(
        xs=ref_xs,
        ys=ref_ys,
        max_size=np.nanmax(signal_spectrum),
        N=len(signal_spectrum),
        debug=1)

    #Get peaks from the signal, with thresholding to eliminate low order maxima/minima
    signal_peaks = get_peaks(signal_spectrum, threshold=1.0)
    ref_peaks = get_peaks(ref_ys, threshold=2.0)

    #Link peaks, handcrafted to ignore false peaks
    interpolator_function, interpolator_bounds, lines = link_peaks(
        signal_peaks=signal_peaks,
        reference_peaks=ref_peaks,
        ignored_signal_indices=[3, 8, 11, 13, 19, 22, 23],
        ignored_reference_indices=[11, 14, 18],
        debug=debug)

    if debug > 0:
        fig, ax = plt.subplots(1, figsize=(12, 10))
        ax.set_title("fname: main, debug plot 0")
        ax.plot(ref_ys, "-", label="Reference")
        ax.plot(signal_spectrum, label="Signal")

        ax.plot(ref_peaks[0, :], ref_peaks[1, :], "o", label="Reference peaks")
        t1s = annotate_points(ref_peaks[0, :], ref_peaks[1, :], ax)
        ax.plot(signal_peaks[0, :],
                signal_peaks[1, :],
                "o",
                label="Signal peaks")
        t2s = annotate_points(signal_peaks[0, :], signal_peaks[1, :], ax)

        for (xs, ys) in lines:
            ax.plot(xs, ys, "-", color="red")

        ax.set_xlabel("Array index")
        ax.set_ylabel("Counts [arb. units]")

        ax.legend()

    def make_mapper2(interpolator_function, gradient, offset):
        def index_to_wavelength(index, with_correction=True):

            if with_correction:
                alignment_correction = int(
                    np.round(interpolator_function(index)))
                index = index - alignment_correction
            #this is our linear model
            wavelength = index * gradient + offset
            return wavelength

        return index_to_wavelength

    index_to_wavelength = mapper_2 = make_mapper2(interpolator_function,
                                                  gradient, offset)
    if debug > 0:
        fig, ax = plt.subplots(1)
        ax.set_title("fname: main, debug plot 1")
        xs = list(
            range(np.min(interpolator_bounds), np.max(interpolator_bounds)))
        ys = [index_to_wavelength(x) for x in xs]
        plt.plot(xs, ys)

        fig, ax = plt.subplots(1)
        ax.set_title("fname: main, debug plot 2")
        wavelengths_reference = [
            index_to_wavelength(x, with_correction=False)
            for x in range(len(ref_ys))
        ]
        ax.plot(wavelengths_reference,
                ref_ys,
                label="Reference spectrum (rescaled)")
        xs = list(range(interpolator_bounds[0], interpolator_bounds[1]))

        ax.plot([index_to_wavelength(x, with_correction=True) for x in xs],
                [signal_spectrum[x] for x in xs],
                label="Stitched signal (corrected)")
        ax.plot([index_to_wavelength(x, with_correction=False) for x in xs],
                [signal_spectrum[x] for x in xs],
                alpha=0.4,
                label="Stitched signal (uncorrected)")
        ax.set_ylim(0)

        ax.set_xlabel("Wavelength [nm]")
        ax.set_ylabel("Intensity (arb. units)")
        ax.set_title(
            "Alignment to reference spectrum\n Reference: Ocean Optics Spectrometer\n Signal: Acton+Pixis"
        )
        ax.legend()

    # mapper_1 : maps from (center_wavelength, pixel_index) to index in spectrum array
    # mapper_2 : maps from index in spectrum array to wavelength
    def mapper(center_wavelength, pixel_index):
        try:
            array_index = mapper_1(center_wavelength, pixel_index)
            wavelength = mapper_2(array_index)
            return wavelength
        except Exception:
            # failed or out-of-range lookups map to NaN
            return np.nan

    if debug > 0:

        plt.show()

    #This tests the data
    plot_layers(center_wavelengths, data, mapper, show_plot=True)
    return mapper
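main() returns a composed mapper: mapper_1 takes a (center_wavelength, pixel_index) pair to an index in the stitched array, and mapper_2 takes that index to a calibrated wavelength. A usage sketch, assuming the two HDF5 files named above are present; the example values 600 and 400 are arbitrary:

    mapper = main(debug=0)
    # wavelength of pixel 400 in the frame taken at a 600 nm centre wavelength;
    # np.nan is returned when either mapping fails
    wavelength = mapper(600, 400)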
Code Example #4
def compute_shifts(spectra, threshold=-105, debug=0):
    shifts = []
    for i in range(1, len(spectra)):
        ys0 = spectra[i - 1]
        ys1 = spectra[i]

        shift, _ = correlation_align(ys0,
                                     ys1,
                                     upsampling=1.0,
                                     return_integer_shift=True)
        shifts.append(shift)
    #threshold - some shifts fail when there are no spectral features
    shifts = np.array(shifts)
    inds = np.where(shifts <= threshold)
    inds = inds[0]

    xs_interp = list(range(0, np.max(inds) + 1))
    f0 = interp1d(inds, shifts[inds])
    ys_interp = [f0(x) for x in xs_interp]
    pad_length = 5
    smoothed_shifts, _, _ = convex_smooth([ys_interp[0]] * pad_length +
                                          ys_interp, 0.0, "quadratic")
    smoothed_shifts = smoothed_shifts[pad_length:]
    end_pad_length = len(shifts) - len(smoothed_shifts)
    # print end_pad_length
    padded_smoothed_shifts = np.concatenate(
        [smoothed_shifts, ([smoothed_shifts[-1]] * end_pad_length)])
    padded_smoothed_shifts = [0] + list(padded_smoothed_shifts)
    outp = np.array(np.round(padded_smoothed_shifts), dtype=int)
    assert (len(inds) == len(shifts[inds]))
    N = len(outp)

    if debug > 0:
        fig, ax = plt.subplots(1)
        ax.set_title("fname:compute_shifts, debug plot")
        ax.plot(
            inds,
            shifts[inds],
            "o",
            label="Threshold shifts [threshold value: {}".format(threshold))
        # ax.plot(inds[0], smoothed,"-",label="Threshold shifts [threshold value: {}".format(threshold))
        ax.plot(list(range(len(shifts))),
                shifts,
                "x-",
                label="Raw shift values")

        ax.plot(xs_interp,
                ys_interp,
                "x-",
                label="Linearly interpolated shift values")
        ax.plot(xs_interp,
                smoothed_shifts,
                "x-",
                label="Smoothed shift values")
        print(len(shifts), len(padded_smoothed_shifts))

        ax.plot(padded_smoothed_shifts,
                "x-",
                label="Smoothed shift values with end-padding")
        ax.plot(outp, label="Final output")
        ax.legend()
        plt.show()

    assert (len(padded_smoothed_shifts) == len(spectra))
    return inds, outp
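`correlation_align` comes from the surrounding project and is not shown here. The integer shift it returns can be approximated with a plain cross-correlation; a minimal sketch, assuming the shift is the lag that maximises the cross-correlation of the two mean-subtracted spectra (the sign convention may differ from `correlation_align`):

    import numpy as np

    def integer_shift(ys0, ys1):
        # estimate the integer lag between two equally sampled spectra
        # from the argmax of their full cross-correlation
        a = np.asarray(ys0, dtype=float) - np.mean(ys0)
        b = np.asarray(ys1, dtype=float) - np.mean(ys1)
        corr = np.correlate(a, b, mode="full")
        # zero lag sits at index len(b) - 1 of the "full" output
        return int(np.argmax(corr)) - (len(b) - 1)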
Code Example #5
    def initial_bg_poly(self):
        '''
        Takes an initial guess at the background.
        Takes the local minima of the smoothed spectrum, weighted by how far they are from other minima, and fits to these.
        The weighting prioritises flatter portions of the spectrum (which naturally have fewer minima).
        '''

        try:
            smoothed = sm.Run(self.spec)
        except Exception:
            # fall back to convex smoothing if the primary smoother fails
            smoothed = sm2.convex_smooth(self.spec, 25)[0]
        # keep indices and values as lists so the .append calls below work
        self.bg_indices = list(argrelextrema(smoothed, np.less)[0])
        self.bg_vals = list(smoothed[self.bg_indices])

        residuals = []
        for index in self.bg_indices:
            residuals.append(
                find_closest(index, np.setdiff1d(self.bg_indices, index))[2])
        norm_fac = 5. / max(residuals)
        extra_lens = norm_fac * np.array(residuals)
        for bg_index, extra_len in zip(self.bg_indices, extra_lens):
            extra_len = int(extra_len)
            if extra_len < 1: extra_len = 1
            for extra in np.arange(extra_len) + 1:
                try:
                    self.bg_vals.append(smoothed[bg_index + extra])
                    self.bg_indices.append(bg_index + extra)
                except IndexError:
                    pass  # neighbouring point falls outside the spectrum
                try:
                    self.bg_vals.append(smoothed[bg_index - extra])
                    self.bg_indices.append(bg_index - extra)
                except IndexError:
                    pass  # neighbouring point falls outside the spectrum
        edges = np.arange(5) + 1
        edges = np.append(edges, -edges).tolist()
        self.bg_indices = np.append(self.bg_indices, edges)
        self.bg_vals = np.append(self.bg_vals, smoothed[edges])

        if self.bg_type == 'poly':
            self.bg_bound = (min(self.spec), max(self.spec))
            self.bg_bounds = []
            while len(self.bg_bounds) < len(self.bg_vals):
                self.bg_bounds.append(self.bg_bound)
            self.bg_p = np.polyfit(self.shifts[self.bg_indices], self.bg_vals,
                                   self.order)
            self.bg = np.polyval(self.bg_p, self.shifts)
            self.signal = (old_div((np.array(self.spec - self.bg)),
                                   self.transmission)).tolist()
        else:

            if not self.vary_const_bg:
                self.bg_p = curve_fit(self.bg_function,
                                      self.shifts[self.bg_indices],
                                      self.bg_vals,
                                      p0=[0.5 * max(self.spec), 300, 1E-10],
                                      maxfev=100000,
                                      bounds=self.bg_bounds)[0]
            else:
                self.bg_p = curve_fit(
                    self.bg_function,
                    self.shifts[self.bg_indices],
                    self.bg_vals,
                    p0=[0.5 * max(self.spec), 300,
                        min(self.spec)],
                    maxfev=100000,
                    bounds=self.bg_bounds)[0]
            self.bg = self.bg_function(self.shifts, *self.bg_p)
            self.signal = (old_div((np.array(self.spec - self.bg)),
                                   self.transmission)).tolist()
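The core of the 'poly' branch above (fit a low-order polynomial through the local minima of the smoothed spectrum, plus a few edge points, and subtract it) can be shown on its own. A simplified sketch on synthetic data that omits the distance weighting and the non-polynomial background option; the cubic order and the five edge anchors are illustrative choices, not values from the project:

    import numpy as np
    from scipy.signal import argrelextrema

    # synthetic spectrum: slowly varying background plus two peaks
    shifts = np.linspace(200, 2000, 1500)
    spec = (1e-4 * (shifts - 1000) ** 2 + 50
            + 300 * np.exp(-((shifts - 800) / 15) ** 2)
            + 200 * np.exp(-((shifts - 1500) / 20) ** 2))

    minima = argrelextrema(spec, np.less)[0]                 # local minima of the spectrum
    edges = np.concatenate([np.arange(1, 6), -np.arange(1, 6)])
    anchors = np.concatenate([minima, edges])                # anchor the fit at the edges too

    coeffs = np.polyfit(shifts[anchors], spec[anchors], 3)   # cubic background
    background = np.polyval(coeffs, shifts)
    signal = spec - background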