wvl = data["lambda"]
background = data["spectra_" + str(background_spectra)]
lamp_spectra = lamp_data["spectra_0"]
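# `data`, `lamp_data`, `background_spectra` and `nbr_particles` are assumed to
# be defined earlier in the script, as is `ax`, which must be a 3D axes for the
# z-axis calls below; a hypothetical sketch of that axes setup:
# fig = plt.figure()
# ax = fig.add_subplot(projection='3d')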

peak_guesses = [540+i*20 for i in range(nbr_particles)]
#peak_guesses.reverse()
peak_positions = []
for i in range(nbr_particles):
    spectra = data[f'spectra_{i}']
    if i != background_spectra:
        print(i)
        # Don't plot background
        corrected_spectra = (spectra - background)/lamp_spectra
        #norm_spectra = spectra/spectra.max()
        print(peak_guesses[i])
        peak_pos, fwhm = lorentzian_fit(wvl, corrected_spectra, 780, True)
        peak_positions.append(peak_pos)
        delta_y = np.ones(len(wvl))*(i)
        ax.plot(wvl, delta_y, corrected_spectra, label=f'Particle {i+1}', alpha=0.7)



#ax.set_title(f'Corrected spectra for file {filename}')
ax.tick_params(axis='both', which='major', pad=0)
ax.set_xlabel(r'Wavelength [nm]')
ax.set_ylabel("Particle")
ax.set_zlabel(r'Intensity [arb. units]')
plt.locator_params(axis='z', nbins=5)
plt.grid()
#plt.legend(loc="upper left")
plt.tight_layout()
    # Pick out the background spectrum; `sample_data` holds the raw spectra
    # for the current sample
    sample_bground = sample_data["spectra_0"]
    # Calculate the average spectrum over all measurements
    measurements = 0
    avg_spectra = spectra_dict[sample]["avg_spectra"]
    for measurement in sample_data:
        # Skip the background and wavelength entries
        if measurement not in ("spectra_0", "lambda"):
            avg_spectra += sample_data[measurement]
            measurements += 1
    print(f'Number of measurements: {measurements} for sample: {sample}')
    avg_spectra /= measurements
    # Renormalize: subtract the background, then divide by the lamp spectrum
    avg_spectra = (avg_spectra - sample_bground) / lamp_spectra
    # Calculate peak position and FWHM
    peak_pos, fwhm = lorentzian_fit(wvl, avg_spectra, 720, False)
    print(peak_pos)
    # Save results
    spectra_dict[sample]["wvl"] = wvl
    spectra_dict[sample]["avg_spectra"] = avg_spectra
    spectra_dict[sample]["peak_pos"] = peak_pos
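# `spectra_dict` is assumed to be prepared before this fragment, with one entry
# per sample holding at least a zero-initialized "avg_spectra" accumulator and a
# plot "color"; a hypothetical sketch of that layout (sample names and colors
# are illustrative):
#
# spectra_dict = {
#     "sample_A": {"avg_spectra": np.zeros(len(wvl)), "color": "tab:blue"},
#     "sample_B": {"avg_spectra": np.zeros(len(wvl)), "color": "tab:orange"},
# }
#
# After the loop above, each entry also holds "wvl", the corrected
# "avg_spectra" and the fitted "peak_pos".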

# Iterate through and plot all spectra; also collect the peak positions in a vector for plotting
peak_vector = []
for i, sample in enumerate(spectra_dict):
    wvl = spectra_dict[sample]["wvl"]
    avg_spectra = spectra_dict[sample]["avg_spectra"]
    peak_vector.append(spectra_dict[sample]["peak_pos"])
    ax.plot(wvl,
            avg_spectra,
            color=spectra_dict[sample]["color"])
Example #3
#print(measurements)

peak_guess = 750  # About 750 nm is a good guess for Pd
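
# `lorentzian_fit` is defined elsewhere in the project; from the calls in this
# file it takes (wavelengths, spectrum, initial peak guess, plot flag) and
# returns (peak position, FWHM). The function below is only a hypothetical
# sketch of such a fit using scipy.optimize.curve_fit; the real implementation
# may differ.
from scipy.optimize import curve_fit

def lorentzian_fit_sketch(wvl, spectra, peak_guess, plot=False):
    """Hypothetical stand-in for lorentzian_fit: fit a Lorentzian, return (peak_pos, fwhm)."""
    def lorentzian(x, amp, x0, hwhm, offset):
        # Lorentzian line shape; hwhm is the half width at half maximum
        return amp * hwhm**2 / ((x - x0)**2 + hwhm**2) + offset
    # Rough initial guesses: peak height, user-supplied centre, 30 nm HWHM, baseline offset
    p0 = [np.max(spectra), peak_guess, 30.0, np.min(spectra)]
    popt, _ = curve_fit(lorentzian, np.asarray(wvl), np.asarray(spectra), p0=p0)
    if plot:
        plt.plot(wvl, lorentzian(np.asarray(wvl), *popt), '--', label='Lorentzian fit')
    return popt[1], 2 * abs(popt[2])  # peak position, FWHM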

# Loop over all measurements at different times t; `measurements` is assumed to
# be a sequence of DataFrames, one per time step, with wavelength in column 0
# and background in column 1
peak_positions = [[] for _ in range(nbr_particles)]
for idx, measurement_df in tqdm(enumerate(measurements)):
    wvl = measurement_df.iloc[:, 0]
    b_ground = measurement_df.iloc[:, 1]
    # Find peak position and FWHM for each particle
    for measurement_nbr in measurement_df:
        if measurement_nbr in samples_to_plot:
            # The first two columns are always wavelength and background
            spectra = measurement_df.iloc[:, measurement_nbr]
            corr_spectra = (spectra - b_ground) / lamp_spectra
            peak_pos, fwhm = lorentzian_fit(wvl, corr_spectra, peak_guess,
                                            False)
            # Note: the FWHM (not the peak position) is stored here and plotted below
            peak_positions[measurement_nbr].append(fwhm)

# Take only every 30th point from the gas_data measurements
t = np.array([a for idx, a in enumerate(gas_data[0]) if idx % 30 == 0])
g = np.array([a for idx, a in enumerate(gas_data[1]) if idx % 30 == 0])
# Trim the gas data to the number of spectral measurements
t = t[:len(measurements)]
g = g[:len(measurements)]
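# The downsampling above is equivalent to numpy slicing (assuming the gas
# channels are array-like); shown here only as a more compact alternative:
# t = np.asarray(gas_data[0])[::30][:len(measurements)]
# g = np.asarray(gas_data[1])[::30][:len(measurements)]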
# Plot
fig, ax1 = plt.subplots()
#for particle_peak_pos in peak_positions:
#t =  np.arange(0,119)*30
for smpl in samples_to_plot:
    # Plot delta FWHM relative to smallest FWHM for each sample (remove offset)
    sample_series = np.array(peak_positions[smpl])