def bg_peaks(file, zero_file):
    '''
    Uses ReadInValues function and FindPeaks function to find the peak
    wavelength of each background image, and find the peak wavelength
    shift of each background image compared to the sensor peak (zero
    file). Returns three values: the file name, the background peak and
    the peak shift value. If no resonance peak is detected in either
    spectrum, the peak and shift are returned as None instead of raising
    an IndexError (matching the None convention used by peak_shift).
    Args:
        file: <string> file path to background image
        zero_file: <string> file path to sensor background image
    Returns:
        file_name: <string> name of the background file
        bg_peak: <peak value or None> background resonance peak
        peak_shift: <float or None> shift relative to the sensor peak
    '''
    wav_naught, int_naught, zero_file_name = io.csv_in(zero_file)
    wavelength, intensity, file_name = io.csv_in(file)

    zero_peak = peaks(x=wav_naught, y=int_naught,
                      distance=300, width=20,
                      xmin=740, xmax=800)
    bg_peak = peaks(x=wavelength, y=intensity,
                    distance=300, width=20,
                    xmin=740, xmax=800)

    # Guard the no-peak case: indexing an empty result would raise
    # IndexError in the original implementation.
    if len(bg_peak) == 0 or len(zero_peak) == 0:
        return file_name, None, None

    peak_shift = float(bg_peak[0]) - float(zero_peak[0])
    return file_name, bg_peak[0], peak_shift
def time_sort(in_dir_name, dir_params, main_dir):
    '''
    Spectrums/Images captured using splicco's automatic data capture/timed
    sequential function are automatically given a user defined file name
    and a time and date stamp of the moment a measurement was taken (file
    created). This function converts the time stamp given into a value in
    seconds by splitting the file name into sections, reversing the order
    and splitting the time stamp at 'h' 'm' and 's' then converting to
    seconds. The function then adds all the values together to give a
    total time in seconds, concatenates this with the original file name,
    and saves the original data out with a new file name as a numpy array.
    Args:
        in_dir_name: <string> directory name containing spectrum files
        dir_params: <array> directories are given a name equivalent to the
                    individual file names, the dir_params function splits
                    the directory name into an array that can be used to
                    find the correct spectrum files.
        main_dir: <string> current working directory
    '''
    file_string = '_'.join(dir_params)
    print(f'\n{dir_params}')
    data_files = io.extract_files(dir_name=in_dir_name,
                                  file_string=file_string)

    # Output directory is loop-invariant: build and create it once rather
    # than on every iteration as the original did.
    out_dir_name = '_'.join(dir_params) + '_TimeAdjusted'
    out_dir = os.path.join(main_dir, out_dir_name)
    io.check_dir_exists(out_dir)

    for index, selected_file in enumerate(data_files):
        file = os.path.join(in_dir_name, selected_file)
        wavelength, intensity, file_name = io.csv_in(file)
        data = np.vstack((wavelength, intensity)).T

        # File names end '..._<date>_<H>h<M>m<S>s<ms>'; reversing the
        # split puts the time stamp at index 0 and the date at index 1.
        split_file = file_name.split('_')[::-1]
        date_split = split_file[1]
        hrs_split = split_file[0].split('h')
        mins_split = hrs_split[1].split('m')
        secs_split = mins_split[1].split('s')

        total_seconds = convert_to_seconds(date=date_split,
                                           hours=hrs_split[0],
                                           minutes=mins_split[0],
                                           seconds=secs_split[0],
                                           milliseconds=secs_split[1])

        new_file_name = '_'.join([file_string, str(total_seconds)])
        io.array_save(array_name=data,
                      file_name=new_file_name,
                      dir_name=out_dir)
        io.update_progress(index / len(data_files))
def peak_shift(file, zero_file):
    '''
    Reads in the wavelength, intensity and file name parameters from an
    in-file and the sensor file. Then uses the FindPeaks function to
    determine the x coordinates of a resonant peak within both files.
    Using this it calculates a peak shift. Outputs the time, peak and
    peak shift values; peak and/or shift are None when the corresponding
    peak could not be detected.
    Args:
        file: <string> file path to image
        zero_file: <string> file path to sensor background image
    Returns:
        time_stamp: <string> time stamp parsed from the file name
        peak: <float or None> detected resonance peak
        peak_shift: <float or None> shift relative to the sensor peak
    '''
    wavelength, intensity, file_name = io.array_in(file=file)
    wav_zero, int_zero, zero_file_name = io.array_in(file=zero_file)

    file_peak = peaks(x=wavelength, y=intensity,
                      distance=300, width=20,
                      xmin=730, xmax=810)
    zero_peak = peaks(x=wav_zero, y=int_zero,
                      distance=300, width=20,
                      xmin=740, xmax=800)

    # Time stamp is the last underscore-separated token of the file name.
    time_stamp = (file_name.split('_')[::-1])[0]

    if len(file_peak) == 0:
        peak = None
        peak_shift = None
    elif len(zero_peak) == 0:
        # Original code indexed zero_peak[0] unguarded here and raised
        # IndexError when the sensor spectrum had no detectable peak.
        peak = float(file_peak[0])
        peak_shift = None
    else:
        peak = float(file_peak[0])
        peak_shift = float(file_peak[0]) - float(zero_peak[0])

    return time_stamp, peak, peak_shift
def time_correct(in_dir_name, dir_params, main_dir):
    '''
    Spectrums/Images time adjusted in TimeSort function above are loaded
    in and the data is maintained. The file name is split and the first
    file captured is set to 0, the following files within the directory
    are given a time stamp respective to the zero file (a time step).
    This is useful for later processing.
    Args:
        in_dir_name: <string> directory name containing time adjusted
                     spectrum files
        dir_params: <array> directories are given a name equivalent to the
                    individual file names, the dir_params function splits
                    the directory name into an array that can be used to
                    find the correct spectrum files.
        main_dir: <string> current working directory
    '''
    file_string = '_'.join(dir_params[0:2])
    print(' ')
    print(dir_params)
    data_files = io.extract_files(dir_name=in_dir_name,
                                  file_string=file_string)

    # Nothing to correct; original would raise IndexError on data_files[0].
    if not data_files:
        return

    zero_file_name = data_files[0]
    zero_time_stamp = (zero_file_name.split('_')[::-1])[0]
    # Strip the 4-character file extension and parse the zero time once —
    # it is loop-invariant.
    zero_seconds = float(zero_time_stamp[0:-4])

    # Output directory is loop-invariant: build and create it once.
    out_dir_name = '_'.join(dir_params[0:-1]) + '_TimeCorrected'
    out_dir = os.path.join(main_dir, out_dir_name)
    io.check_dir_exists(out_dir)

    for index, selected_file in enumerate(data_files):
        file = os.path.join(in_dir_name, selected_file)
        data = np.load(file)
        file_name = io.get_filename(file)

        # Time stamp is the last underscore-separated token; subtracting
        # the zero time gives a time step relative to the first capture.
        split_file = file_name.split('_')[::-1]
        time_correction = int(float(split_file[0]) - zero_seconds)

        new_file_name = '_'.join([file_string, str(time_correction)])
        io.array_save(array_name=data,
                      file_name=new_file_name,
                      dir_name=out_dir)
        io.update_progress(index / len(data_files))
def find_img_size(dir_name, file_string):
    '''
    Determine the pixel dimensions of the camera by loading the first
    image found in a hyperspectral imaging directory and reporting its
    height and width.
    Args:
        dir_name: <string> directory containing images
        file_string: <string> image names within directory (eg. img_)
    Returns:
        np.shape: <height and width>
    '''
    image_files = io.extract_files(dir_name=dir_name,
                                   file_string=file_string)
    first_image_path = os.path.join(dir_name, image_files[0])
    first_image = np.load(first_image_path)
    return np.shape(first_image)
def processing_parameters(main_dir, exp_settings, image_save=False):
    '''
    Calculate the total processing time based on the number of files
    present in each hs_img. Give the option to output a time for both
    image output and data output.
    '''
    number_files = 0
    for img_name in exp_settings['hs_imgs']:
        candidate_dir = os.path.join(main_dir, img_name)
        # Only count images for settings entries that exist on disk.
        if os.path.isdir(candidate_dir):
            found = io.extract_files(dir_name=candidate_dir,
                                     file_string='img_')
            number_files += len(found)

    process_time = normalise_process_time(number_files, image_save)
    file_size = normalise_file_size(number_files, image_save)

    print(f'\nTotal number of files: {number_files}')
    print(f'Save normalised images set to: {image_save}')
    print(f'Total file size: ~{file_size} GB')
    print(f'Total processing time: ~{process_time} mins')
import os
import GMR.InputOutput as io

# Resolve the configured working directory and load the experiment settings.
main_dir = io.config_dir_path()
exp_settings = io.exp_in(main_dir)
print(f'Experiment Settings:\n {exp_settings}\n')

for hs_img in exp_settings['hs_imgs']:
    img_dir = os.path.join(main_dir, hs_img)
    # Skip settings entries that have no matching directory on disk.
    if not os.path.isdir(img_dir):
        continue

    corrected_img_dir = os.path.join(img_dir, 'corrected_imgs')
    data_files = io.extract_files(dir_name=corrected_img_dir,
                                  file_string='corrected_img_')
    print(len(data_files))

    data_cube = []
    print('\nBuilding data cube...')
    # Stack every corrected image into one data cube (list of arrays).
    for index, file in enumerate(data_files):
        file_path = os.path.join(corrected_img_dir, file)
        corrected_img, file_name = io.array_in(file_path, mode='r')
        data_cube.append(corrected_img)
        io.update_progress(index / len(data_files))

    print('\nSaving data cube...approximately 1min per 100 imgs')
    # NOTE(review): the source chunk is truncated mid-call here; the
    # remaining arguments to io.array_out are not visible in this view.
    io.array_out(array_name=data_cube, file_name=f'{hs_img}_datacube',
import os
import numpy as np
import matplotlib.pyplot as plt
import csv
import shutil
import GMR.InputOutput as io
import GMR.DataPreparation as dprep
import GMR.DataProcessing as dproc

sensor = 'Nanohole_Array'  ## Set this to the photonic crystal used ##

# Root directory containing one sub-directory per measurement date.
root = io.config_dir_path()

for date_dir in os.listdir(root):
    selected_date = os.path.join(root, date_dir)
    print(f'Looking at: {date_dir}')
    print('Background Calibration')

    bg_dir = os.path.join(selected_date, 'Background')
    bg_datafiles = io.extract_files(dir_name=bg_dir,
                                    file_string='_Background.csv')

    for index, selected_file in enumerate(bg_datafiles):
        file = os.path.join(bg_dir, selected_file)
        # The chosen sensor's own background spectrum is the zero reference.
        zero_file = os.path.join(bg_dir, f'{sensor}_Background.csv')
        wavelength, intensity, file_name = io.csv_in(file)
        wav_naught, int_naught, zero_name = io.csv_in(zero_file)
        # NOTE(review): the source chunk is truncated here — the loop body
        # continues beyond this figure creation in the full file.
        fig, ax = plt.subplots(1, 1, figsize=[10, 7])
import os
import GMR.InputOutput as io
import GMR.DataPreparation as dp

# Resolve the configured working directory and load the experiment settings.
main_dir = io.config_dir_path()
exp_settings = io.exp_in(main_dir)
print(f'Experiment Settings:\n{exp_settings}\n')

# Report the expected processing time and file size before doing any work.
dp.processing_parameters(main_dir=main_dir,
                         exp_settings=exp_settings,
                         image_save=False)

for hs_img in exp_settings['hs_imgs']:
    img_dir = os.path.join(main_dir, hs_img)
    # Skip settings entries that have no matching directory on disk.
    if not os.path.isdir(img_dir):
        continue

    step, wl, f, power, norm_power = io.get_pwr_spectrum(dir_name=img_dir,
                                                         plot_show=False,
                                                         plot_save=True)
    io.create_all_dirs(dir_name=img_dir)
    data_files = io.extract_files(dir_name=img_dir,
                                  file_string='img_')

    print('\nNormalising csvs...')
    for index, file in enumerate(data_files):
        file_path = os.path.join(img_dir, file)
        img, file_name = io.csv_in(file_path=file_path)
        # NOTE(review): the source chunk is truncated here — the
        # normalisation of `img` continues beyond this point.
import os
import matplotlib.pyplot as plt
import GMR.InputOutput as io
import GMR.DataPreparation as dp
import GMR.DataAnalysis.RIndex as ri

# Root directory for the experiment data.
root = io.config_dir_path()

# Map menu labels to integer choices for the interactive prompt.
process_dict = {
    'Default': 0,
    'Normalise': 1,
    'Data Cube': 2,
    'Free Spectral Range': 3,
}
process_choice = io.user_in(choiceDict=process_dict)
# Expand the single menu choice into per-stage boolean flags.
normalise, datacube, fsr = io.process_choice(choice=process_choice)

# Second prompt: how much intermediate output should be saved.
norm_save_dict = {
    'Default settings': 0,
    'All raw/normalised image save': 1,
    'Save only result': 2,
}
norm_choice = io.user_in(choiceDict=norm_save_dict)
raw_save, norm_save, pwr_save = io.norm_choice(choice=norm_choice)

exp_settings = io.exp_in(root)
print(f'Experiment Settings:\n{exp_settings}\n')

# Report expected processing time/size for the selected save mode.
dp.processing_parameters(main_dir=root,
                         exp_settings=exp_settings,
                         image_save=norm_save)