Beispiel #1
0
def time_sort(in_dir_name, dir_params, main_dir):
    '''
    Convert splicco time/date-stamped spectrum file names into a total
    time in seconds and re-save each spectrum as a numpy array.

    Spectra/images captured using splicco's automatic data capture /
    timed sequential function carry a user-defined name plus a time and
    date stamp. Each file name is split and reversed so the time stamp
    comes first; the stamp is then split at 'h', 'm' and 's' and passed
    to convert_to_seconds. The resulting total is concatenated with the
    base file string and the (wavelength, intensity) data is saved out
    under the new name.

    Args:
        in_dir_name: <string> directory name containing spectrum files
        dir_params: <array> directories are given a name equivalent to
                    the individual file names; dir_params is that name
                    split into an array used to find the correct
                    spectrum files.
        main_dir: <string> current working directory
    '''
    file_string = '_'.join(dir_params)
    print(f'\n{dir_params}')
    data_files = io.extract_files(dir_name=in_dir_name,
                                  file_string=file_string)

    # The output directory is loop-invariant: build and create it once
    # instead of re-joining and re-checking on every file.
    out_dir = os.path.join(main_dir, f'{file_string}_TimeAdjusted')
    io.check_dir_exists(out_dir)

    for index, selected_file in enumerate(data_files):
        file = os.path.join(in_dir_name, selected_file)
        wavelength, intensity, file_name = io.csv_in(file)

        # Two-column array: wavelength in column 0, intensity in column 1.
        data = np.vstack((wavelength, intensity)).T

        # Reversing the split puts the time stamp at [0] and the date at
        # [1], regardless of how many leading '_'-separated fields the
        # user-defined name has.
        split_file = file_name.split('_')[::-1]
        date_split = split_file[1]
        hrs_split = split_file[0].split('h')
        mins_split = hrs_split[1].split('m')
        secs_split = mins_split[1].split('s')

        total_seconds = convert_to_seconds(date=date_split,
                                           hours=hrs_split[0],
                                           minutes=mins_split[0],
                                           seconds=secs_split[0],
                                           milliseconds=secs_split[1])

        new_file_name = f'{file_string}_{total_seconds}'

        io.array_save(array_name=data,
                      file_name=new_file_name,
                      dir_name=out_dir)

        # index + 1 so the progress bar reaches 100% on the final file
        # (index/len stopped at (n-1)/n).
        io.update_progress((index + 1) / len(data_files))
Beispiel #2
0
def time_correct(in_dir_name, dir_params, main_dir):
    '''
    Re-stamp time-adjusted spectra relative to the first file captured.

    Spectra/images time adjusted by time_sort above are loaded in and
    the data is maintained. The first file in the directory is taken as
    time zero and every file is re-saved with its offset in whole
    seconds from that file (a time step), which is useful for later
    processing.

    Args:
        in_dir_name: <string> directory name containing time adjusted
                     spectrum files
        dir_params: <array> directories are given a name equivalent to
                    the individual file names; dir_params is that name
                    split into an array used to find the correct
                    spectrum files.
        main_dir: <string> current working directory
    '''
    file_string = '_'.join(dir_params[0:2])
    print(' ')
    print(dir_params)
    data_files = io.extract_files(dir_name=in_dir_name,
                                  file_string=file_string)

    # The first file defines the zero point. Its trailing stamp still
    # carries the 4-character '.npy' extension, so strip it before
    # converting; do the conversion once, outside the loop.
    zero_file_name = data_files[0]
    zero_time_stamp = (zero_file_name.split('_')[::-1])[0]
    zero_time = float(zero_time_stamp[0:-4])

    # The output directory is loop-invariant: build and create it once
    # instead of re-joining and re-checking on every file.
    out_dir_name = '_'.join(dir_params[0:-1]) + '_TimeCorrected'
    out_dir = os.path.join(main_dir, out_dir_name)
    io.check_dir_exists(out_dir)

    for index, selected_file in enumerate(data_files):
        file = os.path.join(in_dir_name, selected_file)
        data = np.load(file)

        file_name = io.get_filename(file)
        split_file = file_name.split('_')[::-1]
        time_correction = int(float(split_file[0]) - zero_time)

        new_file_name = f'{file_string}_{time_correction}'

        io.array_save(array_name=data,
                      file_name=new_file_name,
                      dir_name=out_dir)

        # index + 1 so the progress bar reaches 100% on the final file
        # (index/len stopped at (n-1)/n).
        io.update_progress((index + 1) / len(data_files))
Beispiel #3
0
def find_img_size(dir_name, file_string):
    '''
    Determine the pixel dimensions of one hyperspectral image.

    Loads the first image file matched in the directory and returns its
    shape, giving the number of rows and columns (the camera's pixel
    size) for all images in the set.

    Args:
        dir_name: <string> directory containing images
        file_string: <string> image names within directory (eg. img_)
    Returns:
        np.shape: <height and width>
    '''
    matched_files = io.extract_files(dir_name=dir_name,
                                     file_string=file_string)
    first_image_path = os.path.join(dir_name, matched_files[0])
    first_image = np.load(first_image_path)
    return np.shape(first_image)
Beispiel #4
0
def processing_parameters(main_dir, exp_settings, image_save=False):
    '''
    Estimate total processing time and output size for an experiment.

    Counts the 'img_' files present in every hyperspectral image
    directory listed in the experiment settings, then reports an
    approximate processing time and file size for the chosen output
    mode (with or without saving normalised images).
    '''
    number_files = 0
    for hs_img in exp_settings['hs_imgs']:
        img_dir = os.path.join(main_dir, hs_img)
        # Settings may list directories that were never captured.
        if os.path.isdir(img_dir):
            number_files += len(io.extract_files(dir_name=img_dir,
                                                 file_string='img_'))

    process_time = normalise_process_time(number_files, image_save)
    file_size = normalise_file_size(number_files, image_save)

    print(f'\nTotal number of files: {number_files}')
    print(f'Save normalised images set to: {image_save}')
    print(f'Total file size: ~{file_size} GB')
    print(f'Total processing time: ~{process_time} mins')
Beispiel #5
0
# Script: build a hyperspectral data cube from corrected images.
import os
import GMR.InputOutput as io

# Experiment root chosen via the GMR configuration helper.
main_dir = io.config_dir_path()

exp_settings = io.exp_in(main_dir)
print(f'Experiment Settings:\n {exp_settings}\n')

for hs_img in exp_settings['hs_imgs']:
    img_dir = os.path.join(main_dir, hs_img)
    # Settings may list directories that do not exist on disk.
    if not os.path.isdir(img_dir):
        continue
    corrected_img_dir = os.path.join(img_dir, 'corrected_imgs')

    data_files = io.extract_files(dir_name=corrected_img_dir,
                                  file_string='corrected_img_')
    print(len(data_files))

    data_cube = []

    print('\nBuilding data cube...')
    for index, file in enumerate(data_files):
        file_path = os.path.join(corrected_img_dir, file)
        # mode='r' presumably opens the array read-only — confirm
        # against GMR.InputOutput.array_in.
        corrected_img, file_name = io.array_in(file_path, mode='r')
        data_cube.append(corrected_img)

        io.update_progress(index / len(data_files))

    print('\nSaving data cube...approximately 1min per 100 imgs')
    # NOTE(review): this call is truncated in the source — the argument
    # list and closing parenthesis are cut off mid-statement.
    io.array_out(array_name=data_cube,
                 file_name=f'{hs_img}_datacube',
# Script: background calibration — plot each background spectrum
# against the configured sensor's reference background.
import GMR.InputOutput as io
import GMR.DataPreparation as dprep
import GMR.DataProcessing as dproc

sensor = 'Nanohole_Array'  ## Set this to the photonic crystal used ##

# Root directory containing one sub-directory per measurement date.
root = io.config_dir_path()

for date_dir in os.listdir(root):
    selected_date = os.path.join(root, date_dir)
    print(f'Looking at: {date_dir}')

    print('Background Calibration')
    bg_dir = os.path.join(selected_date, 'Background')
    bg_datafiles = io.extract_files(dir_name=bg_dir,
                                    file_string='_Background.csv')

    for index, selected_file in enumerate(bg_datafiles):
        file = os.path.join(bg_dir, selected_file)
        # Reference ("zero") spectrum for the configured sensor.
        zero_file = os.path.join(bg_dir, f'{sensor}_Background.csv')

        wavelength, intensity, file_name = io.csv_in(file)
        wav_naught, int_naught, zero_name = io.csv_in(zero_file)

        # NOTE(review): 'os' and 'plt' are used but not imported in this
        # snippet — presumably 'import os' and
        # 'import matplotlib.pyplot as plt' exist in the full file;
        # confirm. The figure is also never shown or saved here, so the
        # snippet looks truncated.
        fig, ax = plt.subplots(1, 1, figsize=[10, 7])
        ax.plot(wavelength, intensity, 'r', lw=2, label=file_name)
        ax.plot(wav_naught, int_naught, 'b', lw=2, label=zero_name)
        ax.grid(True)
        ax.legend(frameon=True, loc=0, ncol=1, prop={'size': 12})
        ax.set_xlabel('Wavelength [nm]', fontsize=14)
        ax.set_ylabel('Intensity', fontsize=14)
Beispiel #7
0
dp.processing_parameters(main_dir=main_dir,
                         exp_settings=exp_settings,
                         image_save=False)

for hs_img in exp_settings['hs_imgs']:
    img_dir = os.path.join(main_dir, hs_img)
    if not os.path.isdir(img_dir):
        continue

    step, wl, f, power, norm_power = io.get_pwr_spectrum(dir_name=img_dir,
                                                         plot_show=False,
                                                         plot_save=True)

    io.create_all_dirs(dir_name=img_dir)

    data_files = io.extract_files(dir_name=img_dir, file_string='img_')

    print('\nNormalising csvs...')
    for index, file in enumerate(data_files):
        file_path = os.path.join(img_dir, file)
        img, file_name = io.csv_in(file_path=file_path)

        _, img_no = file_name.split('_')
        io.png_out(image_data=img,
                   file_name=file_name,
                   dir_name=img_dir,
                   image_title=f'Image: {img_no}',
                   out_name=f'{file_name}.png',
                   plot_show=False)

        norm_img = dp.pwr_norm(image_data=img,