def main():
    """
    Process every raw TEMPO image found in the input directory.

    Raw images are stored in IDL ``.sav`` files.  For each image the matching
    telemetry file (same base name, ``.h5``) is parsed for co-adds,
    integration time and FPE/FPA temperatures; nominal defaults are used when
    the telemetry file is missing.  Each frame is co-addition corrected,
    assembled into a full frame and written out as a CSV file.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Radiance_Cal_VIS_Lamp'
    telemetry_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\2018.06.27'
    image_dir = os.path.join(data_dir, 'saved_quads')
    image_save_dir = os.path.join(image_dir, 'FPS_orientation_raw')
    telemetry_dir = os.path.join(telemetry_dir, 'processed/h5')
    if not os.path.exists(image_save_dir):
        os.makedirs(image_save_dir)

    data_path_all = sorted(
        [each for each in os.listdir(image_dir) if each.endswith('.sav')])
    print('Total data = ', len(data_path_all))

    for data_path in data_path_all:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)

        # -------------------- Telemetry statistics --------------------
        # The telemetry .h5 file shares its base name with the image file.
        # parse_telemetry_file also returns CCD/FPE temperatures; only
        # coadds is consumed by the processing below.
        telemetry_file_name = data_path.split('.')[0]
        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
        else:
            # Nominal acquisition parameters when telemetry is unavailable.
            print('Telemetry file missing')
            coadds = 10
            int_time = 6000
            fpe_temp = 43
            fpa_temp = -21.5

        # -------------------- Image processing --------------------
        # Undo co-addition, rebuild the full frame from the raw FPE layout,
        # then persist as CSV (note: output keeps the .sav name + '.csv').
        full_frame = read_idl_file(data_file)
        full_frame = perform_coaddition_correction(full_frame, coadds)
        full_frame = make_full_frame_from_raw_fpe(full_frame)
        processed_file_name = os.path.join(image_save_dir, data_path + '.csv')
        np.savetxt(processed_file_name, full_frame, fmt='%1.3f', delimiter=",")
def calculate_dark_current(data_dir, coadds):
    """
    Compute the mean dark-current image from all dark-frame .sav files.

    Each dark frame is bias subtracted, assembled into a final image and
    normalised by the number of co-adds; the per-pixel mean over all dark
    frames is returned.

    Parameters
    ----------
    data_dir : str
        Directory containing a ``Dark_data`` sub-directory of .sav files.
    coadds : int
        Number of co-adds used during acquisition (normalisation factor).

    Returns
    -------
    numpy.ndarray
        Mean dark-current image per single exposure.
    """
    all_dark_current = []
    dark_data_dir = os.path.join(data_dir, 'Dark_data')
    data_path_all = sorted(
        [each for each in os.listdir(dark_data_dir) if each.endswith('.sav')])

    for data_path in data_path_all:
        data_file = os.path.join(dark_data_dir, data_path)
        full_frame = read_idl_file(data_file)
        bias_removed_quads = perform_bias_subtraction(full_frame)
        dark_current_image = create_final_image(np.array(bias_removed_quads))
        # Normalise by co-adds so the result is per single exposure; the
        # original accepted `coadds` but never applied it (the normalisation
        # was commented out), unlike the sibling definition of this function.
        all_dark_current.append(dark_current_image / coadds)

    return np.mean(np.array(all_dark_current), axis=0)
def calculate_dark_current(data_dir, coadds=63):
    """
    Return the average dark-current image over all dark frames.

    Every ``.sav`` file under ``<data_dir>/Dark_data`` is read, bias
    subtracted, assembled into a final image and divided by the number of
    co-adds; the per-pixel mean of all those images is returned.
    """
    dark_data_dir = os.path.join(data_dir, 'Dark_data')
    dark_files = sorted(name for name in os.listdir(dark_data_dir)
                        if name.endswith('.sav'))

    frames = []
    for name in dark_files:
        raw_frame = read_idl_file(os.path.join(dark_data_dir, name))
        quads = perform_bias_subtraction(raw_frame)
        # Normalise each dark image by the co-add count before averaging.
        frames.append(create_final_image(np.array(quads)) / coadds)

    return np.mean(np.array(frames), axis=0)
def main():
    """
    Full processing chain for TEMPO spectrometer raw images.

    For every ``.sav`` file in the image directory (the first 19 are
    skipped), the matching telemetry file is parsed (or nominal defaults
    are used), then the frame is processed in this order:

    1. co-addition correction
    2. bias (offset) subtraction of the active regions
    3. FPE-temperature correction
    4. non-linearity correction (per quad)
    5. cross-talk removal
    6. smear removal

    The assembled image is written out as a CSV file.  Each correction
    stage can be toggled with the flags below.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Radiance_Cal_VIS_Lamp'
    telemetry_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\2018.06.27'
    image_dir = os.path.join(data_dir, 'saved_quads')
    image_save_dir = os.path.join(image_dir, 'processed_corrected_linearity')
    telemetry_dir = os.path.join(telemetry_dir, 'processed/h5')
    # Correction toggles (1 = apply, 0 = skip).
    linearity = 1
    smear = 1
    cross_talk = 1
    temp_correction = 1
    if not os.path.exists(image_save_dir):
        os.makedirs(image_save_dir)

    data_path_all = sorted([each for each in os.listdir(image_dir)
                            if each.endswith('.sav')])
    print('Total data = ', len(data_path_all))

    # NOTE(review): the first 19 files are skipped here — presumably
    # dark/setup frames; confirm against the acquisition log.
    for data_path in data_path_all[19:]:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)

        # -------------------- Telemetry statistics --------------------
        # The telemetry .h5 file shares its base name with the image file.
        telemetry_file_name = data_path.split('.')[0]
        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
            print('Integ. Time =', int_time)
            print(coadds)
        else:
            # Nominal acquisition parameters when telemetry is unavailable.
            print('Telemetry file missing')
            coadds = 100
            int_time = 6000
            fpe_temp = 43
            fpa_temp = -21.5

        # -------------------- Image processing --------------------
        full_frame = read_idl_file(data_file)
        full_frame = perform_coaddition_correction(full_frame, coadds)
        quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

        # Offset removal.
        # Input : full-frame TEMPO image.
        # Output: bias-subtracted active regions (1028 x 1024 per quad;
        # 2 pixel lines from the SMEAR overclocks hold storage-summation
        # information).
        bias_removed_quads = perform_bias_subtraction(full_frame)

        # Directory for raw-image plots.  Kept for its side effect even
        # though plotting itself is currently disabled.
        raw_image_save = os.path.join(image_save_dir, 'raw_image')
        if not os.path.exists(raw_image_save):
            os.makedirs(raw_image_save)

        # FPE-temperature correction.
        if temp_correction:
            temp_corrected_quads = perform_temp_correction(
                bias_removed_quads, fpe_temp)
        else:
            temp_corrected_quads = bias_removed_quads

        # Non-linearity correction, applied per quad.
        # Input : temperature-corrected active regions.
        # Output: linearized active-region quads.
        if linearity:
            linearized_quads = np.array(
                [apply_linearity_correction(temp_corrected_quads[i, :, :],
                                            quads[i])
                 for i in range(4)])
        else:
            linearized_quads = temp_corrected_quads

        # Cross-talk removal (all quads together).
        if cross_talk:
            cross_talk_removed_quads = remove_cross_talk(linearized_quads)
        else:
            cross_talk_removed_quads = linearized_quads

        # Smear removal (all quads together, scaled by integration time).
        if smear:
            smear_removed_quads = perform_smear_removal(
                np.array(cross_talk_removed_quads), int_time)
        else:
            smear_removed_quads = cross_talk_removed_quads

        # Assemble the final image and persist it as CSV
        # (output keeps the .sav name + '.csv').
        processed_image = create_final_image(np.array(smear_removed_quads))
        processed_file_name = os.path.join(image_save_dir, data_path + '.csv')
        np.savetxt(processed_file_name, processed_image, fmt='%1.3f',
                   delimiter=",")
# ---- Example #5 ----
def main():
    """
    Read saved IDL variables, split the full frame into the four TEMPO
    quads, apply offset / linearity / smear / PRNU corrections and display
    the reassembled image plus a linearized-minus-smear-corrected diff.
    """
    file_path = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\2017.06.28\saved_quads\test'
    data_path_all = [
        each for each in os.listdir(file_path) if each.endswith('.sav')
    ]
    for data_path in data_path_all:
        data_file = os.path.join(file_path, data_path)
        full_frame = read_idl_file(data_file)

        num_quads, spectral_dims, spatial_dims = full_frame.shape
        print(num_quads, spectral_dims, spatial_dims)

        quads = ['Quad D', 'Quad C', 'Quad A', 'Quad B']

        plot_raw_image(full_frame)

        # Active-pixel regions of each quad.  Row ranges follow the
        # trailing-overclock slices below; column ranges follow the
        # smear-overclock slices (10:1034 and 1078:2102).
        # NOTE(review): the original referenced undefined active_* names
        # (the defining slices were lost) — confirm these ranges against
        # the CCD region map.
        active_D = full_frame[2:1030, 10:1034]
        active_C = full_frame[2:1030, 1078:2102]
        active_A = full_frame[1062:2090, 10:1034]
        active_B = full_frame[1062:2090, 1078:2102]

        # Trailing (serial) overclocks for each quad.
        tsoc_D = full_frame[2:1030, 1034:1056]
        tsoc_C = full_frame[2:1030, 1056:1078]
        tsoc_A = full_frame[1062:2090, 1034:1056]
        tsoc_B = full_frame[1062:2090, 1056:1078]

        # NOTE(review): hard-coded acquisition parameters — check the
        # telemetry files for the actual values.
        num_coadds = 10
        integ_time = 139.9609

        # Smear overclocks (note: Quad C uses rows 1028:1044, the others
        # 1030/1046:1062 — flagged as a possible inconsistency).
        smear_oc_D = full_frame[1030:1046, 10:1034]
        smear_oc_C = full_frame[1028:1044, 1078:2102]
        smear_oc_A = full_frame[1046:1062, 10:1034]
        smear_oc_B = full_frame[1046:1062, 1078:2102]

        # Offset (bias) subtraction per quad using its trailing overclocks.
        bias_subtracted_D = perform_bias_subtraction(active_D, tsoc_D)
        bias_subtracted_C = perform_bias_subtraction(active_C, tsoc_C)
        bias_subtracted_A = perform_bias_subtraction(active_A, tsoc_A)
        bias_subtracted_B = perform_bias_subtraction(active_B, tsoc_B)

        # Linearity correction per quad.
        linearized_D = perform_linearity_correction(bias_subtracted_D,
                                                    quads[0], num_coadds)
        linearized_C = perform_linearity_correction(bias_subtracted_C,
                                                    quads[1], num_coadds)
        linearized_A = perform_linearity_correction(bias_subtracted_A,
                                                    quads[2], num_coadds)
        linearized_B = perform_linearity_correction(bias_subtracted_B,
                                                    quads[3], num_coadds)

        # Smear-offset correction per quad.
        smear_corr_D = perform_smear_offset_correction(linearized_D,
                                                       integ_time)
        smear_corr_C = perform_smear_offset_correction(linearized_C,
                                                       integ_time)
        smear_corr_A = perform_smear_offset_correction(linearized_A,
                                                       integ_time)
        smear_corr_B = perform_smear_offset_correction(linearized_B,
                                                       integ_time)

        # Recombine the four corrected quads into one full frame.
        lower_quads = np.concatenate((smear_corr_D, smear_corr_C), axis=1)
        upper_quads = np.concatenate((smear_corr_A, smear_corr_B), axis=1)
        full_frame_image = np.concatenate((lower_quads, upper_quads), axis=0)

        # Apply PRNU correction.
        full_frame_image = apply_PRNU_correction(full_frame_image)

        # Display the corrected full frame.
        ax = plt.gca()
        image = ax.imshow(full_frame_image,
                          cmap='nipy_spectral',
                          origin='lower')
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(image, cax=cax)
        plt.grid(False)
        plt.show()

        # Display the smear contribution for Quad D (before minus after).
        ax = plt.gca()
        image = ax.imshow(linearized_D - smear_corr_D,
                          cmap='nipy_spectral',
                          origin='lower')
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(image, cax=cax)
        plt.grid(False)
        plt.show()
        # Removed the stray bare `cc` that raised NameError after the first
        # iteration (a leftover debug crash marker).
# ---- Example #6 ----
def main():
    """
    Spectral-smile processing chain for TEMPO spectrometer images.

    For every ``.sav`` file in the image directory (the first 7 are
    skipped), the matching telemetry file is parsed (or nominal defaults
    are used), then the frame is bias subtracted, optionally linearized,
    smear corrected (using the outlier mask), cross-talk corrected,
    dark-current subtracted, PRNU corrected and smoothed.  Finally a set
    of spectral-direction column profiles around the peak row is plotted.
    Each correction stage can be toggled with the flags below.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Spectral_Smile\Flight_Slight'
    image_dir = os.path.join(data_dir, 'saved_quads')
    save_dir = os.path.join(image_dir, 'processed_image')
    image_save_dir = os.path.join(image_dir, 'saved_plots')
    telemetry_dir = os.path.join(data_dir, 'processed/h5')
    # Correction toggles (1 = apply, 0 = skip).
    linearity = 0
    smear = 1
    cross_talk = 1
    dark_current = 0  # 1 => measure and subtract the dark current
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    data_path_all = sorted(
        [each for each in os.listdir(image_dir) if each.endswith('.sav')])
    print('Total data = ', len(data_path_all))

    if dark_current:
        dark_current = calculate_dark_current(image_dir, coadds=1)
    else:
        dark_current = 0

    # NOTE(review): the first 7 files are skipped here — presumably
    # dark/setup frames; confirm against the acquisition log.
    for data_path in data_path_all[7:]:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)

        # -------------------- Telemetry statistics --------------------
        # The telemetry .h5 file shares its base name with the image file.
        telemetry_file_name = data_path.split('.')[0]
        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
            print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
                  round(fpa_temp, 2))
        else:
            # Nominal acquisition parameters when telemetry is unavailable.
            print('Telemetry file missing')
            coadds = 10
            int_time = 6000
            fpe_temp = 43
            fpa_temp = -21

        # -------------------- Image processing --------------------
        full_frame = read_idl_file(data_file)
        # NOTE(review): results unused downstream but calls kept in case
        # create_image_active_region / create_final_image have side effects
        # (e.g. plotting or caching) — confirm and drop if pure.
        check_image = create_image_active_region(full_frame)
        raw_image = create_final_image(np.array(full_frame))
        quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

        # Offset removal.
        # Input : full-frame TEMPO image.
        # Output: bias-subtracted active regions (1028 x 1024 per quad).
        bias_removed_quads = perform_bias_subtraction(full_frame)

        # Directory for raw-image plots (side effect kept; plotting itself
        # is currently disabled).
        raw_image_save = os.path.join(image_save_dir, 'raw_image')
        if not os.path.exists(raw_image_save):
            os.makedirs(raw_image_save)

        # Non-linearity correction, applied per quad.
        if linearity:
            linearized_quads = np.array(
                [apply_linearity_correction(bias_removed_quads[i, :, :],
                                            quads[i], coadds)
                 for i in range(4)])
        else:
            linearized_quads = bias_removed_quads

        # Outlier mask: list of 4 quad masks; arrays downstream of the
        # smear removal are masked arrays.
        outlier_mask = read_outlier_mask()

        # Smear removal (all quads together, scaled by integration time).
        if smear:
            smear_removed_quads = perform_smear_removal(
                linearized_quads, int_time, outlier_mask)
        else:
            smear_removed_quads = linearized_quads

        # Cross-talk removal (all quads together).
        if cross_talk:
            cross_talk_removed_quads = remove_cross_talk(
                np.array(smear_removed_quads))
        else:
            cross_talk_removed_quads = smear_removed_quads

        # Assemble, subtract dark current and apply the PRNU map.
        processed_image = create_final_image(
            np.array(cross_talk_removed_quads))
        processed_image = processed_image - dark_current

        prnu_map = read_prnu_files()
        prnu_spectrometer = create_final_image(np.array(prnu_map))
        # Clamp extreme PRNU values before dividing.
        prnu_spectrometer[prnu_spectrometer > 1.03] = 1.02
        prnu_spectrometer[prnu_spectrometer < 0.97] = 0.98

        outlier_spectrometer = create_final_image(np.array(outlier_mask))
        nx_quad, ny_quad = processed_image.shape
        outlier_spectrometer = np.reshape(outlier_spectrometer,
                                          (nx_quad * ny_quad, 1))
        outlier_spectrometer = np.reshape(outlier_spectrometer,
                                          (nx_quad, ny_quad))
        processed_image = processed_image / prnu_spectrometer

        processed_image_save = os.path.join(image_save_dir, 'processed_image')
        if not os.path.exists(processed_image_save):
            os.makedirs(processed_image_save)
        # Light 3x3 smoothing before locating the peak row.
        processed_image = uniform_filter(processed_image,
                                         size=3,
                                         mode='constant')

        # Row with the maximum mean signal (spectral peak).
        row_mean = np.mean(processed_image, axis=1)
        max_row_each = np.where(row_mean == max(row_mean))[0][0]
        print(max_row_each)

        # Plot 10 random column profiles around the peak row.
        col_index = np.sort([randint(100, 2000) for p in range(0, 10)])
        for i in col_index:
            plt.plot(processed_image[:, i], 'o--', label='Col. ' + str(i))
            plt.legend(loc='best', ncol=4)
        plt.grid(True, linestyle=':')
        plt.title(
            'Profile along Spectral direction (Spectral Bandpass 297 nm)')
        plt.xlabel('Spectral Pixel Index')
        plt.ylabel('Counts (DN)')
        plt.xlim(max_row_each - 10, max_row_each + 10)
        plt.show()
        # Removed the stray bare `cc` that raised NameError after the first
        # iteration (a leftover debug crash marker).
def main():
    """
    Process TEMPO spectral-smile raw images saved in IDL (.sav) format.

    For each raw frame the matching telemetry (.h5) file is parsed for
    co-adds, integration time and FPE/FPA temperatures (hard-coded defaults
    are used when the file is missing).  The frame is then bias-subtracted,
    optionally linearized, smear- and cross-talk-corrected, PRNU-corrected
    and divided by the number of co-adds.  A Gaussian is fitted down each
    spectral column to estimate the band center (smile) and FWHM, and the
    spectral alignment (rolling max-min of band centers over a 4-pixel
    window) is accumulated per frame.  Finally the 1-sigma spectral
    alignment across all frames is plotted against the TEMPO requirement.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Spectral_Smile\Flight_Slight'
    image_dir = os.path.join(data_dir, 'saved_quads')
    save_dir = os.path.join(image_dir, 'processed_image')
    image_save_dir = os.path.join(image_dir, 'saved_plots')
    telemetry_dir = os.path.join(data_dir, 'processed/h5')
    linearity = 0  # option to turn on/off linearity correction
    smear = 1  # 1 = apply smear removal
    cross_talk = 1  # 1 = apply cross-talk removal
    bandpass_val = []
    bandpass_val_norm = []
    normalization_factor_all = []
    dark_current = 0  # 1 = measure and subtract dark current, 0 = skip
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
#    data_path_all = sorted([each for each in os.listdir(image_dir)
#                            if each.startswith('2017_07_30_00_24_59_38064')
#                            and  each.endswith('.sav')])
#
    data_path_all = sorted(
        [each for each in os.listdir(image_dir) if each.endswith('.sav')])
    #
    #    #dark_data = data_path_all[1:len(data_path_all):2]

    print('Total data = ', len(data_path_all))
    #    resultFile = open (os.path.join(save_dir,'file_name_all.csv'),'w')
    #
    #    for results in data_path_all[0:15]:
    #         resultFile.write(results+"\n")
    #    resultFile.close()
    if dark_current:
        dark_current = calculate_dark_current(image_dir, coadds=1)
    else:
        dark_current = 0

    count = 0
    # One spectral-alignment series (rolling max-min of band centers) per frame.
    spectral_alingn_unct = []
    # Only the first 16 files are processed; presumably one file per laser
    # wavelength in Wavelength.csv -- TODO confirm against the data set.
    for data_path in data_path_all[0:16]:

        data_file = os.path.join(image_dir, data_path)
        print(data_path)
        #        cc
        telemetry_file_name = data_path.split('.')[0]
        #################### Telemetry Statistics###############################
        # Now lets parse the telemetry file to get the required information###
        #Note; The function can return the CCD temp and FPE temp. But these are
        #not needed on the image processing algorithm.. atleast for the time being

        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')

        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
            print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
                  round(fpa_temp, 2))
        else:

            # Fall back to nominal acquisition parameters when no telemetry
            # file exists for this frame.
            print('Telemetry file missing')
            coadds = 10
            int_time = 6000
            fpe_temp = 43
            fpa_temp = -21

    ##############Image Processing begins here##############################
        full_frame = read_idl_file(data_file)
        check_image = create_image_active_region(full_frame)
        raw_image = create_final_image(np.array(full_frame))
        # print('Max. Val. Raw = ', np.max(raw_image))
        quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

        ##########################OFFSET REMOVAL###############################
        # Input : Full_Frame TEMPO IMAGE
        # Otput : Bias Subtracted Active Region. The active region dimensions are
        #now 1028*1024. 2 pixel lines from SMEAR overclocks are now used for
        #to store storage summation information
        # For dark data, only offset removal is needed to compute the dark current
        # For light data, additional processing such as linearity, smear and
        #cross talk is needed

        bias_removed_quads = perform_bias_subtraction(full_frame)

        #print('Max. Val. Offset Removed  = ', np.max(bias_removed_quads))

        # Caption used when saving the raw image below.
        text1 = telemetry_file_name+'.img' +' (Raw Data)\n Int. time:' + str(round(int_time, 1))+ \
                           'ms, Co-adds:' +str(int(coadds))+\
                           ', FPE temp:'+ str(round(fpe_temp, 1))+'C, ' + \
                           ' FPA temp: ' + str(round(fpa_temp, 1))+'C'

        # Now let us save the raw image
        raw_image_save = os.path.join(image_save_dir, 'raw_image')
        if not os.path.exists(raw_image_save):
            os.makedirs(raw_image_save)
        plot_save_dir = raw_image_save + '/' + data_path + '.png'
        #prnu_spectrometer[prnu_spectrometer < 0.9] = 0.9
        #prnu_spectrometer[prnu_spectrometer > 1.2] = 1.2
        #print(np.min(check_image))
        #create_image(check_image, text1, plot_save_dir)
        #---------------------------------------------------------------------

        #########################NON LINEARITY CORRECTION#######################
        # Input : Bias Subtracted Active regions
        # Output : Linearized Active Region Quads
        if linearity:
            linearized_a = apply_linearity_correction(
                bias_removed_quads[0, :, :], quads[0], coadds)
            linearized_b = apply_linearity_correction(
                bias_removed_quads[1, :, :], quads[1], coadds)
            linearized_c = apply_linearity_correction(
                bias_removed_quads[2, :, :], quads[2], coadds)
            linearized_d = apply_linearity_correction(
                bias_removed_quads[3, :, :], quads[3], coadds)
            linearized_quads = np.array(
                [linearized_a, linearized_b, linearized_c, linearized_d])
            #print(np.mean(linearized_quads))
        else:
            linearized_quads = bias_removed_quads
            #----------------------------------------------------------------------
            ##########################SMEAR REMOVAL################################
            # Input : linearized quads ( all quads together)
            # Output : SMEAR offset corrected Quad

        #### lets' create the masked array with outlier mask################
        # The outlier mask is array of list of 4 quads.
        #print('Max. Val. Linearized  = ', np.max(linearized_quads))
        outlier_mask = read_outlier_mask()

        # Note : all the arrays after this step are masked arrays

        if smear:
            smear_removed_quads = perform_smear_removal(
                linearized_quads, int_time, outlier_mask)
        else:
            smear_removed_quads = linearized_quads
            #----------------------------------------------------------------------

            ##########################CROSS-TALK REMOVAL###########################
            # Input : smear removed quads (all quads together)
            # Output : cross talk removed quads
        #print('Max. Val. Smeared  = ', np.max(smear_removed_quads))

        if cross_talk:
            cross_talk_removed_quads = remove_cross_talk(
                np.array(smear_removed_quads))
        else:
            cross_talk_removed_quads = smear_removed_quads
            #----------------------------------------------------------------------
        #print('Max. Val. Cross Talked = ', np.max(cross_talk_removed_quads))
        processed_image = create_final_image(
            np.array(cross_talk_removed_quads))
        processed_image = processed_image - dark_current

        # PRNU (pixel response non-uniformity) correction, clipped to a
        # narrow band around unity before dividing it out below.
        #prnu_map = parse_prnu_file()
        prnu_map = read_prnu_files()
        prnu_spectrometer = create_final_image(np.array(prnu_map))
        prnu_spectrometer[prnu_spectrometer > 1.03] = 1.02
        prnu_spectrometer[prnu_spectrometer < 0.97] = 0.98

        outlier_spectrometer = create_final_image(np.array(outlier_mask))

        nx_quad, ny_quad = processed_image.shape
        outlier_spectrometer = np.reshape(outlier_spectrometer,
                                          (nx_quad * ny_quad, 1))
        outlier_detectors = np.array(np.where([outlier_spectrometer == 1]))
        #print('outliers =', outlier_detectors.shape[1])
        outlier_spectrometer = np.reshape(outlier_spectrometer,
                                          (nx_quad, ny_quad))
        #create_image(outlier_spectrometer,'outliers = '+str(outlier_detectors.shape[1]),'b')
        processed_image = processed_image / (coadds * prnu_spectrometer)
        #print(np.min(processed_image))
        # One laser wavelength per processed file, indexed by loop count.
        wavelen = np.loadtxt(os.path.join(image_dir, 'Wavelength.csv'),
                             delimiter=',')
        wavelen = wavelen[count]

        #processed_image[processed_image>=0.85*16383] = 16383
        # NOTE(review): ``round(fpa_temp, )`` is round(fpa_temp) (an integer);
        # the raw-image caption above uses round(fpa_temp, 1) -- confirm which
        # precision is intended.
        text1 = telemetry_file_name+'.img' +' (Laser WL:' + str(wavelen)+' nm) \n Int. time:' + '250 '+ \
                           'ms, Co-adds:' +str(int(coadds))+\
                           ', FPE temp:'+ str(round(fpe_temp, 1))+'C, ' + \
                           ' FPA temp: ' + str(round(fpa_temp, ))+'C'
        processed_image_save = os.path.join(image_save_dir, 'processed_image')
        if not os.path.exists(processed_image_save):
            os.makedirs(processed_image_save)
        plot_save_dir = processed_image_save + '/' + data_path + '.png'
        #processed_image = uniform_filter(processed_image, size=(5, 3), mode='mirror')
        create_image(processed_image, text1, plot_save_dir)

        # Row with the largest mean signal: the illuminated spectral line.
        row_mean = np.mean(processed_image, axis=1)
        max_row_each = np.where(
            row_mean == max(np.mean(processed_image, axis=1)))[0]
        max_row_each = max_row_each[0]
        max_row = max_row_each
        #print(max_row_each)

        #        processed_image = processed_image[max_row-12: max_row+12, :]
        #        processed_image = uniform_filter(processed_image, size=(10, 4), mode='mirror')

        # print(wavelen)
        #        #plt.plot(processed_image[max_row-2, :], 'purple', label='2 lines up')
        #        plt.plot(processed_image[max_row-1, :], 'y-', label='1 lines up')
        #        #plt.plot(processed_image[max_row+2, :], 'g-', label='2 lines down')
        #        plt.plot(processed_image[max_row+1, :], 'r-', label='1 line down')
        #        plt.plot(processed_image[max_row, :], 'k-', label='Brightest')
        #        plt.grid(True, linestyle  = ':')
        #        plt.title('Profile along Spectral direction (Laser Wavelength ='  + str(wavelen) + 'nm)')
        #        plt.xlabel('Spatial Pixel Index')
        #        plt.ylabel('Counts (DN)')
        #        plt.show()
        #        col_index = np.sort([randint(100, 2000) for p in range(0, 10)])
        #        col_index = [480, 485, 490, 500, 510]
        #        for i in col_index:
        #                plt.plot(processed_image[:, i]/np.max(processed_image[:, i]),
        #                         'o--', label='Col. ' + str(i))
        #        plt.legend(loc='best', ncol=4)
        #        plt.grid(True, linestyle=':')
        #        plt.title('Profile along Spectral direction (Laswer WL:' + str(wavelen)+' nm)')
        #        plt.xlabel('Spectral Pixel Index')
        #        plt.ylabel('Normalized Counts')
        #        plt.show()
        #        cc

        #        cc
        wavelengths_all = []
        #print(count)
        smile_all = []
        FWHM_all = []

        # Fit a Gaussian along the spectral axis of each (spatial) column in
        # the active range to locate the band center (smile) and width (FWHM).
        for i in range(15, 2035):
            wavelengths = [wavelen] * 2020
            wavelengths_all.append(wavelengths)
            #wavelengths_all.append(wavelengths)

            smile, FWHM = fit_gaussian_func(max_row_each,
                                            processed_image[:, i], wavelen, i)
            smile_all.append(smile)
            FWHM_all.append(FWHM)
        #print(FWHM)
        mean_val = np.mean(smile_all)
        smile_all = moving_average(smile_all)
        #smile_all = uniform_filter(smile_all, size=6, mode='mirror')
        #print(np.array(smile_all).shape)
        band_center = pd.Series(smile_all)
        # Spectral alignment is calculated by computing the difference between max and min spectral centers within 4 pixel moving window
        max_val = band_center.rolling(4).max()
        min_val = band_center.rolling(4).min()
        spectral_alignment = max_val - min_val
        spectral_alingn_unct.append(spectral_alignment)
        #plt.plot(max_val-min_val, label=str(wavelen)+ 'nm' )
        count = count + 1

        #plt.plot(wavelengths[1400:], FWHM_all[1400:], 'o', label=str(wavelen)+ 'nm')
        #plt.plot(uniform_filter(FWHM_all, size=12,mode='mirror'), label=str(wavelen)+ 'nm')
        #plt.show()

        #fit_gaussian_func(x_data, y_data, wavelen)
#
#
##        #max_row = 29 # for 297.8 nm
##        #max_row = 1546 #for 640nm
##        #max_row = 1369 #for 605nm
##        #max_row = 1050 #for 605nm
##        #max_row = 142 # for 320 nm
##
##        #max_row = 91 # for 310 nm
##        max_row = 15 # 675 nm
##        #max_row = 2032 # for 736 nm
##        #check_val = 1392
#        col_index = np.sort([randint(100, 2000) for p in range(0, 2)])
#        for i in col_index:
#
#            plt.plot(processed_image[:, i],
#                     'o--', label='Spatial Index ' + str(i))
#            plt.legend(loc='best', ncol=4)
#        plt.grid(True, linestyle=':')
#        plt.title('Profile along Spectral direction (Laser Wavelength ='  + str(wavelen) + 'nm')
#        plt.xlabel('Spectral Pixel Index')
#        plt.ylabel('Counts (DN)')
#        #plt.xlim(655, 675)
#        #plt.yscale('log')
#        plt.xlim(max_row_each-10, max_row_each+10)
#
#plt.show()

#
#plt.figure()
    # 1-sigma spread of the spectral alignment across all processed frames.
    spectral_alignment_unct = np.std(np.array(spectral_alingn_unct), axis=0)
    print(spectral_alignment_unct.shape)
    plt.plot(spectral_alignment_unct, 'k', label='1-Sigma Spectral. Alignment')
    plt.axhline(y=0.01,
                color='red',
                linestyle='--',
                linewidth=2,
                label='TEMPO Requirement')
    # NOTE(review): the title and ylabel mention 'Samples per FWHM' but the
    # plotted quantity is the 1-sigma spectral alignment -- the labels look
    # copied from another script; confirm intended wording.
    plt.title(
        'TEMPO Number of Samples per FWHM Vs. Wavelengths\n (*** Note: 2 pixel smoothing in spectral,12 pixel in spatial direction)',
        fontsize=12)
    plt.grid(True, linestyle=':')
    plt.xlabel('Wavelength (nm)', fontsize=12)
    plt.ylabel('Samples/FWHM', fontsize=12)
    #plt.ylim(2.6, 3.3)
    #plt.ylim(2.4, 3)

    #plt.ylim(-0.3, 0.3)
    # plt.axhline(y= 2.7, color='red', linestyle='--',  linewidth=2)
    #plt.axhline(y= -0.2, color='red', linestyle='--', linewidth=2)
    #plt.axhline(y= 0.2, color='red', linestyle='--', linewidth=2)

    plt.xlim(0, 2500)
    plt.legend(loc='best')
    #plt.text(500, 0.25, '----TEMPO Requirement',  color='red', fontsize=12)
    #plt.text(500, 2.72, '----TEMPO Requirement',  color='red', fontsize=12)
    plt.show()
def main():
    """
    Extract per-quad dark signal statistics versus FPA temperature.

    Walks every ``*_PT_Dark`` temperature directory (except the first,
    index 0), processes each raw IDL (.sav) frame (coaddition correction,
    bias subtraction, optional temperature/linearity/cross-talk/smear
    corrections), then records the outlier-filtered mean of a fixed
    sub-region for the odd and even columns of each quad (A-D).  The
    per-frame means and integration times are written to one CSV per
    temperature setting.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\FPA_Gain_vs_Temp'
    telemetry_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\2016.09.19'
    image_save_dir = r'C:\Users\nmishra\Workspace\TEMPO\TEMPO_DC_FPA_TEMP_SENSTIVITY_NEW'
    telemetry_dir = os.path.join(telemetry_dir, 'processed/h5')
    linearity = 1  # option to turn on/off linearity correction
    smear = 1  # 1 = apply smear removal
    cross_talk = 1  # 1 = apply cross-talk removal
    temp_correction = 1  # 1 = apply FPE temperature correction

    if not os.path.exists(image_save_dir):
        os.makedirs(image_save_dir)

    temperature_files = [each for each in os.listdir(data_dir) \
                        if each.endswith('_PT_Dark')]
    # NOTE(review): range starts at 1, so the first temperature directory is
    # skipped -- confirm that is intentional.
    for k in range(1, len(temperature_files)):

        image_data_files = os.path.join(data_dir, temperature_files[k],
                                        'Script_Data', 'saved_quads')

        # All .dat.sav frames except the 118000-int-time ones...
        data_path_all = [
            each for each in os.listdir(image_data_files)
            if not each.endswith('_118000.dat.sav')
            if each.endswith('.dat.sav')
        ]

        # ...plus only the last 118000 frame.
        data_path_op_int = [each for each in os.listdir(image_data_files) \
                         if each.endswith('118000.dat.sav')][-1]

        data_path_all.append(data_path_op_int)
        save_dir = os.path.join(image_save_dir, temperature_files[k])
        # Per-frame outlier-filtered means, one list per quad/column parity.
        all_quad_D_odd = []
        all_quad_D_even = []
        all_quad_C_odd = []
        all_quad_C_even = []
        all_quad_B_odd = []
        all_quad_B_even = []
        all_quad_A_odd = []
        all_quad_A_even = []
        all_int_time = []

        for data_path in data_path_all:
            # Integration time is encoded in the file name; presumably in
            # microseconds, converted to milliseconds here -- TODO confirm.
            data_path_name_split = data_path.split('_')
            int_time = round(int(data_path_name_split[-1].split('.')[0]))
            int_time = int(int_time) / 1000
            #print(int_time)
            all_int_time.append(int_time)

            data_file = os.path.join(image_data_files, data_path)
            print(data_path)

            #        cc
            telemetry_file_name = data_path.split('.')[0]
            #################### Telemetry Statistics###############################
            # Now lets parse the telemetry file to get the required information###
            #Note; The function can return the CCD temp and FPE temp. But these are
            #not needed on the image processing algorithm.. atleast for the time being

            telemetry_file = os.path.join(telemetry_dir,
                                          telemetry_file_name + '.h5')
            if os.path.exists(telemetry_file):
                coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                    telemetry_dir, telemetry_file_name)
                # print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
                # round(fpa_temp, 2))
                print('Integ. Time =', int_time)
                print(coadds)

            else:
                # Fall back to nominal values when no telemetry file exists;
                # the filename-derived int_time from above is kept.
                #print('Telemetry file missing')
                coadds = 100
                #int_time = 6000
                fpe_temp = 25
                fpa_temp = 25

        ##############Image Processing begins here##############################

            full_frame = read_idl_file(data_file)

            # 4-D data: average over the co-added frames; otherwise apply an
            # explicit coaddition correction.
            if np.array(full_frame).ndim == 4:
                full_frame = np.mean(full_frame, axis=0)
            else:
                full_frame = perform_coaddition_correction(full_frame, coadds)

            quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

            ##########################OFFSET REMOVAL###############################
            # Input : Full_Frame TEMPO IMAGE
            # Otput : Bias Subtracted Active Region. The active region dimensions are
            #now 1028*1024. 2 pixel lines from SMEAR overclocks are now used for
            #to store storage summation information
            bias_removed_quads = perform_bias_subtraction(full_frame)
            #---------------------------------------------------------------------
            #########################NON LINEARITY CORRECTION#######################
            if temp_correction:
                temp_corrected_quads = perform_temp_correction(
                    bias_removed_quads, fpe_temp)
            else:
                temp_corrected_quads = bias_removed_quads

        #########################NON LINEARITY CORRECTION#######################
        # Input : Bias Subtracted Active regions
        # Output : Linearized Active Region Quads
        # pass the difference instead of coadds
            if linearity:
                linearized_a = apply_linearity_correction(
                    temp_corrected_quads[0, :, :], quads[0])
                linearized_b = apply_linearity_correction(
                    temp_corrected_quads[1, :, :], quads[1])
                linearized_c = apply_linearity_correction(
                    temp_corrected_quads[2, :, :], quads[2])
                linearized_d = apply_linearity_correction(
                    temp_corrected_quads[3, :, :], quads[3])
                linearized_quads = np.array(
                    [linearized_a, linearized_b, linearized_c, linearized_d])
                #print(np.mean(linearized_quads))
            else:
                linearized_quads = temp_corrected_quads

            if cross_talk:
                cross_talk_removed_quads = remove_cross_talk(linearized_quads)
            else:
                cross_talk_removed_quads = linearized_quads

            if smear:
                smear_removed_quads = perform_smear_removal(
                    np.array(cross_talk_removed_quads), int_time)
            else:
                smear_removed_quads = cross_talk_removed_quads

            # Split the mosaic into quads, then each quad into odd/even
            # columns, and keep the outlier-filtered mean of rows 300:900,
            # cols 200:400 of each half.
            processed_image = create_final_image(np.array(smear_removed_quads))
            quad_d = processed_image[0:1028, 0:1024]
            quad_d_odd = quad_d[:, ::2]
            quad_d_even = quad_d[:, 1::2]
            all_quad_D_odd.append(
                np.mean(filter_outlier_median(quad_d_odd[300:900, 200:400])))
            all_quad_D_even.append(
                np.mean(filter_outlier_median(quad_d_even[300:900, 200:400])))

            quad_c = processed_image[0:1028, 1024:]
            quad_c_odd = quad_c[:, ::2]
            quad_c_even = quad_c[:, 1::2]
            all_quad_C_odd.append(
                np.mean(filter_outlier_median(quad_c_odd[300:900, 200:400])))
            all_quad_C_even.append(
                np.mean(filter_outlier_median(quad_c_even[300:900, 200:400])))

            quad_a = processed_image[1028:, 0:1024]
            quad_a_odd = quad_a[:, ::2]

            quad_a_even = quad_a[:, 1::2]
            all_quad_A_odd.append(
                np.mean(filter_outlier_median(quad_a_odd[300:900, 200:400])))
            all_quad_A_even.append(
                np.mean(filter_outlier_median(quad_a_even[300:900, 200:400])))

            quad_b = processed_image[1028:, 1024:]
            quad_b_odd = quad_b[:, ::2]
            quad_b_even = quad_b[:, 1::2]
            all_quad_B_odd.append(
                np.mean(filter_outlier_median(quad_b_odd[300:900, 200:400])))
            all_quad_B_even.append(
                np.mean(filter_outlier_median(quad_b_even[300:900, 200:400])))

        dframe1 = pd.DataFrame({
            'Int_time.': all_int_time,
            'Avg_Quad_A_odd': all_quad_A_odd,
            'Avg_Quad_A_even': all_quad_A_even,
            'Avg_Quad_B_odd': all_quad_B_odd,
            'Avg_Quad_B_even': all_quad_B_even,
            'Avg_Quad_C_odd': all_quad_C_odd,
            'Avg_Quad_C_even': all_quad_C_even,
            'Avg_Quad_D_odd': all_quad_D_odd,
            'Avg_Quad_D_even': all_quad_D_even
        })
        # NOTE(review): save_dir already ends with temperature_files[k]
        # (assigned above), so this join nests the name twice
        # (.../<temp>/<temp>/...) -- confirm the double level is intended.
        processed_file_dir = os.path.join(save_dir, temperature_files[k])
        if not os.path.exists(processed_file_dir):
            os.makedirs(processed_file_dir)

        processed_file_name = processed_file_dir + '/' + temperature_files[
            k] + '_Photon_transfer_data_all_FPA.csv'
        dframe1.to_csv(processed_file_name)
# Example #9
# 0
def main():
    """
    Collect photon-transfer statistics from TEMPO raw images (.sav IDL files).

    For each raw frame that has a matching telemetry (.h5) file, the frame
    is bias-subtracted and the mean signal and variance of the active
    region are extracted separately for the odd and even columns of each
    quad (A-D).  The per-frame statistics and integration times are written
    as CSV files into ``image_save_dir`` for later linearity /
    photon-transfer analysis.  Frames without telemetry are reported and
    skipped.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Photon_Transfer_TVAC'
    telemetry_dir = os.path.join(
        r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\2018.06.28\processed\h5')
    image_dir = os.path.join(data_dir, 'saved_quads')
    # (typo 'inlcuded' kept deliberately to match existing output directories)
    image_save_dir = os.path.join(image_dir, 'saved_plots', 'Linearity',
                                  'Mean_Stats\Variance_inlcuded')
    if not os.path.exists(image_save_dir):
        os.makedirs(image_save_dir)

    data_path_all = sorted(
        [each for each in os.listdir(image_dir) if each.endswith('.sav')])

    # Per-quad accumulators: active-region means (odd/even columns) and the
    # corresponding variances, one entry per processed frame.
    all_active_A_odd = []
    all_active_B_odd = []
    all_active_C_odd = []
    all_active_D_odd = []

    all_active_A_even = []
    all_active_B_even = []
    all_active_C_even = []
    all_active_D_even = []

    all_active_A_odd_var = []
    all_active_B_odd_var = []
    all_active_C_odd_var = []
    all_active_D_odd_var = []

    all_active_A_even_var = []
    all_active_B_even_var = []
    all_active_C_even_var = []
    all_active_D_even_var = []

    int_time_all = []

    print('Total data = ', len(data_path_all))
    # Iterate the file names directly (the original indexed with
    # ``range(len(...))`` and re-bound the index variable).
    for data_path in data_path_all:
        print(data_path)

        data_file = os.path.join(image_dir, data_path)

        ################## Telemetry Statistics###############################
        # Parse the telemetry file for co-adds, integration time and the
        # FPE/FPA temperatures (temperatures are not used further here).
        telemetry_file_name = data_path.split('.')[0]
        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')

        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
            print('Integ. Time =', int_time)
        else:
            # Bug fix: the original placed this print AFTER ``continue``,
            # which made it unreachable; report the missing file, then skip.
            print('Telemetry file missing')
            continue

        int_time_all.append(int_time)

        #############Image Processing begins here##############################
        full_frame = read_idl_file(data_file)

        ##########################OFFSET REMOVAL###############################
        # Only offset removal is needed before extracting mean/variance
        # statistics; light-data corrections (linearity, smear, cross talk)
        # are not applied in this analysis.
        bias_removed_quads = perform_bias_subtraction(full_frame)
        even_quad_active, odd_quad_active = extract_active_region(
            bias_removed_quads)

        all_active_A_odd.append(odd_quad_active[0])
        all_active_B_odd.append(odd_quad_active[1])
        all_active_C_odd.append(odd_quad_active[2])
        all_active_D_odd.append(odd_quad_active[3])

        all_active_A_even.append(even_quad_active[0])
        all_active_B_even.append(even_quad_active[1])
        all_active_C_even.append(even_quad_active[2])
        all_active_D_even.append(even_quad_active[3])

        even_quad_var, odd_quad_var = extract_uncertainty(bias_removed_quads)

        all_active_A_odd_var.append(odd_quad_var[0])
        all_active_B_odd_var.append(odd_quad_var[1])
        all_active_C_odd_var.append(odd_quad_var[2])
        all_active_D_odd_var.append(odd_quad_var[3])

        all_active_A_even_var.append(even_quad_var[0])
        all_active_B_even_var.append(even_quad_var[1])
        all_active_C_even_var.append(even_quad_var[2])
        all_active_D_even_var.append(even_quad_var[3])

    # Write one CSV per quad / column parity / statistic, plus the list of
    # integration times.  Table-driven to replace ~90 lines of copy-paste
    # np.savetxt calls; file names and contents are unchanged.
    outputs = {
        'quad_A_active_odd.csv': all_active_A_odd,
        'quad_B_active_odd.csv': all_active_B_odd,
        'quad_C_active_odd.csv': all_active_C_odd,
        'quad_D_active_odd.csv': all_active_D_odd,
        'quad_A_active_even.csv': all_active_A_even,
        'quad_B_active_even.csv': all_active_B_even,
        'quad_C_active_even.csv': all_active_C_even,
        'quad_D_active_even.csv': all_active_D_even,
        'quad_A_active_odd_var.csv': all_active_A_odd_var,
        'quad_B_active_odd_var.csv': all_active_B_odd_var,
        'quad_C_active_odd_var.csv': all_active_C_odd_var,
        'quad_D_active_odd_var.csv': all_active_D_odd_var,
        'quad_A_active_even_var.csv': all_active_A_even_var,
        'quad_B_active_even_var.csv': all_active_B_even_var,
        'quad_C_active_even_var.csv': all_active_C_even_var,
        'quad_D_active_even_var.csv': all_active_D_even_var,
        'int_time.csv': int_time_all,
    }
    for file_name, values in outputs.items():
        np.savetxt(os.path.join(image_save_dir, file_name),
                   np.array(values),
                   fmt='%1.3f',
                   delimiter=",")
def main():
    """
    Process TEMPO dark-data raw images and derive a dark-current estimate.

    For every .sav (IDL) file in the image directory this routine:
      1. reads the matching telemetry (.h5) file for coadds, integration
         time and FPE/FPA temperatures, falling back to nominal defaults
         when the telemetry file is missing,
      2. performs offset (bias) subtraction and, per the switches below,
         linearity, smear and cross-talk corrections,
      3. normalizes by coadds * integration time (DN/msec), clips
         saturated pixels, takes the per-row median as the dark current
         and saves a plot of the clipped dark image.

    The per-file row medians are accumulated and written out to
    dark_current.csv when all files are processed.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Dark_data\2017.07.10'
    plot_dir = os.path.join(data_dir, 'Dark_Current_Image')
    image_dir = os.path.join(data_dir, 'saved_quads')
    image_save_dir = os.path.join(image_dir, 'saved_plots')
    # NOTE(review): telemetry year (2018) does not match the data year
    # (2017) -- confirm this path is intentional.
    telemetry_dir = r'E:\Image Data\2018.07.10\processed\h5'
    # Processing switches: 1 enables the step, 0 skips it.
    linearity = 0  # option to turn on/off linearity correction
    smear = 1
    cross_talk = 1
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)

    data_path_all = sorted([each for each in os.listdir(image_dir)
                            if each.endswith('.sav')])
    print('Total data = ', len(data_path_all))
    all_dark_current = []

    # The first file is deliberately skipped (kept from the original
    # code); presumably a throw-away frame -- verify.
    for data_path in data_path_all[1:]:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)
        telemetry_file_name = data_path.split('.')[0]

        #################### Telemetry Statistics ##############################
        # Parse the telemetry file for coadds, integration time and the
        # focal-plane temperatures; use nominal defaults when missing.
        telemetry_file = os.path.join(telemetry_dir, telemetry_file_name+'.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(telemetry_dir,
                                                                        telemetry_file_name)
            print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
                  round(fpa_temp, 2))
            print(int_time)
            print(coadds)
        else:
            print('Telemetry file missing')
            coadds = 10
            int_time = 160
            fpe_temp = 35.8
            fpa_temp = 11.9

        ############## Image Processing begins here ############################
        full_frame = read_idl_file(data_file)
        quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

        ########################## OFFSET REMOVAL ##############################
        # Input : full-frame TEMPO image
        # Output: bias-subtracted active region (1028*1024 per quad).
        # For dark data, offset removal alone is sufficient to compute the
        # dark current; the remaining corrections are optional.
        bias_removed_quads = perform_bias_subtraction(full_frame)

        text1 = telemetry_file_name + '.img' + ' (Raw Data)\n Int. time:' + str(round(int_time, 1)) + \
                           'ms, Co-adds:' + str(int(coadds)) + \
                           ', FPE temp:' + str(round(fpe_temp, 1)) + 'C, ' + \
                           ' FPA temp: ' + str(round(fpa_temp, 1)) + 'C'

        # Directory kept for optionally saving the raw image.
        raw_image_save = os.path.join(image_save_dir, 'raw_image')
        if not os.path.exists(raw_image_save):
            os.makedirs(raw_image_save)

        ######################### NON LINEARITY CORRECTION #####################
        # Input : bias-subtracted active regions
        # Output: linearized active-region quads (skipped when linearity == 0).
        if linearity:
            linearized_a = apply_linearity_correction(bias_removed_quads[0, :, :],
                                                      quads[0], coadds)
            linearized_b = apply_linearity_correction(bias_removed_quads[1, :, :],
                                                      quads[1], coadds)
            linearized_c = apply_linearity_correction(bias_removed_quads[2, :, :],
                                                      quads[2], coadds)
            linearized_d = apply_linearity_correction(bias_removed_quads[3, :, :],
                                                      quads[3], coadds)
            linearized_quads = np.array([linearized_a, linearized_b,
                                         linearized_c, linearized_d])
        else:
            linearized_quads = bias_removed_quads

        # Outlier mask: a list/array of masks, one per quad. Arrays beyond
        # this step are masked arrays.
        outlier_mask = read_outlier_mask()

        ########################## SMEAR REMOVAL ###############################
        if smear:
            smear_removed_quads = perform_smear_removal(linearized_quads, int_time,
                                                        outlier_mask)
        else:
            smear_removed_quads = linearized_quads

        ########################## CROSS-TALK REMOVAL ##########################
        if cross_talk:
            cross_talk_removed_quads = remove_cross_talk(np.array(smear_removed_quads))
        else:
            cross_talk_removed_quads = smear_removed_quads

        processed_image = create_final_image(np.array(cross_talk_removed_quads))

        # The PRNU map is read and clamped but (as in the original code)
        # never applied to the dark image -- kept for parity; confirm it
        # can be removed.
        prnu_map = read_prnu_files()
        prnu_spectrometer = create_final_image(np.array(prnu_map))
        prnu_spectrometer[prnu_spectrometer > 1.03] = 1.02
        prnu_spectrometer[prnu_spectrometer < 0.97] = 0.98
        outlier_spectrometer = create_final_image(np.array(outlier_mask))
        nx_quad, ny_quad = processed_image.shape
        outlier_spectrometer = np.reshape(outlier_spectrometer,
                                          (nx_quad*ny_quad, 1))
        outlier_spectrometer = np.reshape(outlier_spectrometer, (nx_quad, ny_quad))

        # Normalize to DN/msec, zero out near-saturated pixels, then take
        # the per-row median as this file's dark-current estimate.
        processed_image = processed_image/(coadds*int_time)
        processed_image[processed_image >= 0.8*16383] = 0
        dark_current_each = np.median(processed_image, axis=1)
        all_dark_current.append(dark_current_each)

        text1 = telemetry_file_name+'.img' +', UV CCD \n' + \
                           '(Int. time:' + str(round(int_time,2))+ \
                           'ms, Co-adds:' +str(int(coadds))+\
                           ', FPE temp:'+ str(round(fpe_temp, 2))+'C, ' + \
                           ' FPA temp: ' + str(round(fpa_temp, 2))+'C)\n'+ \
                         'Mean DC = '+ str(round(np.mean(processed_image[0:1028:, :]),3)) +' DN/msec'

        processed_image_save = os.path.join(image_save_dir, 'processed_image')
        if not os.path.exists(processed_image_save):
            os.makedirs(processed_image_save)
        plot_save_dir = plot_dir + '/' + data_path + '.png'

        # Renamed from `dark_current` so the image no longer shadows the
        # processing flag of the same name that the original configured.
        # Note: this is a view of `processed_image`; the clamps below
        # mutate both (harmless -- processed_image is not reused).
        dark_current_image = processed_image
        med_val = np.median(dark_current_image)
        dark_current_image[dark_current_image > 0.15] = med_val
        dark_current_image[dark_current_image < -0.1] = med_val

        create_image(dark_current_image, text1, plot_save_dir)

    Dark_Current_name = plot_dir + '/' + 'dark_current.csv'
    np.savetxt(Dark_Current_name, np.array(all_dark_current), delimiter=",")
    print('DONE!!')
def main():
    """
    Build read-noise histograms from TEMPO trailing serial overclocks.

    Reads every raw .sav (IDL) image in the image directory, averages each
    frame down by its coadd count (from telemetry, or a default of 10 when
    the telemetry .h5 file is missing), extracts the trailing serial
    overclock pixels for the even and odd readout phases, and saves one
    histogram per phase (Quad B odd / even) as PNG files.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Dark_data\2017.07.10'
    telemetry_dir = r'E:\Image Data\2018.07.10'
    image_dir = os.path.join(data_dir, 'saved_quads')
    save_dir = os.path.join(image_dir, 'processed_image')
    telemetry_dir = os.path.join(telemetry_dir, 'processed/h5')
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    data_path_all = sorted(
        [each for each in os.listdir(image_dir) if each.endswith('.sav')])

    all_quads_odd = []
    all_quads_even = []
    print('Total data = ', len(data_path_all))

    for data_path in data_path_all:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)
        telemetry_file_name = data_path.split('.')[0]

        #################### Telemetry Statistics ##############################
        # Parse the telemetry file for coadds, integration time and the
        # focal-plane temperatures; use nominal defaults when missing.
        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
            print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
                  round(fpa_temp, 2))
            print('Integ. Time =', int_time)
        else:
            print('Telemetry file missing')
            coadds = 10
            int_time = 6000
            fpe_temp = 43
            fpa_temp = -21.5

        ############## Image Processing begins here ############################
        # Average the coadded frame down to a single-read equivalent, then
        # pull out the trailing serial overclocks for each readout phase.
        full_frame = read_idl_file(data_file)
        full_frame = full_frame / coadds
        even_quad, odd_quad = extract_trailing_overclocks(full_frame)
        all_quads_even.append(even_quad)
        all_quads_odd.append(odd_quad)

    # Flatten (files x rows x cols) into a single column vector so the
    # histogram sees every overclock sample at once.
    all_quads_odd = np.array(all_quads_odd)
    x, y, z = np.shape(all_quads_odd)
    all_quads_odd = np.reshape(all_quads_odd, (x * y * z, 1))

    all_quads_even = np.array(all_quads_even)
    x, y, z = np.shape(all_quads_even)
    all_quads_even = np.reshape(all_quads_even, (x * y * z, 1))

    plt.figure()
    plt.hist(all_quads_odd, 30, facecolor='blue', alpha=0.5)
    plt.grid(linestyle=':')
    plt.xlabel('DN')
    plt.ylabel('Frequency')
    plt.title('Histogram of Trailing Serial Overclocks (Quad B Odd)')
    plt.savefig(
        r'C:\Users\nmishra\Desktop\RN\2017_07_10_FPE_39.69_FPA_-21.18\Overclocks\Outlier_included'
        + '/' + 'QuadB_Odd.png')
    plt.close('all')

    plt.figure()
    plt.hist(all_quads_even, 30, facecolor='red', alpha=0.5)
    plt.grid(linestyle=':')
    plt.xlabel('DN')
    plt.ylabel('Frequency')
    plt.title('Histogram of Trailing Serial Overclocks (Quad B Even)')
    plt.savefig(
        r'C:\Users\nmishra\Desktop\RN\2017_07_10_FPE_39.69_FPA_-21.18\Overclocks\Outlier_included'
        + '/' + 'QuadB_Even.png')
    # Bug fix: the second figure was previously never closed, leaking a
    # matplotlib figure per call.
    plt.close('all')
def main():
    """
    Process TEMPO spectral-range (bandpass) raw images for one laser line.

    For each .sav (IDL) raw image (resuming at index 252): read telemetry
    (coadds, integration time, FPE/FPA temperatures, with defaults when the
    .h5 file is missing), apply bias subtraction plus optional linearity,
    smear and cross-talk corrections, subtract dark current, divide by the
    BATC PRNU map and coadds, fit a Gaussian to the line, and accumulate
    the bandpass profile (raw and normalized) at a fixed illuminated row.
    Results are written to three CSV files at the end.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Spectral_Range\740_nm_2'
    image_dir = os.path.join(data_dir, 'saved_quads')
    save_dir = os.path.join(image_dir, 'processed_image')
    image_save_dir = os.path.join(image_dir, 'saved_plots')
    telemetry_dir = os.path.join(data_dir, 'processed/h5')
    wavelen_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Spectral_Range'
    # NOTE(review): .strip('_') only trims underscores at the string ends,
    # so this returns data_dir unchanged and relies on the path's last 8
    # characters being '740_nm_2' -- presumably .split('_') or a different
    # extraction was intended; verify before reusing with other paths.
    wavelen_val = data_dir.strip('_')[-8:]
    wavelen_val = wavelen_val[0:6]

    # Standar image processing step. If value is assigned 1, the routine is called.
    # If value is zero, routine is skipped.
    linearity = 1
    smear = 1
    cross_talk = 1
    bandpass_val = []          # raw bandpass profile per wavelength step
    bandpass_val_norm = []     # profile normalized by the window sum
    normalization_factor_all = []
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    data_path_all = sorted(
        [each for each in os.listdir(image_dir) if each.endswith('.sav')])

    wavelength = read_wavelength_file(wavelen_dir, wavelen_val)

    print('Total data = ', len(data_path_all))
    #    resultFile = open (os.path.join(save_dir,'file_name_all.csv'),'w')
    #
    #    for results in data_path_all:
    #         resultFile.write(results+"\n")
    #    resultFile.close()
    dark_current = calculate_dark_current(image_dir, coadds=1)
    # Resume a partially completed run at file index 252; `count` also
    # indexes into the wavelength table, so the two must stay in sync.
    count = 252
    for data_path in data_path_all[252:]:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)
        #        cc
        telemetry_file_name = data_path.split('.')[0]
        #################### Telemetry Statistics###############################
        # Now lets parse the telemetry file to get the required information###
        #Note; The function can return the CCD temp and FPE temp. But these are
        #not needed on the image processing algorithm.. atleast for the time being

        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
            print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
                  round(fpa_temp, 2))
        else:
            # Nominal fallback values when the telemetry file is missing.
            print('Telemetry file missing')
            coadds = 1
            int_time = 3500
            fpe_temp = 43
            fpa_temp = -21

    ##############Image Processing begins here##############################
        full_frame = read_idl_file(data_file)
        #check_image = create_image_active_region(full_frame)
        # raw_image = create_final_image(np.array(full_frame))
        # print('Max. Val. Raw = ', np.max(raw_image))
        quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

        ##########################OFFSET REMOVAL###############################
        # Input : Full_Frame TEMPO IMAGE
        # Otput : Bias Subtracted Active Region. The active region dimensions are
        #now 1028*1024. 2 pixel lines from SMEAR overclocks are now used for
        #to store storage summation information
        # For dark data, only offset removal is needed to compute the dark current
        # For light data, additional processing such as linearity, smear and
        #cross talk is needed

        bias_removed_quads = perform_bias_subtraction(full_frame)

        #print('Max. Val. Offset Removed  = ', np.max(bias_removed_quads))

        text1 = telemetry_file_name+'.img' +' (Raw Data)\n Int. time:' + str(round(int_time, 1))+ \
                           'ms, Co-adds:' +str(int(coadds))+\
                           ', FPE temp:'+ str(round(fpe_temp, 1))+'C, ' + \
                           ' FPA temp: ' + str(round(fpa_temp, 1))+'C'
        # Now let us save the raw image
        raw_image_save = os.path.join(image_save_dir, 'raw_image')
        if not os.path.exists(raw_image_save):
            os.makedirs(raw_image_save)
        plot_save_dir = raw_image_save + '/' + data_path + '.png'
        #prnu_spectrometer[prnu_spectrometer < 0.9] = 0.9
        #prnu_spectrometer[prnu_spectrometer > 1.2] = 1.2
        #create_image(check_image/coadds, text1, plot_save_dir)
        #---------------------------------------------------------------------

        #########################NON LINEARITY CORRECTION#######################
        # Input : Bias Subtracted Active regions
        # Output : Linearized Active Region Quads
        if linearity:
            linearized_a = apply_linearity_correction(
                bias_removed_quads[0, :, :], quads[0], coadds)
            linearized_b = apply_linearity_correction(
                bias_removed_quads[1, :, :], quads[1], coadds)
            linearized_c = apply_linearity_correction(
                bias_removed_quads[2, :, :], quads[2], coadds)
            linearized_d = apply_linearity_correction(
                bias_removed_quads[3, :, :], quads[3], coadds)
            linearized_quads = np.array(
                [linearized_a, linearized_b, linearized_c, linearized_d])
            #print(np.mean(linearized_quads))
        else:
            linearized_quads = bias_removed_quads
            #----------------------------------------------------------------------
            ##########################SMEAR REMOVAL################################
            # Input : linearized quads ( all quads together)
            # Output : SMEAR offset corrected Quad

        #### lets' create the masked array with outlier mask################
        # The outlier mask is array of list of 4 quads.
        #print('Max. Val. Linearized  = ', np.max(linearized_quads))
        outlier_mask = read_outlier_mask()

        # Note : all the arrays after this step are masked arrays

        if smear:
            smear_removed_quads = perform_smear_removal(
                linearized_quads, int_time, outlier_mask)
        else:
            smear_removed_quads = linearized_quads
            #----------------------------------------------------------------------

            ##########################CROSS-TALK REMOVAL###########################
            # Input : smear removed quads (all quads together)
            # Output : cross talk removed quads
        #print('Max. Val. Smeared  = ', np.max(smear_removed_quads))

        if cross_talk:
            cross_talk_removed_quads = remove_cross_talk(
                np.array(smear_removed_quads))
        else:
            cross_talk_removed_quads = smear_removed_quads
            #----------------------------------------------------------------------
        #print('Max. Val. Cross Talked = ', np.max(cross_talk_removed_quads))
        processed_image = create_final_image(
            np.array(cross_talk_removed_quads))
        # Dark-current subtraction (computed once before the loop).
        processed_image = processed_image - (dark_current)
        prnu_map = parse_prnu_file()  # BATC Map
        prnu_spectrometer = prnu_map

        #Uncomment the lines below if my PRNU is to be used.
        #prnu_map = read_prnu_files() # LaRC map
        #prnu_spectrometer = create_final_image(np.array(prnu_map))
        #prnu_spectrometer[prnu_spectrometer > 1.03] = 1.02
        #prnu_spectrometer[prnu_spectrometer < 0.97] = 0.98
        #create_image(prnu_map, 'TEMPO PRNU Map', 'a')
        #create_image(prnu_spectrometer, 'TEMPO PRNU Map', 'a')
        outlier_spectrometer = create_final_image(np.array(outlier_mask))

        #create_image(outlier_spectrometer,'Outliers = 504', 'b')
        #cc
        # The reshape round-trip below leaves outlier_spectrometer
        # unchanged; kept from the original (fed the commented-out
        # outlier diagnostics).
        nx_quad, ny_quad = processed_image.shape
        outlier_spectrometer = np.reshape(outlier_spectrometer,
                                          (nx_quad * ny_quad, 1))
        #outlier_detectors = np.array(np.where([outlier_spectrometer == 1]))
        #print('outliers =', outlier_detectors.shape[1])
        outlier_spectrometer = np.reshape(outlier_spectrometer,
                                          (nx_quad, ny_quad))
        #create_image(outlier_spectrometer,'outliers = '+str(outlier_detectors.shape[1]),'b')
        #processed_image = processed_image/(prnu_spectrometer)
        # Flat-field by the BATC PRNU map and average down by coadds.
        processed_image = processed_image / (coadds * prnu_spectrometer)

        # Pin near-saturated pixels to full scale (14-bit ADC max).
        processed_image[processed_image >= 0.9 * 16383] = 16383

        processed_image_save = os.path.join(image_save_dir, 'processed_image')
        if not os.path.exists(processed_image_save):
            os.makedirs(processed_image_save)
        plot_save_dir = processed_image_save + '/' +str(wavelength[count])+ \
                       '_' + str(count)+'_nm' +'.png'

        #print('min_val=', np.min(processed_image))
        #processed_image = np.reshape(processed_image, (nx_quad*ny_quad, 1))
        #processed_image[outlier_spectrometer] = 16383

        #col_index = np.sort([randint(100, 2000) for p in range(0, 10)])
        # Brightest row by mean value -- used only for the plot label; the
        # bandpass row itself is hard-coded further down.
        row_mean = np.mean(processed_image, axis=1)
        max_row_each = np.where(
            row_mean == max(np.mean(processed_image, axis=1)))[0]
        #       max_row_each = max_row_each[0]
        max_row = max_row_each
        text1 = 'WL = ' + str(wavelength[count]) + 'nm'
        create_image(processed_image, max_row, text1, plot_save_dir)
        #        plt.plot(processed_image[2055, :],'r')
        #        plt.show()
        #        cc
        wavelen = wavelength[count]
        dframe_gauss = fit_gaussian_func(processed_image, wavelen)
        #max_row = 29 # for 297.8 nm
        #max_row = 1546 #for 640nm
        #max_row = 1369 #for 605nm
        #max_row = 1050 #for 605nm
        # max_row = 142 # for 320 nm
        # max_row = 319  # for 355 nm
        # max_row = 496  # for 390 nm
        # max_row = 673  # for 425 nm
        #max_row = 849  # for 460 nm
        #max_row = 992  # for 488.2 nm
        #max_row = 192 # for 330 nm
        #max_row = 91 # for 310 nm
        #max_row = 1724 # 675 nm
        #max_row = 2032 # for 736 nm
        # Hard-coded illuminated row for this laser line; this deliberately
        # overrides the computed max_row above (which becomes dead here).
        max_row = 1989  # for 727.5 nm
        #check_val = 1392

        #subset_image = processed_image[max_row,  :]
        # Normalization: sum of all unsaturated pixels in a 36-row window
        # centered on the illuminated row.
        subset_image_normalized = processed_image[max_row - 18:max_row + 18, :]
        normalization_factor = np.sum(
            subset_image_normalized[subset_image_normalized < 16383])
        normalization_factor_all.append(normalization_factor)
        #normalization_factor_subset = np.sum(subset_image_normalized[subset_image_normalized<16383])
        #normalization_factor_all.append(normalization_factor_subset)
        bandpass_val.append(processed_image[max_row, :])
        bandpass_val_norm.append(
            (processed_image[max_row, :] / normalization_factor))
        count = count + 1
        #print(processed_image.shape)
    # cc


#        plt.plot(processed_image[max_row-4, :], 'purple', label='4 lines down')
#        plt.plot(processed_image[max_row-3, :], 'y-', label='3 lines down')
#        plt.plot(processed_image[max_row-2, :], 'g-', label='2 lines down')
#        plt.plot(processed_image[max_row-1, :], 'r-', label='1 line down')
#        plt.plot(processed_image[max_row, :], 'k-', label='max (296.54 nm), ' + 'row:' +str(max_row))
#        plt.plot(processed_image[max_row+1, :], 'm-', label='1 line up')
##        plt.plot(processed_image[max_row+2, :], 'b-', label='2 lines up')
##        plt.plot(processed_image[max_row+3, :], 'orange', label='3 lines up')
##        plt.plot(processed_image[max_row+4, :], 'cyan', label='4 lines up')
#        plt.title('Spatial Profile along illuminated rows (Laser WL: 727.5 nm)')
#        plt.ylabel('Counts (DN)')
#        plt.xlabel('Spatial Pixel Index')
#        plt.grid(True, linestyle=':')
#
#        #plt.yscale('log')
#        #plt.xlim(0, 2000)
#
#
#        #plt.legend(loc='best')
#        plt.show()
#        cc
#        #plt.ylim(2000, 40000)
#        plt.show(block=False)
#        #cc
#        plt.figure(3)
#        for i in col_index:
#
#            plt.plot(processed_image[:, i],
#                     'o--', label='Col. ' + str(i))
#            plt.legend(loc='best', ncol=4)
#        plt.grid(True, linestyle=':')
#        plt.title('Profile along Spectral direction (Spectral Bandpass 297 nm)')
#        plt.xlabel('Spectral Pixel Index')
#        plt.ylabel('Counts (DN)')
#        #plt.xlim(655, 675)
#        #plt.yscale('log')
#
#        plt.show()
#        cc

    save_bandpass = save_dir + '/' + 'bandpass_radiance_second_iteration.csv'
    save_bandpass_norm = save_dir + '/' + 'bandpass_radiance_normalized_second_iteration.csv'
    norm_factor = save_dir + '/' + 'integrated_radiance_second_iteration.csv'

    np.savetxt(save_bandpass, np.array(bandpass_val), delimiter=",")
    np.savetxt(save_bandpass_norm, np.array(bandpass_val_norm), delimiter=",")
    np.savetxt(norm_factor, np.array(normalization_factor_all), delimiter=",")
def main():
    """
    Driver for the TEMPO spectral co-registration analysis.

    For each raw IDL ``.sav`` image in the data directory this function:

    1. Parses the matching telemetry (.h5) file for co-adds, integration
       time and FPE/FPA temperatures, substituting nominal values when the
       telemetry file is missing.
    2. Runs the image-processing chain: offset (bias) subtraction, optional
       non-linearity correction, smear removal, cross-talk removal and
       PRNU/co-add normalization.
    3. Fits a Gaussian around the expected spectral column to locate the
       peak for that image's laser wavelength.
    4. Writes all fitted peak locations to ``peak_loc_all.csv``.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Spectral_Co_registration'
    image_dir = os.path.join(data_dir, 'saved_quads')
    save_dir = os.path.join(image_dir, 'processed_image')
    image_save_dir = os.path.join(image_dir, 'saved_plots')
    telemetry_dir = os.path.join(data_dir, 'processed/h5')
    linearity = 0  # 1 -> apply non-linearity correction
    smear = 1  # 1 -> remove the smear offset
    cross_talk = 1  # 1 -> remove inter-quad cross talk
    dark_current = 0  # 1 -> measure and subtract dark current
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    data_path_all = sorted(
        [each for each in os.listdir(image_dir) if each.endswith('.sav')])
    print('Total data = ', len(data_path_all))

    if dark_current:
        dark_current = calculate_dark_current(image_dir, coadds=1)
    else:
        dark_current = 0

    # Laser wavelength and expected spectral column for each image. These
    # tables do not change between iterations, so read them once up front.
    wavelen_all = np.loadtxt(os.path.join(image_dir, 'Wavelength.csv'),
                             delimiter=',')
    spec_index_all = np.loadtxt(os.path.join(image_dir, 'Spectral_column.csv'),
                                delimiter=',')

    # 'count' indexes into the wavelength/spectral-column tables and must
    # start at the same offset used to slice data_path_all below.
    count = 8
    peak_loc_all = []
    for data_path in data_path_all[8:]:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)
        telemetry_file_name = data_path.split('.')[0]

        #################### Telemetry Statistics ###########################
        # Parse the telemetry file for the acquisition parameters. The
        # FPE/FPA temperatures are only used for plot annotation.
        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
            print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
                  round(fpa_temp, 2))
        else:
            # Nominal values for this test campaign when telemetry is absent.
            print('Telemetry file missing')
            coadds = 10
            int_time = 6000
            fpe_temp = 43
            fpa_temp = -21

        ############## Image Processing begins here #########################
        full_frame = read_idl_file(data_file)
        quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

        ######################### OFFSET REMOVAL ############################
        # Input : full-frame TEMPO image.
        # Output: bias-subtracted active region (1028 x 1024 per quad); two
        # pixel lines from the SMEAR overclocks hold storage-summation info.
        bias_removed_quads = perform_bias_subtraction(full_frame)

        # Raw-image plotting is currently disabled, but the output directory
        # is still created to keep the expected folder layout.
        raw_image_save = os.path.join(image_save_dir, 'raw_image')
        if not os.path.exists(raw_image_save):
            os.makedirs(raw_image_save)

        #################### NON-LINEARITY CORRECTION #######################
        # Input : bias-subtracted active-region quads.
        # Output: linearized active-region quads.
        if linearity:
            linearized_a = apply_linearity_correction(
                bias_removed_quads[0, :, :], quads[0], coadds)
            linearized_b = apply_linearity_correction(
                bias_removed_quads[1, :, :], quads[1], coadds)
            linearized_c = apply_linearity_correction(
                bias_removed_quads[2, :, :], quads[2], coadds)
            linearized_d = apply_linearity_correction(
                bias_removed_quads[3, :, :], quads[3], coadds)
            linearized_quads = np.array(
                [linearized_a, linearized_b, linearized_c, linearized_d])
        else:
            linearized_quads = bias_removed_quads

        # Outlier mask: one mask per quad. All arrays downstream of this
        # point are masked arrays.
        outlier_mask = read_outlier_mask()

        ######################### SMEAR REMOVAL #############################
        if smear:
            smear_removed_quads = perform_smear_removal(
                linearized_quads, int_time, outlier_mask)
        else:
            smear_removed_quads = linearized_quads

        ####################### CROSS-TALK REMOVAL ##########################
        if cross_talk:
            cross_talk_removed_quads = remove_cross_talk(
                np.array(smear_removed_quads))
        else:
            cross_talk_removed_quads = smear_removed_quads

        processed_image = create_final_image(
            np.array(cross_talk_removed_quads))
        processed_image = processed_image - dark_current

        # PRNU correction map, clamped to roughly +/-3% to tame outliers.
        prnu_map = read_prnu_files()
        prnu_spectrometer = create_final_image(np.array(prnu_map))
        prnu_spectrometer[prnu_spectrometer > 1.03] = 1.02
        prnu_spectrometer[prnu_spectrometer < 0.97] = 0.98

        # Normalize by co-adds and the PRNU map.
        processed_image = processed_image / (coadds * prnu_spectrometer)

        wavelen = wavelen_all[count]
        spec_pix = int(spec_index_all[count])

        # Plot annotation. Fix: the original labeled 'Int. time' with the
        # co-add count; it is the integration time that belongs here.
        text1 = telemetry_file_name + '.img' + ' (Laser WL:' + str(wavelen) + \
                ' nm)\n Int. time:' + str(round(int_time, 1)) + \
                'ms, Co-adds:' + str(int(coadds)) + \
                ', FPE temp:' + str(round(fpe_temp, 1)) + 'C, ' + \
                ' FPA temp: ' + str(round(fpa_temp, 1)) + 'C'
        processed_image_save = os.path.join(image_save_dir, 'processed_image')
        if not os.path.exists(processed_image_save):
            os.makedirs(processed_image_save)
        plot_save_dir = processed_image_save + '/' + data_path + '.png'
        create_image(processed_image, text1, plot_save_dir)

        # Fit a Gaussian around the expected spectral column to locate the
        # peak position for this laser wavelength.
        peak_loc = fit_gaussian_func(processed_image, wavelen, spec_pix)
        peak_loc_all.append(peak_loc)
        count = count + 1

    final_data = os.path.join(image_dir, 'peak_loc_all.csv')
    np.savetxt(final_data, np.array(peak_loc_all), delimiter=",")
def main():
    """
    Driver for the TEMPO photon-transfer (TVAC) analysis.

    Processes the selected raw IDL ``.sav`` image(s): telemetry lookup,
    offset (bias) subtraction, optional non-linearity correction, smear
    removal, cross-talk removal and PRNU/co-add normalization. The
    processed frames are stacked so the per-pixel temporal variance and
    median signal can be computed for the photon-transfer curve.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Photon_Transfer_TVAC'
    telemetry_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\2018.06.28'
    image_dir = os.path.join(data_dir, 'saved_quads')
    save_dir = os.path.join(image_dir, 'processed_image')
    image_save_dir = os.path.join(image_dir, 'saved_plots')
    telemetry_dir = os.path.join(telemetry_dir, 'processed/h5')
    linearity = 0  # 1 -> apply non-linearity correction
    smear = 1  # 1 -> remove the smear offset
    cross_talk = 1  # 1 -> remove inter-quad cross talk
    all_image = []  # stack of processed frames for the statistics below
    dark_current = 0  # 1 -> measure and subtract dark current
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Restrict processing to the single acquisition of interest.
    data_path_all = sorted([
        each for each in os.listdir(image_dir)
        if each.endswith('2018_06_28_20_41_18_042102.img.sav')
    ])
    print('Total data = ', len(data_path_all))

    if dark_current:
        dark_current = calculate_dark_current(image_dir, coadds=1)
    else:
        dark_current = 0

    for data_path in data_path_all:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)
        telemetry_file_name = data_path.split('.')[0]

        #################### Telemetry Statistics ###########################
        # Parse the telemetry file for the acquisition parameters; fall back
        # to nominal values when the file is missing.
        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
            print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
                  round(fpa_temp, 2))
            print('Integ. Time =', int_time)
        else:
            print('Telemetry file missing')
            coadds = 10
            int_time = 6000
            fpe_temp = 43
            fpa_temp = -21.5

        ############## Image Processing begins here #########################
        full_frame = read_idl_file(data_file)
        quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

        ######################### OFFSET REMOVAL ############################
        # Input : full-frame TEMPO image.
        # Output: bias-subtracted active region (1028 x 1024 per quad).
        bias_removed_quads = perform_bias_subtraction(full_frame)

        # Raw-image plotting is currently disabled, but the output directory
        # is still created to keep the expected folder layout.
        raw_image_save = os.path.join(image_save_dir, 'raw_image')
        if not os.path.exists(raw_image_save):
            os.makedirs(raw_image_save)

        #################### NON-LINEARITY CORRECTION #######################
        if linearity:
            linearized_a = apply_linearity_correction(
                bias_removed_quads[0, :, :], quads[0], coadds)
            linearized_b = apply_linearity_correction(
                bias_removed_quads[1, :, :], quads[1], coadds)
            linearized_c = apply_linearity_correction(
                bias_removed_quads[2, :, :], quads[2], coadds)
            linearized_d = apply_linearity_correction(
                bias_removed_quads[3, :, :], quads[3], coadds)
            linearized_quads = np.array(
                [linearized_a, linearized_b, linearized_c, linearized_d])
        else:
            linearized_quads = bias_removed_quads

        # Outlier mask: one mask per quad. All arrays downstream of this
        # point are masked arrays.
        outlier_mask = read_outlier_mask()

        ######################### SMEAR REMOVAL #############################
        if smear:
            smear_removed_quads = perform_smear_removal(
                linearized_quads, int_time, outlier_mask)
        else:
            smear_removed_quads = linearized_quads

        ####################### CROSS-TALK REMOVAL ##########################
        if cross_talk:
            cross_talk_removed_quads = remove_cross_talk(
                np.array(smear_removed_quads))
        else:
            cross_talk_removed_quads = smear_removed_quads

        processed_image = create_final_image(
            np.array(cross_talk_removed_quads))
        processed_image = processed_image - dark_current

        # PRNU correction map, clamped to roughly +/-3% to tame outliers.
        prnu_map = read_prnu_files()
        prnu_spectrometer = create_final_image(np.array(prnu_map))
        prnu_spectrometer[prnu_spectrometer > 1.03] = 1.02
        prnu_spectrometer[prnu_spectrometer < 0.97] = 0.98

        # Normalize by co-adds and the PRNU map, then force near-saturated
        # pixels to full scale (14-bit ADC: 16383).
        processed_image = processed_image / (coadds * prnu_spectrometer)
        processed_image[processed_image >= 0.85 * 16383] = 16383

        # Processed-image plotting is disabled; keep the directory layout.
        processed_image_save = os.path.join(image_save_dir, 'processed_image')
        if not os.path.exists(processed_image_save):
            os.makedirs(processed_image_save)

        all_image.append(processed_image)

    # Per-pixel temporal variance and median signal across the stack.
    # NOTE(review): these results are computed but never saved or returned;
    # confirm whether they should be written to disk.
    var_all = np.var(all_image, axis=0)
    signal_all = np.median(all_image, axis=0)
# Example #15
# 0
def main():
    """
    Driver for the TEMPO read-noise analysis.

    Processes every raw IDL ``.sav`` dark frame: telemetry lookup, offset
    (bias) subtraction, optional non-linearity correction, smear removal
    and cross-talk removal. Saturated pixels are zeroed, the frames are
    stacked, and the per-pixel temporal variance over the stack is taken
    as the read-noise estimate, which is then imaged and histogrammed.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Read_Noise'
    plot_dir = os.path.join(data_dir, 'Read_Noise_image')
    image_dir = os.path.join(data_dir, 'saved_quads')
    image_save_dir = os.path.join(image_dir, 'saved_plots')
    telemetry_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\2018.06.28'
    telemetry_dir = os.path.join(telemetry_dir, 'processed/h5')
    linearity = 0  # 1 -> apply non-linearity correction
    smear = 1  # 1 -> remove the smear offset
    cross_talk = 1  # 1 -> remove inter-quad cross talk
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)

    data_path_all = sorted(
        [each for each in os.listdir(image_dir) if each.endswith('.sav')])
    print('Total data = ', len(data_path_all))

    all_dark_current = []
    for data_path in data_path_all:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)
        telemetry_file_name = data_path.split('.')[0]

        #################### Telemetry Statistics ###########################
        # Parse the telemetry file for the acquisition parameters; fall back
        # to nominal values when the file is missing.
        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
            print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
                  round(fpa_temp, 2))
            print(int_time)
            print(coadds)
        else:
            print('Telemetry file missing')
            coadds = 10
            int_time = 160
            fpe_temp = 35.8
            fpa_temp = 11.9

        ############## Image Processing begins here #########################
        full_frame = read_idl_file(data_file)
        quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

        ######################### OFFSET REMOVAL ############################
        # Input : full-frame TEMPO image.
        # Output: bias-subtracted active region (1028 x 1024 per quad).
        bias_removed_quads = perform_bias_subtraction(full_frame)

        # Raw-image plotting is currently disabled, but the output directory
        # is still created to keep the expected folder layout.
        raw_image_save = os.path.join(image_save_dir, 'raw_image')
        if not os.path.exists(raw_image_save):
            os.makedirs(raw_image_save)

        #################### NON-LINEARITY CORRECTION #######################
        if linearity:
            linearized_a = apply_linearity_correction(
                bias_removed_quads[0, :, :], quads[0], coadds)
            linearized_b = apply_linearity_correction(
                bias_removed_quads[1, :, :], quads[1], coadds)
            linearized_c = apply_linearity_correction(
                bias_removed_quads[2, :, :], quads[2], coadds)
            linearized_d = apply_linearity_correction(
                bias_removed_quads[3, :, :], quads[3], coadds)
            linearized_quads = np.array(
                [linearized_a, linearized_b, linearized_c, linearized_d])
        else:
            linearized_quads = bias_removed_quads

        # Outlier mask: one mask per quad. All arrays downstream of this
        # point are masked arrays.
        outlier_mask = read_outlier_mask()

        ######################### SMEAR REMOVAL #############################
        if smear:
            smear_removed_quads = perform_smear_removal(
                linearized_quads, int_time, outlier_mask)
        else:
            smear_removed_quads = linearized_quads

        ####################### CROSS-TALK REMOVAL ##########################
        if cross_talk:
            cross_talk_removed_quads = remove_cross_talk(
                np.array(smear_removed_quads))
        else:
            cross_talk_removed_quads = smear_removed_quads

        processed_image = create_final_image(
            np.array(cross_talk_removed_quads))

        # Zero out (near-)saturated pixels (14-bit ADC full scale 16383) so
        # they do not inflate the temporal-variance estimate below.
        processed_image[processed_image >= 0.8 * 16383] = 0
        all_dark_current.append(processed_image)

        # Plot annotation; the values from the final iteration are reused
        # for the read-noise image after the loop.
        text1 = telemetry_file_name + '.img' + ', UV CCD \n' + \
                '(Int. time:' + str(round(int_time, 2)) + \
                'ms, Co-adds:' + str(int(coadds)) + \
                ', FPE temp:' + str(round(fpe_temp, 2)) + 'C, ' + \
                ' FPA temp: ' + str(round(fpa_temp, 2)) + 'C)'
        processed_image_save = os.path.join(image_save_dir, 'processed_image')
        if not os.path.exists(processed_image_save):
            os.makedirs(processed_image_save)
        plot_save_dir = plot_dir + '/' + data_path + '.png'

    all_dark_current = np.array(all_dark_current)
    # np.savetxt only accepts 1-D or 2-D input; a stack of 2-D frames is
    # 3-D and would raise, so flatten each frame to one CSV row.
    Dark_Current_name = plot_dir + '/' + 'read_noise.csv'
    np.savetxt(Dark_Current_name,
               all_dark_current.reshape(all_dark_current.shape[0], -1),
               delimiter=",")

    # Per-pixel temporal variance across the stack = read-noise estimate.
    read_Noise = np.var(all_dark_current, axis=0)
    create_image(read_Noise, text1, plot_save_dir)

    read_Noise = filter_outlier_median(read_Noise)
    label2 = 'mean variance = ' + str(round(np.mean(read_Noise), 3))
    plt.figure(figsize=(10, 8))
    # 'density=False' replaces the 'normed=0' argument removed from
    # matplotlib's hist() in version 3.1.
    plt.hist(read_Noise, 60, density=False, facecolor='red', alpha=1,
             label=label2)
    plt.title(
        'Histogram of variance of processed Laser Image\n Int., time = 0, Num. images= 100',
        fontsize=14,
        fontweight="bold")
    plt.grid(True, linestyle=':')
    plt.xlabel('Temporal Noise (Variance)', fontsize=14, fontweight="bold")
    plt.ylabel('Frequency', fontsize=14, fontweight="bold")
    plt.legend(loc='best')
    ax = plt.gca()
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
    # Save before plt.show(): with a blocking backend, saving after the
    # window is closed writes a blank PNG.
    plt.savefig(plot_dir + '/' + '_Read_Noise.png', dpi=100)
    plt.show()
    plt.close('all')

    print('DONE!!')
def main():
    """
    Process raw TEMPO spectrometer images for the 488.2 nm spectral
    band-pass measurement.

    Raw images are stored in IDL .sav format.  For each image the script:
      1. parses the matching telemetry (.h5) file for co-adds, integration
         time and FPE/FPA temperatures (nominal defaults if missing),
      2. performs offset (bias) subtraction on the active region,
      3. optionally applies linearity, smear and cross-talk corrections,
      4. subtracts dark current and divides out the PRNU map,
      5. extracts the band-pass profile at the illuminated row (row 992
         for 488.2 nm), normalized by the integrated signal, and
      6. writes the profiles and the integrated radiance to CSV files in
         the processed-image directory.
    """
    data_dir = r'C:\Users\nmishra\Workspace\TEMPO_Spectrometer\Spectral_Band_pass\488.2_nm'
    image_dir = os.path.join(data_dir, 'saved_quads')
    save_dir = os.path.join(image_dir, 'processed_image')
    image_save_dir = os.path.join(image_dir, 'saved_plots')
    telemetry_dir = os.path.join(data_dir, 'processed/h5')
    linearity = 0   # option to turn on/off linearity correction
    smear = 1       # option to turn on/off smear removal
    cross_talk = 1  # option to turn on/off cross-talk removal
    bandpass_val = []              # raw band-pass profiles (currently not collected)
    bandpass_val_norm = []         # profiles normalized by integrated signal
    normalization_factor_all = []  # integrated radiance per image
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    data_path_all = sorted(
        [each for each in os.listdir(image_dir) if each.endswith('.sav')])
    print('Total data = ', len(data_path_all))

    # Record the list of input files for traceability.  A context manager
    # guarantees the file is closed even if a write fails.
    with open(os.path.join(save_dir, 'file_name_all.csv'), 'w') as result_file:
        for results in data_path_all:
            result_file.write(results + "\n")

    dark_current = calculate_dark_current(image_dir, coadds=15)

    for data_path in data_path_all:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)

        telemetry_file_name = data_path.split('.')[0]
        #################### Telemetry Statistics ########################
        # Parse the telemetry file for the acquisition parameters.  The
        # FPE/FPA temperatures are informational only; the processing
        # itself needs co-adds and integration time.
        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
            print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
                  round(fpa_temp, 2))
        else:
            # Nominal fallback values when the telemetry file is missing.
            print('Telemetry file missing')
            coadds = 15
            int_time = 90
            fpe_temp = 42.98
            fpa_temp = -21

        ############## Image Processing begins here ######################
        full_frame = read_idl_file(data_file)
        check_image = create_image_active_region(full_frame)  # kept for optional diagnostics
        raw_image = create_final_image(np.array(full_frame))  # kept for optional diagnostics
        quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

        ######################## OFFSET REMOVAL ##########################
        # Input : full-frame TEMPO image
        # Output: bias-subtracted active region (1028 x 1024 per quad;
        # 2 pixel lines from the smear overclocks store storage-summation
        # information).  Dark data needs only offset removal; light data
        # also needs linearity, smear and cross-talk corrections.
        bias_removed_quads = perform_bias_subtraction(full_frame)

        text1 = telemetry_file_name+'.img' +' (Raw Data)\n Int. time:' + str(round(int_time, 1))+ \
                           'ms, Co-adds:' +str(int(coadds))+\
                           ', FPE temp:'+ str(round(fpe_temp, 1))+'C, ' + \
                           ' FPA temp: ' + str(round(fpa_temp, 1))+'C'
        # Directory and path for (optional) raw-image plots.
        raw_image_save = os.path.join(image_save_dir, 'raw_image')
        if not os.path.exists(raw_image_save):
            os.makedirs(raw_image_save)
        plot_save_dir = raw_image_save + '/' + data_path + '.png'

        ################### NON-LINEARITY CORRECTION #####################
        # Input : bias-subtracted active regions
        # Output: linearized active-region quads
        if linearity:
            linearized_a = apply_linearity_correction(
                bias_removed_quads[0, :, :], quads[0], coadds)
            linearized_b = apply_linearity_correction(
                bias_removed_quads[1, :, :], quads[1], coadds)
            linearized_c = apply_linearity_correction(
                bias_removed_quads[2, :, :], quads[2], coadds)
            linearized_d = apply_linearity_correction(
                bias_removed_quads[3, :, :], quads[3], coadds)
            linearized_quads = np.array(
                [linearized_a, linearized_b, linearized_c, linearized_d])
        else:
            linearized_quads = bias_removed_quads

        # The outlier mask is an array of the 4 quads; all arrays after
        # this step are masked arrays.
        outlier_mask = read_outlier_mask()

        ######################## SMEAR REMOVAL ###########################
        # Input : linearized quads (all quads together)
        # Output: smear-offset-corrected quads
        if smear:
            smear_removed_quads = perform_smear_removal(
                linearized_quads, int_time, outlier_mask)
        else:
            smear_removed_quads = linearized_quads

        ###################### CROSS-TALK REMOVAL ########################
        # Input : smear-removed quads (all quads together)
        # Output: cross-talk-removed quads
        if cross_talk:
            cross_talk_removed_quads = remove_cross_talk(
                np.array(smear_removed_quads))
        else:
            cross_talk_removed_quads = smear_removed_quads

        processed_image = create_final_image(
            np.array(cross_talk_removed_quads))
        # Dark current was measured at 15 co-adds; scale it per co-add.
        processed_image = processed_image - (dark_current / coadds)
        prnu_map = parse_prnu_file()  # BATC map (LaRC map: read_prnu_files)
        prnu_spectrometer = prnu_map

        outlier_spectrometer = create_final_image(np.array(outlier_mask))
        nx_quad, ny_quad = processed_image.shape
        outlier_spectrometer = np.reshape(outlier_spectrometer,
                                          (nx_quad * ny_quad, 1))
        outlier_detectors = np.array(np.where([outlier_spectrometer == 1]))
        outlier_spectrometer = np.reshape(outlier_spectrometer,
                                          (nx_quad, ny_quad))
        # Normalize per co-add and flat-field with the PRNU map.
        processed_image = processed_image / (coadds * prnu_spectrometer)

        # Clamp near-saturated pixels to the 14-bit full-scale value so
        # they can be excluded from the normalization below.
        processed_image[processed_image >= 0.85 * 16383] = 16383
        # NOTE(fix): label corrected from '675 nm' to '488.2 nm' to match
        # this data set, and round(fpa_temp, ) -> round(fpa_temp, 1) for
        # consistency with the FPE-temperature formatting above.
        text1 = telemetry_file_name+'.img' +' (Spectral Bandpass 488.2 nm)\n Int. time:' + str(round(int_time, 1))+ \
                           'ms, Co-adds:' +str(int(coadds))+\
                           ', FPE temp:'+ str(round(fpe_temp, 1))+'C, ' + \
                           ' FPA temp: ' + str(round(fpa_temp, 1))+'C'
        processed_image_save = os.path.join(image_save_dir, 'processed_image')
        if not os.path.exists(processed_image_save):
            os.makedirs(processed_image_save)
        plot_save_dir = processed_image_save + '/' + data_path + '.png'

        # Illuminated-row index for each laser wavelength (spectral axis):
        #   297.8 nm -> 29,   310 nm -> 91,   320 nm -> 142, 330 nm -> 192,
        #   355 nm  -> 319,   390 nm -> 496,  425 nm -> 673, 460 nm -> 849,
        #   605 nm  -> 1369,  640 nm -> 1546, 675 nm -> 1724, 736 nm -> 2032
        max_row = 992  # for 488.2 nm

        # Integrate the signal over a 22-row window centred on the peak,
        # excluding saturated (clamped) pixels, to get the normalization.
        subset_image = processed_image[max_row - 11:max_row + 11, :]
        normalization_factor = np.sum(subset_image[subset_image < 16383])
        normalization_factor_all.append(normalization_factor)
        bandpass_val_norm.append(
            np.true_divide(processed_image[max_row, :], normalization_factor))

    ################## Save the band-pass products #######################
    # bandpass_val is currently left empty (raw profiles are not
    # collected), so its CSV will be empty as well.
    save_bandpass = save_dir + '/' + 'bandpass_radiance.csv'
    save_bandpass_norm = save_dir + '/' + 'bandpass_radiance_normalized.csv'
    norm_factor = save_dir + '/' + 'integrated_radiance.csv'

    np.savetxt(save_bandpass, np.array(bandpass_val), delimiter=",")
    np.savetxt(save_bandpass_norm, np.array(bandpass_val_norm), delimiter=",")
    np.savetxt(norm_factor, np.array(normalization_factor_all), delimiter=",")
    # NOTE(fix): removed the stray 'cc' debug sentinel that raised a
    # NameError here after the CSV files had been written.
# Example #17
# 0
def main():
    """
    This is the main function that does all the processing. It calls the required
    functions to process the raw image. Raw images are saved in .sav (IDL)
    format and read through this script.
    """
    data_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\Photon_Transfer_TVAC'
    telemetry_dir = r'F:\TEMPO\Data\GroundTest\FPS\Spectrometer\2018.06.28'
    image_dir = os.path.join(data_dir, r'saved_quads')
    save_dir = os.path.join(image_dir, 'processed_image')
    image_save_dir = os.path.join(image_dir, 'saved_plots_straylight')
    telemetry_dir = os.path.join(telemetry_dir, 'processed/h5')
    linearity = 0  # option to turn on/off linearity correction
    smear = 1
    cross_talk = 1
    #    all_image = []
    #    var_all = []
    #    signal_all = []
    #dark_current = 0
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)


#    data_path_all = sorted([each for each in os.listdir(image_dir)
#                            if each.startswith('2017_07_30_00_24_59_38064')
#                            and  each.endswith('.sav')])
#
    data_path_all = sorted(
        [each for each in os.listdir(image_dir) if each.endswith('.sav')])

    #    #dark_data = data_path_all[1:len(data_path_all):2]

    print('Total data = ', len(data_path_all))
    #    resultFile = open (os.path.join(save_dir,'file_name_all.csv'),'w')
    #
    #    for results in data_path_all[0:15]:
    #         resultFile.write(results+"\n")
    #    resultFile.close()
    #count = 0
    #peak_loc_all = []
    for data_path in data_path_all:
        data_file = os.path.join(image_dir, data_path)
        print(data_path)

        #        cc
        telemetry_file_name = data_path.split('.')[0]
        #################### Telemetry Statistics###############################
        # Now lets parse the telemetry file to get the required information###
        #Note; The function can return the CCD temp and FPE temp. But these are
        #not needed on the image processing algorithm.. atleast for the time being

        telemetry_file = os.path.join(telemetry_dir,
                                      telemetry_file_name + '.h5')
        if os.path.exists(telemetry_file):
            coadds, int_time, fpe_temp, fpa_temp = parse_telemetry_file(
                telemetry_dir, telemetry_file_name)
        # print('FPE Temp. = ', round(fpe_temp, 2), 'FPA Temp = ',
        # round(fpa_temp, 2))
        #print('Integ. Time =' , int_time)
        #print(coadds)

        else:
            #print('Telemetry file missing')
            coadds = 10
            int_time = 6000
            fpe_temp = 43
            fpa_temp = -21.5

    ##############Image Processing begins here##############################

    #print(coadds)

        full_frame = read_idl_file(data_file)

        full_frame = perform_coaddition_correction(full_frame, coadds)

        #full_frame = full_frame/coadds
        #print(np.max(full_frame))
        #cc
        #check_image = create_image_active_region(full_frame)
        #raw_image = create_final_image(np.array(full_frame))
        # print('Max. Val. Raw = ', np.max(raw_image))
        quads = ['Quad A', 'Quad B', 'Quad C', 'Quad D']

        ##########################OFFSET REMOVAL###############################
        # Input : Full_Frame TEMPO IMAGE
        # Otput : Bias Subtracted Active Region. The active region dimensions are
        #now 1028*1024. 2 pixel lines from SMEAR overclocks are now used for
        #to store storage summation information
        # For dark data, only offset removal is needed to compute the dark current
        # For light data, additional processing such as linearity, smear and
        #cross talk is needed

        bias_removed_quads = perform_bias_subtraction(full_frame)

        #print('Max. Val. Offset Removed  = ', np.max(bias_removed_quads))

        #create_image(raw_image/coadds, text1, plot_save_dir)
        #prnu_spectrometer[prnu_spectrometer < 0.9] = 0.9
        #prnu_spectrometer[prnu_spectrometer > 1.2] = 1.2
        #print(np.min(check_image))
        #create_image(check_image, text1, plot_save_dir)
        #---------------------------------------------------------------------

        #########################NON LINEARITY CORRECTION#######################
        # Input : Bias Subtracted Active regions
        # Output : Linearized Active Region Quads
        # pass the difference instead of coadds
        if linearity:
            linearized_a = apply_linearity_correction(
                bias_removed_quads[0, :, :], quads[0], coadds)
            linearized_b = apply_linearity_correction(
                bias_removed_quads[1, :, :], quads[1], coadds)
            linearized_c = apply_linearity_correction(
                bias_removed_quads[2, :, :], quads[2], coadds)
            linearized_d = apply_linearity_correction(
                bias_removed_quads[3, :, :], quads[3], coadds)
            linearized_quads = np.array(
                [linearized_a, linearized_b, linearized_c, linearized_d])
            #print(np.mean(linearized_quads))
        else:
            linearized_quads = bias_removed_quads
            #----------------------------------------------------------------------
            ##########################SMEAR REMOVAL################################
            # Input : linearized quads ( all quads together)
            # Output : SMEAR offset corrected Quad

        #### lets' create the masked array with outlier mask################
        # The outlier mask is array of list of 4 quads.
        #print('Max. Val. Linearized  = ', np.max(linearized_quads))

        if smear:
            smear_removed_quads = perform_smear_removal(
                linearized_quads, int_time)

        else:
            smear_removed_quads = linearized_quads
            #----------------------------------------------------------------------

            ##########################CROSS-TALK REMOVAL###########################
            # Input : smear removed quads (all quads together)
            # Output : cross talk removed quads
        #print('Max. Val. Smeared  = ', np.max(smear_removed_quads))

        if cross_talk:
            cross_talk_removed_quads = remove_cross_talk(
                np.array(smear_removed_quads))
        else:
            cross_talk_removed_quads = smear_removed_quads
            #----------------------------------------------------------------------

        processed_image = create_final_image(
            np.array(cross_talk_removed_quads))
        straylight_image = processed_image[:, 0:2]
        straylight_data = np.mean(straylight_image, axis=1)

        # Gain is 17.2
        processed_file_name = image_save_dir + '/' + data_path + '.csv'
        np.savetxt(processed_file_name,
                   straylight_data,
                   fmt='%1.2f',
                   delimiter=",")