def find_and_check_files_in_current_folder(source_directory, extension):
    """Search the specified directory for files with the given extension and
    verify that they are mutually consistent.

    If more than one file is found, all files (except possibly the last) must
    have the same size, and — for '.jds' / '.adr' files — the same header
    parameters. If the files differ, the user is asked interactively whether
    to continue; answering anything but "1" terminates the script.

    Args:
        source_directory: path to the folder to search (string).
        extension: file extension to look for, e.g. '.jds' or '.adr'.

    Returns:
        The list of file names found (as returned by
        find_files_only_in_current_folder).
    """
    fileList = find_files_only_in_current_folder(source_directory, extension, 1)
    print('')
    if len(fileList) > 1:  # Check parameters only when there is more than one file

        # Check if all files (except the last) have same size
        same_or_not = check_if_all_files_of_same_size(source_directory, fileList, 1)

        # Check if all files in this folder have the same parameters in headers.
        # Bug fix: previously 'equal_or_not' was left unbound for any extension
        # other than '.jds'/'.adr', causing a NameError below; now it defaults
        # to False so the user is asked explicitly in that case.
        if extension == '.jds':
            equal_or_not = check_if_JDS_files_of_equal_parameters(source_directory, fileList)
        elif extension == '.adr':
            equal_or_not = check_if_ADR_files_of_equal_parameters(source_directory, fileList)
        else:
            equal_or_not = False

        if same_or_not and equal_or_not:
            # print('\n\n\n     :-) All files seem to be of the same parameters! :-) \n\n\n')
            decision = 1
        else:
            print('\n\n\n ************************************************************************************* \n * *')
            print(' * Seems files in folders are different check the errors and restart the script! *')
            print(' * * '
                  '\n ************************************************************************************* \n\n\n')
            decision = int(input('* Enter "1" to start processing, or "0" to stop the script: '))
        if decision != 1:
            sys.exit('\n\n\n *** Program stopped! *** \n\n\n')

    return fileList
# *** Creating a TXT logfile *** Log_File = open(result_path + '/Service/Log.txt', "w") Log_File.write( '\n\n *********************************************************\n') Log_File.write(' * {0} v.{1} LOG * (c) YeS 2018 \n'.format( Software_name, Software_version)) Log_File.write( ' *********************************************************\n\n') Log_File.write(' Date of data processing: %s \n' % current_date) Log_File.write(' Time of data processing: %s \n\n' % current_time) # *** Search ADR files in the directory *** fileList = find_files_only_in_current_folder(directory, '.adr', 1) # Main loop by files for file_no in range(len(fileList)): print('\n\n\n * File ', str(file_no + 1), ' of', str(len(fileList))) print(' * File path: ', str(fileList[file_no])) Log_File = open(result_path + '/Service/Log.txt', "a") Log_File.write('\n\n\n * File ' + str(file_no + 1) + ' of %s \n' % str(len(fileList))) Log_File.write(' * File path: %s \n\n\n' % str(fileList[file_no])) # ********************************************************************************* # *** Opening datafile *** fname = '' if len(fname) < 1:
freq_list_SygA_CIm = [] ampl_list_CasA_CIm = [] ampl_list_SygA_CIm = [] file_text = [] date_of_experiment_spectra = '' for source in ['SygA', 'CasA']: if source == 'SygA': path_to_data = path_to_data_SygA if source == 'CasA': path_to_data = path_to_data_CasA # Finding TXT files with interferometric responces in the folder file_name_list = find_files_only_in_current_folder(path_to_data, '.txt', 0) # Reading description of the observation from INFO file info_txt_file = find_files_only_in_current_folder(path_to_data, '.info', 0) TXT_file = open(path_to_data + info_txt_file[0], "r") for line in TXT_file: # Loop by all lines in file file_text.append(line) if line.startswith(" Description:"): # Searching comments words_in_line = line.split() if source == 'SygA': description_SygA = words_in_line[1] # reading description of data file if source == 'CasA': description_CasA = words_in_line[1] # reading description of data file if line.startswith(" Culmination"): # Searching comments words_in_line = line.split() if source == 'SygA': date_of_experiment_spectra = words_in_line[2]
for i in range(len(file_path_list)): file_path_list[i] = file_path_list[i][:-len(file_name_list[i])] list_of_folder_names = find_unique_strings_in_list(file_path_list) print('\n Number of ADR files found: ', len(file_name_list)) print('\n List of folders to be analyzed: \n') for i in range(len(list_of_folder_names)): print(' ', i + 1, ') ', list_of_folder_names[i]) # Take only one folder, find all files num_of_folders = len(list_of_folder_names) same_or_not = np.zeros(num_of_folders) equal_or_not = np.zeros(num_of_folders) for folder_no in range(num_of_folders): file_name_list_current = find_files_only_in_current_folder( list_of_folder_names[folder_no], '.adr', 0) print('\n\n\n\n * Folder ', folder_no + 1, ' of ', num_of_folders, ', path: ', list_of_folder_names[folder_no], '\n **********************************************************') for i in range(len(file_name_list_current)): print(' ', i + 1, ') ', file_name_list_current[i]) # Check if all files (except the last) have same size same_or_not[folder_no] = check_if_all_files_of_same_size( list_of_folder_names[folder_no], file_name_list_current, 1) # Check if all files in this folder have the same parameters in headers equal_or_not[folder_no] = check_if_ADR_files_of_equal_parameters( list_of_folder_names[folder_no], file_name_list_current) if int(np.sum((equal_or_not[:])) == num_of_folders) and (int(
startTime = time.time() currentTime = time.strftime("%H:%M:%S") currentDate = time.strftime("%d.%m.%Y") print (' Today is ', currentDate, ' time is ', currentTime) #******************************************************************************* # R E A D I N G D A T A * #******************************************************************************* # Search needed files in the directory and subdirectories if path_to_data[-5:-1].upper() == 'UTR2': file_name_list = find_files_only_in_current_folder(path_to_data, 'jds.txt', 1) elif path_to_data[-5:-1].upper() == 'GURT': file_name_list = find_files_only_in_current_folder(path_to_data, '.txt', 1) else: print (' ERROR! The data folder name has no name of the telescope in the end!!! \n\n') sys.exit(' Program stopped! \n\n') frequency = [] ratio_CRe = [] ratio_CIm = [] for file_no in range (len(file_name_list)): freq = [] val_01 = [] val_02 = []
# Take pulsar parameters from catalogue pulsar_ra, pulsar_dec, pulsar_dm, p_bar = catalogue_pulsar(pulsar_name) # Calculation of the maximal time shift for dispersion delay removing shift_vector = DM_full_shift_calc(8192, 16.5, 33.0, 2014 / pow(10, 6), 0.000496, pulsar_dm, 'jds') max_shift = np.abs(shift_vector[0]) print(' * Pulsar ', pulsar_name) print(' Period: ', p_bar, 's.') print(' Dispersion measure: {} pc・cm\u00b3'.format(pulsar_dm)) print(' Maximal shift of dynamic spectrum: ', max_shift, ' points') print(' or : ', np.round(max_shift * 16384/33000000, 1), ' seconds') # nfft/f_cl print(' or : ~', int(np.ceil((max_shift * 16384/33000000) / 16)), ' files in 2 ch 33 MHz mode\n\n') # Reading initial jds file list to save the list of files in the result folder file_list = find_files_only_in_current_folder(source_directory, '.jds', 0) # # # Start commenting lines here! # # # ''' if not only_extract_pulse: print('\n\n * Converting waveform from JDS to WF32 format... \n\n') initial_wf32_files = convert_jds_wf_to_wf32(source_directory, result_directory, no_of_bunches_per_file) print('\n List of WF32 files: ', initial_wf32_files, '\n')
def jds_wf_simple_reader(directory, no_of_spectra_to_average, skip_data_blocks, VminNorm, VmaxNorm,
                         colormap, custom_dpi, save_long_file_aver, dyn_spectr_save_init,
                         dyn_spectr_save_norm):
    """Read JDS waveform files from a folder, compute averaged power spectra
    and save dynamic-spectra figures and (optionally) long binary data files.

    Args:
        directory: path to the folder with '.jds' files; a trailing '/' is
            expected (the result folder name uses directory.split('/')[-2]).
        no_of_spectra_to_average: number of immediate spectra averaged into
            one dynamic-spectrum column.
        skip_data_blocks: number of data blocks to skip at the start of each file.
        VminNorm, VmaxNorm: color-scale limits for the normalized spectra plots.
        colormap: matplotlib colormap name passed to the plotting helpers.
        custom_dpi: DPI for saved figures.
        save_long_file_aver: 1 to append averaged spectra to long .dat files.
        dyn_spectr_save_init: 1 to save figures of the initial dynamic spectra.
        dyn_spectr_save_norm: 1 to save figures of normalized/cleaned spectra.

    Returns:
        List of the long data file names created ([chA] or [chA, chB]).

    NOTE(review): relies on module-level names (find_files_only_in_current_folder,
    FileHeaderReaderJDS, jds_waveform_time, plotting helpers, software_version);
    presumably imported at the top of the file — confirm there.
    """
    current_time = time.strftime("%H:%M:%S")
    current_date = time.strftime("%d.%m.%Y")

    # *** Creating a folder where all pictures and results will be stored (if it doesn't exist) ***
    result_folder = 'RESULTS_JDS_waveform_' + directory.split('/')[-2]
    if not os.path.exists(result_folder):
        os.makedirs(result_folder)
    service_folder = result_folder + '/Service'
    if not os.path.exists(service_folder):
        os.makedirs(service_folder)
    if dyn_spectr_save_init == 1:
        initial_spectra_folder = result_folder + '/Initial spectra'
        if not os.path.exists(initial_spectra_folder):
            os.makedirs(initial_spectra_folder)

    # *** Search JDS files in the directory ***
    file_list = find_files_only_in_current_folder(directory, '.jds', 1)
    print('')

    if len(file_list) > 1:  # Check if files have same parameters if there is more than one file in list
        # Check if all files (except the last) have same size
        same_or_not = check_if_all_files_of_same_size(directory, file_list, 1)
        # Check if all files in this folder have the same parameters in headers
        equal_or_not = check_if_JDS_files_of_equal_parameters(directory, file_list)
        if same_or_not and equal_or_not:
            print('\n\n\n :-) All files seem to be of the same parameters! :-) \n\n\n')
        else:
            print('\n\n\n ************************************************************************************* ')
            print(' * *')
            print(' * Seems files in folders are different check the errors and restart the script! *')
            print(' * * '
                  '\n ************************************************************************************* \n\n\n')
            decision = int(input('* Enter "1" to start processing, or "0" to stop the script: '))
            if decision != 1:
                sys.exit('\n\n\n *** Program stopped! *** \n\n\n')

    # To print in console the header of first file
    print('\n First file header parameters: \n')

    # *** Data file header read ***
    [df_filename, df_filesize, df_system_name, df_obs_place, df_description, CLCfrq,
     df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df,
     frequency, freq_points_num, data_block_size] = FileHeaderReaderJDS(directory + file_list[0], 0, 1)

    # Main loop by files start
    for file_no in range(len(file_list)):  # loop by files

        # *** Opening datafile ***
        fname = directory + file_list[file_no]

        # *********************************************************************************

        # *** Data file header read ***
        [df_filename, df_filesize, df_system_name, df_obs_place, df_description, CLCfrq,
         df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df,
         frequency, freq_points_num, data_block_size] = FileHeaderReaderJDS(fname, 0, 0)

        # Create long data files and copy first data file header to them
        if file_no == 0 and save_long_file_aver == 1:
            with open(fname, 'rb') as file:
                # *** Data file header read *** (first 1024 bytes copied verbatim)
                file_header = file.read(1024)

            # *** Creating a name for long timeline TXT file ***
            tl_file_name = df_filename + '_Timeline.txt'
            tl_file = open(tl_file_name, 'w')  # Open and close to delete the file with the same name
            tl_file.close()

            # *** Creating a binary file with data for long data storage ***
            # Selected header fields are patched at fixed byte offsets so the
            # long file describes the averaged (not raw) data.
            file_data_a_name = df_filename + '_Data_chA.dat'
            file_data_a = open(file_data_a_name, 'wb')
            file_data_a.write(file_header)
            file_data_a.seek(574)  # FFT size place in header
            file_data_a.write(np.int32(data_block_size).tobytes())
            file_data_a.seek(624)  # Lb place in header
            file_data_a.write(np.int32(0).tobytes())
            file_data_a.seek(628)  # Hb place in header
            file_data_a.write(np.int32(data_block_size / 2).tobytes())
            file_data_a.seek(632)  # Wb place in header
            file_data_a.write(np.int32(data_block_size / 2).tobytes())
            file_data_a.seek(636)  # Navr place in header
            # NOTE(review): bytes([x]) requires 0 <= x <= 255 and writes a single
            # byte, unlike the 4-byte .tobytes() used above — presumably a latent
            # bug for Navr * no_of_spectra_to_average > 255; confirm intent.
            file_data_a.write(bytes([np.int32(Navr * no_of_spectra_to_average)]))
            file_data_a.close()

            if Channel == 2:
                # Same header patching for the channel-B long file
                file_data_b_name = df_filename + '_Data_chB.dat'
                file_data_b = open(file_data_b_name, 'wb')
                file_data_b.write(file_header)
                file_data_b.seek(574)  # FFT size place in header
                file_data_b.write(np.int32(data_block_size).tobytes())
                file_data_b.seek(624)  # Lb place in header
                file_data_b.write(np.int32(0).tobytes())
                file_data_b.seek(628)  # Hb place in header
                file_data_b.write(np.int32(data_block_size / 2).tobytes())
                file_data_b.seek(632)  # Wb place in header
                file_data_b.write(np.int32(data_block_size / 2).tobytes())
                file_data_b.seek(636)  # Navr place in header
                file_data_b.write(bytes([np.int32(Navr * no_of_spectra_to_average)]))
                file_data_b.close()

            del file_header

        # !!! Make automatic calculations of time and frequency resolutions for waveform mode!!!

        # Manually set frequencies for one channel mode
        if (Channel == 0 and int(CLCfrq / 1000000) == 66) or (Channel == 1 and int(CLCfrq / 1000000) == 66):
            freq_points_num = 8192
            frequency = np.linspace(0.0, 33.0, freq_points_num)

        # Manually set frequencies for two channels mode
        if Channel == 2 or (Channel == 0 and int(CLCfrq / 1000000) == 33) or (
                Channel == 1 and int(CLCfrq / 1000000) == 33):
            freq_points_num = 8192
            frequency = np.linspace(16.5, 33.0, freq_points_num)

        # For new receiver (temporary):
        if Channel == 2 and int(CLCfrq / 1000000) == 80:
            freq_points_num = 8192
            frequency = np.linspace(0.0, 40.0, freq_points_num)

        # Calculation of number of blocks and number of spectra in the file
        if Channel == 0 or Channel == 1:  # Single channel mode
            no_of_av_spectra_per_file = (df_filesize - 1024) / (
                2 * data_block_size * no_of_spectra_to_average)
        else:  # Two channels mode
            no_of_av_spectra_per_file = (df_filesize - 1024) / (
                4 * data_block_size * no_of_spectra_to_average)

        no_of_blocks_in_file = (df_filesize - 1024) / data_block_size
        no_of_av_spectra_per_file = int(no_of_av_spectra_per_file)
        fine_clock_frq = (int(CLCfrq / 1000000.0) * 1000000.0)

        # Real time resolution of averaged spectra
        # (last 4 samples of each block hold timing data, hence data_block_size - 4)
        real_av_spectra_dt = (1 / fine_clock_frq) * (data_block_size - 4) * no_of_spectra_to_average

        if file_no == 0:
            print(' Number of blocks in file: ', no_of_blocks_in_file)
            print(' Number of spectra to average: ', no_of_spectra_to_average)
            print(' Number of averaged spectra in file: ', no_of_av_spectra_per_file)
            print(' Time resolution of averaged spectrum: ', round(real_av_spectra_dt * 1000, 3), ' ms.')
            print('\n *** Reading data from file *** \n')

        # *******************************************************************************
        #                          R E A D I N G   D A T A                              *
        # *******************************************************************************

        with open(fname, 'rb') as file:
            file.seek(1024 + data_block_size * 4 * skip_data_blocks)  # Jumping to 1024 byte from file beginning

            # *** DATA READING process ***

            # Preparing arrays for dynamic spectra
            dyn_spectra_ch_a = np.zeros((int(data_block_size / 2), no_of_av_spectra_per_file), float)
            if Channel == 2:  # Two channels mode
                dyn_spectra_ch_b = np.zeros((int(data_block_size / 2), no_of_av_spectra_per_file), float)

            # !!! Fake timing. Real timing to be done!!!
            # TimeFigureScaleFig = np.linspace(0, no_of_av_spectra_per_file, no_of_av_spectra_per_file+1)
            # for i in range(no_of_av_spectra_per_file):
            #     TimeFigureScaleFig[i] = str(TimeFigureScaleFig[i])

            time_scale_fig = []
            time_scale_full = []

            bar = IncrementalBar(' File ' + str(file_no + 1) + ' of ' + str(len(file_list)) +
                                 ' reading: ', max=no_of_av_spectra_per_file, suffix='%(percent)d%%')

            for av_sp in range(no_of_av_spectra_per_file):

                bar.next()

                # Reading and reshaping all data with readers
                # ('i2' = int16 samples; column-major reshape so each column is one data block)
                if Channel == 0 or Channel == 1:  # Single channel mode
                    wf_data = np.fromfile(file, dtype='i2',
                                          count=no_of_spectra_to_average * data_block_size)
                    wf_data = np.reshape(wf_data, [data_block_size, no_of_spectra_to_average], order='F')
                if Channel == 2:  # Two channels mode
                    wf_data = np.fromfile(file, dtype='i2',
                                          count=2 * no_of_spectra_to_average * data_block_size)
                    wf_data = np.reshape(wf_data, [data_block_size, 2 * no_of_spectra_to_average], order='F')

                # Timing
                timeline_block_str = jds_waveform_time(wf_data, CLCfrq, data_block_size)
                time_scale_fig.append(timeline_block_str[-1][0:12])
                time_scale_full.append(df_creation_timeUTC[0:10] + ' ' + timeline_block_str[-1][0:12])

                # Nulling the time blocks in waveform data
                wf_data[data_block_size - 4:data_block_size, :] = 0

                # Scaling of the data - seems to be wrong in absolute value
                wf_data = wf_data / 32768.0

                if Channel == 0 or Channel == 1:  # Single channel mode
                    wf_data_ch_a = wf_data  # All the data is channel A data
                    del wf_data  # Deleting unnecessary array to free the memory

                if Channel == 2:  # Two channels mode

                    # Resizing to obtain the matrix for separation of channels
                    wf_data_new = np.zeros((2 * data_block_size, no_of_spectra_to_average))
                    for i in range(2 * no_of_spectra_to_average):
                        if i % 2 == 0:
                            wf_data_new[0:data_block_size, int(i / 2)] = wf_data[:, i]  # Even
                        else:
                            wf_data_new[data_block_size:2 * data_block_size, int(i / 2)] = wf_data[:, i]  # Odd
                    del wf_data  # Deleting unnecessary array to free the memory

                    # Separating the data into two channels (A/B interleaved sample-by-sample)
                    wf_data_ch_a = np.zeros((data_block_size, no_of_spectra_to_average))  # Preparing empty array
                    wf_data_ch_b = np.zeros((data_block_size, no_of_spectra_to_average))  # Preparing empty array
                    wf_data_ch_a[:, :] = wf_data_new[0:(2 * data_block_size):2, :]  # Separation to channel A
                    wf_data_ch_b[:, :] = wf_data_new[1:(2 * data_block_size):2, :]  # Separation to channel B
                    del wf_data_new

                # preparing matrices for spectra
                spectra_ch_a = np.zeros_like(wf_data_ch_a)
                if Channel == 2:
                    spectra_ch_b = np.zeros_like(wf_data_ch_b)

                # Calculation of spectra (power = |FFT|^2 per column)
                for i in range(no_of_spectra_to_average):
                    spectra_ch_a[:, i] = np.power(np.abs(np.fft.fft(wf_data_ch_a[:, i])), 2)
                    if Channel == 2:  # Two channels mode
                        spectra_ch_b[:, i] = np.power(np.abs(np.fft.fft(wf_data_ch_b[:, i])), 2)

                # Storing only first (left) mirror part of spectra
                spectra_ch_a = spectra_ch_a[:int(data_block_size / 2), :]
                if Channel == 2:
                    spectra_ch_b = spectra_ch_b[:int(data_block_size / 2), :]

                # At 33 MHz the specter is usually upside down, to correct it we use flip up/down
                if int(CLCfrq / 1000000) == 33:
                    spectra_ch_a = np.flipud(spectra_ch_a)
                    if Channel == 2:
                        spectra_ch_b = np.flipud(spectra_ch_b)

                # Plotting first waveform block and first immediate spectrum in a file
                if av_sp == 0:  # First data block in a file
                    i = 0  # First immediate spectrum in a block

                    # Prepare parameters for plot
                    data_1 = wf_data_ch_a[:, i]
                    if Channel == 0 or Channel == 1:  # Single channel mode
                        no_of_sets = 1
                        data_2 = []
                    if Channel == 2:
                        no_of_sets = 2
                        data_2 = wf_data_ch_b[:, i]

                    suptitle = ('Waveform data, first block in file ' + str(df_filename))
                    Title = (ReceiverMode + ', Fclock = ' + str(round(CLCfrq / 1000000, 1)) +
                             ' MHz, Description: ' + str(df_description))

                    TwoOrOneValuePlot(no_of_sets,
                                      np.linspace(no_of_sets, data_block_size, data_block_size),
                                      data_1, data_2, 'Channel A', 'Channel B', 1, data_block_size,
                                      -0.6, 0.6, -0.6, 0.6, 'ADC clock counts', 'Amplitude, V',
                                      'Amplitude, V', suptitle, Title,
                                      service_folder + '/' + df_filename[0:14] +
                                      ' Waveform first data block.png',
                                      current_date, current_time, software_version)

                    # Prepare parameters for plot
                    data_1 = 10 * np.log10(spectra_ch_a[:, i])
                    if Channel == 0 or Channel == 1:  # Single channel mode
                        no_of_sets = 1
                        data_2 = []
                    if Channel == 2:
                        no_of_sets = 2
                        data_2 = 10 * np.log10(spectra_ch_b[:, i])

                    suptitle = ('Immediate spectrum, first in file ' + str(df_filename))
                    Title = (ReceiverMode + ', Fclock = ' + str(round(CLCfrq / 1000000, 1)) +
                             ' MHz, Description: ' + str(df_description))

                    TwoOrOneValuePlot(no_of_sets, frequency, data_1, data_2,
                                      'Channel A', 'Channel B', frequency[0], frequency[-1],
                                      -80, 60, -80, 60, 'Frequency, MHz', 'Intensity, dB',
                                      'Intensity, dB', suptitle, Title,
                                      service_folder + '/' + df_filename[0:14] +
                                      ' Immediate spectrum first in file.png',
                                      current_date, current_time, software_version)

                # Deleting the unnecessary matrices
                del wf_data_ch_a
                if Channel == 2:
                    del wf_data_ch_b

                # Calculation the averaged spectrum
                aver_spectra_ch_a = spectra_ch_a.mean(axis=1)[:]
                if Channel == 2:
                    aver_spectra_ch_b = spectra_ch_b.mean(axis=1)[:]

                # Plotting only first averaged spectrum
                if av_sp == 0:

                    # Prepare parameters for plot
                    data_1 = 10 * np.log10(aver_spectra_ch_a)
                    if Channel == 0 or Channel == 1:  # Single channel mode
                        no_of_sets = 1
                        data_2 = []
                    if Channel == 2:
                        no_of_sets = 2
                        data_2 = 10 * np.log10(aver_spectra_ch_b)

                    suptitle = ('Average spectrum, first data block in file ' + str(df_filename))
                    Title = (ReceiverMode + ', Fclock = ' + str(round(CLCfrq / 1000000, 1)) +
                             ' MHz, Avergaed spectra: ' + str(no_of_spectra_to_average) +
                             ', Description: ' + str(df_description))

                    TwoOrOneValuePlot(no_of_sets, frequency, data_1, data_2,
                                      'Channel A', 'Channel B', frequency[0], frequency[-1],
                                      -80, 60, -80, 60, 'Frequency, MHz', 'Intensity, dB',
                                      'Intensity, dB', suptitle, Title,
                                      service_folder + '/' + df_filename[0:14] +
                                      ' Average spectrum first data block in file.png',
                                      current_date, current_time, software_version)

                # Adding calculated averaged spectrum to dynamic spectra array
                dyn_spectra_ch_a[:, av_sp] = aver_spectra_ch_a[:]
                if Channel == 2:
                    dyn_spectra_ch_b[:, av_sp] = aver_spectra_ch_b[:]

            bar.finish()

        # file.close()  # Close the data file

        # Saving averaged spectra to long data files
        if save_long_file_aver == 1:
            temp = dyn_spectra_ch_a.transpose().copy(order='C')
            file_data_a = open(file_data_a_name, 'ab')
            file_data_a.write(temp)
            file_data_a.close()
            if Channel == 2:
                temp = dyn_spectra_ch_b.transpose().copy(order='C')
                file_data_b = open(file_data_b_name, 'ab')
                file_data_b.write(temp)
                file_data_b.close()

            # Saving time data to long timeline file
            with open(tl_file_name, 'a') as tl_file:
                for i in range(no_of_av_spectra_per_file):
                    tl_file.write((time_scale_full[i][:]) + ' \n')  # str
            del time_scale_full

        # Log data (make dB scale)
        with np.errstate(invalid='ignore', divide='ignore'):
            dyn_spectra_ch_a = 10 * np.log10(dyn_spectra_ch_a)
            if Channel == 2:
                dyn_spectra_ch_b = 10 * np.log10(dyn_spectra_ch_b)

        # If the data contains minus infinity values change them to particular values
        dyn_spectra_ch_a[np.isinf(dyn_spectra_ch_a)] = 40
        if Channel == 2:
            dyn_spectra_ch_b[np.isinf(dyn_spectra_ch_b)] = 40

        # *******************************************************************************
        #            P L O T T I N G    D Y N A M I C    S P E C T R A                  *
        # *******************************************************************************

        # if dyn_spectr_save_init == 1 or dyn_spectr_save_norm == 1:
        #     print('\n *** Making figures of dynamic spectra *** \n')

        if dyn_spectr_save_init == 1:
            # Plot of initial dynamic spectra (auto color scale from data extremes)
            v_min_a = np.min(dyn_spectra_ch_a)
            v_max_a = np.max(dyn_spectra_ch_a)
            v_min_b = v_min_a
            v_max_b = v_max_a
            if Channel == 2:
                v_min_b = np.min(dyn_spectra_ch_b)
                v_max_b = np.max(dyn_spectra_ch_b)

            if Channel == 0 or Channel == 1:  # Single channel mode
                dyn_spectra_ch_b = dyn_spectra_ch_a

            suptitle = ('Dynamic spectrum (initial) ' + str(df_filename) + ' - Fig. ' +
                        str(1) + ' of ' + str(1) + '\n Initial parameters: dt = ' +
                        str(round(TimeRes * 1000., 3)) + ' ms, df = ' + str(round(df / 1000., 3)) +
                        ' kHz, Receiver: ' + str(df_system_name) + ', Place: ' + str(df_obs_place) +
                        '\n' + ReceiverMode + ', Fclock = ' + str(round(CLCfrq / 1000000, 1)) +
                        ' MHz, Avergaed spectra: ' + str(no_of_spectra_to_average) + ' (' +
                        str(round(no_of_spectra_to_average * TimeRes, 3)) + ' sec.), Description: ' +
                        str(df_description))

            fig_file_name = (initial_spectra_folder + '/' + df_filename[0:14] +
                             ' Initial dynamic spectrum fig.' + str(0 + 1) + '.png')

            if Channel == 0 or Channel == 1:  # Single channel mode
                OneDynSpectraPlot(dyn_spectra_ch_a, v_min_a, v_max_a, suptitle, 'Intensity, dB',
                                  no_of_av_spectra_per_file, time_scale_fig, frequency,
                                  freq_points_num, colormap, 'UTC Time, HH:MM:SS.msec',
                                  fig_file_name, current_date, current_time, software_version,
                                  custom_dpi)
            if Channel == 2:
                TwoDynSpectraPlot(dyn_spectra_ch_a, dyn_spectra_ch_b, v_min_a, v_max_a, v_min_b,
                                  v_max_b, suptitle, 'Intensity, dB', 'Intensity, dB',
                                  no_of_av_spectra_per_file, time_scale_fig, time_scale_fig,
                                  frequency, freq_points_num, colormap, 'Channel A', 'Channel B',
                                  fig_file_name, current_date, current_time, software_version,
                                  custom_dpi)

        if dyn_spectr_save_norm == 1:

            # Normalization and cleaning of data
            Normalization_dB(dyn_spectra_ch_a.transpose(), freq_points_num, no_of_av_spectra_per_file)
            if Channel == 2:
                Normalization_dB(dyn_spectra_ch_b.transpose(), freq_points_num, no_of_av_spectra_per_file)
            simple_channel_clean(dyn_spectra_ch_a, 8)
            if Channel == 2:
                simple_channel_clean(dyn_spectra_ch_b, 8)

            # Plot of normalized and cleaned dynamic spectra
            suptitle = ('Normalized and cleaned dynamic spectrum (initial) ' + str(df_filename) +
                        ' - Fig. ' + str(0 + 1) + ' of ' + str(1) + '\n Initial parameters: dt = ' +
                        str(round(TimeRes * 1000, 3)) + ' ms, df = ' + str(round(df / 1000., 3)) +
                        ' kHz, Receiver: ' + str(df_system_name) + ', Place: ' + str(df_obs_place) +
                        '\n' + ReceiverMode + ', Fclock = ' + str(round(CLCfrq / 1000000, 1)) +
                        ' MHz, Avergaed spectra: ' + str(no_of_spectra_to_average) + ' (' +
                        str(round(no_of_spectra_to_average * TimeRes, 3)) + ' sec.), Description: ' +
                        str(df_description))

            fig_file_name = (result_folder + '/' + df_filename[0:14] +
                             ' Normalized and cleaned dynamic spectrum fig.' + str(0 + 1) + '.png')

            if Channel == 0 or Channel == 1:  # Single channel mode
                OneDynSpectraPlot(dyn_spectra_ch_a, VminNorm, VmaxNorm, suptitle, 'Intensity, dB',
                                  no_of_av_spectra_per_file, time_scale_fig, frequency,
                                  freq_points_num, colormap, 'UTC Time, HH:MM:SS.msec',
                                  fig_file_name, current_date, current_time, software_version,
                                  custom_dpi)
            if Channel == 2:
                TwoDynSpectraPlot(dyn_spectra_ch_a, dyn_spectra_ch_b, VminNorm, VmaxNorm, VminNorm,
                                  VmaxNorm, suptitle, 'Intensity, dB', 'Intensity, dB',
                                  no_of_av_spectra_per_file, time_scale_fig, time_scale_fig,
                                  frequency, freq_points_num, colormap, 'Channel A', 'Channel B',
                                  fig_file_name, current_date, current_time, software_version,
                                  custom_dpi)

        # NOTE(review): file_data_a/file_data_b only exist when
        # save_long_file_aver == 1 — this del raises NameError otherwise; confirm
        # the function is always called with save_long_file_aver == 1.
        del time_scale_fig, file_data_a
        if Channel == 2:
            del file_data_b

    results_files_list = []
    results_files_list.append(file_data_a_name)
    if Channel == 2:
        results_files_list.append(file_data_b_name)

    return results_files_list
def jds_wf_simple_reader(directory, no_of_spectra_to_average, skip_data_blocks, VminNorm, VmaxNorm, colormap, custom_dpi, save_long_file_aver, dyn_spectr_save_init, dyn_spectr_save_norm): """ Does not seem to work better or faster, takes a lot of RAM (32 GB) but works Is not used in any other scripts and is more a demonstration The same functions in non-fast file works approximately the same time but consumes less memory The only advantage of this function is reading the whole file at once """ current_time = time.strftime("%H:%M:%S") current_date = time.strftime("%d.%m.%Y") # *** Creating a folder where all pictures and results will be stored (if it doesn't exist) *** # result_folder = 'RESULTS_JDS_waveform_' + directory.split('/')[-2] result_folder = 'RESULTS_JDS_waveform' if not os.path.exists(result_folder): os.makedirs(result_folder) if dyn_spectr_save_init == 1: initial_spectra_folder = result_folder + '/Initial spectra' if not os.path.exists(initial_spectra_folder): os.makedirs(initial_spectra_folder) # *** Search JDS files in the directory *** file_list = find_files_only_in_current_folder(directory, '.jds', 1) print('') if len( file_list ) > 1: # Check if files have same parameters if there are more then one file in list # Check if all files (except the last) have same size same_or_not = check_if_all_files_of_same_size(directory, file_list, 1) # Check if all files in this folder have the same parameters in headers equal_or_not = check_if_JDS_files_of_equal_parameters( directory, file_list) if same_or_not and equal_or_not: print( '\n\n\n :-) All files seem to be of the same parameters! :-) \n\n\n' ) else: print( '\n\n\n ************************************************************************************* ' ) print( ' * *' ) print( ' * Seems files in folders are different check the errors and restart the script! 
*' ) print( ' * * ' '\n ************************************************************************************* \n\n\n' ) decision = int( input( '* Enter "1" to start processing, or "0" to stop the script: ' )) if decision != 1: sys.exit( '\n\n\n *** Program stopped! *** \n\n\n') # To print in console the header of first file print('\n First file header parameters: \n') # *** Data file header read *** [ df_filename, df_filesize, df_system_name, df_obs_place, df_description, CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency, freq_points_num, data_block_size ] = FileHeaderReaderJDS(directory + file_list[0], 0, 1) # Main loop by files start for fileNo in range(len(file_list)): # loop by files # *** Opening datafile *** fname = directory + file_list[fileNo] # ********************************************************************************* # *** Data file header read *** [ df_filename, df_filesize, df_system_name, df_obs_place, df_description, CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency, freq_points_num, data_block_size ] = FileHeaderReaderJDS(fname, 0, 0) # Create long data files and copy first data file header to them if fileNo == 0 and save_long_file_aver == 1: with open(fname, 'rb') as file: # *** Data file header read *** file_header = file.read(1024) # *** Creating a name for long timeline TXT file *** TLfile_name = df_filename + '_Timeline.txt' TLfile = open( TLfile_name, 'w') # Open and close to delete the file with the same name TLfile.close() # *** Creating a binary file with data for long data storage *** file_data_A_name = df_filename + '_Data_chA.dat' file_data_A = open(file_data_A_name, 'wb') file_data_A.write(file_header) file_data_A.seek(574) # FFT size place in header file_data_A.write(np.int32(data_block_size).tobytes()) file_data_A.seek(624) # Lb place in header file_data_A.write(np.int32(0).tobytes()) file_data_A.seek(628) # Hb place in header 
file_data_A.write(np.int32(data_block_size / 2).tobytes()) file_data_A.seek(632) # Wb place in header file_data_A.write(np.int32(data_block_size / 2).tobytes()) file_data_A.seek(636) # Navr place in header file_data_A.write( bytes([np.int32(Navr * no_of_spectra_to_average)])) file_data_A.close() if Channel == 2: file_data_B_name = df_filename + '_Data_chB.dat' file_data_B = open(file_data_B_name, 'wb') file_data_B.write(file_header) file_data_B.seek(574) # FFT size place in header file_data_B.write(np.int32(data_block_size).tobytes()) file_data_B.seek(624) # Lb place in header file_data_B.write(np.int32(0).tobytes()) file_data_B.seek(628) # Hb place in header file_data_B.write(np.int32(data_block_size / 2).tobytes()) file_data_B.seek(632) # Wb place in header file_data_B.write(np.int32(data_block_size / 2).tobytes()) file_data_B.seek(636) # Navr place in header file_data_B.write( bytes([np.int32(Navr * no_of_spectra_to_average)])) file_data_B.close() del file_header # !!! Make automatic calculations of time and frequency resolutions for waveform mode!!! 
# Manually set frequencies for one channel mode if (Channel == 0 and int(CLCfrq / 1000000) == 66) or (Channel == 1 and int(CLCfrq / 1000000) == 66): freq_points_num = 8192 frequency = np.linspace(0.0, 33.0, freq_points_num) # Manually set frequencies for two channels mode if Channel == 2 or (Channel == 0 and int(CLCfrq / 1000000) == 33) or ( Channel == 1 and int(CLCfrq / 1000000) == 33): freq_points_num = 8192 frequency = np.linspace(16.5, 33.0, freq_points_num) # For new receiver (temporary): if Channel == 2 and int(CLCfrq / 1000000) == 80: freq_points_num = 8192 frequency = np.linspace(0.0, 40.0, freq_points_num) # Calculation of number of blocks and number of spectra in the file if Channel == 0 or Channel == 1: # Single channel mode no_of_av_spectra_per_file = (df_filesize - 1024) / ( 2 * data_block_size * no_of_spectra_to_average) else: # Two channels mode no_of_av_spectra_per_file = (df_filesize - 1024) / ( 4 * data_block_size * no_of_spectra_to_average) no_of_blocks_in_file = (df_filesize - 1024) / data_block_size no_of_av_spectra_per_file = int(no_of_av_spectra_per_file) fine_CLCfrq = (int(CLCfrq / 1000000.0) * 1000000.0) # Real time resolution of averaged spectra real_av_spectra_dt = (1 / fine_CLCfrq) * (data_block_size - 4) * no_of_spectra_to_average if fileNo == 0: print(' Number of blocks in file: ', no_of_blocks_in_file) print(' Number of spectra to average: ', no_of_spectra_to_average) print(' Number of averaged spectra in file: ', no_of_av_spectra_per_file) print(' Time resolution of averaged spectrum: ', round(real_av_spectra_dt * 1000, 3), ' ms.') print('\n *** Reading data from file *** \n') # ******************************************************************************* # R E A D I N G D A T A * # ******************************************************************************* with open(fname, 'rb') as file: file.seek( 1024 + data_block_size * 4 * skip_data_blocks) # Jumping to 1024 byte from file beginning # *** DATA READING process *** # !!! 
Fake timing. Real timing to be done!!! TimeFigureScaleFig = np.linspace(0, no_of_av_spectra_per_file, no_of_av_spectra_per_file + 1) for i in range(no_of_av_spectra_per_file): TimeFigureScaleFig[i] = str(TimeFigureScaleFig[i]) TimeScaleFig = [] TimeScaleFull = [] # Calculation of number of blocks and number of spectra in the file if Channel == 0 or Channel == 1: # Single channel mode no_of_spectra_in_file = int( (df_filesize - 1024) / (1 * 2 * data_block_size)) else: # Two channels mode no_of_spectra_in_file = int( (df_filesize - 1024) / (1 * 4 * data_block_size)) no_of_av_spectra_per_file = 1 # Reading and reshaping all data with time data if Channel == 0 or Channel == 1: # Single channel mode wf_data = np.fromfile(file, dtype='i2', count=no_of_spectra_in_file * data_block_size) wf_data = np.reshape(wf_data, [data_block_size, no_of_spectra_in_file], order='F') if Channel == 2: # Two channels mode wf_data = np.fromfile(file, dtype='i2', count=2 * no_of_spectra_in_file * data_block_size) wf_data = np.reshape( wf_data, [data_block_size, 2 * no_of_spectra_in_file], order='F') print('Waveform read, shape: ', wf_data.shape) # Timing timeline_block_str = jds_waveform_time(wf_data, CLCfrq, data_block_size) timeline_block_str = timeline_block_str[0::16] for j in range(len(timeline_block_str)): TimeScaleFig.append(timeline_block_str[j][0:12]) TimeScaleFull.append(df_creation_timeUTC[0:10] + ' ' + timeline_block_str[j][0:12]) # Nulling the time blocks in waveform data wf_data[data_block_size - 4:data_block_size, :] = 0 # Scaling of the data - seems to be wrong in absolute value wf_data = wf_data / 32768.0 if Channel == 0 or Channel == 1: # Single channel mode wf_data_chA = wf_data # All the data is channel A data del wf_data # Deleting unnecessary array to free the memory if Channel == 2: # Two channels mode # Resizing to obtain the matrix for separation of channels wf_data_new = np.zeros( (2 * data_block_size, no_of_spectra_in_file)) for i in range(2 * 
no_of_spectra_in_file): if i % 2 == 0: wf_data_new[0:data_block_size, int(i / 2)] = wf_data[:, i] # Even else: wf_data_new[data_block_size:2 * data_block_size, int(i / 2)] = wf_data[:, i] # Odd del wf_data # Deleting unnecessary array to free the memory # Separating the data into two channels wf_data_chA = np.zeros( (data_block_size, no_of_spectra_in_file)) # Preparing empty array wf_data_chB = np.zeros( (data_block_size, no_of_spectra_in_file)) # Preparing empty array wf_data_chA[:, :] = wf_data_new[0:( 2 * data_block_size):2, :] # Separation to channel A wf_data_chB[:, :] = wf_data_new[1:( 2 * data_block_size):2, :] # Separation to channel B del wf_data_new print('Before transpose, shape: ', wf_data_chA.shape) # preparing matrices for spectra wf_data_chA = np.transpose(wf_data_chA) spectra_ch_a = np.zeros_like(wf_data_chA) if Channel == 2: wf_data_chB = np.transpose(wf_data_chB) spectra_ch_b = np.zeros_like(wf_data_chB) print('After transpose, shape: ', wf_data_chA.shape) # Calculation of spectra spectra_ch_a[:] = np.power(np.abs(np.fft.fft(wf_data_chA[:])), 2) if Channel == 2: # Two channels mode spectra_ch_b[:] = np.power(np.abs(np.fft.fft(wf_data_chB[:])), 2) print('After fft, spectrum shape: ', spectra_ch_a.shape) # Storing only first (left) mirror part of spectra spectra_ch_a = spectra_ch_a[:, :int(data_block_size / 2)] if Channel == 2: spectra_ch_b = spectra_ch_b[:, :int(data_block_size / 2)] print('After fft cut, spectrum shape: ', spectra_ch_a.shape) # At 33 MHz the specter is usually upside down, to correct it we use flip up/down if int(CLCfrq / 1000000) == 33: spectra_ch_a = np.fliplr(spectra_ch_a) if Channel == 2: spectra_ch_b = np.fliplr(spectra_ch_b) # Deleting the unnecessary matrices del wf_data_chA if Channel == 2: del wf_data_chB # Dimensions of [data_block_size / 2, no_of_spectra_in_file] # Calculation the averaged spectrum print('Shape before averaging: ', spectra_ch_a.shape) spectra_ch_a = np.reshape(spectra_ch_a, [ int(no_of_spectra_in_file / 
no_of_spectra_to_average), no_of_spectra_to_average, int(data_block_size / 2) ], order='F') spectra_ch_a = spectra_ch_a.mean(axis=1)[:] print('Shape after averaging: ', spectra_ch_a.shape) if Channel == 2: spectra_ch_b = np.reshape(spectra_ch_b, [ int(no_of_spectra_in_file / no_of_spectra_to_average), no_of_spectra_to_average, int(data_block_size / 2) ], order='F') spectra_ch_b = spectra_ch_b.mean(axis=1)[:] file.close() # Close the data file # Saving averaged spectra to a long data files if save_long_file_aver == 1: temp = spectra_ch_a.copy(order='C') file_data_A = open(file_data_A_name, 'ab') file_data_A.write(temp) file_data_A.close() if Channel == 2: temp = spectra_ch_b.copy(order='C') file_data_B = open(file_data_B_name, 'ab') file_data_B.write(temp) file_data_B.close() # Saving time data to long timeline file with open(TLfile_name, 'a') as TLfile: for i in range(no_of_av_spectra_per_file): TLfile.write((TimeScaleFull[i][:]) + ' \n') # str # Log data (make dB scale) with np.errstate(invalid='ignore', divide='ignore'): spectra_ch_a = 10 * np.log10(spectra_ch_a) if Channel == 2: spectra_ch_b = 10 * np.log10(spectra_ch_b) # If the data contains minus infinity values change them to particular values spectra_ch_a[np.isinf(spectra_ch_a)] = 40 if Channel == 2: spectra_ch_b[np.isinf(spectra_ch_b)] = 40 # ******************************************************************************* # P L O T T I N G D Y N A M I C S P E C T R A * # ******************************************************************************* spectra_ch_a = np.transpose(spectra_ch_a) if Channel == 2: spectra_ch_b = np.transpose(spectra_ch_b) no_of_av_spectra_per_file = spectra_ch_a.shape[1] if dyn_spectr_save_init == 1: # Plot of initial dynamic spectra VminA = np.min(spectra_ch_a) VmaxA = np.max(spectra_ch_a) VminB = VminA VmaxB = VmaxA if Channel == 2: VminB = np.min(spectra_ch_b) VmaxB = np.max(spectra_ch_b) if Channel == 0 or Channel == 1: # Single channel mode spectra_ch_b = spectra_ch_a 
suptitle = ('Dynamic spectrum (initial) ' + str(df_filename) + ' - Fig. ' + str(1) + ' of ' + str(1) + '\n Initial parameters: dt = ' + str(round(TimeRes * 1000., 3)) + ' ms, df = ' + str(round(df / 1000., 3)) + ' kHz, Receiver: ' + str(df_system_name) + ', Place: ' + str(df_obs_place) + '\n' + ReceiverMode + ', Fclock = ' + str(round(CLCfrq / 1000000, 1)) + ' MHz, Avergaed spectra: ' + str(no_of_spectra_to_average) + ' (' + str(round(no_of_spectra_to_average * TimeRes, 3)) + ' sec.), Description: ' + str(df_description)) fig_file_name = (initial_spectra_folder + '/' + df_filename[0:14] + ' Initial dynamic spectrum fig.' + str(0 + 1) + '.png') if Channel == 0 or Channel == 1: # Single channel mode OneDynSpectraPlot(spectra_ch_a, VminA, VmaxA, suptitle, 'Intensity, dB', no_of_av_spectra_per_file, TimeScaleFig, frequency, freq_points_num, colormap, 'UTC Time, HH:MM:SS.msec', fig_file_name, current_date, current_time, software_version, custom_dpi) if Channel == 2: TwoDynSpectraPlot(spectra_ch_a, spectra_ch_b, VminA, VmaxA, VminB, VmaxB, suptitle, 'Intensity, dB', 'Intensity, dB', no_of_av_spectra_per_file, TimeScaleFig, TimeScaleFig, frequency, freq_points_num, colormap, 'Channel A', 'Channel B', fig_file_name, current_date, current_time, software_version, custom_dpi) if dyn_spectr_save_norm == 1: # Normalization and cleaning of data Normalization_dB(spectra_ch_a.transpose(), freq_points_num, no_of_av_spectra_per_file) if Channel == 2: Normalization_dB(spectra_ch_b.transpose(), freq_points_num, no_of_av_spectra_per_file) simple_channel_clean(spectra_ch_a, 8) if Channel == 2: simple_channel_clean(spectra_ch_b, 8) # Plot of normalized and cleaned dynamic spectra suptitle = ('Normalized and cleaned dynamic spectrum (initial) ' + str(df_filename) + ' - Fig. 
' + str(0 + 1) + ' of ' + str(1) + '\n Initial parameters: dt = ' + str(round(TimeRes * 1000, 3)) + ' ms, df = ' + str(round(df / 1000., 3)) + ' kHz, Receiver: ' + str(df_system_name) + ', Place: ' + str(df_obs_place) + '\n' + ReceiverMode + ', Fclock = ' + str(round(CLCfrq / 1000000, 1)) + ' MHz, Avergaed spectra: ' + str(no_of_spectra_to_average) + ' (' + str(round(no_of_spectra_to_average * TimeRes, 3)) + ' sec.), Description: ' + str(df_description)) fig_file_name = (result_folder + '/' + df_filename[0:14] + ' Normalized and cleaned dynamic spectrum fig.' + str(0 + 1) + '.png') if Channel == 0 or Channel == 1: # Single channel mode OneDynSpectraPlot(spectra_ch_a, VminNorm, VmaxNorm, suptitle, 'Intensity, dB', no_of_av_spectra_per_file, TimeScaleFig, frequency, freq_points_num, colormap, 'UTC Time, HH:MM:SS.msec', fig_file_name, current_date, current_time, software_version, custom_dpi) if Channel == 2: TwoDynSpectraPlot(spectra_ch_a, spectra_ch_b, VminNorm, VmaxNorm, VminNorm, VmaxNorm, suptitle, 'Intensity, dB', 'Intensity, dB', no_of_av_spectra_per_file, TimeScaleFig, TimeScaleFig, frequency, freq_points_num, colormap, 'Channel A', 'Channel B', fig_file_name, current_date, current_time, software_version, custom_dpi) results_files_list = [] results_files_list.append(file_data_A_name) if Channel == 2: results_files_list.append(file_data_B_name) return results_files_list
' * TXT data files reader v1.0 * (c) YeS 2019' ) print(' **************************************************** \n\n\n') startTime = time.time() currentTime = time.strftime("%H:%M:%S") currentDate = time.strftime("%d.%m.%Y") print(' Today is ', currentDate, ' time is ', currentTime, '\n') # *** Creating a folder where all pictures will be stored (if it doesn't exist) *** newpath = "TXT_Results" if not os.path.exists(newpath): os.makedirs(newpath) # TXT files to be analyzed: to find all files in directory filename = find_files_only_in_current_folder(common_path, '.txt', 1) for i in range(len(filename)): filename[i] = common_path + filename[i] #******************************************************************************* # R E A D I N G D A T A * #******************************************************************************* # *** Reading files *** [x_value, y_value] = read_date_time_and_one_value_txt(filename) y_value = np.array(y_value) a, b = y_value.shape date_time = x_value[0][:] text_freqs = []
def jds_wf_true_resolution(source_directory, result_directory, no_of_points_for_fft, no_of_bunches_per_file): # *** Search JDS files in the source_directory *** fileList = find_files_only_in_current_folder(source_directory, '.jds', 1) print('') if len( fileList ) > 1: # Check if files have same parameters if there are more then one file in list # Check if all files (except the last) have same size same_or_not = check_if_all_files_of_same_size(source_directory, fileList, 1) # Check if all files in this folder have the same parameters in headers equal_or_not = check_if_JDS_files_of_equal_parameters( source_directory, fileList) if same_or_not and equal_or_not: print( '\n\n\n :-) All files seem to be of the same parameters! :-) \n\n\n' ) else: print( '\n\n\n ************************************************************************************* \n * *' ) print( ' * Seems files in folders are different check the errors and restart the script! *' ) print( ' * * ' '\n ************************************************************************************* \n\n\n' ) decision = int( input( '* Enter "1" to start processing, or "0" to stop the script: ' )) if decision != 1: sys.exit('\n\n\n *** Program stopped! 
*** \n\n\n') # To print in console the header of first file print('\n First file header parameters: \n') # *** Data file header read *** [ df_filename, df_filesize, df_system_name, df_obs_place, df_description, CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency, FreqPointsNum, data_block_size ] = FileHeaderReaderJDS(source_directory + fileList[0], 0, 1) if Channel == 0 or Channel == 1: # Single channel mode wf_data_chA = np.empty([0]) else: wf_data_chA = np.empty([0]) wf_data_chB = np.empty([0]) # CLCfrq = 80 # Main loop by files start for fileNo in range(len(fileList)): # loop by files print('\n\n * File ', str(fileNo + 1), ' of', str(len(fileList))) print(' * File path: ', str(fileList[fileNo])) # *** Opening datafile *** fname = source_directory + fileList[fileNo] # ********************************************************************************* # *** Data file header read *** [ df_filename, df_filesize, df_system_name, df_obs_place, df_description, CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency, FreqPointsNum, data_block_size ] = FileHeaderReaderJDS(fname, 0, 0) # !!! Make automatic calculations of time and frequency resolutions for waveform mode!!! 
# Manually set frequencies for one channel mode FreqPointsNum = int(no_of_points_for_fft / 2) ''' if (Channel == 0 and int(CLCfrq/1000000) == 66) or (Channel == 1 and int(CLCfrq/1000000) == 66): # FreqPointsNum = 8192 frequency = np.linspace(0.0, 33.0, FreqPointsNum) # Manually set frequencies for two channels mode if Channel == 2 or (Channel == 0 and int(CLCfrq/1000000) == 33) or (Channel == 1 and int(CLCfrq/1000000) == 33): #FreqPointsNum = 8192 frequency = np.linspace(16.5, 33.0, FreqPointsNum) # For new receiver (temporary): if Channel == 2 and int(CLCfrq/1000000) == 80: #FreqPointsNum = 8192 frequency = np.linspace(0.0, 40.0, FreqPointsNum) ''' # Create long data files and copy first data file header to them if fileNo == 0: #''' with open(fname, 'rb') as file: # *** Data file header read *** file_header = file.read(1024) # *** Creating a name for long timeline TXT file *** TLfile_name = df_filename + '_Timeline.txt' TLfile = open( TLfile_name, 'w') # Open and close to delete the file with the same name TLfile.close() # *** Creating a binary file with data for long data storage *** file_data_A_name = df_filename + '_Data_chA.dat' file_data_A = open(file_data_A_name, 'wb') file_data_A.write(file_header) file_data_A.seek(574) # FFT size place in header file_data_A.write(np.int32(no_of_points_for_fft).tobytes()) file_data_A.seek(624) # Lb place in header file_data_A.write(np.int32(0).tobytes()) file_data_A.seek(628) # Hb place in header file_data_A.write(np.int32(FreqPointsNum).tobytes()) file_data_A.seek(632) # Wb place in header file_data_A.write(np.int32(FreqPointsNum).tobytes()) file_data_A.seek(636) # Navr place in header #file_data_A.write(bytes([np.int32(Navr)])) # !!! To correct !!! #file_data_A.write(np.int32(no_of_points_for_fft/8192).tobytes()) # !!! Check for correctness !!! file_data_A.write( np.int32(1).tobytes()) # !!! Check for correctness !!! 
file_data_A.close() if Channel == 2: file_data_B_name = df_filename + '_Data_chB.dat' file_data_B = open(file_data_B_name, 'wb') file_data_B.write(file_header) file_data_B.seek(574) # FFT size place in header file_data_B.write(np.int32(no_of_points_for_fft).tobytes()) file_data_B.seek(624) # Lb place in header file_data_B.write(np.int32(0).tobytes()) file_data_B.seek(628) # Hb place in header file_data_B.write(np.int32(FreqPointsNum).tobytes()) file_data_B.seek(632) # Wb place in header file_data_B.write(np.int32(FreqPointsNum).tobytes()) file_data_B.seek(636) # Navr place in header #file_data_B.write(bytes([np.int32(Navr)])) # !!! To correct !!! file_data_B.write(np.int32(1).tobytes()) file_data_B.close() del file_header #''' # Calculation of number of blocks and number of spectra in the file if Channel == 0 or Channel == 1: # Single channel mode no_of_spectra_in_bunch = int( (df_filesize - 1024) / (no_of_bunches_per_file * 2 * data_block_size)) else: # Two channels mode no_of_spectra_in_bunch = int( (df_filesize - 1024) / (no_of_bunches_per_file * 4 * data_block_size)) no_of_blocks_in_file = (df_filesize - 1024) / data_block_size fine_CLCfrq = (int(CLCfrq / 1000000.0) * 1000000.0) # Real time resolution of averaged spectra real_spectra_dt = float(no_of_points_for_fft / fine_CLCfrq) real_spectra_df = float((fine_CLCfrq / 2) / (no_of_points_for_fft / 2)) if fileNo == 0: print(' Number of blocks in file: ', no_of_blocks_in_file) print(' Number of spectra in bunch: ', no_of_spectra_in_bunch) print(' Number of bunches to read in file: ', no_of_bunches_per_file) print(' Time resolution of calculated spectra: ', round(real_spectra_dt * 1000, 3), ' ms') print(' Frequency resolution of calculated spectra: ', round(real_spectra_df / 1000, 3), ' kHz') print('\n *** Reading data from file *** \n') # ******************************************************************************* # R E A D I N G D A T A * # 
******************************************************************************* with open(fname, 'rb') as file: file.seek(1024) # Jumping to 1024 byte from file beginning # *** DATA READING process *** # Preparing arrays for dynamic spectra #dyn_spectra_chA = np.zeros((int(data_block_size/2), no_of_bunches_per_file), float) #if Channel == 2: # Two channels mode # dyn_spectra_chB = np.zeros((int(data_block_size/2), no_of_bunches_per_file), float) # !!! Fake timing. Real timing to be done!!! TimeFigureScaleFig = np.linspace(0, no_of_bunches_per_file, no_of_bunches_per_file + 1) for i in range(no_of_bunches_per_file): TimeFigureScaleFig[i] = str(TimeFigureScaleFig[i]) time_scale_bunch = [] #bar = IncrementalBar(' File ' + str(fileNo+1) + ' of ' + str(len(fileList)) + ' reading: ', # max=no_of_bunches_per_file, suffix='%(percent)d%%') for bunch in range(no_of_bunches_per_file): #bar.next() # Reading and reshaping all data with time data if Channel == 0 or Channel == 1: # Single channel mode wf_data = np.fromfile(file, dtype='i2', count=no_of_spectra_in_bunch * data_block_size) wf_data = np.reshape( wf_data, [data_block_size, no_of_spectra_in_bunch], order='F') if Channel == 2: # Two channels mode wf_data = np.fromfile(file, dtype='i2', count=2 * no_of_spectra_in_bunch * data_block_size) wf_data = np.reshape( wf_data, [data_block_size, 2 * no_of_spectra_in_bunch], order='F') # Timing timeline_block_str = jds_waveform_time(wf_data, CLCfrq, data_block_size) if Channel == 2: # Two channels mode timeline_block_str = timeline_block_str[0:int( len(timeline_block_str) / 2)] # Cut the timeline of second channel for i in range(len(timeline_block_str)): time_scale_bunch.append(df_creation_timeUTC[0:10] + ' ' + timeline_block_str[i]) # [0:12] # deleting the time blocks from waveform data real_data_block_size = data_block_size - 4 wf_data = wf_data[0:real_data_block_size, :] # *** !!! Not sure if it is necessary now !!! 
*** # Scaling of the data - seems to be wrong in absolute value wf_data = wf_data / 32768 # .0 # Separation data into channels if Channel == 0 or Channel == 1: # Single channel mode wf_data_chA = np.append( wf_data_chA, np.reshape( wf_data, [real_data_block_size * no_of_spectra_in_bunch, 1], order='F')) del wf_data # Deleting unnecessary array name just in case if Channel == 2: # Two channels mode # Separating the data into two channels #print(wf_data.size, wf_data.shape) wf_data = np.reshape( wf_data, [2 * real_data_block_size * no_of_spectra_in_bunch, 1], order='F') wf_data_chA = np.append(wf_data_chA, wf_data[0:( 2 * real_data_block_size * no_of_spectra_in_bunch):2]) # Separation to channel A wf_data_chB = np.append(wf_data_chB, wf_data[1:( 2 * real_data_block_size * no_of_spectra_in_bunch):2]) # Separation to channel B del wf_data no_of_spectra_to_compute = int( np.floor(len(wf_data_chA) / no_of_points_for_fft)) print(' Bunch # ', bunch + 1, ' Number of true spectra in bunch: ', no_of_spectra_to_compute) # Cutting the full array and saving residuals to buffer ready_wf_array_chA = wf_data_chA[:no_of_spectra_to_compute * no_of_points_for_fft] ready_wf_array_chA = np.reshape( ready_wf_array_chA, [no_of_points_for_fft, no_of_spectra_to_compute], order='F') wf_data_chA = wf_data_chA[no_of_spectra_to_compute * no_of_points_for_fft:] if Channel == 2: ready_wf_array_chB = wf_data_chB[: no_of_spectra_to_compute * no_of_points_for_fft] ready_wf_array_chB = np.reshape( ready_wf_array_chB, [no_of_points_for_fft, no_of_spectra_to_compute], order='F') wf_data_chB = wf_data_chB[no_of_spectra_to_compute * no_of_points_for_fft:] # preparing matrices for spectra spectra_chA = np.zeros_like(ready_wf_array_chA) if Channel == 2: spectra_chB = np.zeros_like(ready_wf_array_chB) # Calculation of spectra for i in range(no_of_spectra_to_compute): spectra_chA[:, i] = np.power( np.abs(np.fft.fft(ready_wf_array_chA[:, i])), 2) if Channel == 2: # Two channels mode spectra_chB[:, i] = 
np.power( np.abs(np.fft.fft(ready_wf_array_chB[:, i])), 2) # Storing only first (left) mirror part of spectra spectra_chA = spectra_chA[:int(no_of_points_for_fft / 2), :] if Channel == 2: spectra_chB = spectra_chB[:int(no_of_points_for_fft / 2), :] # At 33 MHz the specter is usually upside down, to correct it we use flip up/down if int(CLCfrq / 1000000) == 33: spectra_chA = np.flipud(spectra_chA) if Channel == 2: spectra_chB = np.flipud(spectra_chB) ''' ''' # Saving spectra data to dat file temp = spectra_chA.transpose().copy(order='C') file_data_A = open(file_data_A_name, 'ab') file_data_A.write(temp) file_data_A.close() if Channel == 2: temp = spectra_chB.transpose().copy(order='C') file_data_B = open(file_data_B_name, 'ab') file_data_B.write(temp) file_data_B.close() # Saving time data to ling timeline file with open(TLfile_name, 'a') as TLfile: for i in range(no_of_spectra_in_bunch): TLfile.write( (str(time_scale_bunch[i][:])) + ' \n') # str #bar.finish() file.close() # Close the data file