Example #1
        if df_filename[-4:] == '.adr':

            freqList = freqList_GURT
            AmplitudeReIm = AmplitudeReIm_GURT
            [df_filename, df_filesize, df_system_name, df_obs_place, df_description,
                    CLCfrq, df_creation_timeUTC, ReceiverMode, Mode, sumDifMode,
                    NAvr, TimeRes, fmin, fmax, df, frequency, FFTsize, SLine,
                    Width, BlockSize] = FileHeaderReaderADR(path_to_data + dat_files_list[file_no], 0, 0)

        if df_filename[-4:] == '.jds':     # If data obtained from DSPZ receiver

            freqList = freqList_UTR2
            AmplitudeReIm = AmplitudeReIm_GURT
            [df_filename, df_filesize, df_system_name, df_obs_place, df_description,
                    CLCfrq, df_creation_timeUTC, SpInFile, ReceiverMode, Mode, Navr, TimeRes, fmin, fmax,
                    df, frequency, FreqPointsNum, dataBlockSize] = FileHeaderReaderJDS(path_to_data + dat_files_list[file_no], 0, 0)

        if AutoSourceSwitch == 1:
            if df_filename[-4:] == '.jds':     # If data obtained from DSPZ receiver
                '''
                if '3c461' in df_description.lower() or 'cas' in df_description.lower() or '461' in df_description.lower():
                    source = '3C461'
                elif '3c405' in df_description.lower() or 'cyg' in df_description.lower() or '405' in df_description.lower():
                    source = '3C405'
                else:
                    print('  Source not detected !!!')
                    source  = str(input(' * Enter source name like 3C405 or 3C461:            '))
                '''
                if file_no == 0:
                    source = '3C405'
                elif file_no == 1:
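
A minimal, self-contained sketch of the dispatch shown above: the first 32 bytes of a .adr/.jds data file hold the original file name, and its extension selects the header reader (FileHeaderReaderADR or FileHeaderReaderJDS). The path and file name below are assumptions; only the receiver-type detection is reproduced here, not the project's readers.

def detect_receiver_type(path):
    """Return the extension ('.adr' or '.jds') stored in the 32-byte name field of the file header."""
    with open(path, 'rb') as f:
        original_name = f.read(32).decode('utf-8').rstrip('\x00')
    return original_name[-4:]


if __name__ == '__main__':
    path_to_data = 'DATA/'                 # assumed folder
    file_name = 'E010621_090610.jds'       # file name reused from Example #4 below
    receiver_type = detect_receiver_type(path_to_data + file_name)
    print('Use FileHeaderReaderADR' if receiver_type == '.adr' else 'Use FileHeaderReaderJDS')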
Example #2
def normalize_dat_file(directory, filename, no_of_spectra_in_bunch,
                       median_filter_window, show_aver_spectra):
    """
    Calculates the average spectrum of a DAT file and normalizes all spectra in the file to this average spectrum
    Input parameters:
        directory - name of the directory with the initial dat file
        filename - name of the initial dat file
        no_of_spectra_in_bunch - number of spectra to read per bunch
        median_filter_window - window of the median filter applied to the average spectrum
        show_aver_spectra - boolean flag: if True, the figure of the average spectrum is shown and
                            the script pauses until the figure window is closed
    Output parameters:
        output_file_name - name of the resulting normalized .dat file
    """

    print(
        '\n   Preparations and calculation of the average spectrum to normalize... \n'
    )

    output_file_name = directory + 'Norm_' + filename
    filename = directory + filename

    # Opening DAT datafile
    file = open(filename, 'rb')

    # *** Data file header read ***
    df_filesize = os.stat(filename).st_size  # Size of file
    df_filename = file.read(32).decode('utf-8').rstrip(
        '\x00')  # Initial data file name
    file.close()

    if df_filename[-4:] == '.adr':

        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, ReceiverMode, Mode,
            sumDifMode, NAvr, TimeRes, fmin, fmax, df, frequency, FFTsize,
            SLine, Width, BlockSize
        ] = FileHeaderReaderADR(filename, 0, 0)

    if df_filename[-4:] == '.jds':  # If data obtained from DSPZ receiver

        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, SpInFile,
            ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency,
            FreqPointsNum, dataBlockSize
        ] = FileHeaderReaderJDS(filename, 0, 0)

    # Calculation of the dimensions of arrays to read
    nx = len(frequency)  # the first dimension of the array (number of frequency channels)
    ny = int((df_filesize - 1024) / (nx * 8))  # the second dimension: spectra in file (1024-byte header, 8 bytes per float64 value)

    # Number of data blocks to read from file
    num_of_blocks = int(ny // no_of_spectra_in_bunch)

    # Read data from file by blocks and average it
    file = open(filename, 'rb')
    file.seek(1024)
    average_array = np.empty((nx, 0), float)
    for block in range(num_of_blocks):
        if block == (num_of_blocks - 1):
            spectra_num_in_bunch = ny - (num_of_blocks -
                                         1) * no_of_spectra_in_bunch
        else:
            spectra_num_in_bunch = no_of_spectra_in_bunch

        data = np.fromfile(file,
                           dtype=np.float64,
                           count=nx * spectra_num_in_bunch)
        data = np.reshape(data, [nx, spectra_num_in_bunch], order='F')
        tmp = np.empty((nx, 1), float)
        # tmp[:, 0] = data.mean(axis=1)[:]
        tmp[:, 0] = data.min(axis=1)[:]  # minimum over the spectra of the bunch (mean kept above as an alternative)
        average_array = np.append(average_array, tmp, axis=1)

    # Average the per-bunch profiles over all data blocks
    average_profile = average_array.mean(axis=1)

    init_average_profile = average_profile.copy()

    # # Make a figure of average spectrum (profile)
    # fig = plt.figure(figsize=(9, 5))
    # ax1 = fig.add_subplot(111)
    # ax1.plot(10 * np.log10(average_profile), linestyle='-', linewidth='1.00', label='Average spectra')
    # ax1.legend(loc='upper right', fontsize=6)
    # ax1.grid(b=True, which='both', color='silver', linestyle='-')
    # ax1.set_xlabel('Frequency points, num.', fontsize=6, fontweight='bold')
    # ax1.set_ylabel('Intensity, dB', fontsize=6, fontweight='bold')
    # pylab.savefig('Averaged_spectra_'+filename[:-4]+'_before_filtering.png', bbox_inches='tight', dpi=160)
    # plt.close('all')

    # Apply median filter to average profile
    average_profile = median_filter(average_profile, median_filter_window)
    med_average_profile = average_profile.copy()
    average_profile = average_filter(average_profile,
                                     median_filter_window + 20)

    # Make a figure of filtered average spectrum (profile)
    fig = plt.figure(figsize=(12, 8))
    ax1 = fig.add_subplot(111)
    ax1.plot(10 * np.log10(init_average_profile),
             linestyle='-',
             linewidth='1.50',
             label='Initial spectra',
             color='C0',
             alpha=0.6)
    ax1.plot(10 * np.log10(med_average_profile),
             linestyle='-',
             linewidth='1.25',
             label='Median spectra',
             color='C1',
             alpha=0.8)
    ax1.plot(10 * np.log10(average_profile),
             linestyle='-',
             linewidth='1.00',
             label='Median averaged spectra',
             color='C3')
    ax1.legend(loc='upper right', fontsize=6)
    ax1.grid(b=True, which='both', color='silver', linestyle='-')
    ax1.set_xlabel('Frequency points, num.', fontsize=6, fontweight='bold')
    ax1.set_ylabel('Intensity, dB', fontsize=6, fontweight='bold')
    pylab.savefig('Averaged_spectra_' + filename[:-4] + '_after_filtering.png',
                  bbox_inches='tight',
                  dpi=160)
    if show_aver_spectra:
        print('\n   Close the figure window to continue processing!!!\n')
        plt.show()
    plt.close('all')

    del init_average_profile, med_average_profile

    # Normalization
    print('   Spectra normalization... \n')
    file.seek(0)
    file_header = file.read(1024)
    normalized_file = open(output_file_name, 'wb')
    normalized_file.write(file_header)
    del file_header

    bar = IncrementalBar(' Normalizing of the DAT file: ',
                         max=num_of_blocks,
                         suffix='%(percent)d%%')
    bar.start()

    for block in range(num_of_blocks):

        if block == (num_of_blocks - 1):
            spectra_num_in_bunch = ny - (num_of_blocks -
                                         1) * no_of_spectra_in_bunch
        else:
            spectra_num_in_bunch = no_of_spectra_in_bunch

        data = np.fromfile(file,
                           dtype=np.float64,
                           count=nx * spectra_num_in_bunch)
        data = np.reshape(data, [nx, spectra_num_in_bunch], order='F')
        for j in range(spectra_num_in_bunch):
            data[:, j] = data[:, j] / average_profile[:]
        temp = data.transpose().copy(order='C')
        normalized_file.write(np.float64(temp))

        bar.next()

    file.close()
    normalized_file.close()
    bar.finish()

    # *** Creating a new timeline TXT file for results ***
    new_tl_file_name = output_file_name.split('_Data_', 1)[0] + '_Timeline.txt'
    new_tl_file = open(
        new_tl_file_name,
        'w')  # Open and close to delete the file with the same name
    new_tl_file.close()

    # *** Reading timeline file ***
    old_tl_file_name = filename.split('_Data_', 1)[0] + '_Timeline.txt'
    old_tl_file = open(old_tl_file_name, 'r')
    new_tl_file = open(new_tl_file_name, 'w')

    # Read time from timeline file
    time_scale_bunch = old_tl_file.readlines()

    # Saving time data to new file
    for j in range(len(time_scale_bunch)):
        new_tl_file.write(time_scale_bunch[j])

    old_tl_file.close()
    new_tl_file.close()

    return output_file_name
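
A hedged usage sketch of normalize_dat_file as defined above; the directory, file name and numeric parameters are placeholders, and the imports (numpy, matplotlib, the median/average filters, IncrementalBar) are assumed to be those of the surrounding module.

if __name__ == '__main__':
    directory = 'DATA/'                               # assumed folder with the initial .dat file
    filename = 'E010621_090610.jds_Data_chA.dat'      # hypothetical DAT file name
    normalized = normalize_dat_file(directory, filename,
                                    no_of_spectra_in_bunch=16384,  # spectra read per block
                                    median_filter_window=80,       # window of the median filter
                                    show_aver_spectra=False)       # do not pause on the figure
    print('Normalized file:', normalized)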
Example #3
def check_long_spectra_files_from_wf(directory, file_re, file_im):
    '''
    Checks spectra from the long data files
    '''

    # Preparing long data files
    fname = directory + fileList[0]
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr,
        TimeRes, fmin, fmax, df, frequency, FreqPointsNum, data_block_size
    ] = FileHeaderReaderJDS(fname, 0, 1)

    no_of_blocks_in_file = (df_filesize - 1024) / data_block_size
    print(' Number of blocks in file:             ', no_of_blocks_in_file)

    no_of_blocks_in_batch = int(no_of_blocks_in_file /
                                (2 * no_of_batches_in_file))
    print(' Number of blocks in batch:            ', no_of_blocks_in_batch)

    # *** Calculation of the dimensions of arrays to read ***

    nx = len(frequency)  # the first dimension of the array

    ny = int((df_filesize - 1024) / (nx * 8))  # the second dimension: spectra in file (1024-byte header, 8 bytes per value)
    istart = 0
    istop = len(timeline)

    print(' Number of frequency channels:     ', nx, '\n')
    print(' Number of spectra:                ', ny, '\n')
    print(' Recommended spectra number for averaging is:  ', int(ny / 1024))

    averageConst = int(ny / 1024)
    if int(averageConst) < 1: averageConst = 1

    # *** Data reading and averaging ***

    print('\n\n\n  *** Data reading and averaging *** \n\n')

    file1 = open(file_re, 'rb')
    file2 = open(file_im, 'rb')

    # Skip the 1024-byte header plus istart spectra from the beginning of each file
    file1.seek(1024 + istart * 8 * nx, os.SEEK_SET)
    file2.seek(1024 + istart * 8 * nx, os.SEEK_SET)

    array = np.empty((nx, 0), float)
    numOfBlocks = int(ny / averageConst)
    for block in range(numOfBlocks):

        data1 = np.fromfile(file1, dtype=np.float64, count=nx * averageConst)
        data2 = np.fromfile(file2, dtype=np.float64, count=nx * averageConst)

        data = np.power(np.abs(data1 + 1j * data2), 2)

        del data1, data2

        data = np.reshape(data, [nx, averageConst], order='F')

        dataApp = np.empty((nx, 1), float)

        with np.errstate(invalid='ignore'):
            dataApp[:, 0] = 10 * np.log10(data.mean(axis=1)[:])

        array = np.append(array, dataApp, axis=1)
        array[np.isnan(array)] = -120

        del dataApp, data

    file1.close()
    file2.close()

    return
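
The core of the averaging in check_long_spectra_files_from_wf, isolated as a runnable sketch with synthetic data (the real code streams float64 values from the re/im long files); the array sizes are placeholders.

import numpy as np

nx, average_const = 8192, 4                      # frequency channels, spectra per average (placeholders)
rng = np.random.default_rng(0)
data_re = rng.standard_normal(nx * average_const)
data_im = rng.standard_normal(nx * average_const)

power = np.abs(data_re + 1j * data_im) ** 2      # |Re + j*Im| squared, as in the function above
power = np.reshape(power, [nx, average_const], order='F')

with np.errstate(invalid='ignore'):
    averaged_db = 10 * np.log10(power.mean(axis=1))  # average over spectra, then convert to dB
averaged_db[np.isnan(averaged_db)] = -120            # replace NaN values as in the source
print(averaged_db.shape)                             # (8192,)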
Example #4
    del hour, minutes, seconds, phase_of_second
    return timeline_block_str


if __name__ == '__main__':

    fname = 'DATA/E010621_090610.jds'

    print('\n\n Parameters of the file: ')

    # *** Data file header read ***
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr,
        TimeRes, fmin, fmax, df, frequency, FreqPointsNum, data_block_size
    ] = FileHeaderReaderJDS(fname, 0, 1)

    # *******************************************************************************
    #                           R E A D I N G   D A T A                             *
    # *******************************************************************************
    no_of_spectra_to_average = 64

    print('\n  *** Reading data from file *** \n')

    with open(fname, 'rb') as file:
        file.seek(1024)  # Jump to byte 1024 from the file beginning
        for av_sp in range(1):

            # Reading and reshaping all data with readers
            wf_data = np.fromfile(file,
                                  dtype='i2',
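
The read above is cut off in this listing; below is a self-contained sketch of how 'i2' (int16) waveform samples are reshaped column-major and stripped of the trailing timing words, following the fuller Examples #7 and #9 further down. Block size and spectra count are placeholder values, and zeros stand in for the data read with np.fromfile.

import numpy as np

data_block_size = 16388                    # placeholder; read from the file header in the real code
no_of_spectra_to_average = 64              # placeholder

wf_data = np.zeros(no_of_spectra_to_average * data_block_size, dtype='i2')   # stand-in for np.fromfile(...)
wf_data = np.reshape(wf_data, [data_block_size, no_of_spectra_to_average], order='F')

real_data_block_size = data_block_size - 4   # the last 4 words of each block hold timing data
wf_data = wf_data[0:real_data_block_size, :]
print(wf_data.shape)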
Example #5
                df_description, CLCfrq, df_creation_timeUTC, ReceiverMode,
                Mode, sumDifMode, NAvr, TimeRes, fmin, fmax, df, frequency,
                FFTsize, SLine, Width, BlockSize
            ] = FileHeaderReaderADR(path_to_data + dat_files_list[file_no], 0,
                                    0)

        if df_filename[-4:] == '.jds':  # If data obtained from DSPZ receiver

            freqList = freqList_UTR2
            AmplitudeReIm = AmplitudeReIm_GURT
            [
                df_filename, df_filesize, df_system_name, df_obs_place,
                df_description, CLCfrq, df_creation_timeUTC, SpInFile,
                ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency,
                FreqPointsNum, dataBlockSize
            ] = FileHeaderReaderJDS(path_to_data + dat_files_list[file_no], 0,
                                    0)

        if AutoSourceSwitch == 1:
            if df_filename[-4:] == '.jds':  # If data obtained from DSPZ receiver
                '''
                if '3c461' in df_description.lower() or 'cas' in df_description.lower() or '461' in df_description.lower():
                    source = '3C461'
                elif '3c405' in df_description.lower() or 'cyg' in df_description.lower() or '405' in df_description.lower():
                    source = '3C405'
                else:
                    print('  Source not detected !!!')
                    source  = str(input(' * Enter source name like 3C405 or 3C461:            '))
                '''
                if file_no == 0:
                    source = '3C405'
Example #6
def DAT_file_reader(common_path, DAT_file_name, typesOfData, DAT_result_path,
                    averOrMin, StartStopSwitch, SpecFreqRange, VminMan,
                    VmaxMan, VminNormMan, VmaxNormMan, RFImeanConst, customDPI,
                    colormap, ChannelSaveTXT, ChannelSavePNG, ListOrAllFreq,
                    AmplitudeReIm, freqStart, freqStop, dateTimeStart,
                    dateTimeStop, freqStartTXT, freqStopTXT, freqList,
                    print_or_not):

    startTime = time.time()
    currentTime = time.strftime("%H:%M:%S")
    currentDate = time.strftime("%d.%m.%Y")

    # Files to be analyzed:
    filename = common_path + DAT_file_name + '_Data_chA.dat'
    timeLineFileName = common_path + DAT_file_name + '_Timeline.txt'

    for j in range(len(typesOfData)):  # Main loop by types of data to analyze

        # Current name of DAT file to be analyzed dependent on data type:
        temp = list(filename)
        temp[-7:-4] = typesOfData[j]
        filename = "".join(temp)
        temp = list(DAT_file_name + '_Data_chA.dat')
        temp[-7:-4] = typesOfData[j]
        only_file_name = "".join(temp)

        if (typesOfData[j] == 'A+B' or typesOfData[j] == 'A-B'):
            temp = list(filename)
            temp[-7:-4] = 'chA'
            filename01 = "".join(temp)
            temp[-7:-4] = 'chB'
            filename02 = "".join(temp)
            filename = filename01

        # Print the type of data to be analyzed
        if print_or_not == 1:
            print('\n\n   Processing data type: ', typesOfData[j], '\n')
        currentTime = time.strftime("%H:%M:%S")
        print('   Processing file: ', only_file_name, '   started at: ',
              currentTime)
        if print_or_not == 1: print('\n')

        #*************************************************************
        #         WHAT TO PLOT AND CORRESPONDING PARAMETERS          *
        #*************************************************************

        YaxName = 'Intensity, dB'
        Label = 'Intensity'
        nameAdd = ''
        fileNameAdd = ''
        fileNameAddNorm = ''
        fileNameAddSpectr = ''
        Vmin = VminMan  # Switch once more to initial manual settings after changes in previous loop
        Vmax = VmaxMan
        VminNorm = VminNormMan
        VmaxNorm = VmaxNormMan

        if typesOfData[j] == 'chA':
            nameAdd = ' channel A'
            fileNameAdd = ''
            fileNameAddNorm = '001_'
            fileNameAddSpectr = '008_'

        if typesOfData[j] == 'chB':
            nameAdd = ' channel B'
            fileNameAdd = ''
            fileNameAddNorm = '001_'
            fileNameAddSpectr = '008_'

        if typesOfData[j] == 'C_m':
            nameAdd = ' correlation module'
            Vmin = -160
            VmaxNorm = 2 * VmaxNormMan
            fileNameAdd = ''
            fileNameAddNorm = '004_'
            fileNameAddSpectr = '011_'

        if typesOfData[j] == 'C_p':
            nameAdd = ' correlation phase'
            YaxName = 'Phase, rad'
            Label = 'Phase'
            Vmin = -3.5
            Vmax = 3.5
            fileNameAdd = '005_'
            fileNameAddSpectr = '012_'

        if typesOfData[j] == 'CRe':
            nameAdd = ' correlation RE part'
            YaxName = 'Amplitude'
            fileNameAdd = '006_'
            fileNameAddSpectr = '013_'

        if typesOfData[j] == 'CIm':
            nameAdd = ' correlation IM part'
            YaxName = 'Amplitude'
            fileNameAdd = '007_'
            fileNameAddSpectr = '014_'

        if typesOfData[j] == 'A+B':
            nameAdd = ' sum A + B'
            fileNameAddNorm = '003_'
            fileNameAddSpectr = '009_'

        if typesOfData[j] == 'A-B':
            nameAdd = ' difference |A - B|'
            Vmin = Vmin - 20
            Vmax = Vmax - 20
            fileNameAdd = ''
            fileNameAddNorm = '002_'
            fileNameAddSpectr = '010_'

        #*********************************************************************************

        # *** Creating a folder where all pictures and results will be stored (if it doesn't exist) ***
        newpath = common_path + 'DAT_Results_' + DAT_result_path
        if not os.path.exists(newpath):
            os.makedirs(newpath)

        # *** Opening DAT datafile ***

        file = open(filename, 'rb')

        # *** Data file header read ***
        df_filesize = (os.stat(filename).st_size)  # Size of file
        df_filename = file.read(32).decode('utf-8').rstrip(
            '\x00')  # Initial data file name
        file.close()

        if df_filename[-4:] == '.adr':

            [
                df_filename, df_filesize, df_system_name, df_obs_place,
                df_description, CLCfrq, df_creation_timeUTC, ReceiverMode,
                Mode, sumDifMode, NAvr, TimeRes, fmin, fmax, df, frequency,
                FFTsize, SLine, Width, BlockSize
            ] = FileHeaderReaderADR(filename, 0, 0)

            FreqPointsNum = len(frequency)

        if df_filename[-4:] == '.jds':  # If data obtained from DSPZ receiver

            [
                df_filename, df_filesize, df_system_name, df_obs_place,
                df_description, CLCfrq, df_creation_timeUTC, SpInFile,
                ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency,
                FreqPointsNum, dataBlockSize
            ] = FileHeaderReaderJDS(filename, 0, 0)

            sumDifMode = ''

        #************************************************************************************
        #                            R E A D I N G   D A T A                                *
        #************************************************************************************

        # *** Reading timeline file ***
        TLfile = open(timeLineFileName, 'r')
        timeline = []
        for line in TLfile:
            timeline.append(str(line))
        TLfile.close()

        if StartStopSwitch == 1:  # If we read only specified time limits of files

            # *** Converting text to ".datetime" format ***
            dt_timeline = []
            for i in range(
                    len(timeline)):  # converting text to ".datetime" format

                # Check if the uS field is empty; if so, it is treated as '000000'
                uSecond = timeline[i][20:26]
                if len(uSecond) < 2: uSecond = '000000'

                dt_timeline.append(
                    datetime(int(timeline[i][0:4]), int(timeline[i][5:7]),
                             int(timeline[i][8:10]), int(timeline[i][11:13]),
                             int(timeline[i][14:16]), int(timeline[i][17:19]),
                             int(uSecond)))

            dt_dateTimeStart = datetime(int(dateTimeStart[0:4]),
                                        int(dateTimeStart[5:7]),
                                        int(dateTimeStart[8:10]),
                                        int(dateTimeStart[11:13]),
                                        int(dateTimeStart[14:16]),
                                        int(dateTimeStart[17:19]), 0)
            dt_dateTimeStop = datetime(int(dateTimeStop[0:4]),
                                       int(dateTimeStop[5:7]),
                                       int(dateTimeStop[8:10]),
                                       int(dateTimeStop[11:13]),
                                       int(dateTimeStop[14:16]),
                                       int(dateTimeStop[17:19]), 0)

            # *** Show the time limits of the file and of the chosen part ***
            if print_or_not == 1:
                print(
                    '\n\n                               Start                         End \n'
                )
            if print_or_not == 1:
                print('  File time limits:   ', dt_timeline[0], ' ',
                      dt_timeline[len(timeline) - 1], '\n')
            if print_or_not == 1:
                print('  Chosen time limits: ', dt_dateTimeStart, '        ',
                      dt_dateTimeStop, '\n')

            # Verifying that chosen time limits are inside file and are correct
            if (dt_timeline[len(timeline) - 1] >= dt_dateTimeStart >
                    dt_timeline[0]) and (
                        dt_timeline[len(timeline) - 1] > dt_dateTimeStop >=
                        dt_timeline[0]) and (dt_dateTimeStop >
                                             dt_dateTimeStart):
                if print_or_not == 1: print('  Time is chosen correctly! \n\n')
            else:
                print('  ERROR! Time is chosen out of file limits!!! \n\n')
                sys.exit('           Program stopped')

            # Finding the closest spectra to the chosen time limits
            A = []
            B = []
            for i in range(len(timeline)):
                dt_diff_start = dt_timeline[i] - dt_dateTimeStart
                dt_diff_stop = dt_timeline[i] - dt_dateTimeStop
                A.append(
                    abs(
                        divmod(dt_diff_start.total_seconds(), 60)[0] * 60 +
                        divmod(dt_diff_start.total_seconds(), 60)[1]))
                B.append(
                    abs(
                        divmod(dt_diff_stop.total_seconds(), 60)[0] * 60 +
                        divmod(dt_diff_stop.total_seconds(), 60)[1]))

            istart = A.index(min(A))
            istop = B.index(min(B))
            if print_or_not == 1:
                print('\n Start spectrum number is:         ', istart)
            if print_or_not == 1:
                print('\n Stop spectrum number is:          ', istop)
            if print_or_not == 1:
                print('\n Total number of spectra to read:  ', istop - istart)

        # *** Calculation of the dimensions of arrays to read ***
        nx = len(frequency)  # the first dimension of the array
        if StartStopSwitch == 1:  # If we read only specified time limits of files
            ny = int(
                istop - istart
            )  # the second dimension of the array: number of spectra to read
        else:
            ny = int((df_filesize - 1024) / (nx * 8))  # the second dimension: all spectra in the file (1024-byte header, 8 bytes per value)
            istart = 0
            istop = len(timeline)

        if print_or_not == 1: print(' ')
        if print_or_not == 1:
            print(' Number of frequency channels:     ', nx, '\n')
        if print_or_not == 1:
            print(' Number of spectra:                ', ny, '\n')
        if print_or_not == 1:
            print(' Recommended spectra number for averaging is:  ',
                  int(ny / 1024))
        # averageConst = raw_input('\n Enter number of spectra to be averaged:       ')

        #if (len(averageConst) < 1 or int(averageConst) < 1):
        #    averageConst = 1
        #else:
        #    averageConst = int(averageConst)
        averageConst = int(ny / 1024)
        if int(averageConst) < 1: averageConst = 1

        # *** Data reading and averaging ***
        if print_or_not == 1:
            print('\n\n\n  *** Data reading and averaging *** \n\n')

        file1 = open(filename, 'rb')
        if (typesOfData[j] == 'A+B' or typesOfData[j] == 'A-B'):
            file2 = open(filename02, 'rb')

        # Skip the 1024-byte header plus istart spectra from the file beginning
        file1.seek(1024 + istart * 8 * nx, os.SEEK_SET)
        if (typesOfData[j] == 'A+B' or typesOfData[j] == 'A-B'):
            file2.seek(1024 + istart * 8 * nx, os.SEEK_SET)

        array = np.empty((nx, 0), float)
        numOfBlocks = int(ny / averageConst)
        for block in range(numOfBlocks):

            data1 = np.fromfile(file1,
                                dtype=np.float64,
                                count=nx * averageConst)
            if (typesOfData[j] == 'A+B' or typesOfData[j] == 'A-B'):
                data2 = np.fromfile(file2,
                                    dtype=np.float64,
                                    count=nx * averageConst)

            if (typesOfData[j] == 'A+B' or typesOfData[j] == 'A-B'):
                if typesOfData[j] == 'A+B': data = data1 + data2
                if typesOfData[j] == 'A-B': data = data1 - data2
            else:
                data = data1

            del data1
            if (typesOfData[j] == 'A+B' or typesOfData[j] == 'A-B'): del data2

            data = np.reshape(data, [nx, averageConst], order='F')

            dataApp = np.empty((nx, 1), float)

            if (typesOfData[j] == 'chA' or typesOfData[j] == 'chB'
                    or typesOfData[j] == 'A+B'):
                # If analyzing intensity - average and log data
                if averOrMin == 0:
                    with np.errstate(invalid='ignore'):
                        dataApp[:, 0] = 10 * np.log10(data.mean(axis=1)[:])
                elif averOrMin == 1:
                    with np.errstate(invalid='ignore'):
                        dataApp[:, 0] = 10 * np.log10(np.amin(data, axis=1)[:])
                else:
                    print('\n\n Error!!! Wrong value of parameters!')
                array = np.append(array, dataApp, axis=1)
                array[np.isnan(array)] = -120

            if (typesOfData[j] == 'A-B'):
                # If analyzing intensity - average and log absolute values of data
                with np.errstate(invalid='ignore'):
                    dataApp[:, 0] = 10 * np.log10(np.abs(data.mean(axis=1)[:]))
                array = np.append(array, dataApp, axis=1)
                array[np.isnan(array)] = -120

            if (typesOfData[j] == 'C_p' or typesOfData[j] == 'CRe'
                    or typesOfData[j] == 'CIm'
                ):  # If analyzing phase/Re/Im - no logarithm needed
                # For phase or Re/Im parts the data are only averaged, not converted to dB
                dataApp[:, 0] = (data.mean(axis=1)[:])
                array = np.append(array, dataApp, axis=1)
                array[np.isnan(array)] = 0

            if typesOfData[j] == 'C_m':
                dataApp[:, 0] = (data.mean(axis=1)[:])
                array = np.append(array, dataApp, axis=1)
                #array[np.isinf(array)] = -120

        del dataApp, data
        file1.close()
        if (typesOfData[j] == 'A+B' or typesOfData[j] == 'A-B'): file2.close()

        if print_or_not == 1:
            print('\n Array shape is now             ', array.shape)

        # *** Cutting timeline to time limits ***
        dateTimeNew = timeline[istart:istop:averageConst]
        del dateTimeNew[numOfBlocks:]
        if print_or_not == 1:
            print('\n TimeLine length is now:        ', len(dateTimeNew))

        #*******************************************************************************
        #                                F I G U R E S                                 *
        #*******************************************************************************
        if print_or_not == 1: print('\n\n\n  *** Building images *** \n\n')

        # Exact string timescales to show on plots
        TimeScaleFig = np.empty_like(dateTimeNew)
        for i in range(len(dateTimeNew)):
            TimeScaleFig[i] = str(dateTimeNew[i][0:11] + '\n' +
                                  dateTimeNew[i][11:23])

        # Limits of figures for common case or for Re/Im parts to show the interferometric picture
        if typesOfData[j] == 'CRe' or typesOfData[j] == 'CIm':
            Vmin = 0 - AmplitudeReIm
            Vmax = 0 + AmplitudeReIm

        # *** Immediate spectrum ***

        Suptitle = ('Immediate spectrum ' + str(df_filename[0:18]) + ' ' +
                    nameAdd)
        Title = ('Initial parameters: dt = ' + str(round(TimeRes, 3)) +
                 ' Sec, df = ' + str(round(df / 1000, 3)) + ' kHz ' +
                 sumDifMode + 'Processing: Averaging ' + str(averageConst) +
                 ' spectra (' + str(round(averageConst * TimeRes, 3)) +
                 ' sec.)')

        TwoOrOneValuePlot(
            1, frequency, array[:, [1]], [], 'Spectrum', ' ', frequency[0],
            frequency[FreqPointsNum - 1], Vmin, Vmax, Vmin, Vmax,
            'Frequency, MHz', YaxName, ' ', Suptitle, Title,
            newpath + '/' + fileNameAddSpectr + df_filename[0:14] + '_' +
            typesOfData[j] + ' Immediate Spectrum.png', currentDate,
            currentTime, Software_version)

        # *** Decide to use only list of frequencies or all frequencies in range
        if ListOrAllFreq == 0:
            freqList = np.array(freqList)
        if ListOrAllFreq == 1:
            freqList = np.array(frequency)

        # *** Finding frequency most close to specified by user ***
        for fc in range(len(freqList)):
            if (freqList[fc] > freqStartTXT) and (freqList[fc] < freqStopTXT):
                newFreq = np.array(frequency)
                newFreq = np.absolute(newFreq - freqList[fc])
                index = np.argmin(newFreq) + 1
                tempArr1 = np.arange(0, len(dateTimeNew), 1)

                if ChannelSavePNG == 1 or typesOfData[
                        j] == 'CRe' or typesOfData[j] == 'CIm':
                    if typesOfData[j] == 'CRe' or typesOfData[j] == 'CIm':
                        Vmin = 0 - AmplitudeReIm
                        Vmax = 0 + AmplitudeReIm

                    # *** Plotting intensity changes at particular frequency ***
                    timeline = []
                    for i in range(len(dateTimeNew)):
                        timeline.append(
                            str(dateTimeNew[i][0:11] + '\n' +
                                dateTimeNew[i][11:23]))

                    Suptitle = 'Intensity variation ' + str(
                        df_filename[0:18]) + ' ' + nameAdd
                    Title = ('Initial parameters: dt = ' +
                             str(round(TimeRes, 3)) + ' Sec, df = ' +
                             str(round(df / 1000, 3)) + ' kHz, Frequency = ' +
                             str(round(frequency[index], 3)) + ' MHz ' +
                             sumDifMode + ' Processing: Averaging ' +
                             str(averageConst) + ' spectra (' +
                             str(round(averageConst * TimeRes, 3)) + ' sec.)')

                    FileName = (newpath + '/' + df_filename[0:14] + '_' +
                                typesOfData[j] + df_filename[-4:] +
                                ' variation at ' +
                                str(round(frequency[index], 3)) + ' MHz.png')

                    OneValueWithTimePlot(
                        timeline, array[[index], :].transpose(), Label, 0,
                        len(dateTimeNew), Vmin, Vmax, 0, 0,
                        'UTC Date and time, YYYY-MM-DD HH:MM:SS.ms', YaxName,
                        Suptitle, Title, FileName, currentDate, currentTime,
                        Software_version)

                # *** Saving value changes at particular frequency to TXT file ***
                if ChannelSaveTXT == 1:
                    SingleChannelData = open(
                        newpath + '/' + df_filename[0:14] + '_' +
                        filename[-7:-4:] + df_filename[-4:] +
                        ' variation at ' + str(round(frequency[index], 3)) +
                        ' MHz.txt', "w")
                    for i in range(len(dateTimeNew)):
                        SingleChannelData.write(
                            str(dateTimeNew[i]).rstrip() + '   ' +
                            str(array.transpose()[i, index]) + ' \n')
                    SingleChannelData.close()

        # *** Cutting the array inside frequency range specified by user ***
        if SpecFreqRange == 1 and (
                frequency[0] <= freqStart <= frequency[FreqPointsNum - 1]
        ) and (frequency[0] <= freqStop <=
               frequency[FreqPointsNum - 1]) and (freqStart < freqStop):
            print('\n You have chosen the frequency range', freqStart, '-',
                  freqStop, 'MHz')
            A = []
            B = []
            for i in range(len(frequency)):
                A.append(abs(frequency[i] - freqStart))
                B.append(abs(frequency[i] - freqStop))
            ifmin = A.index(min(A))
            ifmax = B.index(min(B))
            array = array[ifmin:ifmax, :]
            print('\n New data array shape is: ', array.shape)
            freqLine = frequency[ifmin:ifmax]
        else:
            freqLine = frequency

        # Limits of figures for common case or for Re/Im parts to show the interferometric picture
        Vmin = np.min(array)
        Vmax = np.max(array)
        if typesOfData[j] == 'CRe' or typesOfData[j] == 'CIm':
            Vmin = 0 - AmplitudeReIm
            Vmax = 0 + AmplitudeReIm

        # *** Dynamic spectrum of initial signal***

        Suptitle = ('Dynamic spectrum starting from file ' +
                    str(df_filename[0:18]) + ' ' + nameAdd +
                    '\n Initial parameters: dt = ' + str(round(TimeRes, 3)) +
                    ' Sec, df = ' + str(round(df / 1000, 3)) + ' kHz, ' +
                    sumDifMode + ' Processing: Averaging ' +
                    str(averageConst) + ' spectra (' +
                    str(round(averageConst * TimeRes, 3)) + ' sec.)\n' +
                    ' Receiver: ' + str(df_system_name) + ', Place: ' +
                    str(df_obs_place) + ', Description: ' +
                    str(df_description))
        fig_file_name = (newpath + '/' + fileNameAdd + df_filename[0:14] +
                         '_' + typesOfData[j] + ' Dynamic spectrum.png')

        OneDynSpectraPlot(array, Vmin, Vmax, Suptitle, 'Intensity, dB',
                          len(dateTimeNew), TimeScaleFig, freqLine,
                          len(freqLine), colormap,
                          'UTC Date and time, YYYY-MM-DD HH:MM:SS.msec',
                          fig_file_name, currentDate, currentTime,
                          Software_version, customDPI)

        if (typesOfData[j] != 'C_p' and typesOfData[j] != 'CRe'
                and typesOfData[j] != 'CIm'):

            # *** Normalization and cleaning of dynamic spectra ***
            Normalization_dB(array.transpose(), len(freqLine),
                             len(dateTimeNew))
            simple_channel_clean(array.transpose(), RFImeanConst)

            # *** Dynamic spectra of cleaned and normalized signal ***

            Suptitle = (
                'Dynamic spectrum cleaned and normalized starting from file ' +
                str(df_filename[0:18]) + ' ' + nameAdd +
                '\n Initial parameters: dt = ' + str(round(TimeRes, 3)) +
                ' Sec, df = ' + str(round(df / 1000, 3)) + ' kHz, ' +
                sumDifMode + ' Processing: Averaging ' + str(averageConst) +
                ' spectra (' + str(round(averageConst * TimeRes, 3)) +
                ' sec.)\n' + ' Receiver: ' + str(df_system_name) +
                ', Place: ' + str(df_obs_place) + ', Description: ' +
                str(df_description))
            fig_file_name = (newpath + '/' + fileNameAddNorm +
                             df_filename[0:14] + '_' + typesOfData[j] +
                             ' Dynamic spectrum cleanned and normalized' +
                             '.png')

            OneDynSpectraPlot(array, VminNorm,
                              VmaxNorm, Suptitle, 'Intensity, dB',
                              len(dateTimeNew), TimeScaleFig, freqLine,
                              len(freqLine), colormap,
                              'UTC Date and time, YYYY-MM-DD HH:MM:SS.msec',
                              fig_file_name, currentDate, currentTime,
                              Software_version, customDPI)
            '''
            # *** TEMPLATE FOR JOURNALS: Dynamic spectra of cleaned and normalized signal ***
            plt.figure(2, figsize=(16.0, 7.0))
            ImA = plt.imshow(np.flipud(array), aspect='auto', extent=[0,len(dateTimeNew),freqLine[0],freqLine[len(freqLine)-1]], vmin=VminNorm, vmax=VmaxNorm, cmap=colormap) #
            plt.ylabel('Frequency, MHz', fontsize=12, fontweight='bold')
            #plt.suptitle('Dynamic spectrum cleaned and normalized starting from file '+str(df_filename[0:18])+' '+nameAdd+
            #            '\n Initial parameters: dt = '+str(round(TimeRes,3))+
            #            ' Sec, df = '+str(round(df/1000,3))+' kHz, '+sumDifMode+
            #            ' Processing: Averaging '+str(averageConst)+' spectra ('+str(round(averageConst*TimeRes,3))+' sec.)\n'+
            #            ' Receiver: '+str(df_system_name)+
            #            ', Place: '+str(df_obs_place) +
            #            ', Description: '+str(df_description),
            #            fontsize=10, fontweight='bold', x = 0.46, y = 0.96)
            plt.yticks(fontsize=12, fontweight='bold')
            rc('font', weight='bold')
            cbar = plt.colorbar(ImA, pad=0.005)
            cbar.set_label('Intensity, dB', fontsize=12, fontweight='bold')
            cbar.ax.tick_params(labelsize=12)
            ax1 = plt.figure(2).add_subplot(1,1,1)
            a = ax1.get_xticks().tolist()
            for i in range(len(a)-1):   #a-1
                k = int(a[i])
                #a[i] = str(dateTimeNew[k][0:11]+'\n'+dateTimeNew[k][11:23])
                a[i] = str(dateTimeNew[k][11:19])
            ax1.set_xticklabels(a)
            plt.xticks(fontsize=12, fontweight='bold')
            plt.xlabel('UTC time, HH:MM:SS', fontsize=12, fontweight='bold')
            #plt.text(0.72, 0.04,'Processed '+currentDate+ ' at '+currentTime, fontsize=6, transform=plt.gcf().transFigure)
            pylab.savefig('DAT_Results/' + fileNameAddNorm + df_filename[0:14]+'_'+typesOfData[j]+' Dynamic spectrum cleanned and normalized'+'.png', bbox_inches='tight', dpi = customDPI)
            #pylab.savefig('DAT_Results/' +fileNameAddNorm+ df_filename[0:14]+'_'+typesOfData[j]+ ' Dynamic spectrum cleanned and normalized'+'.eps', bbox_inches='tight', dpi = customDPI)
                                                                                 #filename[-7:-4:]
            plt.close('all')
            '''

    ok = 1
    return ok
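
DAT_file_reader maps the user-chosen start/stop date and time onto spectrum indices by minimizing the time difference against the timeline file (the divmod expression in the loop above reduces to the absolute time difference in seconds). A small self-contained sketch of that nearest-spectrum search with a synthetic timeline:

from datetime import datetime, timedelta

# Synthetic timeline: one spectrum per second (the real list comes from the *_Timeline.txt file)
start = datetime(2021, 6, 1, 9, 6, 10)
dt_timeline = [start + timedelta(seconds=i) for i in range(100)]

dt_date_time_start = start + timedelta(seconds=21)   # requested start time (placeholder)

# Index of the spectrum closest to the requested start time, as in the loop above
diffs = [abs((t - dt_date_time_start).total_seconds()) for t in dt_timeline]
istart = diffs.index(min(diffs))
print('Start spectrum number:', istart)              # 21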
Example #7
def convert_one_jds_wf_to_wf32(source_file, result_directory,
                               no_of_bunches_per_file):
    """
    Converts jds waveform data to wf32 waveform data for further processing (coherent dedispersion) and
    saves txt files with time data
    Input parameters:
        source_file - path to the initial jds waveform data file
        result_directory - directory where new wf32 files will be stored
        no_of_bunches_per_file - number of data bunches per file to process (depends on the RAM volume of the PC)
    Output parameters:
        result_wf32_files - list of resulting files
    """

    # *** Data file header read ***
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        clock_freq, df_creation_timeUTC, channel, receiver_mode, Mode, Navr,
        time_res, fmin, fmax, df, frequency, freq_points_num, data_block_size
    ] = FileHeaderReaderJDS(source_file, 0, 0)
    if Mode > 0:
        sys.exit(
            '  ERROR!!! Data recorded in wrong mode! Waveform mode needed.\n\n    Program stopped!'
        )

    result_wf32_files = []

    fname = source_file

    # Create long data files and copy first data file header to them
    with open(fname, 'rb') as file:
        # *** Data file header read ***
        file_header = file.read(1024)

    # *** Creating a name for long timeline TXT file ***
    tl_file_name = df_filename + '_Timeline.wtxt'
    tl_file = open(result_directory + tl_file_name,
                   'w')  # Open and close to delete the file with the same name
    tl_file.close()

    # *** Creating a binary file with data for long data storage ***
    file_data_A_name = result_directory + df_filename + '_Data_chA.wf32'
    result_wf32_files.append(file_data_A_name)
    file_data_A = open(file_data_A_name, 'wb')
    file_data_A.write(file_header)
    file_data_A.close()

    if channel == 2:
        file_data_B_name = result_directory + df_filename + '_Data_chB.wf32'
        result_wf32_files.append(file_data_B_name)
        file_data_B = open(file_data_B_name, 'wb')
        file_data_B.write(file_header)
        file_data_B.close()

    del file_header

    # Calculation of number of blocks and number of spectra in the file
    if channel == 0 or channel == 1:  # Single channel mode
        no_of_spectra_in_bunch = int(
            (df_filesize - 1024) /
            (no_of_bunches_per_file * 2 * data_block_size))
    else:  # Two channels mode
        no_of_spectra_in_bunch = int(
            (df_filesize - 1024) /
            (no_of_bunches_per_file * 4 * data_block_size))

    no_of_blocks_in_file = (df_filesize - 1024) / data_block_size

    print('  Number of blocks in file:                  ',
          no_of_blocks_in_file)
    print('  Number of bunches to read in file:         ',
          no_of_bunches_per_file, '\n')

    # *******************************************************************************
    #                           R E A D I N G   D A T A                             *
    # *******************************************************************************

    with open(fname, 'rb') as file:
        file.seek(1024)  # Jump to byte 1024 from the file beginning

        # !!! Fake timing. Real timing to be done!!!
        TimeFigureScaleFig = np.linspace(0, no_of_bunches_per_file,
                                         no_of_bunches_per_file + 1)
        for i in range(no_of_bunches_per_file):
            TimeFigureScaleFig[i] = str(TimeFigureScaleFig[i])

        time_scale_bunch = []

        bar = IncrementalBar('  File reading: ',
                             max=no_of_bunches_per_file,
                             suffix='%(percent)d%%')

        for bunch in range(no_of_bunches_per_file):

            bar.next()

            # Reading and reshaping all data with time data
            if channel == 0 or channel == 1:  # Single channel mode
                wf_data = np.fromfile(file,
                                      dtype='i2',
                                      count=no_of_spectra_in_bunch *
                                      data_block_size)
                wf_data = np.reshape(wf_data,
                                     [data_block_size, no_of_spectra_in_bunch],
                                     order='F')
            if channel == 2:  # Two channels mode
                wf_data = np.fromfile(file,
                                      dtype='i2',
                                      count=2 * no_of_spectra_in_bunch *
                                      data_block_size)
                wf_data = np.reshape(
                    wf_data, [data_block_size, 2 * no_of_spectra_in_bunch],
                    order='F')

            # Timing
            timeline_block_str = jds_waveform_time(wf_data, clock_freq,
                                                   data_block_size)
            if channel == 2:  # Two channels mode
                timeline_block_str = timeline_block_str[0:int(
                    len(timeline_block_str) /
                    2)]  # Cut the timeline of second channel
            for i in range(len(timeline_block_str)):
                time_scale_bunch.append(df_creation_timeUTC[0:10] + ' ' +
                                        timeline_block_str[i])  # [0:12]

            # Deleting the time blocks from waveform data
            real_data_block_size = data_block_size - 4
            wf_data = wf_data[0:real_data_block_size, :]

            # Separation data into channels
            if channel == 0 or channel == 1:  # Single channel mode
                wf_data_chA = np.reshape(
                    wf_data,
                    [real_data_block_size * no_of_spectra_in_bunch, 1],
                    order='F')
                del wf_data  # Deleting unnecessary array name just in case

            if channel == 2:  # Two channels mode

                # Separating the data into two channels
                wf_data = np.reshape(
                    wf_data,
                    [2 * real_data_block_size * no_of_spectra_in_bunch, 1],
                    order='F')
                wf_data_chA = wf_data[0:(2 * real_data_block_size *
                                         no_of_spectra_in_bunch):2]  # A
                wf_data_chB = wf_data[1:(2 * real_data_block_size *
                                         no_of_spectra_in_bunch):2]  # B
                del wf_data

            # Saving WF data to dat file
            file_data_A = open(file_data_A_name, 'ab')
            file_data_A.write(
                np.float32(wf_data_chA).transpose().copy(order='C'))
            file_data_A.close()
            if channel == 2:
                file_data_B = open(file_data_B_name, 'ab')
                file_data_B.write(
                    np.float32(wf_data_chB).transpose().copy(order='C'))
                file_data_B.close()

            # Saving time data to the long timeline file
            with open(result_directory + tl_file_name, 'a') as tl_file:
                for i in range(no_of_spectra_in_bunch):
                    tl_file.write(str(time_scale_bunch[i]) + ' \n')

        bar.finish()

        file.close()  # Close the data file
        del file_data_A
        if channel == 2:
            del file_data_B

    return result_wf32_files
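
A usage sketch of convert_one_jds_wf_to_wf32 as defined above; the paths are hypothetical (the file name reuses the one from Example #4) and the bunch count is a placeholder chosen to limit RAM usage.

if __name__ == '__main__':
    source_file = 'DATA/E010621_090610.jds'     # waveform-mode JDS file (assumed path)
    result_directory = 'DATA/'                  # where the *_Data_chA.wf32 / *_Data_chB.wf32 files go
    wf32_files = convert_one_jds_wf_to_wf32(source_file, result_directory,
                                            no_of_bunches_per_file=16)
    print('Created files:', wf32_files)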
Example #8
    file = open(data_filename, 'rb')

    # reading FHEADER
    df_filesize = (os.stat(data_filename).st_size)                          # Size of file
    df_filename = file.read(32).decode('utf-8').rstrip('\x00')              # Initial data file name
    file.close()

    receiver_type = df_filename[-4:]

    # Reading file header to obtain main parameters of the file
    if receiver_type == '.adr':
        [TimeRes, fmin, fmax, df, frequency_list, FFTsize] = FileHeaderReaderADR(data_filename, 0)
    if receiver_type == '.jds':
        [df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        CLCfrq, df_creation_timeUTC, SpInFile, ReceiverMode, Mode, Navr,
        TimeRes, fmin, fmax, df, frequency_list, FFTsize, dataBlockSize] = FileHeaderReaderJDS(data_filename, 0, 1)


    #************************************************************************************
    #                            R E A D I N G   D A T A                                *
    #************************************************************************************
    if receiver_type == '.jds':
        num_frequencies = len(frequency_list)-4

    shift_vector = DM_full_shift_calc(len(frequency_list), fmin, fmax, df / pow(10,6), TimeRes, DM, receiver_type)

    #plot1D(shift_vector, newpath+'/01 - Shift parameter.png', 'Shift parameter', 'Shift parameter', 'Shift parameter', 'Frequency channel number', customDPI)

    max_shift = np.abs(shift_vector[0])
    print (' Maximal shift is:              ', max_shift, ' pixels \n')
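
The shift vector above comes from DM_full_shift_calc, whose internals are not shown in this listing. As a hedged illustration only (not the project's implementation), the underlying cold-plasma dispersion delay between the band edges can be converted into a number of time pixels as below; DM, band edges and time resolution are placeholder values.

import numpy as np

DM = 5.755                 # dispersion measure, pc/cm^3 (placeholder)
f_lo, f_hi = 16.5, 33.0    # band edges in MHz, as in the two-channel JDS mode
time_res = 0.000496        # time resolution in seconds (placeholder)

# Standard dispersion delay: dt [s] = 4.148808e3 * DM * (1/f_lo^2 - 1/f_hi^2), frequencies in MHz
delay_s = 4.148808e3 * DM * (1.0 / f_lo ** 2 - 1.0 / f_hi ** 2)
max_shift_pixels = int(np.ceil(delay_s / time_res))
print(' Maximal shift is:              ', max_shift_pixels, ' pixels \n')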
Example #9
def jds_wf_simple_reader(directory, no_of_spectra_to_average, skip_data_blocks,
                         VminNorm, VmaxNorm, colormap, custom_dpi,
                         save_long_file_aver, dyn_spectr_save_init,
                         dyn_spectr_save_norm):

    current_time = time.strftime("%H:%M:%S")
    current_date = time.strftime("%d.%m.%Y")

    # *** Creating a folder where all pictures and results will be stored (if it doesn't exist) ***
    result_folder = 'RESULTS_JDS_waveform_' + directory.split('/')[-2]
    if not os.path.exists(result_folder):
        os.makedirs(result_folder)
    service_folder = result_folder + '/Service'
    if not os.path.exists(service_folder):
        os.makedirs(service_folder)
    if dyn_spectr_save_init == 1:
        initial_spectra_folder = result_folder + '/Initial spectra'
        if not os.path.exists(initial_spectra_folder):
            os.makedirs(initial_spectra_folder)

    # *** Search JDS files in the directory ***

    file_list = find_files_only_in_current_folder(directory, '.jds', 1)
    print('')

    if len(file_list) > 1:  # Check if files have the same parameters when there is more than one file in the list
        # Check if all files (except the last) have same size
        same_or_not = check_if_all_files_of_same_size(directory, file_list, 1)

        # Check if all files in this folder have the same parameters in headers
        equal_or_not = check_if_JDS_files_of_equal_parameters(
            directory, file_list)

        if same_or_not and equal_or_not:
            print(
                '\n\n\n        :-)  All files seem to be of the same parameters!  :-) \n\n\n'
            )
        else:
            print(
                '\n\n\n ************************************************************************************* '
            )
            print(
                ' *                                                                                   *'
            )
            print(
                ' *   Seems files in folders are different check the errors and restart the script!   *'
            )
            print(
                ' *                                                                                   *  '
                '\n ************************************************************************************* \n\n\n'
            )

            decision = int(
                input(
                    '* Enter "1" to start processing, or "0" to stop the script:     '
                ))
            if decision != 1:
                sys.exit(
                    '\n\n\n              ***  Program stopped! *** \n\n\n')

    # To print in console the header of first file
    print('\n  First file header parameters: \n')

    # *** Data file header read ***
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr,
        TimeRes, fmin, fmax, df, frequency, freq_points_num, data_block_size
    ] = FileHeaderReaderJDS(directory + file_list[0], 0, 1)

    # Main loop by files start
    for file_no in range(len(file_list)):  # loop by files

        # *** Opening datafile ***
        fname = directory + file_list[file_no]

        # *********************************************************************************

        # *** Data file header read ***
        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, Channel, ReceiverMode,
            Mode, Navr, TimeRes, fmin, fmax, df, frequency, freq_points_num,
            data_block_size
        ] = FileHeaderReaderJDS(fname, 0, 0)

        # Create long data files and copy first data file header to them
        if file_no == 0 and save_long_file_aver == 1:

            with open(fname, 'rb') as file:
                # *** Data file header read ***
                file_header = file.read(1024)

            # *** Creating a name for long timeline TXT file ***
            tl_file_name = df_filename + '_Timeline.txt'
            tl_file = open(
                tl_file_name,
                'w')  # Open and close to delete the file with the same name
            tl_file.close()

            # *** Creating a binary file with data for long data storage ***
            file_data_a_name = df_filename + '_Data_chA.dat'
            file_data_a = open(file_data_a_name, 'wb')
            file_data_a.write(file_header)
            file_data_a.seek(574)  # FFT size place in header
            file_data_a.write(np.int32(data_block_size).tobytes())
            file_data_a.seek(624)  # Lb place in header
            file_data_a.write(np.int32(0).tobytes())
            file_data_a.seek(628)  # Hb place in header
            file_data_a.write(np.int32(data_block_size / 2).tobytes())
            file_data_a.seek(632)  # Wb place in header
            file_data_a.write(np.int32(data_block_size / 2).tobytes())
            file_data_a.seek(636)  # Navr place in header
            file_data_a.write(
                np.int32(Navr * no_of_spectra_to_average).tobytes())
            file_data_a.close()

            if Channel == 2:
                file_data_b_name = df_filename + '_Data_chB.dat'
                file_data_b = open(file_data_b_name, 'wb')
                file_data_b.write(file_header)
                file_data_b.seek(574)  # FFT size place in header
                file_data_b.write(np.int32(data_block_size).tobytes())
                file_data_b.seek(624)  # Lb place in header
                file_data_b.write(np.int32(0).tobytes())
                file_data_b.seek(628)  # Hb place in header
                file_data_b.write(np.int32(data_block_size / 2).tobytes())
                file_data_b.seek(632)  # Wb place in header
                file_data_b.write(np.int32(data_block_size / 2).tobytes())
                file_data_b.seek(636)  # Navr place in header
                file_data_b.write(
                    np.int32(Navr * no_of_spectra_to_average).tobytes())
                file_data_b.close()

            del file_header

        # !!! Make automatic calculations of time and frequency resolutions for waveform mode!!!

        # Manually set frequencies for one channel mode

        if (Channel == 0 and int(CLCfrq / 1000000)
                == 66) or (Channel == 1 and int(CLCfrq / 1000000) == 66):
            freq_points_num = 8192
            frequency = np.linspace(0.0, 33.0, freq_points_num)

        # Manually set frequencies for two channels mode
        if Channel == 2 or (Channel == 0 and int(CLCfrq / 1000000) == 33) or (
                Channel == 1 and int(CLCfrq / 1000000) == 33):
            freq_points_num = 8192
            frequency = np.linspace(16.5, 33.0, freq_points_num)
        # For new receiver (temporary):
        if Channel == 2 and int(CLCfrq / 1000000) == 80:
            freq_points_num = 8192
            frequency = np.linspace(0.0, 40.0, freq_points_num)
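        # The bands above appear to follow from the sampling setup: a 66 MHz clock on
        # a single channel gives a 0-33 MHz Nyquist band, while two channels or a
        # 33 MHz clock halve the effective rate, leaving the 16.5-33 MHz half-band
        # (second Nyquist zone); the 80 MHz case of the new receiver maps to 0-40 MHz.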

        # Calculation of number of blocks and number of spectra in the file
        if Channel == 0 or Channel == 1:  # Single channel mode
            no_of_av_spectra_per_file = (df_filesize - 1024) / (
                2 * data_block_size * no_of_spectra_to_average)
        else:  # Two channels mode
            no_of_av_spectra_per_file = (df_filesize - 1024) / (
                4 * data_block_size * no_of_spectra_to_average)

        no_of_blocks_in_file = (df_filesize - 1024) / data_block_size

        no_of_av_spectra_per_file = int(no_of_av_spectra_per_file)
        fine_clock_frq = (int(CLCfrq / 1000000.0) * 1000000.0)

        # Real time resolution of averaged spectra
        real_av_spectra_dt = (1 / fine_clock_frq) * (
            data_block_size - 4) * no_of_spectra_to_average
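        # Illustrative check with assumed example values: for CLCfrq = 66 MHz and
        # data_block_size = 16384 samples, one block spans (16384 - 4) / 66e6 ~ 0.248 ms,
        # so averaging 16 spectra gives real_av_spectra_dt of about 3.97 ms.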

        if file_no == 0:
            print(' Number of blocks in file:             ',
                  no_of_blocks_in_file)
            print(' Number of spectra to average:         ',
                  no_of_spectra_to_average)
            print(' Number of averaged spectra in file:   ',
                  no_of_av_spectra_per_file)
            print(' Time resolution of averaged spectrum: ',
                  round(real_av_spectra_dt * 1000, 3), ' ms.')
            print('\n  *** Reading data from file *** \n')

        # *******************************************************************************
        #                           R E A D I N G   D A T A                             *
        # *******************************************************************************

        with open(fname, 'rb') as file:
            file.seek(
                1024 + data_block_size * 4 *
                skip_data_blocks)  # Skip the 1024-byte header and the requested number of data blocks

            # *** DATA READING process ***

            # Preparing arrays for dynamic spectra
            dyn_spectra_ch_a = np.zeros(
                (int(data_block_size / 2), no_of_av_spectra_per_file), float)
            if Channel == 2:  # Two channels mode
                dyn_spectra_ch_b = np.zeros(
                    (int(data_block_size / 2), no_of_av_spectra_per_file),
                    float)

            # !!! Fake timing. Real timing to be done!!!
            # TimeFigureScaleFig = np.linspace(0, no_of_av_spectra_per_file, no_of_av_spectra_per_file+1)
            # for i in range(no_of_av_spectra_per_file):
            #     TimeFigureScaleFig[i] = str(TimeFigureScaleFig[i])

            time_scale_fig = []
            time_scale_full = []

            bar = IncrementalBar(' File ' + str(file_no + 1) + ' of ' +
                                 str(len(file_list)) + ' reading: ',
                                 max=no_of_av_spectra_per_file,
                                 suffix='%(percent)d%%')

            for av_sp in range(no_of_av_spectra_per_file):

                bar.next()

                # Reading and reshaping all data with readers
                if Channel == 0 or Channel == 1:  # Single channel mode
                    wf_data = np.fromfile(file,
                                          dtype='i2',
                                          count=no_of_spectra_to_average *
                                          data_block_size)
                    wf_data = np.reshape(
                        wf_data, [data_block_size, no_of_spectra_to_average],
                        order='F')
                if Channel == 2:  # Two channels mode
                    wf_data = np.fromfile(file,
                                          dtype='i2',
                                          count=2 * no_of_spectra_to_average *
                                          data_block_size)
                    wf_data = np.reshape(
                        wf_data,
                        [data_block_size, 2 * no_of_spectra_to_average],
                        order='F')

                # Timing
                timeline_block_str = jds_waveform_time(wf_data, CLCfrq,
                                                       data_block_size)
                time_scale_fig.append(timeline_block_str[-1][0:12])
                time_scale_full.append(df_creation_timeUTC[0:10] + ' ' +
                                       timeline_block_str[-1][0:12])

                # Nulling the time blocks in waveform data
                wf_data[data_block_size - 4:data_block_size, :] = 0

                # Scaling of the data - seems to be wrong in absolute value
                wf_data = wf_data / 32768.0

                if Channel == 0 or Channel == 1:  # Single channel mode
                    wf_data_ch_a = wf_data  # All the data is channel A data
                    del wf_data  # Deleting unnecessary array to free the memory

                if Channel == 2:  # Two channels mode

                    # Resizing to obtain the matrix for separation of channels
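                    # In two-channel files the A and B samples are interleaved within
                    # the waveform stream, so two consecutive data blocks are stacked
                    # into one column of 2 * data_block_size samples and then split
                    # below: even samples go to channel A, odd samples to channel B.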
                    wf_data_new = np.zeros(
                        (2 * data_block_size, no_of_spectra_to_average))
                    for i in range(2 * no_of_spectra_to_average):
                        if i % 2 == 0:
                            wf_data_new[0:data_block_size,
                                        int(i / 2)] = wf_data[:, i]  # Even
                        else:
                            wf_data_new[data_block_size:2 * data_block_size,
                                        int(i / 2)] = wf_data[:, i]  # Odd
                    del wf_data  # Deleting unnecessary array to free the memory

                    # Separating the data into two channels
                    wf_data_ch_a = np.zeros(
                        (data_block_size,
                         no_of_spectra_to_average))  # Preparing empty array
                    wf_data_ch_b = np.zeros(
                        (data_block_size,
                         no_of_spectra_to_average))  # Preparing empty array
                    wf_data_ch_a[:, :] = wf_data_new[0:(
                        2 * data_block_size):2, :]  # Separation to channel A
                    wf_data_ch_b[:, :] = wf_data_new[1:(
                        2 * data_block_size):2, :]  # Separation to channel B
                    del wf_data_new

                # preparing matrices for spectra
                spectra_ch_a = np.zeros_like(wf_data_ch_a)
                if Channel == 2:
                    spectra_ch_b = np.zeros_like(wf_data_ch_b)

                # Calculation of spectra
                for i in range(no_of_spectra_to_average):
                    spectra_ch_a[:, i] = np.power(
                        np.abs(np.fft.fft(wf_data_ch_a[:, i])), 2)
                    if Channel == 2:  # Two channels mode
                        spectra_ch_b[:, i] = np.power(
                            np.abs(np.fft.fft(wf_data_ch_b[:, i])), 2)

                # Storing only first (left) mirror part of spectra
                spectra_ch_a = spectra_ch_a[:int(data_block_size / 2), :]
                if Channel == 2:
                    spectra_ch_b = spectra_ch_b[:int(data_block_size / 2), :]
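                # Only the lower half is kept because the input waveform is real-valued,
                # so the FFT is conjugate-symmetric and the upper half carries no
                # additional information.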

                # At 33 MHz the spectrum is usually upside down, to correct it we use flip up/down
                if int(CLCfrq / 1000000) == 33:
                    spectra_ch_a = np.flipud(spectra_ch_a)
                    if Channel == 2:
                        spectra_ch_b = np.flipud(spectra_ch_b)

                # Plotting first waveform block and first immediate spectrum in a file
                if av_sp == 0:  # First data block in a file
                    i = 0  # First immediate spectrum in a block

                    # Prepare parameters for plot
                    data_1 = wf_data_ch_a[:, i]
                    if Channel == 0 or Channel == 1:  # Single channel mode
                        no_of_sets = 1
                        data_2 = []
                    if Channel == 2:
                        no_of_sets = 2
                        data_2 = wf_data_ch_b[:, i]

                    suptitle = ('Waveform data, first block in file ' +
                                str(df_filename))
                    Title = (ReceiverMode + ', Fclock = ' +
                             str(round(CLCfrq / 1000000, 1)) +
                             ' MHz, Description: ' + str(df_description))

                    TwoOrOneValuePlot(
                        no_of_sets,
                        np.linspace(no_of_sets, data_block_size,
                                    data_block_size), data_1, data_2,
                        'Channel A', 'Channel B', 1, data_block_size, -0.6,
                        0.6, -0.6, 0.6, 'ADC clock counts', 'Amplitude, V',
                        'Amplitude, V', suptitle, Title, service_folder + '/' +
                        df_filename[0:14] + ' Waveform first data block.png',
                        current_date, current_time, software_version)

                    # Prepare parameters for plot
                    data_1 = 10 * np.log10(spectra_ch_a[:, i])
                    if Channel == 0 or Channel == 1:  # Single channel mode
                        no_of_sets = 1
                        data_2 = []
                    if Channel == 2:
                        no_of_sets = 2
                        data_2 = 10 * np.log10(spectra_ch_b[:, i])

                    suptitle = ('Immediate spectrum, first in file ' +
                                str(df_filename))
                    Title = (ReceiverMode + ', Fclock = ' +
                             str(round(CLCfrq / 1000000, 1)) +
                             ' MHz, Description: ' + str(df_description))

                    TwoOrOneValuePlot(
                        no_of_sets, frequency, data_1, data_2, 'Channel A',
                        'Channel B', frequency[0], frequency[-1], -80, 60, -80,
                        60, 'Frequency, MHz', 'Intensity, dB', 'Intensity, dB',
                        suptitle, Title,
                        service_folder + '/' + df_filename[0:14] +
                        ' Immediate spectrum first in file.png', current_date,
                        current_time, software_version)

                # Deleting the unnecessary matrices
                del wf_data_ch_a
                if Channel == 2:
                    del wf_data_ch_b

                # Calculation of the averaged spectrum
                aver_spectra_ch_a = spectra_ch_a.mean(axis=1)[:]
                if Channel == 2:
                    aver_spectra_ch_b = spectra_ch_b.mean(axis=1)[:]

                # Plotting only first averaged spectrum
                if av_sp == 0:

                    # Prepare parameters for plot
                    data_1 = 10 * np.log10(aver_spectra_ch_a)
                    if Channel == 0 or Channel == 1:  # Single channel mode
                        no_of_sets = 1
                        data_2 = []
                    if Channel == 2:
                        no_of_sets = 2
                        data_2 = 10 * np.log10(aver_spectra_ch_b)

                    suptitle = ('Average spectrum, first data block in file ' +
                                str(df_filename))
                    Title = (ReceiverMode + ', Fclock = ' +
                             str(round(CLCfrq / 1000000, 1)) +
                             ' MHz, Averaged spectra: ' +
                             str(no_of_spectra_to_average) +
                             ', Description: ' + str(df_description))

                    TwoOrOneValuePlot(
                        no_of_sets, frequency, data_1, data_2, 'Channel A',
                        'Channel B', frequency[0], frequency[-1], -80, 60, -80,
                        60, 'Frequency, MHz', 'Intensity, dB', 'Intensity, dB',
                        suptitle, Title,
                        service_folder + '/' + df_filename[0:14] +
                        ' Average spectrum first data block in file.png',
                        current_date, current_time, software_version)

                # Adding calculated averaged spectrum to dynamic spectra array
                dyn_spectra_ch_a[:, av_sp] = aver_spectra_ch_a[:]
                if Channel == 2:
                    dyn_spectra_ch_b[:, av_sp] = aver_spectra_ch_b[:]

            bar.finish()

        # file.close()  # Close the data file

        # Saving averaged spectra to long data files
        if save_long_file_aver == 1:
            temp = dyn_spectra_ch_a.transpose().copy(order='C')
            file_data_a = open(file_data_a_name, 'ab')
            file_data_a.write(temp)
            file_data_a.close()
            if Channel == 2:
                temp = dyn_spectra_ch_b.transpose().copy(order='C')
                file_data_b = open(file_data_b_name, 'ab')
                file_data_b.write(temp)
                file_data_b.close()

            # Saving time data to long timeline file
            with open(tl_file_name, 'a') as tl_file:
                for i in range(no_of_av_spectra_per_file):
                    tl_file.write((time_scale_full[i][:]) + ' \n')  # str
            del time_scale_full

        # Log data (make dB scale)
        with np.errstate(invalid='ignore', divide='ignore'):
            dyn_spectra_ch_a = 10 * np.log10(dyn_spectra_ch_a)
            if Channel == 2:
                dyn_spectra_ch_b = 10 * np.log10(dyn_spectra_ch_b)

        # If the data contains minus infinity values change them to particular values
        dyn_spectra_ch_a[np.isinf(dyn_spectra_ch_a)] = 40
        if Channel == 2:
            dyn_spectra_ch_b[np.isinf(dyn_spectra_ch_b)] = 40

        # *******************************************************************************
        #             P L O T T I N G    D Y N A M I C    S P E C T R A                 *
        # *******************************************************************************

        # if dyn_spectr_save_init == 1 or dyn_spectr_save_norm == 1:
        #    print('\n  *** Making figures of dynamic spectra *** \n')

        if dyn_spectr_save_init == 1:
            # Plot of initial dynamic spectra

            v_min_a = np.min(dyn_spectra_ch_a)
            v_max_a = np.max(dyn_spectra_ch_a)
            v_min_b = v_min_a
            v_max_b = v_max_a
            if Channel == 2:
                v_min_b = np.min(dyn_spectra_ch_b)
                v_max_b = np.max(dyn_spectra_ch_b)

            if Channel == 0 or Channel == 1:  # Single channel mode
                dyn_spectra_ch_b = dyn_spectra_ch_a

            suptitle = ('Dynamic spectrum (initial) ' + str(df_filename) +
                        ' - Fig. ' + str(1) + ' of ' + str(1) +
                        '\n Initial parameters: dt = ' +
                        str(round(TimeRes * 1000., 3)) + ' ms, df = ' +
                        str(round(df / 1000., 3)) + ' kHz, Receiver: ' +
                        str(df_system_name) + ', Place: ' + str(df_obs_place) +
                        '\n' + ReceiverMode + ', Fclock = ' +
                        str(round(CLCfrq / 1000000, 1)) +
                        ' MHz, Averaged spectra: ' +
                        str(no_of_spectra_to_average) + ' (' +
                        str(round(no_of_spectra_to_average * TimeRes, 3)) +
                        ' sec.), Description: ' + str(df_description))

            fig_file_name = (initial_spectra_folder + '/' + df_filename[0:14] +
                             ' Initial dynamic spectrum fig.' + str(0 + 1) +
                             '.png')

            if Channel == 0 or Channel == 1:  # Single channel mode
                OneDynSpectraPlot(dyn_spectra_ch_a, v_min_a, v_max_a, suptitle,
                                  'Intensity, dB', no_of_av_spectra_per_file,
                                  time_scale_fig, frequency, freq_points_num,
                                  colormap, 'UTC Time, HH:MM:SS.msec',
                                  fig_file_name, current_date, current_time,
                                  software_version, custom_dpi)

            if Channel == 2:
                TwoDynSpectraPlot(dyn_spectra_ch_a, dyn_spectra_ch_b, v_min_a,
                                  v_max_a, v_min_b, v_max_b, suptitle,
                                  'Intensity, dB', 'Intensity, dB',
                                  no_of_av_spectra_per_file, time_scale_fig,
                                  time_scale_fig, frequency, freq_points_num,
                                  colormap, 'Channel A', 'Channel B',
                                  fig_file_name, current_date, current_time,
                                  software_version, custom_dpi)

        if dyn_spectr_save_norm == 1:

            # Normalization and cleaning of data

            Normalization_dB(dyn_spectra_ch_a.transpose(), freq_points_num,
                             no_of_av_spectra_per_file)
            if Channel == 2:
                Normalization_dB(dyn_spectra_ch_b.transpose(), freq_points_num,
                                 no_of_av_spectra_per_file)

            simple_channel_clean(dyn_spectra_ch_a, 8)
            if Channel == 2:
                simple_channel_clean(dyn_spectra_ch_b, 8)

            # Plot of normalized and cleaned dynamic spectra

            suptitle = ('Normalized and cleaned dynamic spectrum (initial) ' +
                        str(df_filename) + ' - Fig. ' + str(0 + 1) + ' of ' +
                        str(1) + '\n Initial parameters: dt = ' +
                        str(round(TimeRes * 1000, 3)) + ' ms, df = ' +
                        str(round(df / 1000., 3)) + ' kHz, Receiver: ' +
                        str(df_system_name) + ', Place: ' + str(df_obs_place) +
                        '\n' + ReceiverMode + ', Fclock = ' +
                        str(round(CLCfrq / 1000000, 1)) +
                        ' MHz, Averaged spectra: ' +
                        str(no_of_spectra_to_average) + ' (' +
                        str(round(no_of_spectra_to_average * TimeRes, 3)) +
                        ' sec.), Description: ' + str(df_description))

            fig_file_name = (result_folder + '/' + df_filename[0:14] +
                             ' Normalized and cleaned dynamic spectrum fig.' +
                             str(0 + 1) + '.png')

            if Channel == 0 or Channel == 1:  # Single channel mode
                OneDynSpectraPlot(dyn_spectra_ch_a, VminNorm, VmaxNorm,
                                  suptitle, 'Intensity, dB',
                                  no_of_av_spectra_per_file, time_scale_fig,
                                  frequency, freq_points_num, colormap,
                                  'UTC Time, HH:MM:SS.msec', fig_file_name,
                                  current_date, current_time, software_version,
                                  custom_dpi)
            if Channel == 2:
                TwoDynSpectraPlot(dyn_spectra_ch_a, dyn_spectra_ch_b, VminNorm,
                                  VmaxNorm, VminNorm, VmaxNorm, suptitle,
                                  'Intensity, dB', 'Intensity, dB',
                                  no_of_av_spectra_per_file, time_scale_fig,
                                  time_scale_fig, frequency, freq_points_num,
                                  colormap, 'Channel A', 'Channel B',
                                  fig_file_name, current_date, current_time,
                                  software_version, custom_dpi)
        del time_scale_fig, file_data_a
        if Channel == 2:
            del file_data_b

    results_files_list = []
    results_files_list.append(file_data_a_name)
    if Channel == 2:
        results_files_list.append(file_data_b_name)

    return results_files_list
def cut_needed_pulsar_period_from_dat_to_dat(common_path, filename,
                                             pulsar_name, period_number,
                                             profile_pic_min, profile_pic_max,
                                             spectrum_pic_min,
                                             spectrum_pic_max, periods_per_fig,
                                             customDPI, colormap):
    """
    Function to find and cut the selected pulsar period (by its number) from the DAT files
    """

    software_version = '2021.08.07'

    current_time = time.strftime("%H:%M:%S")
    current_date = time.strftime("%d.%m.%Y")

    # Creating a folder where all pictures and results will be stored (if it doesn't exist)
    result_path = "RESULTS_pulsar_extracted_pulse_" + filename
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    # Taking pulsar period from catalogue
    pulsar_ra, pulsar_dec, DM, p_bar = catalogue_pulsar(pulsar_name)

    # DAT file to be analyzed:
    filepath = common_path + filename

    # Timeline file to be analyzed:
    timeline_filepath = common_path + filename.split(
        '_Data_')[0] + '_Timeline.txt'

    # Opening DAT datafile
    file = open(filepath, 'rb')

    # Data file header read
    df_filesize = os.stat(filepath).st_size  # Size of file
    df_filepath = file.read(32).decode('utf-8').rstrip(
        '\x00')  # Initial data file name
    file.close()

    if df_filepath[-4:] == '.adr':
        [
            df_filepath, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, ReceiverMode, Mode,
            sumDifMode, NAvr, time_resolution, fmin, fmax, df, frequency,
            FFTsize, SLine, Width, BlockSize
        ] = FileHeaderReaderADR(filepath, 0, 0)

        freq_points_num = len(frequency)

    if df_filepath[-4:] == '.jds':  # If data obtained from DSPZ receiver

        [
            df_filepath, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, SpInFile,
            ReceiverMode, Mode, Navr, time_resolution, fmin, fmax, df,
            frequency, freq_points_num, dataBlockSize
        ] = FileHeaderReaderJDS(filepath, 0, 0)

    # ************************************************************************************
    #                             R E A D I N G   D A T A                                *
    # ************************************************************************************

    # Time line file reading
    timeline, dt_timeline = time_line_file_reader(timeline_filepath)

    # Calculation of the dimensions of arrays to read taking into account the pulsar period
    spectra_in_file = int(
        (df_filesize - 1024) /
        (8 * freq_points_num))  # int(df_filesize - 1024)/(2*4*freq_points_num)
    spectra_to_read = int(
        np.round((periods_per_fig * p_bar / time_resolution), 0))
    spectra_per_period = int(np.round((p_bar / time_resolution), 0))
    num_of_blocks = int(np.floor(spectra_in_file / spectra_to_read))

    print('\n   Pulsar name:                             ', pulsar_name, '')
    print('   Pulsar period:                           ', p_bar, 's.')
    print('   Time resolution:                         ', time_resolution,
          's.')
    print('   Number of spectra to read in', periods_per_fig, 'periods:  ',
          spectra_to_read, ' ')
    print('   Number of spectra in file:               ', spectra_in_file, ' ')
    print('   Number of', periods_per_fig, 'periods blocks in file:      ',
          num_of_blocks, '\n')

    # Data reading and making figures
    print('\n   Data reading and making figure...')

    data_file = open(filepath, 'rb')

    # Skip the 1024-byte header plus (period_number - 1) periods of float64 spectra
    data_file.seek(
        1024 + (period_number - 1) * spectra_per_period * len(frequency) * 8,
        os.SEEK_SET)

    # Reading and preparing a block of data (periods_per_fig periods)
    data = np.fromfile(data_file,
                       dtype=np.float64,
                       count=spectra_to_read * len(frequency))
    data_file.close()

    data = np.reshape(data, [len(frequency), spectra_to_read], order='F')

    # Read data file header from initial file
    with open(filepath, 'rb') as file:
        file_header = file.read(1024)

    # Create a binary file with the header and the selected pulsar period(s) of data
    dat_file_name = 'Single_pulse_' + filename
    file_data = open(result_path + '/' + dat_file_name, 'wb')
    file_data.write(file_header)
    del file_header
    # Prepare data to save to the file
    temp = data.transpose().copy(order='C')
    file_data.write(np.float64(temp))
    del temp
    file_data.close()

    # Time line
    fig_time_scale = timeline[(period_number - 1) * spectra_per_period:
                              (period_number - 1) * spectra_per_period +
                              spectra_to_read]

    # Code to save the timeline to a file is prepared, but as the timeline is wrong it is commented out temporarily
    # # Creating and filling a new timeline TXT file for results
    # new_tl_file_name = dat_file_name.split('_Data_', 1)[0] + '_Timeline.txt'
    # new_tl_file = open(result_path + '/' + new_tl_file_name, 'w')
    # # Saving time data to new file
    # for j in range(len(fig_time_scale)):
    #     new_tl_file.write((fig_time_scale[j][:]) + '')
    # new_tl_file.close()

    # Logging data for figure
    data = 10 * np.log10(data)

    # Normalizing data
    data = data - np.mean(data)

    # Making result picture
    fig = plt.figure(figsize=(9.2, 4.5))
    rc('font', size=5, weight='bold')
    ax2 = fig.add_subplot(111)
    ax2.set_title('File: ' + filename + '  Description: ' + df_description +
                  '  Resolution: ' + str(np.round(df / 1000, 3)) +
                  ' kHz and ' + str(np.round(time_resolution * 1000, 3)) +
                  ' ms.',
                  fontsize=5,
                  fontweight='bold')
    ax2.imshow(
        np.flipud(data),
        aspect='auto',
        cmap=colormap,
        vmin=spectrum_pic_min,
        vmax=spectrum_pic_max,
        extent=[0, data.shape[1], frequency[0] + 16.5,
                frequency[-1] + 16.5])  # len(profile)
    ax2.set_xlabel('Time UTC (at the lowest frequency), HH:MM:SS.ms',
                   fontsize=6,
                   fontweight='bold')
    ax2.set_ylabel('Frequency, MHz', fontsize=6, fontweight='bold')
    text = ax2.get_xticks().tolist()
    for i in range(len(text) - 1):
        k = int(text[i])
        text[i] = fig_time_scale[k][11:23]
    ax2.set_xticklabels(text, fontsize=5, fontweight='bold')
    fig.subplots_adjust(hspace=0.05, top=0.91)
    fig.suptitle('Extracted single pulse of ' + pulsar_name + ' (DM: ' +
                 str(DM) + r' $\mathrm{pc \cdot cm^{-3}}$' + ', Period: ' +
                 str(p_bar) + ' s.)',
                 fontsize=7,
                 fontweight='bold')
    fig.text(0.80,
             0.04,
             'Processed ' + current_date + ' at ' + current_time,
             fontsize=3,
             transform=plt.gcf().transFigure)
    fig.text(0.09,
             0.04,
             'Software version: ' + software_version +
             ', [email protected], IRA NASU',
             fontsize=3,
             transform=plt.gcf().transFigure)
    pylab.savefig(result_path + '/' + 'Single_pulse_' + filename[:-4] + '.png',
                  bbox_inches='tight',
                  dpi=customDPI)
    plt.close('all')

    return result_path, 'Single_pulse_' + filename, \
        'Single_pulse_' + filename[:-4] + '.png'
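
# A minimal usage sketch (hypothetical file and pulsar names, not from the source):
# result_path, dat_name, png_name = cut_needed_pulsar_period_from_dat_to_dat(
#     common_path='DATA/', filename='E280120_205546.jds_Data_chA.dat',
#     pulsar_name='B0809+74', period_number=12,
#     profile_pic_min=-0.15, profile_pic_max=0.55,
#     spectrum_pic_min=-0.2, spectrum_pic_max=3.0,
#     periods_per_fig=1, customDPI=300, colormap='Greys')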
Ejemplo n.º 11
0
    decision = int(
        input(
            '* Enter "1" to start processing, or "0" to stop the script:     ')
    )
    if decision != 1:
        sys.exit('\n\n\n              ***  Program stopped! *** \n\n\n')

# To print in console the header of first file
print('\n  First file header parameters: \n')

# *** Data file header read ***
[
    df_filename, df_filesize, df_system_name, df_obs_place, df_description,
    CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr, TimeRes,
    fmin, fmax, df, frequency, FreqPointsNum, data_block_size
] = FileHeaderReaderJDS(source_directory + fileList[0], 0, 1)

# CLCfrq = 80

# Main loop by files start
for fileNo in range(len(fileList)):  # loop by files

    # *** Opening datafile ***
    fname = source_directory + fileList[fileNo]

    # *********************************************************************************

    # *** Data file header read ***
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr,
Ejemplo n.º 12
0
def jds_wf_simple_reader(directory, no_of_spectra_to_average, skip_data_blocks,
                         VminNorm, VmaxNorm, colormap, custom_dpi,
                         save_long_file_aver, dyn_spectr_save_init,
                         dyn_spectr_save_norm):
    """
    Does not seem to work better or faster; takes a lot of RAM (32 GB) but works.
    It is not used in any other scripts and is more of a demonstration.
    The same function in the non-fast file takes approximately the same time but consumes less memory.
    The only advantage of this function is reading the whole file at once.
    """

    current_time = time.strftime("%H:%M:%S")
    current_date = time.strftime("%d.%m.%Y")

    # *** Creating a folder where all pictures and results will be stored (if it doesn't exist) ***
    # result_folder = 'RESULTS_JDS_waveform_' + directory.split('/')[-2]
    result_folder = 'RESULTS_JDS_waveform'
    if not os.path.exists(result_folder):
        os.makedirs(result_folder)
    if dyn_spectr_save_init == 1:
        initial_spectra_folder = result_folder + '/Initial spectra'
        if not os.path.exists(initial_spectra_folder):
            os.makedirs(initial_spectra_folder)

    # *** Search JDS files in the directory ***

    file_list = find_files_only_in_current_folder(directory, '.jds', 1)
    print('')

    if len(
            file_list
    ) > 1:  # Check if files have the same parameters when there is more than one file in the list
        # Check if all files (except the last) have same size
        same_or_not = check_if_all_files_of_same_size(directory, file_list, 1)

        # Check if all files in this folder have the same parameters in headers
        equal_or_not = check_if_JDS_files_of_equal_parameters(
            directory, file_list)

        if same_or_not and equal_or_not:
            print(
                '\n\n\n        :-)  All files seem to be of the same parameters!  :-) \n\n\n'
            )
        else:
            print(
                '\n\n\n ************************************************************************************* '
            )
            print(
                ' *                                                                                   *'
            )
            print(
                ' *   Seems files in folders are different check the errors and restart the script!   *'
            )
            print(
                ' *                                                                                   *  '
                '\n ************************************************************************************* \n\n\n'
            )

            decision = int(
                input(
                    '* Enter "1" to start processing, or "0" to stop the script:     '
                ))
            if decision != 1:
                sys.exit(
                    '\n\n\n              ***  Program stopped! *** \n\n\n')

    # To print in console the header of first file
    print('\n  First file header parameters: \n')

    # *** Data file header read ***
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr,
        TimeRes, fmin, fmax, df, frequency, freq_points_num, data_block_size
    ] = FileHeaderReaderJDS(directory + file_list[0], 0, 1)

    # Main loop by files start
    for fileNo in range(len(file_list)):  # loop by files

        # *** Opening datafile ***
        fname = directory + file_list[fileNo]

        # *********************************************************************************

        # *** Data file header read ***
        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, Channel, ReceiverMode,
            Mode, Navr, TimeRes, fmin, fmax, df, frequency, freq_points_num,
            data_block_size
        ] = FileHeaderReaderJDS(fname, 0, 0)

        # Create long data files and copy first data file header to them
        if fileNo == 0 and save_long_file_aver == 1:

            with open(fname, 'rb') as file:
                # *** Data file header read ***
                file_header = file.read(1024)

            # *** Creating a name for long timeline TXT file ***
            TLfile_name = df_filename + '_Timeline.txt'
            TLfile = open(
                TLfile_name,
                'w')  # Open and close to delete the file with the same name
            TLfile.close()

            # *** Creating a binary file with data for long data storage ***
            file_data_A_name = df_filename + '_Data_chA.dat'
            file_data_A = open(file_data_A_name, 'wb')
            file_data_A.write(file_header)
            file_data_A.seek(574)  # FFT size place in header
            file_data_A.write(np.int32(data_block_size).tobytes())
            file_data_A.seek(624)  # Lb place in header
            file_data_A.write(np.int32(0).tobytes())
            file_data_A.seek(628)  # Hb place in header
            file_data_A.write(np.int32(data_block_size / 2).tobytes())
            file_data_A.seek(632)  # Wb place in header
            file_data_A.write(np.int32(data_block_size / 2).tobytes())
            file_data_A.seek(636)  # Navr place in header
            file_data_A.write(
                np.int32(Navr * no_of_spectra_to_average).tobytes())
            file_data_A.close()

            if Channel == 2:
                file_data_B_name = df_filename + '_Data_chB.dat'
                file_data_B = open(file_data_B_name, 'wb')
                file_data_B.write(file_header)
                file_data_B.seek(574)  # FFT size place in header
                file_data_B.write(np.int32(data_block_size).tobytes())
                file_data_B.seek(624)  # Lb place in header
                file_data_B.write(np.int32(0).tobytes())
                file_data_B.seek(628)  # Hb place in header
                file_data_B.write(np.int32(data_block_size / 2).tobytes())
                file_data_B.seek(632)  # Wb place in header
                file_data_B.write(np.int32(data_block_size / 2).tobytes())
                file_data_B.seek(636)  # Navr place in header
                file_data_B.write(
                    np.int32(Navr * no_of_spectra_to_average).tobytes())
                file_data_B.close()

            del file_header

        # !!! Make automatic calculations of time and frequency resolutions for waveform mode!!!

        # Manually set frequencies for one channel mode

        if (Channel == 0 and int(CLCfrq / 1000000)
                == 66) or (Channel == 1 and int(CLCfrq / 1000000) == 66):
            freq_points_num = 8192
            frequency = np.linspace(0.0, 33.0, freq_points_num)

        # Manually set frequencies for two channels mode
        if Channel == 2 or (Channel == 0 and int(CLCfrq / 1000000) == 33) or (
                Channel == 1 and int(CLCfrq / 1000000) == 33):
            freq_points_num = 8192
            frequency = np.linspace(16.5, 33.0, freq_points_num)
        # For new receiver (temporary):
        if Channel == 2 and int(CLCfrq / 1000000) == 80:
            freq_points_num = 8192
            frequency = np.linspace(0.0, 40.0, freq_points_num)

        # Calculation of number of blocks and number of spectra in the file
        if Channel == 0 or Channel == 1:  # Single channel mode
            no_of_av_spectra_per_file = (df_filesize - 1024) / (
                2 * data_block_size * no_of_spectra_to_average)
        else:  # Two channels mode
            no_of_av_spectra_per_file = (df_filesize - 1024) / (
                4 * data_block_size * no_of_spectra_to_average)

        no_of_blocks_in_file = (df_filesize - 1024) / data_block_size

        no_of_av_spectra_per_file = int(no_of_av_spectra_per_file)
        fine_CLCfrq = (int(CLCfrq / 1000000.0) * 1000000.0)

        # Real time resolution of averaged spectra
        real_av_spectra_dt = (1 / fine_CLCfrq) * (data_block_size -
                                                  4) * no_of_spectra_to_average

        if fileNo == 0:
            print(' Number of blocks in file:             ',
                  no_of_blocks_in_file)
            print(' Number of spectra to average:         ',
                  no_of_spectra_to_average)
            print(' Number of averaged spectra in file:   ',
                  no_of_av_spectra_per_file)
            print(' Time resolution of averaged spectrum: ',
                  round(real_av_spectra_dt * 1000, 3), ' ms.')
            print('\n  *** Reading data from file *** \n')

        # *******************************************************************************
        #                           R E A D I N G   D A T A                             *
        # *******************************************************************************

        with open(fname, 'rb') as file:
            file.seek(
                1024 + data_block_size * 4 *
                skip_data_blocks)  # Skip the 1024-byte header and the requested number of data blocks

            # *** DATA READING process ***

            # !!! Fake timing. Real timing to be done!!!
            TimeFigureScaleFig = np.linspace(0, no_of_av_spectra_per_file,
                                             no_of_av_spectra_per_file + 1)
            for i in range(no_of_av_spectra_per_file):
                TimeFigureScaleFig[i] = str(TimeFigureScaleFig[i])

            TimeScaleFig = []
            TimeScaleFull = []

            # Calculation of number of blocks and number of spectra in the file
            if Channel == 0 or Channel == 1:  # Single channel mode
                no_of_spectra_in_file = int(
                    (df_filesize - 1024) / (1 * 2 * data_block_size))
            else:  # Two channels mode
                no_of_spectra_in_file = int(
                    (df_filesize - 1024) / (1 * 4 * data_block_size))

            no_of_av_spectra_per_file = 1

            # Reading and reshaping all data with time data
            if Channel == 0 or Channel == 1:  # Single channel mode
                wf_data = np.fromfile(file,
                                      dtype='i2',
                                      count=no_of_spectra_in_file *
                                      data_block_size)
                wf_data = np.reshape(wf_data,
                                     [data_block_size, no_of_spectra_in_file],
                                     order='F')
            if Channel == 2:  # Two channels mode
                wf_data = np.fromfile(file,
                                      dtype='i2',
                                      count=2 * no_of_spectra_in_file *
                                      data_block_size)
                wf_data = np.reshape(
                    wf_data, [data_block_size, 2 * no_of_spectra_in_file],
                    order='F')

            print('Waveform read, shape: ', wf_data.shape)

            # Timing
            timeline_block_str = jds_waveform_time(wf_data, CLCfrq,
                                                   data_block_size)
            timeline_block_str = timeline_block_str[0::16]
            for j in range(len(timeline_block_str)):
                TimeScaleFig.append(timeline_block_str[j][0:12])
                TimeScaleFull.append(df_creation_timeUTC[0:10] + ' ' +
                                     timeline_block_str[j][0:12])

            # Nulling the time blocks in waveform data
            wf_data[data_block_size - 4:data_block_size, :] = 0

            # Scaling of the data - seems to be wrong in absolute value
            wf_data = wf_data / 32768.0

            if Channel == 0 or Channel == 1:  # Single channel mode
                wf_data_chA = wf_data  # All the data is channel A data
                del wf_data  # Deleting unnecessary array to free the memory

            if Channel == 2:  # Two channels mode

                # Resizing to obtain the matrix for separation of channels
                wf_data_new = np.zeros(
                    (2 * data_block_size, no_of_spectra_in_file))
                for i in range(2 * no_of_spectra_in_file):
                    if i % 2 == 0:
                        wf_data_new[0:data_block_size,
                                    int(i / 2)] = wf_data[:, i]  # Even
                    else:
                        wf_data_new[data_block_size:2 * data_block_size,
                                    int(i / 2)] = wf_data[:, i]  # Odd
                del wf_data  # Deleting unnecessary array to free the memory

                # Separating the data into two channels
                wf_data_chA = np.zeros(
                    (data_block_size,
                     no_of_spectra_in_file))  # Preparing empty array
                wf_data_chB = np.zeros(
                    (data_block_size,
                     no_of_spectra_in_file))  # Preparing empty array
                wf_data_chA[:, :] = wf_data_new[0:(
                    2 * data_block_size):2, :]  # Separation to channel A
                wf_data_chB[:, :] = wf_data_new[1:(
                    2 * data_block_size):2, :]  # Separation to channel B
                del wf_data_new

            print('Before transpose, shape: ', wf_data_chA.shape)

            # preparing matrices for spectra
            wf_data_chA = np.transpose(wf_data_chA)
            spectra_ch_a = np.zeros_like(wf_data_chA)
            if Channel == 2:
                wf_data_chB = np.transpose(wf_data_chB)
                spectra_ch_b = np.zeros_like(wf_data_chB)

            print('After transpose, shape: ', wf_data_chA.shape)

            # Calculation of spectra
            spectra_ch_a[:] = np.power(np.abs(np.fft.fft(wf_data_chA[:])), 2)
            if Channel == 2:  # Two channels mode
                spectra_ch_b[:] = np.power(np.abs(np.fft.fft(wf_data_chB[:])),
                                           2)

            print('After fft, spectrum shape: ', spectra_ch_a.shape)

            # Storing only first (left) mirror part of spectra
            spectra_ch_a = spectra_ch_a[:, :int(data_block_size / 2)]
            if Channel == 2:
                spectra_ch_b = spectra_ch_b[:, :int(data_block_size / 2)]

            print('After fft cut, spectrum shape: ', spectra_ch_a.shape)

            # At 33 MHz the spectrum is usually upside down, to correct it we use flip left/right
            if int(CLCfrq / 1000000) == 33:
                spectra_ch_a = np.fliplr(spectra_ch_a)
                if Channel == 2:
                    spectra_ch_b = np.fliplr(spectra_ch_b)
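            # Note: spectra are stored here as [spectrum, frequency], so the frequency
            # axis is the second one and fliplr (not flipud) flips the band.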

            # Deleting the unnecessary matrices
            del wf_data_chA
            if Channel == 2:
                del wf_data_chB

            # Dimensions of [data_block_size / 2, no_of_spectra_in_file]

            # Calculation of the averaged spectrum
            print('Shape before averaging: ', spectra_ch_a.shape)
            spectra_ch_a = np.reshape(spectra_ch_a, [
                int(no_of_spectra_in_file / no_of_spectra_to_average),
                no_of_spectra_to_average,
                int(data_block_size / 2)
            ],
                                      order='F')

            spectra_ch_a = spectra_ch_a.mean(axis=1)[:]

            print('Shape after averaging: ', spectra_ch_a.shape)

            if Channel == 2:
                spectra_ch_b = np.reshape(spectra_ch_b, [
                    int(no_of_spectra_in_file / no_of_spectra_to_average),
                    no_of_spectra_to_average,
                    int(data_block_size / 2)
                ],
                                          order='F')

                spectra_ch_b = spectra_ch_b.mean(axis=1)[:]

        file.close()  # Close the data file

        # Saving averaged spectra to a long data files
        if save_long_file_aver == 1:
            temp = spectra_ch_a.copy(order='C')
            file_data_A = open(file_data_A_name, 'ab')
            file_data_A.write(temp)
            file_data_A.close()
            if Channel == 2:
                temp = spectra_ch_b.copy(order='C')
                file_data_B = open(file_data_B_name, 'ab')
                file_data_B.write(temp)
                file_data_B.close()

            # Saving time data to long timeline file
            with open(TLfile_name, 'a') as TLfile:
                for i in range(no_of_av_spectra_per_file):
                    TLfile.write((TimeScaleFull[i][:]) + ' \n')  # str

        # Log data (make dB scale)
        with np.errstate(invalid='ignore', divide='ignore'):
            spectra_ch_a = 10 * np.log10(spectra_ch_a)
            if Channel == 2:
                spectra_ch_b = 10 * np.log10(spectra_ch_b)

        # If the data contains minus infinity values change them to particular values
        spectra_ch_a[np.isinf(spectra_ch_a)] = 40
        if Channel == 2:
            spectra_ch_b[np.isinf(spectra_ch_b)] = 40

        # *******************************************************************************
        #             P L O T T I N G    D Y N A M I C    S P E C T R A                 *
        # *******************************************************************************

        spectra_ch_a = np.transpose(spectra_ch_a)
        if Channel == 2:
            spectra_ch_b = np.transpose(spectra_ch_b)

        no_of_av_spectra_per_file = spectra_ch_a.shape[1]

        if dyn_spectr_save_init == 1:

            # Plot of initial dynamic spectra
            VminA = np.min(spectra_ch_a)
            VmaxA = np.max(spectra_ch_a)
            VminB = VminA
            VmaxB = VmaxA
            if Channel == 2:
                VminB = np.min(spectra_ch_b)
                VmaxB = np.max(spectra_ch_b)

            if Channel == 0 or Channel == 1:  # Single channel mode
                spectra_ch_b = spectra_ch_a

            suptitle = ('Dynamic spectrum (initial) ' + str(df_filename) +
                        ' - Fig. ' + str(1) + ' of ' + str(1) +
                        '\n Initial parameters: dt = ' +
                        str(round(TimeRes * 1000., 3)) + ' ms, df = ' +
                        str(round(df / 1000., 3)) + ' kHz, Receiver: ' +
                        str(df_system_name) + ', Place: ' + str(df_obs_place) +
                        '\n' + ReceiverMode + ', Fclock = ' +
                        str(round(CLCfrq / 1000000, 1)) +
                        ' MHz, Averaged spectra: ' +
                        str(no_of_spectra_to_average) + ' (' +
                        str(round(no_of_spectra_to_average * TimeRes, 3)) +
                        ' sec.), Description: ' + str(df_description))

            fig_file_name = (initial_spectra_folder + '/' + df_filename[0:14] +
                             ' Initial dynamic spectrum fig.' + str(0 + 1) +
                             '.png')

            if Channel == 0 or Channel == 1:  # Single channel mode
                OneDynSpectraPlot(spectra_ch_a, VminA, VmaxA, suptitle,
                                  'Intensity, dB', no_of_av_spectra_per_file,
                                  TimeScaleFig, frequency, freq_points_num,
                                  colormap, 'UTC Time, HH:MM:SS.msec',
                                  fig_file_name, current_date, current_time,
                                  software_version, custom_dpi)

            if Channel == 2:
                TwoDynSpectraPlot(spectra_ch_a, spectra_ch_b, VminA, VmaxA,
                                  VminB, VmaxB, suptitle, 'Intensity, dB',
                                  'Intensity, dB', no_of_av_spectra_per_file,
                                  TimeScaleFig, TimeScaleFig, frequency,
                                  freq_points_num, colormap, 'Channel A',
                                  'Channel B', fig_file_name, current_date,
                                  current_time, software_version, custom_dpi)

        if dyn_spectr_save_norm == 1:

            # Normalization and cleaning of data

            Normalization_dB(spectra_ch_a.transpose(), freq_points_num,
                             no_of_av_spectra_per_file)
            if Channel == 2:
                Normalization_dB(spectra_ch_b.transpose(), freq_points_num,
                                 no_of_av_spectra_per_file)

            simple_channel_clean(spectra_ch_a, 8)
            if Channel == 2:
                simple_channel_clean(spectra_ch_b, 8)

            # Plot of normalized and cleaned dynamic spectra

            suptitle = ('Normalized and cleaned dynamic spectrum (initial) ' +
                        str(df_filename) + ' - Fig. ' + str(0 + 1) + ' of ' +
                        str(1) + '\n Initial parameters: dt = ' +
                        str(round(TimeRes * 1000, 3)) + ' ms, df = ' +
                        str(round(df / 1000., 3)) + ' kHz, Receiver: ' +
                        str(df_system_name) + ', Place: ' + str(df_obs_place) +
                        '\n' + ReceiverMode + ', Fclock = ' +
                        str(round(CLCfrq / 1000000, 1)) +
                        ' MHz, Averaged spectra: ' +
                        str(no_of_spectra_to_average) + ' (' +
                        str(round(no_of_spectra_to_average * TimeRes, 3)) +
                        ' sec.), Description: ' + str(df_description))

            fig_file_name = (result_folder + '/' + df_filename[0:14] +
                             ' Normalized and cleaned dynamic spectrum fig.' +
                             str(0 + 1) + '.png')

            if Channel == 0 or Channel == 1:  # Single channel mode
                OneDynSpectraPlot(spectra_ch_a, VminNorm, VmaxNorm, suptitle,
                                  'Intensity, dB', no_of_av_spectra_per_file,
                                  TimeScaleFig, frequency, freq_points_num,
                                  colormap, 'UTC Time, HH:MM:SS.msec',
                                  fig_file_name, current_date, current_time,
                                  software_version, custom_dpi)
            if Channel == 2:
                TwoDynSpectraPlot(spectra_ch_a, spectra_ch_b, VminNorm,
                                  VmaxNorm, VminNorm, VmaxNorm, suptitle,
                                  'Intensity, dB', 'Intensity, dB',
                                  no_of_av_spectra_per_file, TimeScaleFig,
                                  TimeScaleFig, frequency, freq_points_num,
                                  colormap, 'Channel A', 'Channel B',
                                  fig_file_name, current_date, current_time,
                                  software_version, custom_dpi)

    results_files_list = []
    results_files_list.append(file_data_A_name)
    if Channel == 2:
        results_files_list.append(file_data_B_name)

    return results_files_list
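
# A minimal usage sketch (hypothetical directory and parameter values, not from the source):
# result_files = jds_wf_simple_reader(directory='DATA/', no_of_spectra_to_average=16,
#                                     skip_data_blocks=0, VminNorm=0, VmaxNorm=10,
#                                     colormap='jet', custom_dpi=300, save_long_file_aver=1,
#                                     dyn_spectr_save_init=1, dyn_spectr_save_norm=1)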
def wf32_two_channel_phase_calibration(fname, no_of_points_for_fft_dedisp,
                                       no_of_spectra_in_bunch,
                                       phase_calibr_txt_file):
    """
    Function reads waveform data in wf32 format, makes an FFT, cuts the symmetrical half of the spectra and
    multiplies the complex data by phase calibration data read from a txt file. Then a symmetrical part of the
    spectra is made and joined to the shifted one, the inverse FFT is applied and the data are stored in waveform wf32 format
    Input parameters:
        fname -                         name of file with initial wf32 data
        no_of_points_for_fft_dedisp -   number of waveform data points to use for FFT
        no_of_spectra_in_bunch -        number of spectra to process in one bunch
        phase_calibr_txt_file -         txt file with phase calibration data
    Output parameters:
        file_data_name -                name of file with calibrated data
    """

    # Rename the initial data file so that the new (calibrated) file keeps the original name
    non_calibrated_fname = fname[:-5] + '_without_phase_calibration' + '.wf32'
    calibrated_fname = fname
    print('\n  Phase calibration of one channel \n')
    print('  Old filename of initial file:  ', calibrated_fname)
    print('  New filename of initial file:  ', non_calibrated_fname)

    os.rename(calibrated_fname, non_calibrated_fname)

    #  *** Data file header read ***
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        clock_freq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr,
        time_resolution, fmin, fmax, df, frequency_list, freq_points_num,
        data_block_size
    ] = FileHeaderReaderJDS(non_calibrated_fname, 0, 0)

    # Read phase calibration txt file
    phase_calibr_file = open(phase_calibr_txt_file, 'r')
    phase_vs_freq = []
    for line in phase_calibr_file:
        phase_vs_freq.append(float(line))  # np.float is removed in recent NumPy versions
    phase_calibr_file.close()

    fig = plt.figure(figsize=(9, 5))
    ax1 = fig.add_subplot(111)
    ax1.plot(phase_vs_freq,
             linestyle='-',
             linewidth='1.00',
             label='Phase to add')
    ax1.legend(loc='upper right', fontsize=6)
    ax1.grid(visible=True, which='both', color='silver', linestyle='-')
    ax1.set_ylabel('Phase, a.u.', fontsize=6, fontweight='bold')
    pylab.savefig('00_Phase to add.png', bbox_inches='tight', dpi=160)
    plt.close('all')

    # Converting phase to complex numbers
    # np.complex was removed in recent NumPy versions, use np.complex128 instead
    cmplx_phase = np.zeros((len(phase_vs_freq)), dtype=np.complex128)
    for i in range(len(phase_vs_freq)):
        cmplx_phase[i] = np.cos(
            phase_vs_freq[i]) + 1j * np.sin(phase_vs_freq[i])
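    # (Equivalent vectorized form: cmplx_phase = np.exp(1j * np.array(phase_vs_freq)))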

    # Create long data files and copy first data file header to them
    non_calibr_file_data = open(non_calibrated_fname, 'rb')
    file_header = non_calibr_file_data.read(1024)

    # *** Creating a binary file with data for long data storage ***
    calibr_file_data = open(calibrated_fname, 'wb')
    calibr_file_data.write(file_header)
    calibr_file_data.close()
    del file_header

    # Calculation of number of blocks and number of spectra in the file
    no_of_spectra_per_file = int(
        (df_filesize - 1024) / (no_of_points_for_fft_dedisp * 4))
    no_of_bunches_per_file = math.ceil(no_of_spectra_per_file /
                                       no_of_spectra_in_bunch)
    print('  Number of spectra in bunch:    ', no_of_spectra_in_bunch)
    print('  Number of bunches per file:    ', no_of_bunches_per_file, '')
    print('  Number of spectra per file:    ', no_of_spectra_per_file, '\n')

    non_calibr_file_data.seek(1024)  # Jumping to 1024 byte from file beginning

    bar = IncrementalBar(' Phase calibration of the file: ',
                         max=no_of_bunches_per_file - 1,
                         suffix='%(percent)d%%')

    for bunch in range(no_of_bunches_per_file):

        if bunch < no_of_bunches_per_file - 1:
            pass
        else:
            no_of_spectra_in_bunch = no_of_spectra_per_file - bunch * no_of_spectra_in_bunch
            # print(' Last bunch ', bunch, ', spectra in bunch: ', no_of_spectra_in_bunch)

        bar.next()

        # Reading and reshaping all data with time data
        wf_data = np.fromfile(non_calibr_file_data,
                              dtype='f4',
                              count=no_of_spectra_in_bunch *
                              no_of_points_for_fft_dedisp)

        wf_data = np.reshape(
            wf_data, [no_of_points_for_fft_dedisp, no_of_spectra_in_bunch],
            order='F')

        # preparing matrices for spectra
        spectra = np.zeros(
            (no_of_points_for_fft_dedisp, no_of_spectra_in_bunch),
            dtype='complex64')

        # Calculation of spectra
        for i in range(no_of_spectra_in_bunch):
            spectra[:, i] = np.fft.fft(wf_data[:, i])
        del wf_data

        # Add phase to the data (multiply by complex number)
        for i in range(no_of_spectra_in_bunch):
            spectra[:, i] = spectra[:, i] * cmplx_phase[:]

        # Preparing array for new waveform
        wf_data = np.zeros(
            (no_of_points_for_fft_dedisp, no_of_spectra_in_bunch))

        # Making IFFT
        for i in range(no_of_spectra_in_bunch):
            wf_data[:, i] = np.real(np.fft.ifft(spectra[:, i]))
        del spectra

        # Reshaping the waveform to single dimension (real)
        wf_data = np.reshape(
            wf_data, [no_of_points_for_fft_dedisp * no_of_spectra_in_bunch, 1],
            order='F')

        # Saving waveform data to wf32 file
        calibr_file_data = open(calibrated_fname, 'ab')
        calibr_file_data.write(np.float32(wf_data).transpose().copy(order='C'))
        calibr_file_data.close()

    bar.finish()

    return
file = open(data_filename, 'rb')
df_filename = file.read(32).decode('utf-8').rstrip('\x00')  # Initial data file name
file.close()

receiver_type = df_filename[-4:]

# Reading file header to obtain main parameters of the file
if receiver_type == '.adr':
    [TimeRes, fmin, fmax, df, frequency_list,
     FFTsize] = FileHeaderReaderADR(data_filename, 0)

if receiver_type == '.jds':
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        CLCfrq, df_creation_timeUTC, SpInFile, ReceiverMode, Mode, Navr,
        TimeRes, fmin, fmax, df, frequency_list, FFTsize, dataBlockSize
    ] = FileHeaderReaderJDS(data_filename, 0, 1)

#************************************************************************************
#                            R E A D I N G   D A T A                                *
#************************************************************************************
num_frequencies = len(frequency_list)

# Calculating number of samples per period and number of blocks
samples_per_period = int(np.ceil(pulsar_period / TimeRes))
num_of_blocks = int(np.floor(SpInFile / samples_per_period))

print(' Number of samples per period:  ', samples_per_period, ' \n')
print(' Number of blocks in file:      ', num_of_blocks, ' \n')
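# Worked example (hypothetical numbers, not taken from a real file): for a pulsar period of about
# 1.3 s and a time resolution of about 0.0082 s, samples_per_period = ceil(1.3 / 0.0082) = 159, and
# a file containing SpInFile = 1000000 spectra would give num_of_blocks = floor(1000000 / 159) = 6289.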

if receiver_type == '.jds':
    num_frequencies_initial = len(frequency_list) - 4
def cut_needed_time_points_from_dat_to_txt(path, filename):
    """
    Function to cut the part of pulsar period data with the pulse from a dat file to a txt file.
    The user is asked to specify the start and stop points of the selected pulse period.
    Returns the start and stop point numbers; the data and image are saved to HDD.
    """
    import matplotlib
    matplotlib.use('TkAgg')
    software_version = '2021.08.08'
    current_time = time.strftime("%H:%M:%S")
    current_date = time.strftime("%d.%m.%Y")

    data_file = open(path + '/' + filename, 'rb')
    df_filepath = data_file.read(32).decode('utf-8').rstrip('\x00')  # Initial data file name
    data_file.close()

    if df_filepath[-4:] == '.jds':  # If data obtained from DSPZ receiver

        [df_filepath, df_filesize, df_system_name, df_obs_place, df_description,
         CLCfrq, df_creation_timeUTC, SpInFile, ReceiverMode, Mode, Navr, time_resolution, fmin, fmax,
         df, frequency, freq_points_num, dataBlockSize] = FileHeaderReaderJDS(path + '/' + filename, 0, 0)

    spectra_to_read = int((df_filesize - 1024) / (8 * freq_points_num))

    data_file = open(path + '/' + filename, 'rb')
    data_file.seek(1024, os.SEEK_SET)
    array = np.fromfile(data_file, dtype=np.float64, count=spectra_to_read * freq_points_num)
    array = np.reshape(array, [freq_points_num, spectra_to_read], order='F')
    data_file.close()
    print('  Shape of the array from dat file: ', array.shape)

    # Converting data to logarithmic (dB) scale
    array = 10 * np.log10(array)

    # Normalizing data
    array = array - np.mean(array)

    # Show the pulse profile to select the start and end points of the cut
    fig = plt.figure(figsize=(16.0, 9.0))
    ax1 = fig.add_subplot(111)
    ax1.plot(np.mean(array, axis=0), linewidth='0.50')
    ax1.set_xlim(xmin=0, xmax=array.shape[1])
    ax1.tick_params(axis='both', which='major', labelsize=10)
    ax1.set_xlabel('Time points to select', fontsize=8, fontweight='bold')
    ax1.set_ylabel('Amplitude, a.u.', fontsize=8, fontweight='bold')
    plt.show()

    # Enter the points from the keyboard
    start_point = int(input('\n  Begin from point:                  '))
    end_point = int(input('  End at point:                      '))

    result_array = array[:, start_point: end_point]
    del array
    print('\n  Shape of result array:            ', result_array.shape)

    # Save cut data to the new txt file
    single_pulse_txt = open(path + '/Extracted_s' + filename[1:-4] + '.txt', "w")
    for freq in range(result_array.shape[0] - 1):
        single_pulse_txt.write(' '.join('  {:+12.7E}'.format(result_array[freq, i]) \
                                        for i in range(result_array.shape[1])) + ' \n')
    single_pulse_txt.close()

    # Making result figure with dynamic spectra and profiles in full band and split in subbands
    fig = plt.figure(figsize=(16.0, 8.0))
    gs = GridSpec(3, 5, figure=fig)
    rc('font', size=8, weight='bold')

    ax1 = fig.add_subplot(gs[0:2, 0])
    ax1.set_title('Full band, points: ' + str(start_point) + ' - ' + str(end_point), fontsize=8, fontweight='bold')
    ax1.imshow(np.flipud(result_array), aspect='auto', cmap=colormap, vmin=spectrum_pic_min, vmax=spectrum_pic_max,
               extent=[0, result_array.shape[1], 16.5, 33.0])
    ax1.xaxis.set_ticklabels([])
    ax1.set_ylabel('Frequency, MHz', fontsize=10, fontweight='bold')
    ax2 = fig.add_subplot(gs[2, 0])
    ax2.plot(np.mean(result_array, axis=0), linewidth='0.50', color='C1')
    ax2.set_xlim(xmin=0, xmax=result_array.shape[1])
    ax2.set_xlabel('Time points', fontsize=10, fontweight='bold')
    ax2.set_ylabel('Amplitude, a.u.', fontsize=10, fontweight='bold')

    ax3 = fig.add_subplot(gs[0:2, 1])
    ax3.set_title('16.5 - 20.5 MHz', fontsize=8, fontweight='bold')
    ax3.imshow(np.flipud(result_array[0:1986]), aspect='auto', cmap=colormap, vmin=spectrum_pic_min,
               vmax=spectrum_pic_max, extent=[0, result_array.shape[1], 16.5, 20.5])
    ax3.xaxis.set_ticklabels([])
    ax4 = fig.add_subplot(gs[2, 1])
    ax4.plot(np.mean(result_array[0:1986], axis=0), linewidth='0.50', color='C4')
    ax4.set_xlim(xmin=0, xmax=result_array.shape[1])
    ax4.set_yticklabels([])
    ax4.set_yticks([])
    ax4.set_xlabel('Time points', fontsize=10, fontweight='bold')

    ax5 = fig.add_subplot(gs[0:2, 2])
    ax5.set_title('20.5 - 24.5 MHz', fontsize=8, fontweight='bold')
    ax5.imshow(np.flipud(result_array[1986:3972]), aspect='auto', cmap=colormap, vmin=spectrum_pic_min,
               vmax=spectrum_pic_max, extent=[0, result_array.shape[1], 20.5, 24.5])
    ax5.xaxis.set_ticklabels([])
    ax6 = fig.add_subplot(gs[2, 2])
    ax6.plot(np.mean(result_array[1986:3972], axis=0), linewidth='0.50', color='C4')
    ax6.set_xlim(xmin=0, xmax=result_array.shape[1])
    ax6.set_yticklabels([])
    ax6.set_yticks([])
    ax6.set_xlabel('Time points', fontsize=10, fontweight='bold')

    ax7 = fig.add_subplot(gs[0:2, 3])
    ax7.set_title('24.5 - 28.5 MHz', fontsize=8, fontweight='bold')
    ax7.imshow(np.flipud(result_array[3972:5958]), aspect='auto', cmap=colormap, vmin=spectrum_pic_min, vmax=spectrum_pic_max,
               extent=[0, result_array.shape[1], 24.5, 28.5])
    ax7.xaxis.set_ticklabels([])
    ax8 = fig.add_subplot(gs[2, 3])
    ax8.plot(np.mean(result_array[3972:5958], axis=0), linewidth='0.50', color='C4')
    ax8.set_xlim(xmin=0, xmax=result_array.shape[1])
    ax8.set_yticklabels([])
    ax8.set_yticks([])
    ax8.set_xlabel('Time points', fontsize=10, fontweight='bold')

    ax9 = fig.add_subplot(gs[0:2, 4])
    ax9.set_title('28.5 - 32.5 MHz', fontsize=8, fontweight='bold')
    ax9.imshow(np.flipud(result_array[5958:7944]), aspect='auto', cmap=colormap,
               vmin=spectrum_pic_min, vmax=spectrum_pic_max, extent=[0, result_array.shape[1], 28.5, 32.5])
    ax9.xaxis.set_ticklabels([])
    ax10 = fig.add_subplot(gs[2, 4])
    ax10.plot(np.mean(result_array[5958:7944], axis=0), linewidth='0.50', color='C4')
    ax10.set_xlim(xmin=0, xmax=result_array.shape[1])
    ax10.set_yticklabels([])
    ax10.set_yticks([])
    ax10.set_xlabel('Time points', fontsize=10, fontweight='bold')

    fig.subplots_adjust(hspace=0.00, wspace=0.25, top=0.93)
    fig.suptitle('Result pulse cut from ' + df_filepath + ' (' + df_description + '), DM: ' +
                 find_between(filename, 'DM_', '_') + r' $pc * cm^{-3}$', fontsize=10, fontweight='bold')  #
    fig.text(0.80, 0.05, 'Processed ' + current_date + ' at ' + current_time,
             fontsize=5, transform=plt.gcf().transFigure)
    fig.text(0.09, 0.05, 'Software version: '+software_version+', [email protected], IRA NASU',
             fontsize=5, transform=plt.gcf().transFigure)
    pylab.savefig(path + '/Extracted_s' + filename[1:-4] + '.png', bbox_inches='tight', dpi=customDPI)
    plt.close('all')

    # from PIL import Image
    # img = Image.open(path + '/Extracted_s' + filename[1:-4] + '.png')
    # img.show()

    # import sys
    # import subprocess
    #
    # def openImage(path):
    #     imageViewerFromCommandLine = {'linux': 'xdg-open',
    #                                   'win32': 'explorer',
    #                                   'darwin': 'open'}[sys.platform]
    #     subprocess.Popen([imageViewerFromCommandLine, path])
    #
    # openImage(path + '/Extracted_s' + filename[1:-4] + '.png')

    # # Array preprocessing
    #
    # array = np.flipud(result_array[1024:3972])
    # freq_aver_const = 2
    # time_aver_const = 1
    #
    # # Cutting the array ends which are not divisible to average constants
    # array = array[:(array.shape[0] // freq_aver_const) * freq_aver_const,
    #               :(array.shape[1] // time_aver_const) * time_aver_const]
    #
    # # Time averaging
    # if time_aver_const > 1:
    #     array = array.reshape(array.shape[0], -1, time_aver_const)
    #     array = np.mean(array, axis=2)
    #
    # # Frequency averaging
    # if freq_aver_const > 1:
    #     array = np.transpose(array)
    #     array = np.mean(array.reshape(array.shape[0], -1, freq_aver_const), axis=2)
    #     array = np.transpose(array)
    #
    # # Plot of the pulse dynamic spectrum
    # rc('font', size=8, weight='bold')
    # fig = plt.figure(figsize=(5.0, 8.0))
    # ax1 = fig.add_subplot()
    # # ax1.set_title('Full band, points: ', fontsize=8, fontweight='bold')
    # ax1.imshow(array, aspect='auto', cmap='Greys', vmin=-1.0, vmax=10.0,
    #            extent=[0, array.shape[1], 0, array.shape[0]])
    # ax1.set_ylabel('Frequency, MHz', fontsize=10, fontweight='bold')
    # # fig.subplots_adjust(hspace=0.00, wspace=0.25, top=0.93)
    # # fig.suptitle('Result pulse cut for further processing', fontsize=10, fontweight='bold')
    # # fig.text(0.80, 0.05, 'Processed ' + current_date + ' at ' + current_time,
    # #          fontsize=5, transform=plt.gcf().transFigure)
    # # fig.text(0.09, 0.05, 'Software version: '+software_version+', [email protected], IRA NASU',
    # #          fontsize=5, transform=plt.gcf().transFigure)
    # pylab.savefig(path + '/Extracted_s' + filename[1:-4] + '_dynamic_spectrum.png', bbox_inches='tight', dpi=customDPI)
    # plt.close('all')

    return start_point, end_point
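# Usage sketch (not part of the original example): the path and file name below are hypothetical,
# and the module-level settings used inside the function (colormap, spectrum_pic_min,
# spectrum_pic_max, customDPI, find_between) are assumed to be defined as in the surrounding script.
# if __name__ == '__main__':
#     start, stop = cut_needed_time_points_from_dat_to_txt(
#         'DATA', 'DM_5.755_E220213_201439.jds_Data_chA.dat')
#     print('Selected interval of time points:', start, '-', stop)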
def pulsar_period_DM_compensated_pics(common_path, filename, pulsar_name,
                                      normalize_response, profile_pic_min,
                                      profile_pic_max, spectrum_pic_min,
                                      spectrum_pic_max, periods_per_fig,
                                      customDPI, colormap, save_strongest,
                                      threshold):

    current_time = time.strftime("%H:%M:%S")
    current_date = time.strftime("%d.%m.%Y")

    # Creating a folder where all pictures and results will be stored (if it doesn't exist)
    result_path = "RESULTS_pulsar_n_periods_pics_" + filename
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    if save_strongest:
        best_result_path = result_path + '/Strongest_pulses'
        if not os.path.exists(best_result_path):
            os.makedirs(best_result_path)

    # Taking pulsar period from catalogue
    pulsar_ra, pulsar_dec, DM, p_bar = catalogue_pulsar(pulsar_name)

    # DAT file to be analyzed:
    filepath = common_path + filename

    # Timeline file to be analyzed:
    timeline_filepath = common_path + filename.split(
        '_Data_')[0] + '_Timeline.txt'

    # Opening DAT datafile
    file = open(filepath, 'rb')

    # Data file header read
    df_filesize = os.stat(filepath).st_size  # Size of file
    df_filepath = file.read(32).decode('utf-8').rstrip(
        '\x00')  # Initial data file name
    file.close()

    if df_filepath[-4:] == '.adr':

        [
            df_filepath, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, ReceiverMode, Mode,
            sumDifMode, NAvr, time_resolution, fmin, fmax, df, frequency,
            FFTsize, SLine, Width, BlockSize
        ] = FileHeaderReaderADR(filepath, 0, 0)

        freq_points_num = len(frequency)

    if df_filepath[-4:] == '.jds':  # If data obtained from DSPZ receiver

        [
            df_filepath, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, SpInFile,
            ReceiverMode, Mode, Navr, time_resolution, fmin, fmax, df,
            frequency, freq_points_num, dataBlockSize
        ] = FileHeaderReaderJDS(filepath, 0, 1)

    # ************************************************************************************
    #                             R E A D I N G   D A T A                                *
    # ************************************************************************************

    # Time line file reading
    timeline, dt_timeline = time_line_file_reader(timeline_filepath)

    # Calculation of the dimensions of arrays to read taking into account the pulsar period
    spectra_in_file = int(
        (df_filesize - 1024) /
        (8 * freq_points_num))  # int(df_filesize - 1024)/(2*4*freq_points_num)
    spectra_to_read = int(
        np.round((periods_per_fig * p_bar / time_resolution), 0))
    num_of_blocks = int(np.floor(spectra_in_file / spectra_to_read))

    print('   Pulsar period:                           ', p_bar, 's.')
    print('   Time resolution:                         ', time_resolution,
          's.')
    print('   Number of spectra to read in', periods_per_fig, 'periods:  ',
          spectra_to_read, ' ')
    print('   Number of spectra in file:               ', spectra_in_file, ' ')
    print('   Number of', periods_per_fig, 'periods blocks in file:      ',
          num_of_blocks, '\n')

    # Data reading and making figures
    print('\n\n  *** Data reading and making figures *** \n\n')

    data_file = open(filepath, 'rb')
    data_file.seek(1024, os.SEEK_SET)  # Jumping to the 1024th byte, just past the file header

    bar = IncrementalBar('   Making pictures of n periods: ',
                         max=num_of_blocks + 1,
                         suffix='%(percent)d%%')
    bar.start()

    for block in range(num_of_blocks + 1):  # Main loop by blocks of data

        # bar.next()

        # current_time = time.strftime("%H:%M:%S")
        # print(' * Data block # ', block + 1, ' of ', num_of_blocks + 1, '  started at: ', current_time)

        # Reading the last block which is shorter than periods_per_fig periods
        if block == num_of_blocks:
            spectra_to_read = spectra_in_file - num_of_blocks * spectra_to_read

        # Reading and preparing a block of data (periods_per_fig periods)
        data = np.fromfile(data_file,
                           dtype=np.float64,
                           count=spectra_to_read * len(frequency))
        data = np.reshape(data, [len(frequency), spectra_to_read], order='F')
        data = 10 * np.log10(data)
        if normalize_response > 0:
            Normalization_dB(data.transpose(), len(frequency), spectra_to_read)

        # Preparing single averaged data profile for figure
        profile = data.mean(axis=0)[:]
        profile = profile - np.mean(profile)
        data = data - np.mean(data)

        # Time line
        fig_time_scale = timeline[block * spectra_to_read:(block + 1) *
                                  spectra_to_read]

        # Making result picture
        fig = plt.figure(figsize=(9.2, 4.5))
        rc('font', size=5, weight='bold')
        ax1 = fig.add_subplot(211)
        ax1.plot(profile,
                 color=u'#1f77b4',
                 linestyle='-',
                 alpha=1.0,
                 linewidth='0.60',
                 label='3 pulses time profile')
        ax1.legend(loc='upper right', fontsize=5)
        ax1.grid(b=True,
                 which='both',
                 color='silver',
                 linewidth='0.50',
                 linestyle='-')
        ax1.axis([0, len(profile), profile_pic_min, profile_pic_max])
        ax1.set_ylabel('Amplitude, AU', fontsize=6, fontweight='bold')
        ax1.set_title('File: ' + filename + '  Description: ' +
                      df_description + '  Resolution: ' +
                      str(np.round(df / 1000, 3)) + ' kHz and ' +
                      str(np.round(time_resolution * 1000, 3)) + ' ms.',
                      fontsize=5,
                      fontweight='bold')
        ax1.tick_params(axis='x',
                        which='both',
                        bottom=False,
                        top=False,
                        labelbottom=False)
        ax2 = fig.add_subplot(212)
        ax2.imshow(np.flipud(data),
                   aspect='auto',
                   cmap=colormap,
                   vmin=spectrum_pic_min,
                   vmax=spectrum_pic_max,
                   extent=[0, len(profile), frequency[0], frequency[-1]])
        ax2.set_xlabel('Time UTC (at the lowest frequency), HH:MM:SS.ms',
                       fontsize=6,
                       fontweight='bold')
        ax2.set_ylabel('Frequency, MHz', fontsize=6, fontweight='bold')
        text = ax2.get_xticks().tolist()
        for i in range(len(text) - 1):
            k = int(text[i])
            text[i] = fig_time_scale[k][11:23]
        ax2.set_xticklabels(text, fontsize=5, fontweight='bold')
        fig.subplots_adjust(hspace=0.05, top=0.91)
        fig.suptitle('Single pulses of ' + pulsar_name + ' (DM: ' + str(DM) +
                     r' $\mathrm{pc \cdot cm^{-3}}$' + ', Period: ' +
                     str(p_bar) + ' s.), fig. ' + str(block + 1) + ' of ' +
                     str(num_of_blocks + 1),
                     fontsize=7,
                     fontweight='bold')
        fig.text(0.80,
                 0.04,
                 'Processed ' + current_date + ' at ' + current_time,
                 fontsize=3,
                 transform=plt.gcf().transFigure)
        fig.text(0.09,
                 0.04,
                 'Software version: ' + Software_version +
                 ', [email protected], IRA NASU',
                 fontsize=3,
                 transform=plt.gcf().transFigure)
        pylab.savefig(result_path + '/' + filename + ' fig. ' +
                      str(block + 1) + ' - Combined picture.png',
                      bbox_inches='tight',
                      dpi=customDPI)

        # If the profile has points above threshold save picture also into separate folder
        if save_strongest and np.max(profile) > threshold:
            pylab.savefig(best_result_path + '/' + filename + ' fig. ' +
                          str(block + 1) + ' - Combined picture.png',
                          bbox_inches='tight',
                          dpi=customDPI)
        plt.close('all')

        bar.next()

    bar.finish()
    data_file.close()
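# Usage sketch (not part of the original example): the path, file name, pulsar name and plotting
# limits below are hypothetical and must be adapted to the actual dedispersed .dat file.
# if __name__ == '__main__':
#     pulsar_period_DM_compensated_pics(common_path='DATA/',
#                                       filename='DM_5.755_E220213_201439.jds_Data_chA.dat',
#                                       pulsar_name='B0809+74', normalize_response=0,
#                                       profile_pic_min=-0.15, profile_pic_max=0.55,
#                                       spectrum_pic_min=-0.2, spectrum_pic_max=3.0,
#                                       periods_per_fig=3, customDPI=300, colormap='Greys',
#                                       save_strongest=True, threshold=0.25)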
def check_if_JDS_files_of_equal_parameters(folder_path, file_list):
    '''
    The function checks if the main parameters of the JDS files are equal (i.e. whether they are from the same observation)
    Input parameters:
        folder_path - path to folder with files
        file_list - list of files in the folder to check
    Output parameters:
        equal_or_not - "1" if files have equal parameters, "0" - otherwise
    '''
    df_system_name_list = []
    df_obs_place_list = []
    df_description_list = []
    ReceiverMode_list = []
    TimeRes_list = []
    fmin_list = []
    FreqPointsNum_list = []
    dataBlockSize_list = []

    for file_no in range(len(file_list)):
        filepath = folder_path + file_list[file_no]

        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, SpInFile,
            ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency,
            FreqPointsNum, dataBlockSize
        ] = FileHeaderReaderJDS(filepath, 0, 0)

        df_system_name_list.append(df_system_name)
        df_obs_place_list.append(df_obs_place)
        df_description_list.append(df_description)
        ReceiverMode_list.append(ReceiverMode)
        TimeRes_list.append(TimeRes)
        fmin_list.append(fmin)
        FreqPointsNum_list.append(FreqPointsNum)
        dataBlockSize_list.append(dataBlockSize)

    i = 0
    if df_system_name_list.count(
            df_system_name_list[0]) == len(df_system_name_list):
        i = i + 1
    if df_obs_place_list.count(df_obs_place_list[0]) == len(df_obs_place_list):
        i = i + 1
    if df_description_list.count(
            df_description_list[0]) == len(df_description_list):
        i = i + 1
    if ReceiverMode_list.count(ReceiverMode_list[0]) == len(ReceiverMode_list):
        i = i + 1
    if TimeRes_list.count(TimeRes_list[0]) == len(TimeRes_list): i = i + 1
    if fmin_list.count(fmin_list[0]) == len(fmin_list): i = i + 1
    if FreqPointsNum_list.count(
            FreqPointsNum_list[0]) == len(FreqPointsNum_list):
        i = i + 1
    if dataBlockSize_list.count(
            dataBlockSize_list[0]) == len(dataBlockSize_list):
        i = i + 1

    if i == 8:
        equal_or_not = 1
        print('   OK: all files have the same parameters!')
    else:
        equal_or_not = 0
        print(
            '\n **********************************************************\n !!!   WARNING: Parameters of files in folder differ    !!! \n **********************************************************'
        )
        print('\n   * Check the text parameters of the files in the list: \n')
        for file_no in range(len(file_list)):
            print('   ', file_no + 1, ') ', df_system_name_list[file_no], '  ',
                  df_obs_place_list[file_no], '  ',
                  df_description_list[file_no])
        print('\n   * Check numerical parameters of the files in list: \n')
        print(
            '   No   Receiver mode   Time res., s   Fmin, MHz   Freq. points   Block size\n'
        )
        for file_no in range(len(file_list)):
            #print('   ',  file_no+1 ,')    ', str(ADRmode_list[file_no]), '     ',str(sumDifMode_list[file_no]), '   ',np.round(TimeRes_list[file_no], 6), '   ', FFT_Size_list[file_no], '  ', SLine_list[file_no], Width_list[file_no], ' ', BlockSize_list[file_no])
            print('  {:0>4d}'.format(file_no + 1), '       ',
                  (ReceiverMode_list[file_no]),
                  '      {:.6f}'.format(np.round(TimeRes_list[file_no], 6)),
                  '       {:1.0f}'.format(fmin_list[file_no]),
                  '         {:1.0f}'.format(FreqPointsNum_list[file_no]),
                  '     {:6.0f}'.format(dataBlockSize_list[file_no]))

    return equal_or_not
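# Usage sketch (not part of the original example): the folder and file names are hypothetical.
# if __name__ == '__main__':
#     jds_files = ['E220213_201439.jds', 'E220213_204159.jds']
#     if check_if_JDS_files_of_equal_parameters('DATA/', jds_files):
#         print('Files can be processed together as a single observation')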
def convert_wf32_to_dat_with_overlap(fname, no_of_points_for_fft_spectr,
                                     no_of_spectra_in_bunch, hanning_window):
    """
        function converts waveform data in .wf32 format to spectra in .dat format
        : fname : name of .wf32 file with waveform data
        : no_of_points_for_fft : number of points for FFT to provide necessary time-frequency resolution
        : return : file_data_name - name of .dat file with result spectra
    """

    # *** Data file header read ***
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        clock_freq, df_time_utc, channel, receiver_mode, mode, n_avr,
        time_resolution, fmin, fmax, df, frequency, freq_points_num,
        data_block_size
    ] = FileHeaderReaderJDS(fname, 0, 0)

    freq_points_num = int(no_of_points_for_fft_spectr / 2)

    with open(fname, 'rb') as file:
        # *** Data file header read ***
        file_header = file.read(1024)

        # *** Creating a binary file with spectra data for long data storage ***
        file_data_name = fname[:-5] + '.dat'
        file_data = open(file_data_name, 'wb')
        file_data.write(file_header)
        file_data.seek(574)  # FFT size place in header
        file_data.write(np.int32(no_of_points_for_fft_spectr).tobytes())
        file_data.seek(624)  # Lb place in header
        file_data.write(np.int32(0).tobytes())
        file_data.seek(628)  # Hb place in header
        file_data.write(np.int32(freq_points_num).tobytes())
        file_data.seek(632)  # Wb place in header
        file_data.write(np.int32(freq_points_num).tobytes())
        file_data.seek(636)
        file_data.write(np.int32(1).tobytes())  # Seem to work OK
        file_data.close()
        del file_header

        # Calculation of number of blocks and number of spectra in the file
        no_of_bunches_per_file = int(
            (df_filesize - 1024) /
            ((no_of_spectra_in_bunch + 0.5) * no_of_points_for_fft_spectr * 4))

        # Real time resolution of averaged spectra
        fine_clock_freq = int(clock_freq / 1000000.0) * 1000000.0
        real_spectra_dt = float(no_of_points_for_fft_spectr / fine_clock_freq)
        real_spectra_df = float(
            (fine_clock_freq / 2) / (no_of_points_for_fft_spectr / 2))

        print(' Number of spectra in bunch:                  ',
              no_of_spectra_in_bunch)
        print(' Sampling clock frequency:                    ',
              fine_clock_freq, ' Hz')
        print(' Number of bunches to read in file:           ',
              no_of_bunches_per_file)
        print(' Time resolution of calculated spectra:       ',
              round(real_spectra_dt * 1000, 3), ' ms')
        print(' Frequency resolution of calculated spectra:  ',
              round(real_spectra_df / 1000, 3), ' kHz \n')
        # print('\n  Reading data from file \n')

        file.seek(1024)  # Jumping to 1024 byte from file beginning

        # *** Creating a new timeline TXT file for results ***
        new_tl_file_name = file_data_name.split('_Data_',
                                                1)[0] + '_Timeline.txt'
        new_tl_file = open(
            new_tl_file_name,
            'w')  # Open and close to delete the file with the same name
        new_tl_file.close()

        # *** Reading timeline file ***
        old_tl_file_name = fname.split("_Data_", 1)[0] + '_Timeline.wtxt'
        old_tl_file = open(old_tl_file_name, 'r')
        new_tl_file = open(new_tl_file_name, 'w')  # Open the new timeline file for writing the results

        # Making the variable for half length of the spectrum for convenience
        half_of_spectrum = int(no_of_points_for_fft_spectr / 2)

        # Making a small buffer vector to store the last half of the spectrum for the next loop step
        buffer = np.zeros(half_of_spectrum)

        bar = IncrementalBar(' Conversion from waveform to spectra: ',
                             max=no_of_bunches_per_file - 1,
                             suffix='%(percent)d%%')
        bar.start()

        for bunch in range(no_of_bunches_per_file - 1):

            # print('Bunch # ', bunch, ' of ', no_of_bunches_per_file - 1)

            # Read time from timeline file for the bunch
            time_scale_bunch = []
            for line in range(no_of_spectra_in_bunch):
                tmp = str(old_tl_file.readline())
                time_scale_bunch.append(tmp)  # append the current value
                time_scale_bunch.append(
                    tmp
                )  # append once more the same value for timing of fft with overlap
            # Saving time data to new file
            for i in range(len(time_scale_bunch)):
                new_tl_file.write((time_scale_bunch[i][:]) + '')

            # Reading and reshaping data of the bunch
            wf_data = np.fromfile(file,
                                  dtype='f4',
                                  count=no_of_spectra_in_bunch *
                                  no_of_points_for_fft_spectr)

            wf_data = np.concatenate((buffer, wf_data), axis=0)

            # Save new data from the end to the buffer
            buffer = wf_data[-half_of_spectrum:].copy()

            # Selecting the needed sequence of the data and reshaping to rectangular array
            wf_data_1 = np.reshape(
                wf_data[:-half_of_spectrum].copy(),
                [no_of_points_for_fft_spectr, no_of_spectra_in_bunch],
                order='F')
            wf_data_2 = np.reshape(
                wf_data[half_of_spectrum:].copy(),
                [no_of_points_for_fft_spectr, no_of_spectra_in_bunch],
                order='F')
            wf_data_1 = np.transpose(wf_data_1)
            wf_data_2 = np.transpose(wf_data_2)
            del wf_data

            # Merging the 2 arrays into one rectangular array in interleaved (one-by-one) row order
            wf_data = np.zeros(
                (2 * no_of_spectra_in_bunch, no_of_points_for_fft_spectr))
            wf_data[0::2, :] = wf_data_1[:, :]
            wf_data[1::2, :] = wf_data_2[:, :]
            del wf_data_1, wf_data_2

            # Apply window to data before FFT (note: a Hamming window is used, although the flag is named hanning_window)
            if hanning_window:
                # window = np.hanning(no_of_points_for_fft_spectr)
                window = np.hamming(no_of_points_for_fft_spectr)
                wf_data[:] = wf_data[:] * window[:]
                del window

            # Preparing empty array for spectra
            spectra = np.zeros_like(wf_data, dtype=np.float64)

            # Calculation of spectra
            spectra[:] = np.power(np.abs(np.fft.fft(wf_data[:])), 2)
            # spectra[:] = np.abs(np.fft.fft(np.power(wf_data[:], 2))) # Does not work
            # spectra[:, i] = np.power(np.abs(np.fft.fft(wf_data[:, i])), 2)
            del wf_data

            # Storing only first (left) mirror part of spectra
            spectra = spectra[:, :int(no_of_points_for_fft_spectr / 2)]
            # spectra = spectra[: int(no_of_points_for_fft_spectr / 2), :]

            # At 33 MHz clock frequency the spectrum is upside down; to correct it we use a left/right flip
            if int(clock_freq / 1000000) == 33:
                # spectra = np.flipud(spectra)
                spectra = np.fliplr(spectra)

            # Saving spectra data to dat file
            temp = spectra.copy(order='C')
            file_data = open(file_data_name, 'ab')
            file_data.write(np.float64(temp))
            file_data.close()

            bar.next()

        bar.finish()

    file.close()  # Close the data file
    return file_data_name
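# Usage sketch (not part of the original example; the file name and FFT parameters are hypothetical).
# Because each bunch is Fourier-transformed twice - once as read and once shifted by half a frame -
# the resulting .dat file holds twice as many spectra per unit time as the non-overlapped converter below.
# if __name__ == '__main__':
#     dat_file = convert_wf32_to_dat_with_overlap('E220213_201439.jds_Data_chA.wf32',
#                                                 no_of_points_for_fft_spectr=16384,
#                                                 no_of_spectra_in_bunch=512,
#                                                 hanning_window=True)
#     print('Spectra stored in:', dat_file)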
def make_long_spectra_files_from_wf(directory, fileList, result_folder):
    '''
    Makes fft and saves spectra to the long data files
    '''

    # Preparing long data files
    fname = directory + fileList[0]
    [df_filename, df_filesize, df_system_name, df_obs_place, df_description,
     CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr, TimeRes, fmin, fmax,
     df, frequency, FreqPointsNum, data_block_size] = FileHeaderReaderJDS(fname, 0, 1)

    no_of_blocks_in_file = (df_filesize - 1024) / data_block_size
    print(' Number of blocks in file:             ', no_of_blocks_in_file)

    no_of_blocks_in_batch = int(no_of_blocks_in_file / (2 * no_of_batches_in_file))
    print(' Number of blocks in batch:            ', no_of_blocks_in_batch)

    with open(fname, 'rb') as file:
        # *** Data file header read ***
        file_header = file.read(1024)

    # *** Creating a name for long timeline TXT file ***
    TLfile_name = result_folder + '/' + df_filename + '_Timeline.txt'
    TLfile = open(TLfile_name, 'w')  # Open and close to delete the file with the same name
    TLfile.close()

    # *** Creating a binary file with data for long data storage ***
    file_data_re_name = result_folder + '/' + df_filename + '_Data_WRe.dat'
    file_data_re = open(file_data_re_name, 'wb')
    file_data_re.write(file_header)
    file_data_re.close()

    file_data_im_name = result_folder + '/' + df_filename + '_Data_WIm.dat'
    file_data_im = open(file_data_im_name, 'wb')
    file_data_im.write(file_header)
    file_data_im.close()

    for fileNo in range(len(fileList)):  # loop by files
        #print('\n\n\n  *  File ', str(fileNo + 1), ' of', str(len(fileList)))
        #print('  *  File path: ', str(fileList[fileNo]))

        # *** Opening datafile ***
        fname = directory + fileList[fileNo]

        # *********************************************************************************

        # *** Data file header read ***
        [df_filename, df_filesize, df_system_name, df_obs_place, df_description,
         CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr, TimeRes, fmin, fmax,
         df, frequency, FreqPointsNum, data_block_size] = FileHeaderReaderJDS(fname, 0, 0)

        # *******************************************************************************
        #                          R E A D I N G   D A T A                             *
        # *******************************************************************************

        #print('\n  *** Reading data from file *** \n')

        with open(fname, 'rb') as file:
            file.seek(1024)  # Jumping to 1024 byte from file beginning #+ (sizeOfChunk+8) * chunkSkip
            TimeScaleFull = []

            bar = IncrementalBar(' File ' + str(fileNo + 1) + ' of ' + str(len(fileList)) + ' progress: ',
                                 max=no_of_batches_in_file, suffix='%(percent)d%%')

            for batch in range(no_of_batches_in_file):  #

                bar.next()

                # Reading and reshaping all data with readers
                if Channel == 0 or Channel == 1:  # Single channel mode

                    wf_data = np.fromfile(file, dtype='i2', count=no_of_blocks_in_batch * data_block_size)
                    wf_data = np.reshape(wf_data, [data_block_size, no_of_blocks_in_batch], order='F')

                # Timing
                timeline_block_str = jds_waveform_time(wf_data, CLCfrq, data_block_size)
                #TimeScaleFig.append(timeline_block_str[-1][0:12])
                for j in range (no_of_blocks_in_batch):
                    TimeScaleFull.append(df_creation_timeUTC[0:10] + ' ' + timeline_block_str[j][0:12])

                # Nulling the time blocks in waveform data
                wf_data[data_block_size - 4: data_block_size, :] = 0

                # Scaling of the data - seems to be wrong in absolute value
                wf_data = wf_data / 32768.0

                spectra_chA = np.zeros([data_block_size, no_of_blocks_in_batch], dtype=complex)
                for i in range(no_of_blocks_in_batch):
                    spectra_chA[:, i] = np.fft.fft(wf_data[:, i])

                # Storing only the first (left) half of the spectra (the second half is its mirror image)
                spectra_chA = spectra_chA[0: int(data_block_size / 2), :]

                if batch == 0:
                    plt.figure(1, figsize=(10.0, 6.0))
                    plt.subplots_adjust(left=None, bottom=0, right=None, top=0.86, wspace=None, hspace=None)
                    plt.plot(np.log10(np.power(np.abs(spectra_chA[:, 0]), 2)), label='First spectrum')
                    plt.title('Title', fontsize=10, fontweight='bold', style='italic', y=1.025)
                    plt.legend(loc='upper right', fontsize=10)
                    plt.ylabel('Amplitude, a.u.', fontsize=10, fontweight='bold')
                    plt.xlabel('Frequency, counts', fontsize=10, fontweight='bold')
                    plt.yticks(fontsize=8, fontweight='bold')
                    plt.xticks(fontsize=8, fontweight='bold')
                    pylab.savefig(result_folder + '/Fig. 1.png', bbox_inches='tight', dpi=customDPI)
                    plt.close('all')

                temp = np.real(spectra_chA).copy(order='C')

                file_data_re = open(file_data_re_name, 'ab')
                file_data_re.write(temp)
                file_data_re.close()

                temp = np.imag(spectra_chA).copy(order='C')

                file_data_im = open(file_data_im_name, 'ab')
                file_data_im.write(temp)
                file_data_im.close()

                # Saving time data of the current batch to the long timeline file
                with open(TLfile_name, 'a') as TLfile:
                    for i in range(no_of_blocks_in_batch):
                        TLfile.write(TimeScaleFull[batch * no_of_blocks_in_batch + i] + ' \n')

    return file_data_re_name, file_data_im_name, TLfile_name
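# Usage sketch (not part of the original example): the directory, file list and result folder are
# hypothetical, and the module-level settings no_of_batches_in_file and customDPI are assumed to be set.
# if __name__ == '__main__':
#     re_name, im_name, tl_name = make_long_spectra_files_from_wf(
#         'DATA/', ['E220213_201439.jds'], 'RESULTS_long_spectra')
#     print('Real part file: ', re_name)
#     print('Imag part file: ', im_name)
#     print('Timeline file:  ', tl_name)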
def convert_wf32_to_dat_without_overlap(fname, no_of_points_for_fft_spectr,
                                        no_of_spectra_in_bunch):
    """
    Converts waveform data in .wf32 format to spectra in .dat format
    Input parameters:
        fname -                 name of .wf32 file with waveform data
        no_of_points_for_fft -  number of points for FFT to provide necessary time-frequency resolution
    Output parameters:
        file_data_name -        name of .dat file with result spectra
    """

    # *** Data file header read ***
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        clock_freq, df_time_utc, channel, receiver_mode, mode, n_avr,
        time_resolution, fmin, fmax, df, frequency, freq_points_num,
        data_block_size
    ] = FileHeaderReaderJDS(fname, 0, 0)

    freq_points_num = int(no_of_points_for_fft_spectr / 2)

    with open(fname, 'rb') as file:
        # Data file header read
        file_header = file.read(1024)

        # Creating a binary file with spectra data for long data storage
        file_data_name = fname[:-5] + '.dat'
        file_data = open(file_data_name, 'wb')
        file_data.write(file_header)
        file_data.seek(574)  # FFT size place in header
        file_data.write(np.int32(no_of_points_for_fft_spectr).tobytes())
        file_data.seek(624)  # Lb place in header
        file_data.write(np.int32(0).tobytes())
        file_data.seek(628)  # Hb place in header
        file_data.write(np.int32(freq_points_num).tobytes())
        file_data.seek(632)  # Wb place in header
        file_data.write(np.int32(freq_points_num).tobytes())
        file_data.seek(636)  # Navr place in header
        file_data.write(np.int32(1).tobytes())  # Works fine
        file_data.close()
        del file_header

        # Calculation of number of blocks and number of spectra in the file
        no_of_bunches_per_file = int(
            (df_filesize - 1024) /
            (no_of_spectra_in_bunch * no_of_points_for_fft_spectr * 4))

        # Real time resolution of averaged spectra
        fine_clock_freq = (int(clock_freq / 1000000.0) * 1000000.0)
        real_spectra_dt = float(no_of_points_for_fft_spectr / fine_clock_freq)
        real_spectra_df = float(
            (fine_clock_freq / 2) / (no_of_points_for_fft_spectr / 2))

        print(' Number of spectra in bunch:                  ',
              no_of_spectra_in_bunch)
        print(' Number of bunches to read in file:           ',
              no_of_bunches_per_file)
        print(' Time resolution of calculated spectra:       ',
              round(real_spectra_dt * 1000, 3), ' ms')
        print(' Frequency resolution of calculated spectra:  ',
              round(real_spectra_df / 1000, 3), ' kHz')
        print('\n  *** Reading data from file *** \n')

        file.seek(1024)  # Jumping to 1024 byte from file beginning

        # *** Creating a new timeline TXT file for results ***
        new_tl_file_name = file_data_name.split('_Data_',
                                                1)[0] + '_Timeline.txt'
        new_tl_file = open(
            new_tl_file_name,
            'w')  # Open and close to delete the file with the same name
        new_tl_file.close()

        # *** Reading timeline file ***
        old_tl_file_name = fname.split("_Data_", 1)[0] + '_Timeline.wtxt'
        old_tl_file = open(old_tl_file_name, 'r')
        new_tl_file = open(new_tl_file_name, 'w')  # Open the new timeline file for writing the results

        bar = IncrementalBar(' Conversion from waveform to spectra: ',
                             max=no_of_bunches_per_file - 1,
                             suffix='%(percent)d%%')

        for bunch in range(no_of_bunches_per_file - 1):

            bar.next()

            # Read time from timeline file for the bunch
            time_scale_bunch = []
            for line in range(no_of_spectra_in_bunch):
                time_scale_bunch.append(str(old_tl_file.readline()))
            # Saving time data to new file
            for i in range(len(time_scale_bunch)):
                new_tl_file.write((time_scale_bunch[i][:]) + '')

            # Reading and reshaping data of the bunch
            wf_data = np.fromfile(file,
                                  dtype='f4',
                                  count=no_of_spectra_in_bunch *
                                  no_of_points_for_fft_spectr)
            wf_data = np.reshape(
                wf_data, [no_of_points_for_fft_spectr, no_of_spectra_in_bunch],
                order='F')

            # Optional window before the FFT (currently disabled):
            # window = np.hamming(no_of_points_for_fft_spectr)
            # for i in range(no_of_spectra_in_bunch):
            #     wf_data[:, i] = wf_data[:, i] * window[:]
            # del window

            # preparing matrices for spectra
            spectra = np.zeros_like(wf_data)

            # Calculation of spectra
            for i in range(no_of_spectra_in_bunch):
                spectra[:, i] = np.power(np.abs(np.fft.fft(wf_data[:, i])), 2)

            # Storing only first (left) mirror part of spectra
            spectra = spectra[:int(no_of_points_for_fft_spectr / 2), :]

            # At 33 MHz clock frequency the spectrum is upside down; to correct it we use flip up/down
            if int(clock_freq / 1000000) == 33:
                spectra = np.flipud(spectra)

            # Saving spectra data to dat file
            temp = spectra.transpose().copy(order='C')
            file_data = open(file_data_name, 'ab')
            file_data.write(np.float64(temp))
            file_data.close()

        bar.finish()

    file.close()  # Close the data file
    return file_data_name
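# Usage sketch (not part of the original example; the file name and FFT parameters are hypothetical):
# if __name__ == '__main__':
#     dat_file = convert_wf32_to_dat_without_overlap('E220213_201439.jds_Data_chA.wf32',
#                                                    no_of_points_for_fft_spectr=16384,
#                                                    no_of_spectra_in_bunch=512)
#     print('Spectra stored in:', dat_file)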
Example No. 21
0
def obtain_calibr_matrix_for_2_channel_wf_calibration(path_to_calibr_data,
                                                      no_of_points_for_fft):
    """
    The function reads 2-channel waveform calibration files (UTR-2 noise generator calibration with a set of
    attenuators) calculates the cross-spectra of two channels in each file and provides a phase difference txt file
    for pulsar waveform observations calibration

    """

    file_list = find_and_check_files_in_current_folder(path_to_calibr_data,
                                                       '.jds')

    labels = []
    cross_sp_ampl = []
    cross_sp_angl = []
    file_names = []
    spectrum_ch_1 = []
    spectrum_ch_2 = []
    imed_spectrum_ch_1 = []
    imed_spectrum_ch_2 = []
    corr_f_abs = []
    corr_f_ang = []
    corr_f_re = []

    result_path = 'RESULTS_WF_calibration_analyzer/'
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    # Main loop by files start
    for file_no in range(len(file_list)):  # loop by files

        fname = path_to_calibr_data + file_list[file_no]

        # *** Data file header read ***
        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, clock_freq, df_creation_timeUTC, channel,
            receiver_mode, Mode, Navr, time_res, fmin, fmax, df, frequency,
            freq_points_num, data_block_size
        ] = FileHeaderReaderJDS(fname, 0, 0)

        labels.append(df_system_name + ' ' + df_description.replace('_', ' '))
        file_names.append(df_filename)

        print('\n  Processing file: ', df_description.replace('_', ' '),
              ',  # ', file_no + 1, ' of ', len(file_list), '\n')

        wf32_files = convert_one_jds_wf_to_wf32(fname, result_directory, 16)

        ampl_corr, angle_corr, av_sp_1, av_sp_2, sp_1, sp_2, cf_abs, cf_arg, cf_re = correlate_two_wf32_signals(
            wf32_files[0], wf32_files[1], no_of_points_for_fft, True, False)

        cross_sp_ampl.append(ampl_corr)
        cross_sp_angl.append(angle_corr)
        spectrum_ch_1.append(av_sp_1)
        spectrum_ch_2.append(av_sp_2)
        imed_spectrum_ch_1.append(sp_1)
        imed_spectrum_ch_2.append(sp_2)
        corr_f_abs.append(cf_abs)
        corr_f_ang.append(cf_arg)
        corr_f_re.append(cf_re)

    # Figures of initial and averaged spectra for each file
    for i in range(len(file_list)):
        # Figure of averaged and non-averaged spectra
        rc('font', size=10, weight='bold')
        fig = plt.figure(figsize=(18, 10))
        fig.suptitle(
            'Comparison of current and averaged spectra of waveform signals for '
            + file_list[i] + ' (' + labels[i] + ')',
            fontsize=12,
            fontweight='bold')
        ax1 = fig.add_subplot(211)
        ax1.plot(imed_spectrum_ch_1[i],
                 linestyle='-',
                 linewidth='1.00',
                 label='Current spectrum')
        ax1.plot(spectrum_ch_1[i],
                 linestyle='-',
                 linewidth='1.00',
                 label='Averaged spectrum')
        ax1.set(xlim=(0, no_of_points_for_fft // 2))
        ax1.legend(loc='upper right', fontsize=10)
        ax2 = fig.add_subplot(212)
        ax2.plot(imed_spectrum_ch_2[i],
                 linestyle='-',
                 linewidth='1.00',
                 label='Current spectrum')
        ax2.plot(spectrum_ch_2[i],
                 linestyle='-',
                 linewidth='1.00',
                 label='Averaged spectrum')
        ax2.set(xlim=(0, no_of_points_for_fft // 2))
        ax2.set_xlabel('Frequency channels, #', fontsize=10, fontweight='bold')
        ax2.legend(loc='upper right', fontsize=10)
        fig.subplots_adjust(hspace=0.07, top=0.94)
        pylab.savefig(result_path + 'Signal_spectra_' + file_names[i] + '.png',
                      bbox_inches='tight',
                      dpi=160)
        plt.close('all')

        # Plot cross spectra matrix
        rc('font', size=10, weight='bold')
        fig = plt.figure(figsize=(18, 10))
        fig.suptitle(
            'Calibration matrix of waveform signals correlation for ' +
            file_list[i] + ' (' + labels[i] + ')',
            fontsize=12,
            fontweight='bold')
        ax1 = fig.add_subplot(211)
        ax1.set_title('Files: ' + file_names[0] + ' - ' + file_names[-1],
                      fontsize=12)
        ax1.plot(np.log10(cross_sp_ampl[i]),
                 linestyle='-',
                 linewidth='1.30',
                 label='Cross spectra amplitude')
        ax1.legend(loc='upper right', fontsize=10)
        ax1.set(xlim=(0, no_of_points_for_fft // 2))
        ax1.set_ylabel('Amplitude, A.U.', fontsize=10, fontweight='bold')
        ax2 = fig.add_subplot(212)
        ax2.plot(cross_sp_angl[i],
                 linestyle='-',
                 linewidth='1.30',
                 label='Cross spectra phase')
        ax2.set(xlim=(0, no_of_points_for_fft // 2))
        ax2.set_xlabel('Frequency channels, #', fontsize=10, fontweight='bold')
        ax2.set_ylabel('Phase, rad', fontsize=10, fontweight='bold')
        ax2.legend(loc='upper right', fontsize=10)
        fig.subplots_adjust(hspace=0.07, top=0.94)
        pylab.savefig(result_path + 'WF_signal_correlation_' + file_names[i] +
                      '.png',
                      bbox_inches='tight',
                      dpi=160)
        plt.close('all')

    # Plot calibration spectra matrix
    rc('font', size=10, weight='bold')
    fig = plt.figure(figsize=(18, 10))
    fig.suptitle('Calibration matrix of waveform signals',
                 fontsize=12,
                 fontweight='bold')
    ax1 = fig.add_subplot(211)
    ax1.set_title('Files: ' + file_names[0] + ' - ' + file_names[-1],
                  fontsize=12)
    for i in range(len(spectrum_ch_1)):
        ax1.plot(spectrum_ch_1[i],
                 linestyle='-',
                 linewidth='1.30',
                 label=labels[i])
    ax1.legend(loc='upper right', fontsize=10)
    ax1.set(xlim=(0, no_of_points_for_fft // 2))
    ax1.set_ylabel('Signal, A.U.', fontsize=10, fontweight='bold')
    ax2 = fig.add_subplot(212)
    for i in range(len(spectrum_ch_2)):
        ax2.plot(spectrum_ch_2[i],
                 linestyle='-',
                 linewidth='1.30',
                 label=labels[i])
    ax2.set(xlim=(0, no_of_points_for_fft // 2))
    ax2.set_xlabel('Frequency channels, #', fontsize=10, fontweight='bold')
    ax2.set_ylabel('Signal, A.U.', fontsize=10, fontweight='bold')
    ax2.legend(loc='upper right', fontsize=10)
    fig.subplots_adjust(hspace=0.07, top=0.94)
    pylab.savefig(result_path + 'Calibration_matrix_wf_spectra.png',
                  bbox_inches='tight',
                  dpi=160)
    plt.close('all')

    # Plot cross spectra matrix
    rc('font', size=10, weight='bold')
    fig = plt.figure(figsize=(18, 10))
    fig.suptitle('Cross spectra matrix of waveform signals correlation',
                 fontsize=12,
                 fontweight='bold')
    ax1 = fig.add_subplot(211)
    ax1.set_title('Files: ' + file_names[0] + ' - ' + file_names[-1],
                  fontsize=12)
    for i in range(len(cross_sp_ampl)):
        ax1.plot(np.log10(cross_sp_ampl[i]),
                 linestyle='-',
                 linewidth='1.30',
                 label=labels[i])
    ax1.legend(loc='upper right', fontsize=10)
    ax1.set(xlim=(0, no_of_points_for_fft // 2))
    ax1.set_ylabel('Amplitude, A.U.', fontsize=10, fontweight='bold')
    ax2 = fig.add_subplot(212)
    for i in range(len(cross_sp_angl)):
        ax2.plot(cross_sp_angl[i],
                 linestyle='-',
                 linewidth='1.30',
                 label=labels[i])
    ax2.set(xlim=(0, no_of_points_for_fft // 2))
    ax2.set_xlabel('Frequency channels, #', fontsize=10, fontweight='bold')
    ax2.set_ylabel('Phase, rad', fontsize=10, fontweight='bold')
    ax2.legend(loc='upper right', fontsize=10)
    fig.subplots_adjust(hspace=0.07, top=0.94)
    pylab.savefig(result_path + 'Calibration_matrix_wf_cross_spectra.png',
                  bbox_inches='tight',
                  dpi=160)
    plt.close('all')

    # Plot correlation matrix
    rc('font', size=10, weight='bold')
    fig = plt.figure(figsize=(18, 10))
    fig.suptitle('Correlation matrix of waveform signals',
                 fontsize=12,
                 fontweight='bold')
    ax1 = fig.add_subplot(211)
    ax1.set_title('Files: ' + file_names[0] + ' - ' + file_names[-1],
                  fontsize=12)
    for i in range(len(corr_f_abs)):
        ax1.plot(np.log10(corr_f_abs[i]),
                 linestyle='-',
                 linewidth='1.30',
                 label=labels[i])
    ax1.legend(loc='upper right', fontsize=10)
    ax1.set(xlim=(0, no_of_points_for_fft // 1))
    ax1.set_ylabel('Amplitude, A.U.', fontsize=10, fontweight='bold')
    ax2 = fig.add_subplot(212)
    for i in range(len(corr_f_ang)):
        ax2.plot(corr_f_ang[i],
                 linestyle='-',
                 linewidth='1.30',
                 label=labels[i])
    ax2.set(xlim=(0, no_of_points_for_fft // 1))
    ax2.set_xlabel('Frequency channels, #', fontsize=10, fontweight='bold')
    ax2.set_ylabel('Phase, rad', fontsize=10, fontweight='bold')
    ax2.legend(loc='upper right', fontsize=10)
    fig.subplots_adjust(hspace=0.07, top=0.94)
    pylab.savefig(result_path + 'Calibration_matrix_wf_correlation.png',
                  bbox_inches='tight',
                  dpi=160)
    plt.close('all')

    # Plot mutual correlation function
    rc('font', size=10, weight='bold')
    fig = plt.figure(figsize=(18, 10))
    fig.suptitle('Mutual correlation matrix of waveform signals',
                 fontsize=12,
                 fontweight='bold')
    ax1 = fig.add_subplot(111)
    ax1.set_title('Files: ' + file_names[0] + ' - ' + file_names[-1],
                  fontsize=12)
    for i in range(len(corr_f_re)):
        ax1.plot(corr_f_re[i],
                 linestyle='-',
                 linewidth='1.30',
                 label=labels[i])
    ax1.legend(loc='upper right', fontsize=10)
    ax1.set(xlim=(0, no_of_points_for_fft // 1))
    ax1.set_ylabel('Amplitude, A.U.', fontsize=10, fontweight='bold')
    fig.subplots_adjust(top=0.94)
    pylab.savefig(result_path +
                  'Calibration_matrix_of_wf_mutual_correlation_function.png',
                  bbox_inches='tight',
                  dpi=160)
    plt.close('all')

    # Save phase matrix to txt files
    for i in range(len(file_list)):
        phase_txt_file = open(
            result_path + 'Calibration_' + file_names[i] +
            '_cross_spectra_phase.txt', "w")
        for freq in range(no_of_points_for_fft // 1):  # //2
            phase_txt_file.write(
                ''.join(' {:+12.7E}'.format(cross_sp_angl[i][freq])) + ' \n')
        phase_txt_file.close()

    return 0
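# Usage sketch (not part of the original example): the calibration data path and FFT length are
# hypothetical, and result_directory as well as the helper functions called inside are assumed to
# be defined elsewhere in the package.
# if __name__ == '__main__':
#     obtain_calibr_matrix_for_2_channel_wf_calibration('CALIBRATION_DATA/',
#                                                       no_of_points_for_fft=16384)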
def coherent_wf_to_wf_dedispersion(DM, fname, no_of_points_for_fft_dedisp):
    """
    function reads waveform data in wf32 format, makes FFT, cuts the symmetrical half of the spectra and shifts the
    lines of complex data to provide coherent dedispersion. Then a symmetrcal part of spectra are made and joined
    to the shifted one, inverse FFT as applied and data are stored in waveform wf32 format
    Input parameters:
        DM -                            dispersion measure to compensate
        fname -                         name of file with initial wf32 data
        no_of_points_for_fft_dedisp -   number of waveform data points to use for FFT
    Output parameters:
        file_data_name -                name of file with processed data
    """

    #  *** Data file header read ***
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        clock_freq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr,
        time_resolution, fmin, fmax, df, frequency_list, freq_points_num,
        data_block_size
    ] = FileHeaderReaderJDS(fname, 0, 0)

    # Manually set frequencies for one channel mode
    freq_points_num = int(no_of_points_for_fft_dedisp / 2)

    # Manually set frequencies for 33 MHz clock frequency
    if int(clock_freq / 1000000) == 33:
        fmin = 16.5
        fmax = 33.0
        df = 16500000 / freq_points_num

    # Create long data files and copy first data file header to them

    with open(fname, 'rb') as file:
        # *** Data file header read ***
        file_header = file.read(1024)

        # Removing old DM from file name and updating it to current value
        if fname.startswith('DM_'):
            prev_dm_str = fname.split('_')[1]
            prev_dm = np.float32(prev_dm_str)
            new_dm = prev_dm + DM
            n = len('DM_' + prev_dm_str + '_')
            file_data_name = 'DM_' + str(np.round(new_dm, 6)) + '_' + fname[n:]
        else:
            file_data_name = 'DM_' + str(np.round(DM, 6)) + '_' + fname
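        # Illustrative example (hypothetical file names, not from the source data): a first pass with
        # DM = 2.0 on 'E220213_201439.jds_Data_chA.wf32' produces 'DM_2.0_E220213_201439.jds_Data_chA.wf32';
        # a second pass with DM = 3.755 on that file yields 'DM_5.755_E220213_201439.jds_Data_chA.wf32',
        # so the file name always carries the total dispersion measure compensated so far.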

        # *** Creating a binary file with data for long data storage ***
        file_data = open(file_data_name, 'wb')
        file_data.write(file_header)
        file_data.close()
        del file_header

        # *** Creating a new timeline TXT file for results ***
        new_tl_file_name = file_data_name.split("_Data_ch",
                                                1)[0] + '_Timeline.wtxt'
        new_tl_file = open(
            new_tl_file_name,
            'w')  # Open and close to delete the file with the same name
        new_tl_file.close()

        # Calculation of the time shifts
        shift_vector = DM_full_shift_calc(freq_points_num, fmin, fmax,
                                          df / pow(10, 6), time_resolution, DM,
                                          'jds')
        max_shift = np.abs(shift_vector[0])

        # Preparing buffer array
        buffer_array = np.zeros((freq_points_num, 2 * max_shift),
                                dtype='complex64')

        print(' Maximal shift is:                            ', max_shift,
              ' pixels ')
        print(' Dispersion measure:                          ', DM,
              ' pc / cm3 ')

        # Calculation of number of blocks and number of spectra in the file
        no_of_spectra_in_bunch = max_shift.copy()
        no_of_bunches_per_file = int(
            np.ceil(
                (df_filesize - 1024) /
                (no_of_spectra_in_bunch * no_of_points_for_fft_dedisp * 4)))

        # Real time resolution of spectra
        fine_clock_freq = (int(clock_freq / 1000000.0) * 1000000.0)
        real_spectra_dt = float(no_of_points_for_fft_dedisp / fine_clock_freq)
        real_spectra_df = float(
            (fine_clock_freq / 2) / (no_of_points_for_fft_dedisp / 2))
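        # i.e. dt = N / f_clk for one FFT block of N real samples, and
        # df = (f_clk / 2) / (N / 2) = f_clk / N, the f_clk/2 band split over N/2 channels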

        print(' Number of spectra in bunch:                  ',
              no_of_spectra_in_bunch)
        print(' Number of bunches to read in file:           ',
              no_of_bunches_per_file)
        print(' Time resolution of calculated spectra:       ',
              round(real_spectra_dt * 1000, 3), ' ms')
        print(' Frequency resolution of calculated spectra:  ',
              round(real_spectra_df / 1000, 3), ' kHz \n')

        # !!! Fake timing. Real timing to be done!!!
        # *** Reading timeline file ***
        old_tl_file_name = fname.split("_Data_ch", 1)[0] + '_Timeline.wtxt'
        old_tl_file = open(old_tl_file_name, 'r')
        new_tl_file = open(new_tl_file_name, 'w')  # Open the new timeline file for writing results

        file.seek(1024)  # Jumping to 1024 byte from file beginning

        # bar = IncrementalBar(' Coherent dispersion delay removing: ', max=no_of_bunches_per_file - 1,
        bar = IncrementalBar(' Coherent dispersion delay removing: ',
                             max=no_of_bunches_per_file,
                             suffix='%(percent)d%%')
        bar.start()

        # for bunch in range(no_of_bunches_per_file - 1):
        for bunch in range(no_of_bunches_per_file):

            # Trying to read the whole file, not only an integer number of bunches
            if bunch >= no_of_bunches_per_file - 1:
                no_of_spectra_in_bunch = int(
                    ((df_filesize - 1024) -
                     bunch * max_shift * no_of_points_for_fft_dedisp * 4) /
                    (no_of_points_for_fft_dedisp * 4))
                # print('\n  Bunch No ', str(bunch+1), ' of ', no_of_bunches_per_file, ' bunches')
                # print('\n  Number of spectra in the last bunch is: ', no_of_spectra_in_bunch)
                # print('\n  Maximal shift is:                       ', max_shift)

            # Read time from timeline file for the bunch
            time_scale_bunch = []
            for line in range(no_of_spectra_in_bunch):
                time_scale_bunch.append(str(old_tl_file.readline()))

            # Reading and reshaping all data with time data
            wf_data = np.fromfile(file,
                                  dtype='f4',
                                  count=no_of_spectra_in_bunch *
                                  no_of_points_for_fft_dedisp)
            '''
            fig = plt.figure(figsize=(9, 5))
            ax1 = fig.add_subplot(111)
            ax1.plot(wf_data, linestyle='-', linewidth='1.00', label='Initial waveform')
            ax1.legend(loc='upper right', fontsize=6)
            ax1.grid(b=True, which='both', color='silver', linestyle='-')
            ax1.set_ylabel('Intensity, a.u.', fontsize=6, fontweight='bold')
            pylab.savefig('00_Initial_waveform_' + str(bunch) + '.png', bbox_inches='tight', dpi=160)
            plt.close('all')
            '''

            wf_data = np.reshape(
                wf_data, [no_of_points_for_fft_dedisp, no_of_spectra_in_bunch],
                order='F')

            # preparing matrices for spectra
            spectra = np.zeros(
                (no_of_points_for_fft_dedisp, no_of_spectra_in_bunch),
                dtype='complex64')

            # Calculation of spectra
            for i in range(no_of_spectra_in_bunch):
                spectra[:, i] = np.fft.fft(wf_data[:, i])
            del wf_data
            '''
            fig = plt.figure(figsize=(9, 5))
            ax1 = fig.add_subplot(111)
            ax1.plot(10 * np.log10(np.power(np.abs(spectra[:, 0]), 2)), linestyle='-', linewidth='1.00',
                     label='Initial spectra before cut')
            ax1.legend(loc='upper right', fontsize=6)
            ax1.grid(b=True, which='both', color='silver', linestyle='-')
            ax1.set_ylabel('Intensity, a.u.', fontsize=6, fontweight='bold')
            pylab.savefig('00a_Initial_doubled_imm_spectra' + str(bunch) + '.png', bbox_inches='tight', dpi=160)
            plt.close('all')
            '''

            # Cut half of the spectra
            spectra = spectra[int(no_of_points_for_fft_dedisp / 2):, :]
            ''' # making figures
            fig = plt.figure(figsize=(9, 5))
            ax1 = fig.add_subplot(111)
            ax1.imshow(np.flipud(10*np.log10(np.power(np.abs(spectra), 2))), aspect='auto', cmap='jet')
            ax1.set_ylabel('Frequency points', fontsize=6, fontweight='bold')
            ax1.set_xlabel('Time points', fontsize=6, fontweight='bold')
            pylab.savefig('01_Initial_spectra_' + str(bunch) + '.png', bbox_inches='tight', dpi=160)
            plt.close('all')

            fig = plt.figure(figsize=(9, 5))
            ax1 = fig.add_subplot(111)
            ax1.plot(10*np.log10(np.power(np.abs(spectra[:, 0]), 2)), linestyle='-', linewidth='1.00', label='Initial waveform')
            ax1.legend(loc='upper right', fontsize=6)
            ax1.grid(b=True, which='both', color='silver', linestyle='-')
            ax1.set_ylabel('Intensity, a.u.', fontsize=6, fontweight='bold')
            pylab.savefig('02_Initial_imm_spectra' + str(bunch) + '.png', bbox_inches='tight', dpi=160)
            plt.close('all')
            '''

            #  Dispersion delay removing
            data_space = np.zeros((freq_points_num, 2 * max_shift),
                                  dtype='complex64')

            # if it is the last bunch - use only available data
            if bunch >= no_of_bunches_per_file - 1:
                data_space[:, max_shift:max_shift +
                           no_of_spectra_in_bunch] = spectra[:, :]
            else:
                data_space[:, max_shift:] = spectra[:, :]

            data_space = pulsar_DM_compensation_with_indices_changes(
                data_space, shift_vector)
            del spectra

            # Adding the next data block
            buffer_array += data_space

            # Making and filling the array with fully ready data for plotting and saving to a file
            if bunch >= no_of_bunches_per_file - 1:
                array_compensated_dm = buffer_array[:,
                                                    0:no_of_spectra_in_bunch]
            else:
                array_compensated_dm = buffer_array[:, 0:max_shift]

            if bunch > 0:

                # Saving time data to a new file
                for i in range(len(time_scale_bunch)):
                    new_tl_file.write(time_scale_bunch[i])

                # Saving data with compensated DM
                spectra = array_compensated_dm  # .copy()
                '''
                # making figures
                fig = plt.figure(figsize=(9, 5))
                ax1 = fig.add_subplot(111)
                ax1.imshow(np.flipud(10*np.log10(np.power(np.abs(spectra), 2))), aspect='auto', cmap='jet')
                ax1.set_ylabel('Frequency points', fontsize=6, fontweight='bold')
                ax1.set_xlabel('Time points', fontsize=6, fontweight='bold')
                pylab.savefig('03_Compensated_spectra_' + str(bunch) + '.png', bbox_inches='tight', dpi=160)
                plt.close('all')

                fig = plt.figure(figsize=(9, 5))
                ax1 = fig.add_subplot(111)
                ax1.plot(10*np.log10(np.power(np.abs(spectra[:,0]), 2)), linestyle='-', linewidth='1.00', label='Initial waveform')
                ax1.legend(loc='upper right', fontsize=6)
                ax1.grid(b=True, which='both', color='silver', linestyle='-')
                ax1.set_ylabel('Intensity, a.u.', fontsize=6, fontweight='bold')
                pylab.savefig('04_Compensated_imm_spectra' + str(bunch) + '.png', bbox_inches='tight', dpi=160)
                plt.close('all')
                '''

                wf_data = np.zeros(
                    (no_of_points_for_fft_dedisp, no_of_spectra_in_bunch))

                # Add lost half of the spectra

                second_spectra_half = spectra.copy()
                second_spectra_half = np.flipud(second_spectra_half)
                spectra = np.concatenate((second_spectra_half, spectra),
                                         axis=0)  # Changed places!!!
                '''
                fig = plt.figure(figsize=(9, 5))
                ax1 = fig.add_subplot(111)
                ax1.plot(10*np.log10(np.power(np.abs(spectra[:,0]), 2)), linestyle='-', linewidth='1.00', label='Initial waveform')
                ax1.legend(loc='upper right', fontsize=6)
                ax1.grid(b=True, which='both', color='silver', linestyle='-')
                ax1.set_ylabel('Intensity, a.u.', fontsize=6, fontweight='bold')
                pylab.savefig('05_Compensated_doubled_imm_spectra' + str(bunch) + '.png', bbox_inches='tight', dpi=160)
                plt.close('all')
                '''

                # Making IFFT
                for i in range(no_of_spectra_in_bunch):
                    wf_data[:, i] = np.real(np.fft.ifft(spectra[:, i]))
                del spectra

                # Reshaping the waveform to single dimension (real)
                wf_data = np.reshape(
                    wf_data,
                    [no_of_points_for_fft_dedisp * no_of_spectra_in_bunch, 1],
                    order='F')
                ''' # making figures
                fig = plt.figure(figsize=(9, 5))
                ax1 = fig.add_subplot(111)
                ax1.plot(wf_data, linestyle='-', linewidth='1.00', label='Initial waveform')
                ax1.legend(loc='upper right', fontsize=6)
                ax1.grid(b=True, which='both', color='silver', linestyle='-')
                ax1.set_ylabel('Intensity, a.u.', fontsize=6, fontweight='bold')
                pylab.savefig('06_Compensated_waveform_' + str(bunch) + '.png', bbox_inches='tight', dpi=160)
                plt.close('all')
                '''

                # Saving waveform data to wf32 file
                file_data = open(file_data_name, 'ab')
                file_data.write(
                    np.float32(wf_data).transpose().copy(order='C'))
                file_data.close()

                # !!! Saving time data to timeline file !!!

            # Rolling buffer_array to put current data first
            buffer_array = np.roll(buffer_array, -max_shift)
            buffer_array[:, max_shift:] = 0

            bar.next()

        bar.finish()
        old_tl_file.close()
        new_tl_file.close()

    return file_data_name
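
# A minimal usage sketch; the file name, DM value and FFT length below are hypothetical placeholders.
# The function expects a wf32 file together with its '..._Timeline.wtxt' companion in the working directory.
if __name__ == '__main__':
    dedispersed_file_name = coherent_wf_to_wf_dedispersion(
        DM=2.972,                                   # hypothetical dispersion measure, pc/cm3
        fname='E220213_201455.jds_Data_chA.wf32',   # hypothetical wf32 file name
        no_of_points_for_fft_dedisp=16384)          # FFT length (a power of two)
    print('Coherently dedispersed data saved to:', dedispersed_file_name)
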
Ejemplo n.º 23
0
if filename[0:3] == 'ADR':
    [df_filename, df_filesize, df_system_name, df_obs_place, df_description,
            F_ADC, df_creation_timeUTC, ReceiverMode, ADRmode, sumDifMode,
            NAvr, TimeRes, fmin, fmax, df, frequency_list, FFTsize,
            SLine, Width, BlockSize] = FileHeaderReaderADR(filepath, smd_filesize - 1024 - 131096, 1)


    record_date_time_dt = datetime(int('20' + df_filename[1:3]), int(df_filename[3:5]), int(df_filename[5:7]), int(df_creation_timeUTC[0:2]), int(df_creation_timeUTC[3:5]), int(df_creation_timeUTC[6:8]), int(df_creation_timeUTC[9:12]) * 1000)
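    # Note: the record date is parsed from the file name; ADR names appear to encode it as YYMMDD,
    # while DSP/JDS names (handled below) appear to encode it as DDMMYY.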
    record_date_time = str(record_date_time_dt)
    telescope = 'GURT'

if filename[0:3] == 'DSP':
    [df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        CLCfrq, df_creation_timeUTC, SpInFile, ReceiverMode, Mode, Navr,
        TimeRes, fmin, fmax, df, frequency_list, FFTsize, BlockSize] = FileHeaderReaderJDS(filepath, smd_filesize - 1024, 1)
    telescope = 'UTR-2'

    record_date_time_dt = datetime(int('20' + df_filename[5:7]), int(df_filename[3:5]), int(df_filename[1:3]),
                                   int(df_creation_timeUTC[11:13]), int(df_creation_timeUTC[14:16]),
                                   int(df_creation_timeUTC[17:19]), 0)
    record_date_time = str(record_date_time_dt)


df = df / pow(10,6)
freq_num = len(frequency_list)

file = open(filepath, 'rb')


#   *** Reading pulsar period and number of samples per period ***


def JDS_file_reader(fileList, result_path, MaxNsp, spSkip, RFImeanConst, Vmin,
                    Vmax, VminNorm, VmaxNorm, VminCorrMag, VmaxCorrMag,
                    colormap, customDPI, CorrelationProcess, longFileSaveAch,
                    longFileSaveBch, longFileSaveCRI, longFileSaveCMP,
                    DynSpecSaveInitial, DynSpecSaveCleaned,
                    CorrSpecSaveInitial, CorrSpecSaveCleaned,
                    SpecterFileSaveSwitch, ImmediateSpNo):
    currentTime = time.strftime("%H:%M:%S")
    currentDate = time.strftime("%d.%m.%Y")

    # *** Creating a folder where all pictures and results will be stored (if it doesn't exist) ***
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    if not os.path.exists(result_path + '/Service'):
        os.makedirs(result_path + '/Service')
    if DynSpecSaveInitial == 1:
        if not os.path.exists(result_path + '/Initial_spectra'):
            os.makedirs(result_path + '/Initial_spectra')
    if (DynSpecSaveCleaned == 1 and CorrelationProcess == 1):
        if not os.path.exists(result_path + '/Correlation_spectra'):
            os.makedirs(result_path + '/Correlation_spectra')

    # Main loop
    for fileNo in range(len(fileList)):  # loop by files
        print('\n\n\n  *  File ', str(fileNo + 1), ' of', str(len(fileList)))
        print('  *  File path: ', str(fileList[fileNo]))

        #*********************************************************************************

        # *** Opening datafile ***
        fname = ''
        if len(fname) < 1: fname = fileList[fileNo]

        # *** Data file header read ***
        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, SpInFile,
            ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency,
            FreqPointsNum, dataBlockSize
        ] = FileHeaderReaderJDS(fname, 0, 0)

        # Initial time line settings
        TimeScaleStartDate = datetime(int(df_creation_timeUTC[0:4]),
                                      int(df_creation_timeUTC[5:7]),
                                      int(df_creation_timeUTC[8:10]), 0, 0, 0,
                                      0)

        timeLineMS = np.zeros(
            int(SpInFile))  # List of ms values from ends of spectra

        # *** Creating a name for long timeline TXT file ***
        if fileNo == 0 and (longFileSaveAch == 1 or longFileSaveBch == 1
                            or longFileSaveCRI == 1 or longFileSaveCMP == 1):
            TLfile_name = df_filename + '_Timeline.txt'
            TLfile = open(
                TLfile_name,
                'wb')  # Open and close to delete the file with the same name
            TLfile.close()

        with open(fname, 'rb') as file:

            # *** If it is the first file - write the header to long data file
            if ((longFileSaveAch == 1 or longFileSaveBch == 1
                 or longFileSaveCRI == 1 or longFileSaveCMP == 1)
                    and fileNo == 0):
                file.seek(0)
                file_header = file.read(1024)

                DAT_file_name = df_filename
                DAT_file_list = []

                # *** Creating a binary file with data for long data storage ***
                if ((Mode == 1 or Mode == 2) and longFileSaveAch == 1):
                    Data_A_name = df_filename + '_Data_chA.dat'
                    Data_AFile = open(Data_A_name, 'wb')
                    Data_AFile.write(file_header)
                    Data_AFile.close()
                    DAT_file_list.append('chA')
                if (longFileSaveBch == 1 and (Mode == 1 or Mode == 2)):
                    Data_B_name = df_filename + '_Data_chB.dat'
                    Data_BFile = open(Data_B_name, 'wb')
                    Data_BFile.write(file_header)
                    Data_BFile.close()
                    DAT_file_list.append('chB')
                if (longFileSaveCRI == 1 and CorrelationProcess == 1
                        and Mode == 2):
                    Data_CRe_name = df_filename + '_Data_CRe.dat'
                    Data_CReFile = open(Data_CRe_name, 'wb')
                    Data_CReFile.write(file_header)
                    Data_CReFile.close()
                    DAT_file_list.append('CRe')
                    Data_CIm_name = df_filename + '_Data_CIm.dat'
                    Data_CImFile = open(Data_CIm_name, 'wb')
                    Data_CImFile.write(file_header)
                    Data_CImFile.close()
                    DAT_file_list.append('CIm')
                if (longFileSaveCMP == 1 and CorrelationProcess == 1
                        and Mode == 2):
                    Data_Cm_name = df_filename + '_Data_C_m.dat'
                    Data_CmFile = open(Data_Cm_name, 'wb')
                    Data_CmFile.write(file_header)
                    Data_CmFile.close()
                    DAT_file_list.append('C_m')
                    Data_Cp_name = df_filename + '_Data_C_p.dat'
                    Data_CpFile = open(Data_Cp_name, 'wb')
                    Data_CpFile.write(file_header)
                    Data_CpFile.close()
                    DAT_file_list.append('C_p')

                del file_header

            # ***************************************************************************
            #                        R E A D I N G   D A T A                             *
            # ***************************************************************************
            # print('\n  *** Reading data from file *** \n')
            file.seek(1024)  # Jumping to 1024 bytes from the file beginning
            if Mode == 0:
                print(
                    '\n\n  Data in waveform mode, use appropriate program!!! \n\n\n'
                )

            if Mode > 0 and Mode < 3:  # Spectra modes
                figID = -1
                figMAX = int(math.ceil((SpInFile - spSkip) / MaxNsp))
                if figMAX < 1: figMAX = 1
                for fig in range(figMAX):
                    Time1 = time.time()  # Timing
                    figID = figID + 1
                    currentTime = time.strftime("%H:%M:%S")
                    print(' File # ', str(fileNo + 1), ' of ',
                          str(len(fileList)), ', figure # ', figID + 1, ' of ',
                          figMAX, '   started at: ', currentTime)
                    if (SpInFile - spSkip - MaxNsp * figID) < MaxNsp:
                        Nsp = int(SpInFile - spSkip - MaxNsp * figID)
                    else:
                        Nsp = MaxNsp

                    # *** Preparing empty matrices ***
                    if Mode == 1 or Mode == 2:
                        Data_ChA = np.zeros((Nsp, FreqPointsNum))

                    if Mode == 1 or Mode == 2:
                        Data_ChB = np.zeros((Nsp, FreqPointsNum))

                    if Mode == 2:
                        Data_CRe = np.zeros((Nsp, FreqPointsNum))
                        Data_CIm = np.zeros((Nsp, FreqPointsNum))
                        CorrModule = np.zeros((Nsp, FreqPointsNum))
                        CorrPhase = np.zeros((Nsp, FreqPointsNum))

                    # *** Reading and reshaping all data for figure ***
                    if Mode == 1:
                        raw = np.fromfile(file,
                                          dtype='u4',
                                          count=(2 * Nsp * FreqPointsNum))
                        raw = np.reshape(raw, [2 * FreqPointsNum, Nsp],
                                         order='F')
                        Data_ChA = raw[0:(FreqPointsNum * 2):2, :].transpose()
                        Data_ChB = raw[1:(FreqPointsNum * 2):2, :].transpose()

                    if Mode == 2:
                        raw = np.fromfile(file,
                                          dtype='u4',
                                          count=(4 * Nsp * FreqPointsNum))
                        raw = np.reshape(raw, [4 * FreqPointsNum, Nsp],
                                         order='F')
                        Data_ChA = raw[0:(FreqPointsNum * 4):4, :].transpose()
                        Data_ChB = raw[1:(FreqPointsNum * 4):4, :].transpose()
                        Data_CRe = raw[2:(FreqPointsNum * 4):4, :].transpose()
                        Data_CIm = raw[3:(FreqPointsNum * 4):4, :].transpose()

                    del raw

                    # *** Single out timing from data ***
                    counterA2 = np.uint64(Data_ChA[:, -1])
                    counterB2 = np.uint64(Data_ChB[:, -1])
                    counterA1 = np.uint64(Data_ChA[:, -2])
                    counterB1 = np.uint64(Data_ChB[:, -2])
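                    # Assumption: the two last frequency channels of every spectrum carry packed timing
                    # counters rather than spectral data; the bit masks below extract the millisecond and
                    # spectrum counters and the phase-of-second / second-of-day fields from them.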

                    A = np.uint64(int('01111111111111111111111111111111', 2))
                    msCount = np.uint32(np.bitwise_and(
                        counterB2, A))  # number of ms since the record started
                    ftCount = np.uint32(np.bitwise_and(
                        counterA2,
                        A))  # number of spectra since the record started

                    A = np.uint64(int('00000111111111111111111111111111', 2))
                    phaOfSec = np.uint32(np.bitwise_and(
                        counterA1, A))  # phase of second for the spectrum
                    A = np.uint64(int('00000000000000011111111111111111', 2))
                    secOfDay = np.uint32(np.bitwise_and(
                        counterB1, A))  # second of the day for the spectrum

                    # *** Time line arranging ***

                    # Preparing/cleaning matrices for time scales
                    TimeScale = []  # New for each file
                    TimeFigureScale = []  # Timeline (new) for each figure (Nsp)
                    # Calculations
                    FigStartTime = timedelta(
                        0, int(secOfDay[0]),
                        int(1000000 * phaOfSec[0] / CLCfrq))
                    for i in range(Nsp):
                        TimeAdd = timedelta(
                            0, int(secOfDay[i]),
                            int(1000000 * phaOfSec[i] / CLCfrq))
                        TimeScale.append(str(str(TimeScaleStartDate +
                                                 TimeAdd)))
                        TimeFigureScale.append(str((TimeAdd - FigStartTime)))

                    TimeFigureScaleFig = np.empty_like(TimeFigureScale)
                    TimeScaleFig = np.empty_like(TimeScale)
                    for i in range(len(TimeFigureScale)):
                        TimeFigureScaleFig[i] = TimeFigureScale[i][0:11]
                        TimeScaleFig[i] = TimeScale[i][11:23]

                    # *** Converting from FPGA to PC float format ***
                    if Mode == 1 or Mode == 2:
                        Data_ChA = FPGAtoPCarrayJDS(Data_ChA, Navr)
                        Data_ChB = FPGAtoPCarrayJDS(Data_ChB, Navr)
                    if (Mode == 2 and CorrelationProcess == 1):
                        Data_CRe = FPGAtoPCarrayJDS(Data_CRe, Navr)
                        Data_CIm = FPGAtoPCarrayJDS(Data_CIm, Navr)
                    '''
                    # *** Absolute correlation specter plot ***
                    if Mode == 2 and figID == 0:   #  Immediate correlation spectrum channels A & B
                        TwoImmedSpectraPlot(frequency, Data_CRe[1][:], Data_CIm[1][:], 'Channel A', 'Channel B',
                                            frequency[0], frequency[FreqPointsNum-1], -0.001, 0.001,
                                            'Frequency, MHz', 'Amplitude, dB',
                                            'Immediate spectrum '+str(df_filename[0:18])+ ' channels A & B',
                                            'Initial parameters: dt = '+str(round(TimeRes,3))+' Sec, df = '+str(round(df/1000,3))+' kHz',
                                            'JDS_Results/Service/'+df_filename[0:14]+' Correlation Spectrum Re and Im before log.png')
                    '''

                    # *** Saving data to a long-term file ***
                    if (Mode == 1 or Mode == 2) and longFileSaveAch == 1:
                        Data_AFile = open(Data_A_name, 'ab')
                        Data_AFile.write(Data_ChA)
                        Data_AFile.close()
                    if (Mode == 1 or Mode == 2) and longFileSaveBch == 1:
                        Data_BFile = open(Data_B_name, 'ab')
                        Data_BFile.write(Data_ChB)
                        Data_BFile.close()
                    if Mode == 2 and longFileSaveCRI == 1 and CorrelationProcess == 1:
                        Data_CReFile = open(Data_CRe_name, 'ab')
                        Data_CReFile.write(np.float64(Data_CRe))
                        Data_CReFile.close()
                        Data_CImFile = open(Data_CIm_name, 'ab')
                        Data_CImFile.write(np.float64(Data_CIm))
                        Data_CImFile.close()

                    if (longFileSaveAch == 1 or longFileSaveBch == 1
                            or longFileSaveCRI == 1 or longFileSaveCMP == 1):
                        with open(TLfile_name, 'a') as TLfile:
                            for i in range(Nsp):
                                TLfile.write(
                                    (TimeScale[i][:] + ' \n'))  #str.encode

                    # *** Converting matrices to logarithmic scale ***
                    if (Mode == 1 or Mode == 2):
                        with np.errstate(invalid='ignore'):
                            Data_ChA = 10 * np.log10(Data_ChA)
                            Data_ChB = 10 * np.log10(Data_ChB)
                        Data_ChA[np.isnan(Data_ChA)] = -120
                        Data_ChB[np.isnan(Data_ChB)] = -120
                    if (Mode == 2 and CorrelationProcess == 1):
                        with np.errstate(invalid='ignore', divide='ignore'):
                            CorrModule = 10 * np.log10(
                                ((Data_CRe)**2 + (Data_CIm)**2)**(0.5))
                            CorrPhase = np.arctan2(Data_CIm, Data_CRe)
                        CorrPhase[np.isnan(CorrPhase)] = 0
                        CorrModule[np.isinf(CorrModule)] = -135.5

                    # *** Saving correlation data to a long-term module and phase files ***
                    if (Mode == 2 and CorrelationProcess == 1
                            and longFileSaveCMP == 1):
                        Data_CmFile = open(Data_Cm_name, 'ab')
                        Data_CmFile.write(np.float64(CorrModule))
                        Data_CmFile.close()
                        Data_CpFile = open(Data_Cp_name, 'ab')
                        Data_CpFile.write(np.float64(CorrPhase))
                        Data_CpFile.close()

                    # *** Saving immediate spectrum to file ***
                    if (SpecterFileSaveSwitch == 1 and figID == 0):
                        SpFile = open(
                            'JDS_Results/Service/Specter_' +
                            df_filename[0:14] + '.txt', 'w')
                        for i in range(FreqPointsNum - 1):
                            if Mode == 1:
                                SpFile.write(
                                    str('{:10.6f}'.format(frequency[i])) +
                                    '  ' + str('{:16.10f}'.format(
                                        Data_ChA[ImmediateSpNo][i])) + '  ' +
                                    str('{:16.10f}'.format(
                                        Data_ChB[ImmediateSpNo][i])) + ' \n')
                            if Mode == 2:
                                SpFile.write(
                                    str(frequency[i]) + '  ' +
                                    str(Data_ChA[ImmediateSpNo][i]) + '  ' +
                                    str(Data_ChB[ImmediateSpNo][i]) + '  ' +
                                    str(Data_CRe[ImmediateSpNo][i]) + '  ' +
                                    str(Data_CIm[ImmediateSpNo][i]) + ' \n')

                        SpFile.close()

                    # ***********************************************************************
                    #                          F I G U R E S                                *
                    # ***********************************************************************

                    # *** Plotting immediate spectra before cleaning and normalizing ***
                    if (Mode == 1 or Mode == 2) and figID == 0:

                        Suptitle = ('Immediate spectrum ' +
                                    str(df_filename[0:18]) + ' channels A & B')
                        Title = ('Place: ' + str(df_obs_place) +
                                 ', Receiver: ' + str(df_system_name) +
                                 '. Initial parameters: dt = ' +
                                 str(round(TimeRes, 3)) + ' Sec, df = ' +
                                 str(round(df / 1000, 3)) + ' kHz ' +
                                 'Description: ' + str(df_description))
                        Filename = (
                            result_path + '/Service/' + df_filename[0:14] +
                            ' Channels A and B Immediate Spectrum before cleaning and normalizing.png'
                        )

                        TwoOrOneValuePlot(
                            2, frequency, Data_ChA[0][:], Data_ChB[0][:],
                            'Channel A', 'Channel B', frequency[0],
                            frequency[FreqPointsNum - 1], -120, -20, -120, -20,
                            'Frequency, MHz', 'Intensity, dB', 'Intensity, dB',
                            Suptitle, Title, Filename, currentDate,
                            currentTime, Software_version)

                    if Mode == 2 and CorrelationProcess == 1 and figID == 0:

                        Suptitle = ('Immediate correlation spectrum ' +
                                    str(df_filename[0:18]) + ' channels A & B')
                        Title = ('Place: ' + str(df_obs_place) +
                                 ', Receiver: ' + str(df_system_name) +
                                 '. Initial parameters: dt = ' +
                                 str(round(TimeRes, 3)) + ' Sec, df = ' +
                                 str(round(df / 1000, 3)) + ' kHz ' +
                                 'Description: ' + str(df_description))
                        Filename = (
                            result_path + '/Service/' + df_filename[0:14] +
                            ' Channels A and B Correlation Immediate Spectrum before cleaning and normalizing.png'
                        )

                        TwoOrOneValuePlot(
                            2, frequency, CorrModule[0][:], CorrPhase[0][:],
                            'Correlation module', 'Correlation phase',
                            frequency[0], frequency[FreqPointsNum - 1],
                            VminCorrMag, VmaxCorrMag, -4, 4, 'Frequency, MHz',
                            'Amplitude, dB', 'Phase, deg', Suptitle, Title,
                            Filename, currentDate, currentTime,
                            Software_version)

                    # *** FIGURE Initial dynamic spectrum channels A and B ***
                    if (Mode == 1 or Mode == 2) and DynSpecSaveInitial == 1:

                        Suptitle = ('Dynamic spectrum (initial) ' +
                                    str(df_filename) + ' - Fig. ' +
                                    str(figID + 1) + ' of ' + str(figMAX) +
                                    '\n Initial parameters: dt = ' +
                                    str(round(TimeRes * 1000, 3)) +
                                    ' ms, df = ' + str(round(df / 1000., 3)) +
                                    ' kHz, Receiver: ' + str(df_system_name) +
                                    ', Place: ' + str(df_obs_place) + '\n' +
                                    ReceiverMode + ', Description: ' +
                                    str(df_description))

                        fig_file_name = (result_path + '/Initial_spectra/' +
                                         df_filename[0:14] +
                                         ' Initial dynamic spectrum fig.' +
                                         str(figID + 1) + '.png')

                        TwoDynSpectraPlot(Data_ChA.transpose(),
                                          Data_ChB.transpose(), Vmin, Vmax,
                                          Vmin, Vmax, Suptitle,
                                          'Intensity, dB', 'Intensity, dB',
                                          Nsp, TimeFigureScaleFig,
                                          TimeScaleFig, frequency,
                                          FreqPointsNum, colormap, 'Channel A',
                                          'Channel B', fig_file_name,
                                          currentDate, currentTime,
                                          Software_version, customDPI)

                    # *** FIGURE Initial correlation spectrum Module and Phase (python 3 new version) ***
                    if (Mode == 2 and CorrSpecSaveInitial == 1
                            and CorrelationProcess == 1):

                        Suptitle = ('Correlation dynamic spectrum (initial) ' +
                                    str(df_filename) + ' - Fig. ' +
                                    str(figID + 1) + ' of ' + str(figMAX) +
                                    '\n Initial parameters: dt = ' +
                                    str(round(TimeRes * 1000, 3)) +
                                    ' ms, df = ' + str(round(df / 1000., 3)) +
                                    ' kHz, Receiver: ' + str(df_system_name) +
                                    ', Place: ' + str(df_obs_place) + '\n' +
                                    ReceiverMode + ', Description: ' +
                                    str(df_description))

                        fig_file_name = (result_path +
                                         '/Correlation_spectra/' +
                                         df_filename[0:14] +
                                         ' Correlation dynamic spectrum fig.' +
                                         str(figID + 1) + '.png')

                        TwoDynSpectraPlot(CorrModule.transpose(),
                                          CorrPhase.transpose(), VminCorrMag,
                                          VmaxCorrMag, -3.15, 3.15, Suptitle,
                                          'Intensity, dB', 'Phase, rad', Nsp,
                                          TimeFigureScaleFig, TimeScaleFig,
                                          frequency, FreqPointsNum, colormap,
                                          'Correlation module',
                                          'Correlation phase', fig_file_name,
                                          currentDate, currentTime,
                                          Software_version, customDPI)

                    # *** Normalizing amplitude-frequency response ***
                    if Mode == 1 or Mode == 2:
                        Normalization_dB(Data_ChA, FreqPointsNum, Nsp)
                        Normalization_dB(Data_ChB, FreqPointsNum, Nsp)
                    if Mode == 2 and CorrelationProcess == 1 and CorrSpecSaveCleaned == 1:
                        Normalization_dB(CorrModule, FreqPointsNum, Nsp)

                    # *** Deleting channels with strong RFI ***
                    if Mode == 1 or Mode == 2:
                        simple_channel_clean(Data_ChA, RFImeanConst)
                        simple_channel_clean(Data_ChB, RFImeanConst)
                    if Mode == 2 and CorrelationProcess == 1 and CorrSpecSaveCleaned == 1:
                        simple_channel_clean(CorrModule, 2 * RFImeanConst)

                    #   *** Immediate spectra ***    (only for first figure in data file)
                    if (Mode == 1 or Mode == 2
                        ) and figID == 0:  # Immediate spectrum channels A & B

                        Suptitle = (
                            'Cleaned and normalized immediate spectrum ' +
                            str(df_filename[0:18]) + ' channels A & B')
                        Title = ('Place: ' + str(df_obs_place) +
                                 ', Receiver: ' + str(df_system_name) +
                                 '. Initial parameters: dt = ' +
                                 str(round(TimeRes, 3)) + ' Sec, df = ' +
                                 str(round(df / 1000, 3)) + ' kHz ' +
                                 'Description: ' + str(df_description))
                        Filename = (
                            result_path + '/Service/' + df_filename[0:14] +
                            ' Channels A and B Immediate Spectrum after cleaning and normalizing.png'
                        )

                        TwoOrOneValuePlot(
                            2, frequency, Data_ChA[1][:], Data_ChB[1][:],
                            'Channel A', 'Channel B', frequency[0],
                            frequency[FreqPointsNum - 1], VminNorm - 5,
                            VmaxNorm, VminNorm - 5, VmaxNorm, 'Frequency, MHz',
                            'Intensity, dB', 'Intensity, dB', Suptitle, Title,
                            Filename, currentDate, currentTime,
                            Software_version)

                    # *** FIGURE Normalized dynamic spectrum channels A and B ***
                    if (Mode == 1 or Mode == 2) and DynSpecSaveCleaned == 1:

                        Suptitle = ('Dynamic spectrum (normalized) ' +
                                    str(df_filename) + ' - Fig. ' +
                                    str(figID + 1) + ' of ' + str(figMAX) +
                                    '\n Initial parameters: dt = ' +
                                    str(round(TimeRes * 1000, 3)) +
                                    ' ms, df = ' + str(round(df / 1000., 3)) +
                                    ' kHz, Receiver: ' + str(df_system_name) +
                                    ', Place: ' + str(df_obs_place) + '\n' +
                                    ReceiverMode + ', Description: ' +
                                    str(df_description))

                        fig_file_name = (result_path + '/' +
                                         df_filename[0:14] +
                                         ' Dynamic spectra fig.' +
                                         str(figID + 1) + '.png')

                        TwoDynSpectraPlot(
                            Data_ChA.transpose(), Data_ChB.transpose(),
                            VminNorm, VmaxNorm, VminNorm, VmaxNorm, Suptitle,
                            'Intensity, dB', 'Intensity, dB', Nsp,
                            TimeFigureScaleFig, TimeScaleFig, frequency,
                            FreqPointsNum, colormap, 'Channel A', 'Channel B',
                            fig_file_name, currentDate, currentTime,
                            Software_version, customDPI)

                    # *** FIGURE Normalized correlation spectrum Module and Phase ***
                    if (Mode == 2 and CorrSpecSaveCleaned == 1
                            and CorrelationProcess == 1):

                        Suptitle = (
                            'Correlation dynamic spectrum (normalized) ' +
                            str(df_filename) + ' - Fig. ' + str(figID + 1) +
                            ' of ' + str(figMAX) +
                            '\n Initial parameters: dt = ' +
                            str(round(TimeRes * 1000, 3)) + ' ms, df = ' +
                            str(round(df / 1000., 3)) + ' kHz, Receiver: ' +
                            str(df_system_name) + ', Place: ' +
                            str(df_obs_place) + '\n' + ReceiverMode +
                            ', Description: ' + str(df_description))

                        fig_file_name = (
                            result_path + '/Correlation_spectra/' +
                            df_filename[0:14] +
                            ' Correlation dynamic spectra cleaned fig.' +
                            str(figID + 1) + '.png')
                        TwoDynSpectraPlot(CorrModule.transpose(),
                                          CorrPhase.transpose(), 2 * VminNorm,
                                          2 * VmaxNorm, -3.15, 3.15, Suptitle,
                                          'Intensity, dB', 'Phase, rad', Nsp,
                                          TimeFigureScaleFig, TimeScaleFig,
                                          frequency, FreqPointsNum, colormap,
                                          'Normalized correlation module',
                                          'Correlation phase', fig_file_name,
                                          currentDate, currentTime,
                                          Software_version, customDPI)
                '''
                # Check of second counter data for linearity
                OneImmedSpecterPlot(list(range(ChunksInFile)), timeLineSecond, 'timeLineSecond',
                                    0, ChunksInFile, 0, 2000,
                                    'Time, sec', 'Second counter, sec',
                                    'Second counter',
                                    ' ',
                                    'ADR_Results/Service/' + df_filename[0:14] + ' Second counter fig.' + str(figID+1) + '.png')

                '''

                gc.collect()

            #print ('\n  Position in file: ', file.tell(), ' File size: ', df_filesize)
            #if (file.tell() == df_filesize): print ('\n  File was read till the end \n')
            if (file.tell() < df_filesize):
                print('    The difference is ', (df_filesize - file.tell()),
                      ' bytes')
                print('\n  File was NOT read till the end!!! ERROR')

        file.close()  #Here we close the data file

    ok = 1
    return ok, DAT_file_name, DAT_file_list
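
# A minimal usage sketch with hypothetical file names and plotting limits; the 0/1 flags switch the
# corresponding long-file saving, correlation processing and figure options used by the reader above.
if __name__ == '__main__':
    ok, dat_file_name, dat_file_list = JDS_file_reader(
        fileList=['DATA/E220213_201455.jds'],  # hypothetical path to a DSPZ (.jds) file
        result_path='JDS_Results', MaxNsp=2048, spSkip=0, RFImeanConst=8,
        Vmin=-120, Vmax=-50, VminNorm=0, VmaxNorm=10,
        VminCorrMag=-150, VmaxCorrMag=-30, colormap='jet', customDPI=300,
        CorrelationProcess=0, longFileSaveAch=1, longFileSaveBch=1,
        longFileSaveCRI=0, longFileSaveCMP=0,
        DynSpecSaveInitial=0, DynSpecSaveCleaned=1,
        CorrSpecSaveInitial=0, CorrSpecSaveCleaned=0,
        SpecterFileSaveSwitch=0, ImmediateSpNo=0)
    print('Long DAT file created:', dat_file_name, 'channels:', dat_file_list)
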
Ejemplo n.º 25
0
def pulsar_incoherent_dedispersion(
        common_path, filename, pulsar_name, average_const, profile_pic_min,
        profile_pic_max, cleaning_Iana, cleaning, no_of_iterations,
        std_lines_clean, pic_in_line, std_pixels_clean, SpecFreqRange,
        freqStart, freqStop, save_profile_txt, save_compensated_data,
        customDPI, colormap):

    previousTime = time.time()
    currentTime = time.strftime("%H:%M:%S")
    currentDate = time.strftime("%d.%m.%Y")

    rc('font', size=6, weight='bold')
    data_filename = common_path + filename

    # *** Creating a folder where all pictures and results will be stored (if it doesn't exist) ***
    newpath = 'RESULTS_pulsar_single_pulses_' + pulsar_name + '_' + filename
    if not os.path.exists(newpath):
        os.makedirs(newpath)

    # Path to timeline file to be analyzed:
    time_line_file_name = common_path + filename[-31:-13] + '_Timeline.txt'

    if save_profile_txt > 0:
        # *** Creating a name for long timeline TXT file ***
        profile_file_name = newpath + '/' + filename + '_time_profile.txt'
        profile_txt_file = open(
            profile_file_name,
            'w')  # Open and close to delete the file with the same name
        profile_txt_file.close()

    # *** Opening DAT datafile ***
    file = open(data_filename, 'rb')

    # reading FHEADER
    df_filesize = os.stat(data_filename).st_size  # Size of file
    df_filename = file.read(32).decode('utf-8').rstrip(
        '\x00')  # Initial data file name
    file.close()

    receiver_type = df_filename[-4:]

    # Reading file header to obtain main parameters of the file
    if receiver_type == '.adr':
        [TimeRes, fmin, fmax, df, frequency_list,
         FFTsize] = FileHeaderReaderADR(data_filename, 0, 1)

    if receiver_type == '.jds':
        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, sp_in_file,
            ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency_list,
            FFTsize, dataBlockSize
        ] = FileHeaderReaderJDS(data_filename, 0, 1)

    # Manually set frequencies for two channels mode
    if receiver_type == '.jds' and int(CLCfrq / 1000000) == 33:  # CLCfrq is defined only for JDS files
        #FFTsize = 8192
        fmin = 16.5
        fmax = 33.0
        frequency_list = np.linspace(fmin, fmax, FFTsize)

    sp_in_file = int(
        (df_filesize - 1024) / (len(frequency_list) * 8)
    )  # number of spectra in file: each spectrum occupies len(frequency_list) float64 values after the 1024-byte header

    pulsar_ra, pulsar_dec, DM, p_bar = catalogue_pulsar(pulsar_name)

    # ************************************************************************************
    #                             R E A D I N G   D A T A                                *
    # ************************************************************************************

    # Time line file reading
    timeline, dt_timeline = time_line_file_reader(time_line_file_name)

    # Selecting the frequency range of data to be analyzed
    if SpecFreqRange == 1:
        A = []
        B = []
        for i in range(len(frequency_list)):
            A.append(abs(frequency_list[i] - freqStart))
            B.append(abs(frequency_list[i] - freqStop))
        ifmin = A.index(min(A))
        ifmax = B.index(min(B))
        shift_vector = DM_full_shift_calc(ifmax - ifmin, frequency_list[ifmin],
                                          frequency_list[ifmax],
                                          df / pow(10, 6), TimeRes, DM,
                                          receiver_type)
        print(' Number of frequency channels:  ', ifmax - ifmin)

    else:
        shift_vector = DM_full_shift_calc(
            len(frequency_list) - 4, fmin, fmax, df / pow(10, 6), TimeRes, DM,
            receiver_type)
        print(' Number of frequency channels:  ', len(frequency_list) - 4)
        ifmin = 0
        ifmax = int(len(frequency_list) - 4)
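
    # Note (assumption): the last 4 channels of JDS spectra appear to hold service/timing counters
    # rather than spectral data, hence "len(frequency_list) - 4" above; these channels are also cut
    # from every data block after reading below.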

    if save_compensated_data > 0:
        with open(data_filename, 'rb') as file:
            file_header = file.read(1024)  # Data file header read

        # *** Creating a binary file with data for long data storage ***
        new_data_file_name = pulsar_name + '_DM_' + str(DM) + '_' + filename
        new_data_file = open(new_data_file_name, 'wb')
        new_data_file.write(file_header)
        new_data_file.seek(624)  # Lb place in header
        new_data_file.write(np.int32(ifmin).tobytes())
        new_data_file.seek(628)  # Hb place in header
        new_data_file.write(np.int32(ifmax).tobytes())
        new_data_file.seek(632)  # Wb place in header
        new_data_file.write(
            np.int32(ifmax -
                     ifmin).tobytes())  # bytes([np.int32(ifmax - ifmin)]))
        new_data_file.close()

        # *** Creating a name for long timeline TXT file ***
        new_TLfile_name = pulsar_name + '_DM_' + str(
            DM) + '_' + data_filename[:-13] + '_Timeline.txt'
        new_TLfile = open(
            new_TLfile_name,
            'w')  # Open and close to delete the file with the same name
        new_TLfile.close()

        del file_header

    max_shift = np.abs(shift_vector[0])

    if SpecFreqRange == 1:
        buffer_array = np.zeros((ifmax - ifmin, 2 * max_shift))
    else:
        buffer_array = np.zeros((len(frequency_list) - 4, 2 * max_shift))

    num_of_blocks = int(sp_in_file / (1 * max_shift))

    print(' Number of spectra in file:     ', sp_in_file, ' ')
    print(' Maximal shift is:              ', max_shift, ' pixels ')
    print(' Number of blocks in file:      ', num_of_blocks, ' ')
    print(' Dispersion measure:            ', DM, ' pc / cm3 \n')
    print(' Pulsar name:                   ', pulsar_name, '  \n')

    if receiver_type == '.jds':
        num_frequencies_initial = len(frequency_list) - 4

    frequency_list_initial = np.empty_like(frequency_list)
    frequency_list_initial[:] = frequency_list[:]

    dat_file = open(data_filename, 'rb')
    dat_file.seek(1024)  # Jumping to 1024 byte from file beginning

    for block in range(num_of_blocks):  # main loop by number of blocks in file

        print(
            '\n * Data block # ', block + 1, ' of ', num_of_blocks,
            '\n ******************************************************************'
        )

        # Time line arrangements:
        fig_time_scale = []
        fig_date_time_scale = []
        for i in range(block * max_shift, (block + 1) * max_shift
                       ):  # Shows the time of pulse end (at lowest frequency)
            fig_time_scale.append(timeline[i][11:23])
            fig_date_time_scale.append(timeline[i][:])
        print(' Time: ', fig_time_scale[0], ' - ', fig_time_scale[-1],
              ', number of points: ', len(fig_time_scale))

        # Data block reading
        if receiver_type == '.jds':
            data = np.fromfile(dat_file,
                               dtype=np.float64,
                               count=(num_frequencies_initial + 4) * 1 *
                               max_shift)  # 2
            data = np.reshape(data,
                              [(num_frequencies_initial + 4), 1 * max_shift],
                              order='F')  # 2
            data = data[:
                        num_frequencies_initial, :]  # To delete the last channels of DSP data where time is stored

        # Cutting the array in predefined frequency range
        if SpecFreqRange == 1:
            data, frequency_list, fi_start, fi_stop = specify_frequency_range(
                data, frequency_list_initial, freqStart, freqStop)
            num_frequencies = len(frequency_list)
        else:
            num_frequencies = num_frequencies_initial

        # Normalization of data
        Normalization_lin(data, num_frequencies, 1 * max_shift)

        nowTime = time.time()
        print('\n  *** Preparation of data took:              ',
              round((nowTime - previousTime), 2), 'seconds ')
        previousTime = nowTime

        if cleaning_Iana > 0:
            data = survey_cleaning(
                data)  # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

        if cleaning > 0:

            # Cleaning vertical and horizontal lines of RFI
            data, mask, cleaned_pixels_num = clean_lines_of_pixels(
                data, no_of_iterations, std_lines_clean, pic_in_line)

            plt.figure(1, figsize=(10.0, 6.0))
            plt.subplots_adjust(left=None,
                                bottom=0,
                                right=None,
                                top=0.86,
                                wspace=None,
                                hspace=None)
            ImA = plt.imshow(mask, aspect='auto', vmin=0, vmax=1, cmap='Greys')
            plt.title('Mask after cleaning lines of pixels',
                      fontsize=10,
                      fontweight='bold',
                      style='italic',
                      y=1.025)
            plt.ylabel('One dimension', fontsize=10, fontweight='bold')
            plt.xlabel('Second dimensions', fontsize=10, fontweight='bold')
            plt.colorbar()
            plt.yticks(fontsize=8, fontweight='bold')
            plt.xticks(fontsize=8, fontweight='bold')
            pylab.savefig(newpath + '/00_10' + ' fig. ' + str(block + 1) +
                          ' - Result mask after lines cleaning.png',
                          bbox_inches='tight',
                          dpi=300)
            plt.close('all')

            # Cleaning remaining 1 pixel splashes of RFI
            data, mask, cleaned_pixels_num = array_clean_by_STD_value(
                data, std_pixels_clean)

            plt.figure(1, figsize=(10.0, 6.0))
            plt.subplots_adjust(left=None,
                                bottom=0,
                                right=None,
                                top=0.86,
                                wspace=None,
                                hspace=None)
            ImA = plt.imshow(mask, aspect='auto', vmin=0, vmax=1, cmap='Greys')
            plt.title('Mask after fine STD cleaning',
                      fontsize=10,
                      fontweight='bold',
                      style='italic',
                      y=1.025)
            plt.ylabel('One dimension', fontsize=10, fontweight='bold')
            plt.xlabel('Second dimensions', fontsize=10, fontweight='bold')
            plt.colorbar()
            plt.yticks(fontsize=8, fontweight='bold')
            plt.xticks(fontsize=8, fontweight='bold')
            pylab.savefig(newpath + '/00_11' + ' fig. ' + str(block + 1) +
                          ' - Mask after fine STD cleaning.png',
                          bbox_inches='tight',
                          dpi=300)
            plt.close('all')

            nowTime = time.time()
            print('\n  *** Normalization and cleaning took:       ',
                  round((nowTime - previousTime), 2), 'seconds ')
            previousTime = nowTime
        '''
        # Logging the data
        with np.errstate(invalid='ignore'):
            data[:,:] = 10 * np.log10(data[:,:])
        data[np.isnan(data)] = 0

        # Normalizing log data
        data = data - np.mean(data)
        '''

        # Dispersion delay removing
        data_space = np.zeros((num_frequencies, 2 * max_shift))
        data_space[:, max_shift:] = data[:, :]
        temp_array = pulsar_DM_compensation_with_indices_changes(
            data_space, shift_vector)
        del data, data_space

        nowTime = time.time()
        # print('\n  *** Dispersion compensation took:          ', round((nowTime - previousTime), 2), 'seconds ')
        print('\n  *** Dispersion delay removing took:        ',
              round((nowTime - previousTime), 2), 'seconds ')
        previousTime = nowTime

        # Adding the next data block
        buffer_array += temp_array

        # Making and filling the array with fully ready data for plotting and saving to a file
        array_compensated_DM = buffer_array[:, 0:max_shift]

        if block > 0:
            # Saving data with compensated DM to DAT file
            if save_compensated_data > 0 and block > 0:
                temp = array_compensated_DM.transpose().copy(order='C')
                new_data_file = open(new_data_file_name, 'ab')
                new_data_file.write(temp)
                new_data_file.close()

                # Saving time data to long timeline file
                with open(new_TLfile_name, 'a') as new_TLfile:
                    for i in range(max_shift):
                        new_TLfile.write(fig_date_time_scale[i])

            # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
            # Logging the data
            with np.errstate(divide='ignore'):
                array_compensated_DM[:, :] = 10 * np.log10(
                    array_compensated_DM[:, :])
            array_compensated_DM[array_compensated_DM == -np.inf] = 0

            # Normalizing log data
            array_compensated_DM = array_compensated_DM - np.mean(
                array_compensated_DM)
            # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

            # Preparing single averaged data profile for figure
            profile = array_compensated_DM.mean(axis=0)[:]
            profile = profile - np.mean(profile)

            # Save full profile to TXT file
            if save_profile_txt > 0:
                profile_txt_file = open(profile_file_name, 'a')
                for i in range(len(profile)):
                    profile_txt_file.write(str(profile[i]) + ' \n')
                profile_txt_file.close()

            # Averaging of the array with pulses for figure
            averaged_array = average_some_lines_of_array(
                array_compensated_DM, int(num_frequencies / average_const))
            freq_resolution = (df *
                               int(num_frequencies / average_const)) / 1000.
            max_time_shift = max_shift * TimeRes
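            # Assumption: average_some_lines_of_array averages every int(num_frequencies / average_const)
            # adjacent channels, leaving roughly average_const frequency rows for the figure;
            # freq_resolution is then the width of one averaged channel in kHz and max_time_shift
            # the time span of one plotted block in seconds.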

            # NEW start
            averaged_array = averaged_array - np.mean(averaged_array)
            # NEW stop

            plot_ready_data(profile, averaged_array, frequency_list,
                            num_frequencies, fig_time_scale, newpath, filename,
                            pulsar_name, DM, freq_resolution, TimeRes,
                            max_time_shift, block, num_of_blocks - 1, block,
                            profile_pic_min, profile_pic_max, df_description,
                            colormap, customDPI, currentDate, currentTime,
                            Software_version)

        # Rolling buffer_array to put the not-yet-completed data first
        buffer_array = np.roll(buffer_array, -max_shift)
        buffer_array[:, max_shift:] = 0
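        # Note: the net effect of np.roll by -max_shift followed by zeroing is that the second half of
        # every row of buffer_array (samples that still need contributions from the next data block)
        # becomes the new first half, and the freed second half is reset to zero for accumulation.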

    dat_file.close()

    # Fourier analysis of the obtained time profile of pulses
    if save_profile_txt > 0:
        print('\n\n  *** Making Fourier transform of the time profile...')
        pulsar_pulses_time_profile_FFT(newpath + '/',
                                       filename + '_time_profile.txt',
                                       pulsar_name, TimeRes, profile_pic_min,
                                       profile_pic_max, customDPI, colormap)

    return new_data_file_name
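

# Illustrative sketch (not part of the original pipeline): a minimal model of the rolling-buffer
# incoherent dedispersion used above, assuming pulsar_DM_compensation_with_indices_changes shifts
# each frequency row by its entry in shift_vector. The function name dedisperse_block and all
# values below are hypothetical and only demonstrate the buffering scheme.
import numpy as np


def dedisperse_block(data_block, shift_vector, buffer_array):
    """Shift each frequency row by its delay and accumulate into the rolling buffer."""
    num_freq, max_shift = data_block.shape
    data_space = np.zeros((num_freq, 2 * max_shift))
    data_space[:, max_shift:] = data_block             # put the new data in the right half
    for ch in range(num_freq):                         # roll each row left by its delay in samples
        data_space[ch, :] = np.roll(data_space[ch, :], -int(shift_vector[ch]))
    buffer_array += data_space                         # accumulate with the previous block's tail
    ready_part = buffer_array[:, :max_shift].copy()    # dedispersed samples for this block
    buffer_array[:, :max_shift] = buffer_array[:, max_shift:]  # move the unfinished tail to the front
    buffer_array[:, max_shift:] = 0
    return ready_part


if __name__ == '__main__':
    # Toy run: the first returned block is only partially filled (edge effect),
    # which the original code handles by skipping block 0.
    rng = np.random.default_rng(0)
    num_freq, max_shift = 8, 16
    shifts = np.arange(num_freq)                       # toy dispersion delays in samples
    buffer_array = np.zeros((num_freq, 2 * max_shift))
    for _ in range(3):                                 # three consecutive data blocks
        block = rng.standard_normal((num_freq, max_shift))
        dedispersed = dedisperse_block(block, shifts, buffer_array)
        print(dedispersed.shape)                       # (8, 16)
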
def jds_wf_true_resolution(source_directory, result_directory,
                           no_of_points_for_fft, no_of_bunches_per_file):

    # *** Search JDS files in the source_directory ***
    fileList = find_files_only_in_current_folder(source_directory, '.jds', 1)
    print('')

    if len(fileList) > 1:  # Check if files have the same parameters if there is more than one file in the list
        # Check if all files (except the last) have same size
        same_or_not = check_if_all_files_of_same_size(source_directory,
                                                      fileList, 1)

        # Check if all files in this folder have the same parameters in headers
        equal_or_not = check_if_JDS_files_of_equal_parameters(
            source_directory, fileList)

        if same_or_not and equal_or_not:
            print(
                '\n\n\n        :-)  All files seem to be of the same parameters!  :-) \n\n\n'
            )
        else:
            print(
                '\n\n\n ************************************************************************************* \n *                                                                                   *'
            )
            print(
                ' *   Files in this folder seem to differ, check the errors and restart the script!   *'
            )
            print(
                ' *                                                                                   *  '
                '\n ************************************************************************************* \n\n\n'
            )

        decision = int(
            input(
                '* Enter "1" to start processing, or "0" to stop the script:     '
            ))
        if decision != 1:
            sys.exit('\n\n\n              ***  Program stopped! *** \n\n\n')

    # To print in console the header of first file
    print('\n  First file header parameters: \n')

    # *** Data file header read ***
    [
        df_filename, df_filesize, df_system_name, df_obs_place, df_description,
        CLCfrq, df_creation_timeUTC, Channel, ReceiverMode, Mode, Navr,
        TimeRes, fmin, fmax, df, frequency, FreqPointsNum, data_block_size
    ] = FileHeaderReaderJDS(source_directory + fileList[0], 0, 1)

    if Channel == 0 or Channel == 1:  # Single channel mode
        wf_data_chA = np.empty([0])
    else:
        wf_data_chA = np.empty([0])
        wf_data_chB = np.empty([0])

    # CLCfrq = 80

    # Main loop by files start
    for fileNo in range(len(fileList)):  # loop by files
        print('\n\n  *  File ', str(fileNo + 1), ' of', str(len(fileList)))
        print('  *  File path: ', str(fileList[fileNo]))

        # *** Opening datafile ***
        fname = source_directory + fileList[fileNo]

        # *********************************************************************************

        # *** Data file header read ***
        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, Channel, ReceiverMode,
            Mode, Navr, TimeRes, fmin, fmax, df, frequency, FreqPointsNum,
            data_block_size
        ] = FileHeaderReaderJDS(fname, 0, 0)

        # !!! Make automatic calculations of time and frequency resolutions for waveform mode!!!

        # Manually set frequencies for one channel mode

        FreqPointsNum = int(no_of_points_for_fft / 2)
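        # Note: only half of the FFT points are kept because the input waveform is real-valued,
        # so the second half of the FFT output mirrors the first (see the "mirror part" cut below).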
        '''
        if (Channel == 0 and int(CLCfrq/1000000) == 66) or (Channel == 1 and int(CLCfrq/1000000) == 66):
        #    FreqPointsNum = 8192
            frequency = np.linspace(0.0, 33.0, FreqPointsNum)

        # Manually set frequencies for two channels mode
        if Channel == 2 or (Channel == 0 and int(CLCfrq/1000000) == 33) or (Channel == 1 and int(CLCfrq/1000000) == 33):
            #FreqPointsNum = 8192
            frequency = np.linspace(16.5, 33.0, FreqPointsNum)

        # For new receiver (temporary):
        if Channel == 2 and int(CLCfrq/1000000) == 80:
            #FreqPointsNum = 8192
            frequency = np.linspace(0.0, 40.0, FreqPointsNum)
        '''

        # Create long data files and copy first data file header to them
        if fileNo == 0:
            #'''
            with open(fname, 'rb') as file:
                # *** Data file header read ***
                file_header = file.read(1024)

            # *** Creating a name for long timeline TXT file ***
            TLfile_name = df_filename + '_Timeline.txt'
            TLfile = open(
                TLfile_name,
                'w')  # Open and close to delete the file with the same name
            TLfile.close()

            # *** Creating a binary file with data for long data storage ***
            file_data_A_name = df_filename + '_Data_chA.dat'
            file_data_A = open(file_data_A_name, 'wb')
            file_data_A.write(file_header)
            file_data_A.seek(574)  # FFT size place in header
            file_data_A.write(np.int32(no_of_points_for_fft).tobytes())
            file_data_A.seek(624)  # Lb place in header
            file_data_A.write(np.int32(0).tobytes())
            file_data_A.seek(628)  # Hb place in header
            file_data_A.write(np.int32(FreqPointsNum).tobytes())
            file_data_A.seek(632)  # Wb place in header
            file_data_A.write(np.int32(FreqPointsNum).tobytes())
            file_data_A.seek(636)  # Navr place in header
            #file_data_A.write(bytes([np.int32(Navr)]))  # !!! To correct !!!
            #file_data_A.write(np.int32(no_of_points_for_fft/8192).tobytes()) # !!! Check for correctness !!!
            file_data_A.write(
                np.int32(1).tobytes())  # !!! Check for correctness !!!
            file_data_A.close()
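            # Note: the 1024-byte JDS header is copied into the new .dat file and the fields describing
            # the spectra (FFT size at offset 574, Lb/Hb/Wb at 624/628/632, Navr at 636) are overwritten,
            # presumably so that the standard spectra reader interprets the long file with the new resolution.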

            if Channel == 2:
                file_data_B_name = df_filename + '_Data_chB.dat'
                file_data_B = open(file_data_B_name, 'wb')
                file_data_B.write(file_header)
                file_data_B.seek(574)  # FFT size place in header
                file_data_B.write(np.int32(no_of_points_for_fft).tobytes())
                file_data_B.seek(624)  # Lb place in header
                file_data_B.write(np.int32(0).tobytes())
                file_data_B.seek(628)  # Hb place in header
                file_data_B.write(np.int32(FreqPointsNum).tobytes())
                file_data_B.seek(632)  # Wb place in header
                file_data_B.write(np.int32(FreqPointsNum).tobytes())
                file_data_B.seek(636)  # Navr place in header
                #file_data_B.write(bytes([np.int32(Navr)]))  # !!! To correct !!!
                file_data_B.write(np.int32(1).tobytes())
                file_data_B.close()

            del file_header
            #'''

        # Calculation of number of blocks and number of spectra in the file
        if Channel == 0 or Channel == 1:  # Single channel mode
            no_of_spectra_in_bunch = int(
                (df_filesize - 1024) /
                (no_of_bunches_per_file * 2 * data_block_size))
        else:  # Two channels mode
            no_of_spectra_in_bunch = int(
                (df_filesize - 1024) /
                (no_of_bunches_per_file * 4 * data_block_size))
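        # Note: the 1024 bytes subtracted are the file header; each data block holds data_block_size
        # 16-bit samples, i.e. 2 * data_block_size bytes per channel (hence the factor 4 when two
        # channels are interleaved), and the payload is split evenly between the requested bunches.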

        no_of_blocks_in_file = (df_filesize - 1024) / data_block_size

        fine_CLCfrq = (int(CLCfrq / 1000000.0) * 1000000.0)

        # Real time resolution of averaged spectra
        real_spectra_dt = float(no_of_points_for_fft / fine_CLCfrq)
        real_spectra_df = float((fine_CLCfrq / 2) / (no_of_points_for_fft / 2))
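        # Worked example with illustrative values (not read from the file): for CLCfrq = 66 MHz and
        # no_of_points_for_fft = 16384, real_spectra_dt = 16384 / 66e6 ≈ 0.248 ms and
        # real_spectra_df = 33e6 / 8192 ≈ 4.03 kHz.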

        if fileNo == 0:
            print(' Number of blocks in file:                    ',
                  no_of_blocks_in_file)
            print(' Number of spectra in bunch:                  ',
                  no_of_spectra_in_bunch)
            print(' Number of bunches to read in file:           ',
                  no_of_bunches_per_file)
            print(' Time resolution of calculated spectra:       ',
                  round(real_spectra_dt * 1000, 3), ' ms')
            print(' Frequency resolution of calculated spectra:  ',
                  round(real_spectra_df / 1000, 3), ' kHz')
            print('\n  *** Reading data from file *** \n')

        # *******************************************************************************
        #                           R E A D I N G   D A T A                             *
        # *******************************************************************************

        with open(fname, 'rb') as file:
            file.seek(1024)  # Jumping to 1024 byte from file beginning

            # *** DATA READING process ***

            # Preparing arrays for dynamic spectra
            #dyn_spectra_chA = np.zeros((int(data_block_size/2), no_of_bunches_per_file), float)
            #if Channel == 2:  # Two channels mode
            #    dyn_spectra_chB = np.zeros((int(data_block_size/2), no_of_bunches_per_file), float)

            # !!! Fake timing. Real timing to be done!!!
            TimeFigureScaleFig = np.linspace(0, no_of_bunches_per_file,
                                             no_of_bunches_per_file + 1)
            for i in range(no_of_bunches_per_file):
                TimeFigureScaleFig[i] = str(TimeFigureScaleFig[i])

            time_scale_bunch = []

            #bar = IncrementalBar(' File ' + str(fileNo+1) + ' of ' + str(len(fileList)) + ' reading: ',
            #                     max=no_of_bunches_per_file, suffix='%(percent)d%%')

            for bunch in range(no_of_bunches_per_file):

                #bar.next()

                # Reading and reshaping all data with time data
                if Channel == 0 or Channel == 1:  # Single channel mode
                    wf_data = np.fromfile(file,
                                          dtype='i2',
                                          count=no_of_spectra_in_bunch *
                                          data_block_size)
                    wf_data = np.reshape(
                        wf_data, [data_block_size, no_of_spectra_in_bunch],
                        order='F')
                if Channel == 2:  # Two channels mode
                    wf_data = np.fromfile(file,
                                          dtype='i2',
                                          count=2 * no_of_spectra_in_bunch *
                                          data_block_size)
                    wf_data = np.reshape(
                        wf_data, [data_block_size, 2 * no_of_spectra_in_bunch],
                        order='F')

                # Timing
                timeline_block_str = jds_waveform_time(wf_data, CLCfrq,
                                                       data_block_size)
                if Channel == 2:  # Two channels mode
                    timeline_block_str = timeline_block_str[0:int(
                        len(timeline_block_str) /
                        2)]  # Cut the timeline of second channel
                for i in range(len(timeline_block_str)):
                    time_scale_bunch.append(df_creation_timeUTC[0:10] + ' ' +
                                            timeline_block_str[i])  #  [0:12]

                # deleting the time blocks from waveform data
                real_data_block_size = data_block_size - 4
                wf_data = wf_data[0:real_data_block_size, :]

                # *** !!! Not sure if it is necessary now !!! ***
                # Scaling of the data - seems to be wrong in absolute value
                wf_data = wf_data / 32768.0

                # Separation data into channels
                if Channel == 0 or Channel == 1:  # Single channel mode
                    wf_data_chA = np.append(
                        wf_data_chA,
                        np.reshape(
                            wf_data,
                            [real_data_block_size * no_of_spectra_in_bunch, 1],
                            order='F'))
                    del wf_data  # Deleting unnecessary array name just in case

                if Channel == 2:  # Two channels mode

                    # Separating the data into two channels
                    #print(wf_data.size, wf_data.shape)
                    wf_data = np.reshape(
                        wf_data,
                        [2 * real_data_block_size * no_of_spectra_in_bunch, 1],
                        order='F')
                    wf_data_chA = np.append(wf_data_chA, wf_data[0:(
                        2 * real_data_block_size *
                        no_of_spectra_in_bunch):2])  # Separation to channel A
                    wf_data_chB = np.append(wf_data_chB, wf_data[1:(
                        2 * real_data_block_size *
                        no_of_spectra_in_bunch):2])  # Separation to channel B
                    del wf_data

                no_of_spectra_to_compute = int(
                    np.floor(len(wf_data_chA) / no_of_points_for_fft))

                print(' Bunch # ', bunch + 1,
                      ' Number of true spectra in bunch: ',
                      no_of_spectra_to_compute)

                # Cutting the full array and saving residuals to buffer
                ready_wf_array_chA = wf_data_chA[:no_of_spectra_to_compute *
                                                 no_of_points_for_fft]
                ready_wf_array_chA = np.reshape(
                    ready_wf_array_chA,
                    [no_of_points_for_fft, no_of_spectra_to_compute],
                    order='F')
                wf_data_chA = wf_data_chA[no_of_spectra_to_compute *
                                          no_of_points_for_fft:]
                if Channel == 2:
                    ready_wf_array_chB = wf_data_chB[:
                                                     no_of_spectra_to_compute *
                                                     no_of_points_for_fft]
                    ready_wf_array_chB = np.reshape(
                        ready_wf_array_chB,
                        [no_of_points_for_fft, no_of_spectra_to_compute],
                        order='F')
                    wf_data_chB = wf_data_chB[no_of_spectra_to_compute *
                                              no_of_points_for_fft:]

                # preparing matrices for spectra
                spectra_chA = np.zeros_like(ready_wf_array_chA)
                if Channel == 2:
                    spectra_chB = np.zeros_like(ready_wf_array_chB)

                # Calculation of spectra
                for i in range(no_of_spectra_to_compute):
                    spectra_chA[:, i] = np.power(
                        np.abs(np.fft.fft(ready_wf_array_chA[:, i])), 2)
                    if Channel == 2:  # Two channels mode
                        spectra_chB[:, i] = np.power(
                            np.abs(np.fft.fft(ready_wf_array_chB[:, i])), 2)
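                # Note: an equivalent vectorized form (a sketch, transforming all columns at once) would be:
                # spectra_chA = np.abs(np.fft.fft(ready_wf_array_chA, axis=0)) ** 2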

                # Storing only first (left) mirror part of spectra
                spectra_chA = spectra_chA[:int(no_of_points_for_fft / 2), :]
                if Channel == 2:
                    spectra_chB = spectra_chB[:int(no_of_points_for_fft /
                                                   2), :]

                # At 33 MHz clock the spectrum is usually upside down; flip it up/down to correct this
                if int(CLCfrq / 1000000) == 33:
                    spectra_chA = np.flipud(spectra_chA)
                    if Channel == 2:
                        spectra_chB = np.flipud(spectra_chB)
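                # Note: at the 33 MHz clock the observed band apparently lies in the second Nyquist zone,
                # so the computed spectrum comes out mirrored in frequency and is flipped here to restore
                # the natural frequency order.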
                # Saving spectra data to dat file
                temp = spectra_chA.transpose().copy(order='C')
                file_data_A = open(file_data_A_name, 'ab')
                file_data_A.write(temp)
                file_data_A.close()
                if Channel == 2:
                    temp = spectra_chB.transpose().copy(order='C')
                    file_data_B = open(file_data_B_name, 'ab')
                    file_data_B.write(temp)
                    file_data_B.close()

                # Saving time data to long timeline file
                with open(TLfile_name, 'a') as TLfile:
                    for i in range(no_of_spectra_in_bunch):
                        TLfile.write(time_scale_bunch[i] + ' \n')

            #bar.finish()

        file.close()  # Redundant here: the 'with' block above has already closed the file