Example #1
def create_gammatone(filename):
    """
    Create a Gammatone filter bank and apply it to a wav file defined by filename
    :param filename: wav file
    :return:
    """
    filename = os.path.join(PATH_DATA, filename)
    pickle_filename = filename.split('.wav')[0]

    # Only recompute if the pickled filter-bank output does not exist yet
    if not os.path.exists(pickle_filename):
        fs, signal = scipy.io.wavfile.read(filename)

        signal1 = signal[:, 0]
        signal2 = signal[:, 1]

        signal1 = butter_lowpass_filter(signal1, 1000, fs)
        signal2 = butter_lowpass_filter(signal2, 1000, fs)

        gamma_sig1 = ToolGammatoneFb(signal1, fs, iNumBands=NUM_BANDS)
        gamma_sig2 = ToolGammatoneFb(signal2, fs, iNumBands=NUM_BANDS)

        gammatone = np.stack((gamma_sig1, gamma_sig2), axis=-1)

        with open(pickle_filename, "wb") as f:
            pickle.dump(gammatone, f)

    return True
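Every example on this page calls a `butter_lowpass_filter` helper that is defined elsewhere in the repository. A minimal sketch of what such a helper typically looks like, assuming it wraps `scipy.signal.butter` and `scipy.signal.filtfilt` (the real helper may use `lfilter` or a different default order):

import scipy.signal

def butter_lowpass_filter(data, cutoff, fs, order=5):
    # Normalize the cutoff to the Nyquist frequency, design a Butterworth
    # low-pass, and apply it forward-backward for zero phase distortion.
    nyquist = 0.5 * fs
    b, a = scipy.signal.butter(order, cutoff / nyquist, btype='low')
    return scipy.signal.filtfilt(b, a, data)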
Example #2
def method_Def(Amplit, SamplingFreq, ax, Tstamp):

    High = 10e6

    Time = 1e3 * (np.arange(0, Amplit.size, 1) / SamplingFreq)
    Amplit = utils.butter_lowpass_filter(Amplit, High, SamplingFreq, order=1)

    # Expected pulse spacing, converted to a minimum peak distance in samples
    Interval = 1.78e-6
    mpd = int(Interval * SamplingFreq)

    indexes = utils.detect_peaks(Amplit, mpd=mpd)
    # Baseline sample taken half an interval before each detected peak
    Diff = int((Interval / 2) * SamplingFreq)

    Amplit_p = Amplit[indexes] - Amplit[indexes - Diff]
    Time_p = Time[indexes]

    return [Time_p, Amplit_p]
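`utils.detect_peaks` is not shown on this page; judging from the call sites, its `mpd` argument is a minimum peak distance in samples. Under that assumption, an equivalent call with the standard library function `scipy.signal.find_peaks` would be:

import scipy.signal

# find_peaks' `distance` plays the role of detect_peaks' `mpd`:
# no two reported peaks are closer than `mpd` samples.
indexes, _ = scipy.signal.find_peaks(Amplit, distance=mpd)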
Example #3
def methodC(Amplit, SamplingFreq, ax, Tstamp):
    Low = 5e3
    Dec = 100

    Time = 1e3 * (np.arange(0, Amplit.size, 1) / SamplingFreq)
    Amplit = utils.butter_lowpass_filter(Amplit, Low, SamplingFreq, order=1)

    Time_p = Time[::Dec]
    Amplit_p = Amplit[::Dec]

    #Idx = np.where(Amplit_p == np.max(Amplit_p))
    #print(Time[Idx])
    #Time_p = Time_p - Time_p[Idx]
    #Time_p = Time_p - 0.155616

    #ax.plot(Time_p,Amplit_p)

    return Time_p, Amplit_p
Example #4
def methodA(Amplit, SamplingFreq, ax, Tstamp):

    Low = 30e6
    Amplit = utils.butter_lowpass_filter(Amplit, Low, SamplingFreq, order=1)

    Time = 1e3 * (np.arange(0, Amplit.size, 1) / SamplingFreq)

    mpd = int(200e-9 * SamplingFreq)
    indexes = utils.detect_peaks(Amplit, mpd=mpd)

    # IntegralAround = int(1.13e-6 * SamplingFreq)
    IntegralAround = int(150e-9 * SamplingFreq)  # integration half-window of 150 ns
    indexes = indexes[10:len(indexes) - 10]  # discard peaks near the edges

    Amplit_p = []
    Baseline = []
    Time_p = Time[indexes]

    for i in indexes:
        Baseline_tmp = Amplit[i - IntegralAround]
        #Baseline.append(Baseline_tmp)
        Amplit_p.append(
            np.sum(Amplit[i - IntegralAround:i + IntegralAround] -
                   Baseline_tmp))
        #Baseline.append(np.sum(Amplit[i - (3 * IntegralAround):i - (1*IntegralAround)]))

    Vector = np.asarray(Amplit_p)  #-np.asarray(Baseline)

    plt.plot(Time, Amplit)
    plt.plot(Time[indexes], Amplit[indexes], '.r')
    plt.plot(Time[indexes - IntegralAround], Amplit[indexes - IntegralAround],
             '.b')
    plt.plot(Time[indexes], Amplit[indexes] - Amplit[indexes - IntegralAround],
             '.k')

    # plt.plot(Time_p,Baseline)
    # plt.plot(Time_p,Vector)
    plt.show()

    #Amplit_p = np.asarray(Amplit_p) - np.asarray(Baseline)

    return Time_p, Amplit_p
Example #5
def methodD(Amplit, SamplingFreq, ax, Tstamp):
    Low = 10e6

    Time = 1e3 * (np.arange(0, Amplit.size, 1) / SamplingFreq)
    Amplit = utils.butter_lowpass_filter(Amplit, Low, SamplingFreq, order=1)

    mpd = int(100e-9 * SamplingFreq)  # minimum peak distance in samples (unused below)
    Amplit = Amplit / np.max(Amplit)  # normalize to unit peak amplitude

    prominence = 0.01
    maxtab, mintab = utils.peakdet(Amplit, prominence)  #, mpd=mpd)

    ax.plot(1e3 * maxtab[:, 0] / SamplingFreq, maxtab[:, 1], '.b')
    ax.plot(1e3 * mintab[:, 0] / SamplingFreq, mintab[:, 1], '.r')

    #ax.plot(Time, Amplit, 'b')
    #ax.plot(Time[Idx_Top], Amplit[Idx_Top], '.r')
    #ax.plot(Time[Idx_Bot], Amplit[Idx_Bot], '.g')

    Top = [maxtab[:, 0], maxtab[:, 1]]
    Bot = [mintab[:, 0], mintab[:, 1]]
    Bot_R = utils.resample(Bot, Top)

    Amplit_p = Top[1] - Bot_R[1]
    Time_p = Top[0] * 1e3 / SamplingFreq

    Averaging_Window = 7
    Amplit_p = np.convolve(Amplit_p,
                           np.ones((Averaging_Window, )) / Averaging_Window,
                           mode='valid')
    Time_p = np.convolve(Time_p,
                         np.ones((Averaging_Window, )) / Averaging_Window,
                         mode='valid')

    ax.plot(Time_p, Amplit_p, 'k')
    #Time_p = 0
    #Amplit_p = 0

    return Time_p, Amplit_p
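`utils.peakdet` here appears to be a port of the classic Billauer peak detector, returning `maxtab`/`mintab` arrays whose rows are `[index, value]` pairs. A rough equivalent built on `scipy.signal.find_peaks`, assuming `prominence` means a minimum peak-to-surroundings delta (the repository's version may differ at the boundaries):

import numpy as np
import scipy.signal

def peakdet_sketch(v, delta):
    # Maxima of v, and maxima of -v (i.e. minima), each kept only if they
    # stand out from their surroundings by at least `delta`.
    max_idx, _ = scipy.signal.find_peaks(v, prominence=delta)
    min_idx, _ = scipy.signal.find_peaks(-v, prominence=delta)
    maxtab = np.column_stack((max_idx, v[max_idx]))
    mintab = np.column_stack((min_idx, v[min_idx]))
    return maxtab, mintab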
Example #6
def methodA2(Amplit, SamplingFreq, ax, Tstamp):

    Low = 20e6
    Amplit = utils.butter_lowpass_filter(Amplit, Low, SamplingFreq, order=1)
    Time = 1e3 * (np.arange(0, Amplit.size, 1) / SamplingFreq)
    Th = np.max(Amplit) / 2

    mask1 = (Amplit[:-1] < Th) & (Amplit[1:] > Th)
    Idx = np.flatnonzero(mask1) + 1
    Offset = int(len(Idx) / 3)
    Idx = Idx[Offset:-Offset]  # We take only central third

    mpd = np.min(np.diff(Idx))
    IntegralAround = int(mpd / 2)

    indexes = utils.detect_peaks(Amplit, mpd=mpd)
    indexes = indexes[10:len(indexes) - 10]

    Amplit_p = []
    Baseline = []
    Time_p = []

    for i in indexes:
        Baseline_tmp = Amplit[i - IntegralAround]
        if Baseline_tmp < Amplit[i]:
            Amplit_p.append(
                np.sum(Amplit[i - IntegralAround:i + IntegralAround] -
                       Baseline_tmp))
            Time_p.append(Time[i])

    # plt.plot(Time,Amplit,'b')
    # plt.plot(Time[indexes],Amplit[indexes],'.r')
    # plt.plot(Time[indexes-IntegralAround],Amplit[indexes-IntegralAround],'.g')
    # plt.plot(Time[indexes], Amplit[indexes]-Amplit[indexes - IntegralAround],'k')
    # plt.show()
    return Time_p, Amplit_p
Example #7
    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.empty((self.batch_size, *self.dim, self.n_channels), dtype=float)
        y = np.empty(self.batch_size, dtype=np.float32)

        # Generate data
        for i, ID in enumerate(list_IDs_temp):

            if self.features == 'gammatone':
                filename = os.path.join(self.path_data, ID.split('.wav')[0])

                with open(filename, 'rb') as f:
                    input_x = pickle.load(f)
                input_x = input_x.reshape(input_x.shape[1], input_x.shape[0])
                input_x = np.expand_dims(input_x, axis=-1)

            else:
                filename = os.path.join(self.path_data, ID)

                fs, signal = wavfile.read(filename)
                signal = normalize_audio(signal)
                signal1 = signal[:, 0]
                signal2 = signal[:, 1]

                if self.resampling:
                    nb_samples = self.resampling
                    signal1 = np.array(scipy.signal.resample(signal1, nb_samples), dtype=np.int16)
                    signal2 = np.array(scipy.signal.resample(signal2, nb_samples), dtype=np.int16)
                    fs = nb_samples  # sample count doubles as the new rate; only valid for 1-second clips

                if self.features == 'gcc-phat':
                    window_hanning = np.hanning(len(signal1))
                    delay, gcc = gcc_phat(signal1 * window_hanning, signal2 * window_hanning, fs, self.max_tau)
                    input_x = np.expand_dims(gcc, axis=-1)

                elif self.features == 'melspec':
                    input_1 = librosa.feature.melspectrogram(y=signal1.astype(float), sr=fs)
                    input_2 = librosa.feature.melspectrogram(y=signal2.astype(float), sr=fs)
                    input_x = np.stack((input_1, input_2), axis=-1)

                elif self.features == 'mfcc':
                    input_1 = librosa.feature.mfcc(y=signal1.astype(float), sr=fs, n_mfcc=60)
                    input_2 = librosa.feature.mfcc(y=signal2.astype(float), sr=fs, n_mfcc=60)

                    input_x = np.hstack((input_1, input_2))
                    input_x = np.expand_dims(input_x, axis=-1)

                elif self.features == 'gammagram':
                    filename = os.path.join(self.path_data, ID.split('.wav')[0])

                    if os.path.exists(filename):
                        with open(filename, 'rb') as f:
                            input_x = pickle.load(f)

                    else:

                        fft_gram1, fft_gram2 = get_fft_gram(signal, fs)
                        input_x = np.stack((fft_gram1, fft_gram2), axis=-1)
                        with open(filename, "wb") as f:
                            pickle.dump(input_x, f)

                else:
                    signal1 = butter_lowpass_filter(signal1, 1000, fs)
                    signal2 = butter_lowpass_filter(signal2, 1000, fs)

                    input_x = np.stack((signal1, signal2), axis=-1)

            # Store sample
            X[i] = input_x

            # Store class
            y[i] = self.labels[ID]

        if self.reg:
            return X, y
        else:
            return X, tf.keras.utils.to_categorical(y, num_classes=self.n_classes)
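The `gcc_phat` feature above is the generalized cross-correlation with phase transform, commonly used for time-difference-of-arrival estimation between two microphone channels. A minimal sketch of the standard formulation (the repository's version may differ in interpolation and return order):

import numpy as np

def gcc_phat_sketch(sig, refsig, fs, max_tau=None):
    # Cross-power spectrum, whitened so that only phase information remains
    n = sig.size + refsig.size
    SIG = np.fft.rfft(sig, n=n)
    REFSIG = np.fft.rfft(refsig, n=n)
    R = SIG * np.conj(REFSIG)
    cc = np.fft.irfft(R / (np.abs(R) + 1e-15), n=n)

    max_shift = n // 2
    if max_tau:
        max_shift = min(int(fs * max_tau), max_shift)
    # Re-order so that lag 0 sits in the middle of the correlation window
    cc = np.concatenate((cc[-max_shift:], cc[:max_shift + 1]))

    shift = np.argmax(np.abs(cc)) - max_shift
    tau = shift / float(fs)  # estimated delay in seconds
    return tau, cc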
Example #8
def gsr_preprocessing(signals):
    ''' Preprocessing for GSR signals '''
    der_signals = np.gradient(signals)
    con_signals = 1.0 / signals
    nor_con_signals = (con_signals -
                       np.mean(con_signals)) / np.std(con_signals)

    mean = np.mean(signals)
    der_mean = np.mean(der_signals)
    neg_der_mean = np.mean(der_signals[der_signals < 0])
    neg_der_pro = float(der_signals[der_signals < 0].size) / float(
        der_signals.size)

    # Count local minima in the raw signal
    local_min = 0
    for i in range(1, signals.shape[0] - 1):
        if signals[i - 1] > signals[i] and signals[i] < signals[i + 1]:
            local_min += 1

    # Using SC calculates rising time
    det_nor_signals, trend = detrend(nor_con_signals)
    lp_det_nor_signals = butter_lowpass_filter(det_nor_signals, 0.5, 128.)
    der_lp_det_nor_signals = np.gradient(lp_det_nor_signals)

    rising_time = 0
    rising_cnt = 0
    for i in range(der_lp_det_nor_signals.size - 1):
        if der_lp_det_nor_signals[i] > 0:
            rising_time += 1
            if der_lp_det_nor_signals[i + 1] < 0:
                rising_cnt += 1

    avg_rising_time = rising_time * (1. / 128.) / rising_cnt if rising_cnt != 0 else 0

    freqs, power = getfreqs_power(signals,
                                  fs=128.,
                                  nperseg=signals.size,
                                  scaling='spectrum')
    # Power in 21 bands of 0.1 Hz width, stepped 0.8/7 Hz apart (roughly 0 to 2.4 Hz)
    power_0_24 = []
    for i in range(21):
        power_0_24.append(
            getBand_Power(freqs,
                          power,
                          lower=0 + (i * 0.8 / 7),
                          upper=0.1 + (i * 0.8 / 7)))

    SCSR, _ = detrend(butter_lowpass_filter(nor_con_signals, 0.2, 128.))
    SCVSR, _ = detrend(butter_lowpass_filter(nor_con_signals, 0.08, 128.))

    zero_cross_SCSR = 0
    zero_cross_SCVSR = 0
    peaks_cnt_SCSR = 0
    peaks_cnt_SCVSR = 0
    peaks_value_SCSR = 0.
    peaks_value_SCVSR = 0.

    zc_idx_SCSR = np.array([], int)  # must be int, otherwise it will be float
    zc_idx_SCVSR = np.array([], int)
    for i in range(nor_con_signals.size - 1):
        if SCSR[i] * next((j for j in SCSR[i + 1:] if j != 0), 0) < 0:
            zero_cross_SCSR += 1
            zc_idx_SCSR = np.append(zc_idx_SCSR, i + 1)
        if SCVSR[i] * next((j for j in SCVSR[i + 1:] if j != 0), 0) < 0:
            zero_cross_SCVSR += 1
            zc_idx_SCVSR = np.append(zc_idx_SCVSR, i)

    for i in range(zc_idx_SCSR.size - 1):
        peaks_value_SCSR += np.absolute(
            SCSR[zc_idx_SCSR[i]:zc_idx_SCSR[i + 1]]).max()
        peaks_cnt_SCSR += 1
    for i in range(zc_idx_SCVSR.size - 1):
        peaks_value_SCVSR += np.absolute(
            SCVSR[zc_idx_SCVSR[i]:zc_idx_SCVSR[i + 1]]).max()
        peaks_cnt_SCVSR += 1

    zcr_SCSR = zero_cross_SCSR / (nor_con_signals.size / 128.)
    zcr_SCVSR = zero_cross_SCVSR / (nor_con_signals.size / 128.)

    mean_peak_SCSR = peaks_value_SCSR / peaks_cnt_SCSR if peaks_cnt_SCSR != 0 else 0
    mean_peak_SCVSR = peaks_value_SCVSR / peaks_cnt_SCVSR if peaks_cnt_SCVSR != 0 else 0

    features = [mean, der_mean, neg_der_mean, neg_der_pro, local_min, avg_rising_time] + \
        power_0_24 + [zcr_SCSR, zcr_SCVSR, mean_peak_SCSR, mean_peak_SCVSR]

    return features
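`getfreqs_power` and `getBand_Power` are helpers from the surrounding repository. Judging from the call site (`fs`, `nperseg`, `scaling='spectrum'`), plausible sketches are a thin `scipy.signal.welch` wrapper and a band-power sum; both below are assumptions, not the repository's code:

import numpy as np
import scipy.signal

def getfreqs_power(signals, fs, nperseg, scaling):
    # Power spectrum of the signal; 'spectrum' scaling gives power per bin
    freqs, power = scipy.signal.welch(signals, fs=fs, nperseg=nperseg,
                                      scaling=scaling)
    return freqs, power

def getBand_Power(freqs, power, lower, upper):
    # Total power within the [lower, upper) frequency band
    band = (freqs >= lower) & (freqs < upper)
    return np.sum(power[band])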
Example #9
def process_position(data,
                     configuration,
                     sampling_frequency,
                     StartTime,
                     showplot=False,
                     filename=None,
                     return_processing=False,
                     camelback_threshold_on=True,
                     INOUT='IN'):
    """
    Processing of the angular position based on the raw data of the OPS
    Credits : Jose Luis Sirvent (BE-BI-PM, CERN)
    """
    if configuration is not None:
        # Retrieval of processing parameters:
        #config = configparser.RawConfigParser()
        #config.read(parameter_file)
        SlitsperTurn = configuration.ops_slits_per_turn  #eval(config.get('OPS processing parameters', 'slits_per_turn'))
        rdcp = configuration.ops_relative_distance_correction_prameters  #eval(config.get('OPS processing parameters', 'relative_distance_correction_prameters'))
        prominence = configuration.ops_prominence  #eval(config.get('OPS processing parameters', 'prominence'))
        camelback_threshold = configuration.ops_camelback_threshold  #eval(config.get('OPS processing parameters', 'camelback_threshold'))
        OPS_processing_filter_freq = configuration.ops_low_pass_filter_freq  #eval(config.get('OPS processing parameters', 'OPS_processing_filter_freq'))
        centroids = configuration.ops_centroids

        References_Timming = [
            configuration.def_ops_in_ref, configuration.def_ops_out_ref
        ]  #eval(config.get('OPS processing parameters', 'References_Timming'))
        AngularIncrement = 2 * np.pi / SlitsperTurn

        if INOUT == 'OUT':
            data = np.flip(data, 0)

        threshold_reference = np.amax(
            data) - camelback_threshold * np.mean(data)

        if camelback_threshold_on:
            data[np.where(data > threshold_reference)] = threshold_reference

        max_data = np.amax(data)
        min_data = np.amin(data)

        data = utils.butter_lowpass_filter(data,
                                           OPS_processing_filter_freq,
                                           sampling_frequency,
                                           order=5)

        data = data - min_data
        data = data / max_data

        # *** This is the part that takes most of the time!
        # Method A:
        # ---------
        # maxtab, mintab = utils.peakdet(data, prominence)
        # false = np.where(mintab[:, 1] > np.mean(maxtab[:, 1]))
        # mintab = np.delete(mintab, false, 0)
        #
        # locs_up = np.array(maxtab)[:, 0]
        # pck_up = np.array(maxtab)[:, 1]
        #
        # locs_dwn = np.array(mintab)[:, 0]
        # pck_dwn = np.array(mintab)[:, 1]

        # Method B: Seems Faster
        # ----------------------
        locs_up, _ = find_peaks(data, prominence=prominence)
        locs_dwn, _ = find_peaks(-data + 1, prominence=prominence)
        pck_up = data[locs_up]
        pck_dwn = data[locs_dwn]

        todelete = np.where(pck_dwn > np.mean(pck_up))
        locs_dwn = np.delete(locs_dwn, todelete, 0)
        pck_dwn = np.delete(pck_dwn, todelete, 0)

        LengthMin = np.minimum(pck_up.size, pck_dwn.size)

        # Crossing position evaluation
        Crosingpos = np.ones((2, LengthMin))
        Crosingpos[1][:] = np.arange(1, LengthMin + 1)

        if centroids:
            # ==========================================================================
            # Position processing based on centroids
            # ==========================================================================
            Crosingpos[0][:] = locs_dwn[0:LengthMin]
            A = np.ones(LengthMin)
        else:
            # ==========================================================================
            # Position processing based on crossing points: Rising edges only
            # ==========================================================================
            IndexDwn = 0
            IndexUp = 0
            A = []

            # Position calculation loop:
            for i in range(0, LengthMin - 1):

                # Ensure crossing point in rising edge (locs_dwn < locs_up)
                while locs_dwn[IndexDwn] >= locs_up[IndexUp]:
                    IndexUp += 1

                while locs_dwn[IndexDwn + 1] < locs_up[IndexUp]:
                    IndexDwn += 1

                # Calculate threshold for the current window: mean point
                Threshold = (data[int(locs_dwn[IndexDwn])] +
                             data[int(locs_up[IndexUp])]) / 2
                # Find time on crossing point:
                b = int(locs_dwn[IndexDwn]) + np.where(
                    data[int(locs_dwn[IndexDwn]):int(locs_up[IndexUp])] >=
                    Threshold)[0][0]
                idx_n = np.where(
                    data[int(locs_dwn[IndexDwn]):int(locs_up[IndexUp])] <
                    Threshold)[0]
                idx_n = idx_n[::-1][0]
                a = int(locs_dwn[IndexDwn]) + idx_n

                Crosingpos[0, i] = (Threshold - data[int(a)]) * (b - a) / (
                    data[int(b)] - data[int(a)]) + a

                # if showplot is True or showplot is 1:
                A = np.append(A, Threshold)

                # Move to next window:
                IndexDwn = IndexDwn + 1
                IndexUp = IndexUp + 1

        # ==========================================================================
        # Position loss compensation
        # ==========================================================================
        # Un-corrected position and time
        Data_Time = Crosingpos[0][:] * 1 / sampling_frequency
        Data_Pos = Crosingpos[1][:] * AngularIncrement
        # Relative-distances method for slit-loss compensation:
        Distances = np.diff(Crosingpos[0][0:Crosingpos.size - 1])

        # Method 2: Considering average of several previous periods
        previous_periods = 4
        cnt = 0
        DistancesAVG = []

        for i in range(previous_periods, len(Distances)):
            DistancesAVG.append(np.mean(Distances[i - previous_periods:i]))

        RelDistr = np.divide(Distances[previous_periods:len(Distances)],
                             DistancesAVG)

        # Method 1: Only consider previous transition
        #RelDistr = np.divide(Distances[1:Distances.size], Distances[0:Distances.size - 1])

        # Search of compensation points:
        PointsCompensation = np.where(RelDistr >= rdcp[0])[0]

        for b in np.arange(0, PointsCompensation.size):

            if RelDistr[PointsCompensation[b]] >= rdcp[2]:
                # These are the references (metallic disk) or 3 slit losses
                Data_Pos[(
                    PointsCompensation[b] + 1 +
                    previous_periods):Data_Pos.size] = Data_Pos[(
                        PointsCompensation[b] + 1 +
                        previous_periods):Data_Pos.size] + 3 * AngularIncrement

            elif RelDistr[PointsCompensation[b]] >= rdcp[1]:
                # These are 2 slit losses
                Data_Pos[(
                    PointsCompensation[b] + 1 +
                    previous_periods):Data_Pos.size] = Data_Pos[(
                        PointsCompensation[b] + 1 +
                        previous_periods):Data_Pos.size] + 2 * AngularIncrement

            elif RelDistr[PointsCompensation[b]] >= rdcp[0]:
                # These are 1 slit losses
                Data_Pos[(
                    PointsCompensation[b] + 1 +
                    previous_periods):Data_Pos.size] = Data_Pos[(
                        PointsCompensation[b] + 1 +
                        previous_periods):Data_Pos.size] + 1 * AngularIncrement

        # ==========================================================================
        # Alignment to First reference and Storage
        # ==========================================================================

        if StartTime > References_Timming[0] / 1000:
            # This is the OUT
            Rtiming = References_Timming[1]
            Offset = np.where(
                ((StartTime + len(data) / sampling_frequency) -
                 Data_Time[0:Data_Time.size - 1]) < (Rtiming / 1000))[0][0]
        else:
            # This is the IN
            Rtiming = References_Timming[0]
            Offset = np.where(Data_Time[0:Data_Time.size - 1] +
                              StartTime > (Rtiming / 1000))[0][0]

        try:
            _IndexRef1 = Offset + np.where(
                RelDistr[Offset:LengthMin - Offset] > rdcp[1])[0]
            IndexRef1 = _IndexRef1[0]
            Data_Pos = Data_Pos - Data_Pos[IndexRef1]
        except IndexError:
            IndexRef1 = 0
            print('Disk Reference not found!')

        Data = np.ndarray((2, Data_Pos.size - 1))

        if INOUT == 'OUT':
            Data[0] = 1e3 * ((StartTime + len(data) / sampling_frequency) -
                             Data_Time[0:Data_Time.size - 1])
        else:
            Data[0] = 1e3 * (Data_Time[0:Data_Time.size - 1] + StartTime)

        Data[1] = Data_Pos[0:Data_Pos.size - 1]

        # ==========================================================================
        # Plotting script
        # ==========================================================================
        # if showplot is True or showplot is 1:
        #     fig = plt.figure(figsize=(11, 5))
        #     ax1 = fig.add_subplot(111)
        #     mplt.make_it_nice(ax1)
        #     plt.axhspan(0, threshold_reference / max_data, color='black', alpha=0.1)
        #     plt.axvspan(1e3 * StartTime + 1e3 * (data.size * 1 / 4) / sampling_frequency,
        #                 1e3 * StartTime + 1e3 * (data.size * 3 / 4) / sampling_frequency, color='black', alpha=0.1)
        #     plt.plot(1e3 * StartTime + 1e3 * np.arange(0, data.size) * 1 / sampling_frequency, data, linewidth=0.5)
        #     plt.plot(1e3 * StartTime + 1e3 * locs_up * 1 / sampling_frequency, pck_up, '.', MarkerSize=1.5)
        #     plt.plot(1e3 * StartTime + 1e3 * locs_dwn * 1 / sampling_frequency, pck_dwn, '.', MarkerSize=1.5)
        #     plt.plot(1e3 * StartTime + 1e3 * Crosingpos[0][0:A.size] * 1 / sampling_frequency, A, linewidth=0.5)
        #     ax1.set_title('Optical position sensor processing', loc='left')
        #     ax1.set_xlabel('Time (um)')
        #     ax1.set_ylabel('Normalized amplitude of signal (A.U.)')
        #     plt.show(block=False)
        # # plt.plot(1e3*StartTime+1e3*IndexRef1*1/sampling_frequency + StartTime, data[IndexRef1], 'x')
        # #        plt.plot(1e3*StartTime+1e3*np.arange(1,Distances.size)*1/sampling_frequency + StartTime, RelDistr, '.')

        if return_processing is True:
            if INOUT == 'OUT':
                return [
                    0, 0, 1e3 * (StartTime + len(data) / sampling_frequency) -
                    1e3 * locs_up * 1 / sampling_frequency, pck_up,
                    1e3 * (StartTime + len(data) / sampling_frequency) -
                    1e3 * locs_dwn * 1 / sampling_frequency, pck_dwn,
                    1e3 * (StartTime + len(data) / sampling_frequency) -
                    1e3 * Crosingpos[0][0:A.size] * 1 / sampling_frequency, A,
                    threshold_reference / max_data,
                    1e3 * (StartTime + len(data) / sampling_frequency) -
                    1e3 * Crosingpos[0][IndexRef1] * (1 / sampling_frequency),
                    Data
                ]
            else:
                return [
                    0, 0,
                    1e3 * StartTime + 1e3 * locs_up * 1 / sampling_frequency,
                    pck_up,
                    1e3 * StartTime + 1e3 * locs_dwn * 1 / sampling_frequency,
                    pck_dwn, 1e3 * StartTime +
                    1e3 * Crosingpos[0][0:A.size] * 1 / sampling_frequency, A,
                    threshold_reference / max_data, 1e3 * StartTime +
                    1e3 * Crosingpos[0][IndexRef1] * (1 / sampling_frequency),
                    Data
                ]

        else:
            return Data
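A hypothetical call to `process_position`, assuming a configuration object that exposes the attributes read at the top of the function. All values below are illustrative only, not taken from the repository's OPS configuration files:

from types import SimpleNamespace

# Illustrative configuration; attribute names match those read by the function
configuration = SimpleNamespace(
    ops_slits_per_turn=720,
    ops_relative_distance_correction_prameters=[1.5, 2.5, 3.5],
    ops_prominence=0.1,
    ops_camelback_threshold=0.5,
    ops_low_pass_filter_freq=1e6,
    ops_centroids=False,
    def_ops_in_ref=50,
    def_ops_out_ref=450,
)

# raw_ops_signal: 1-D numpy array of raw OPS samples (hypothetical input)
angular_position = process_position(raw_ops_signal,
                                    configuration,
                                    sampling_frequency=100e6,
                                    StartTime=0.0,
                                    INOUT='IN')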