# Example #1
# 0
def signals_generator(noises, noise_mean, sufix=''):
    """Build hybrid signal events by adding jittered pulses on top of real noise.

    For every noise event an amplitude is drawn from an exponential
    distribution, multiplied by a jittered reference pulse, and summed with
    the noise samples.  The signals, the true amplitudes and the jitter info
    are saved under the hybrid results folder for the given noise mean.

    Args:
        noises: DataFrame of noise events, one 7-sample window per row.
        noise_mean: noise mean ("mu") used only to name the output folder.
        sufix: optional suffix (sic) appended to output file names.
    """
    dimension = 7
    amplitude_mean = 30  # Exponential signal mean
    number_of_events = len(noises)
    number_of_jitter_info = dimension + 1  # jitter value + the 7 pulse samples

    print(
        f'Hybrid Signal Generator amp{amplitude_mean} using mean: {noise_mean}{sufix}'
    )

    folder_name = f'results/hybrid/amplitude_mean{amplitude_mean}/base_data/mu{noise_mean}'

    A = np.zeros(number_of_events)  # true (simulated) amplitudes
    jitters = np.zeros((number_of_events, number_of_jitter_info))
    data = np.zeros((number_of_events, dimension))

    for event in range(number_of_events):
        # Simulating true Amplitude
        A[event] = np.random.exponential(amplitude_mean)
        jitter_pulse, jitter = pulse_helper.get_jitter_pulse()
        data[event, :] = noises.values[event][:] + A[event] * jitter_pulse

        # First column keeps the jitter value; the rest keep the pulse shape.
        jitters[event][0] = int(jitter)
        jitters[event][1:] = jitter_pulse

    file_helper.save_file_in(f'tile_signal{sufix}', folder_name, data)
    file_helper.save_file_in(f'tile_A{sufix}', folder_name, A)
    _save_jitter_file(f'jitter{sufix}', folder_name, jitters)
def _apply_pileup_indexes_when_tilecal(i, pu_indexes, x):
    """Add one jittered pile-up pulse into ``x`` centered at ``pu_indexes[i]``.

    The 7-sample pulse ``pu`` is indexed as ``pu[j + 4]`` for window offsets
    ``j``; near either border of ``x`` the window is clipped so no sample
    outside the array is written.

    Args:
        i: index into ``pu_indexes`` selecting the pile-up position to apply.
        pu_indexes: array of sample positions that receive pile-up.
        x: 1-D data array, modified in place.

    Returns:
        The same array ``x`` with the pulse added.
    """
    # Fix: the original read a module-level `number_of_data` that is never
    # defined (it is a local of pu_generator), raising NameError on the
    # right-border branch.  The caller builds x with exactly that length.
    number_of_data = len(x)
    pu = np.multiply(_pileup(), pulse_helper.get_jitter_pulse())
    if pu_indexes[i] < 4:
        # Left border: start the window inside the array.
        # NOTE(review): the lower bound (pu_indexes[i] - 2) is asymmetric with
        # the -4 used in the other branches and can go negative for
        # pu_indexes[i] < 2 (Python would then index from the array's end) —
        # confirm the intended window against the pulse shape.
        for j in range(pu_indexes[i] - 2, 3):
            x[pu_indexes[i] + j] = x[pu_indexes[i] + j] + pu[j + 4]

    elif pu_indexes[i] > (number_of_data - 3):
        # Right border: stop the window at the end of the array.
        for j in range(-4, number_of_data - pu_indexes[i]):
            x[pu_indexes[i] + j] = x[pu_indexes[i] + j] + pu[j + 4]
    else:
        # Interior: full 7-sample window, offsets -4..2 -> pu[0..6].
        for j in range(-4, 3):
            x[pu_indexes[i] + j] = x[pu_indexes[i] + j] + pu[j + 4]
    return x
# Example #3
# 0
def pu_generator(number_of_events, signal_probabilities, pedestal, is_noise=False):
    """Generate simulated TileCal pile-up data for each signal probability.

    For every probability a noise stream is built, pile-up pulses are applied
    at random positions, the stream is reshaped into events, and both the
    noise-only data and the noise+signal data (with exponential amplitudes)
    are saved.

    Args:
        number_of_events: number of events to generate per probability.
        signal_probabilities: iterable of pile-up probabilities (fractions).
        pedestal: baseline level used to build the base noise data.
        is_noise: NOTE(review) — unused in this version; confirm whether it
            should gate the signal-mixing step as in the TILECAL variant.
    """
    number_of_data = TILECAL_NUMBER_OF_CHANNELS * number_of_events
    base_folder = 'results/simulated/pileup_data'
    signal_mean = 300  # Exponential signal mean (constant across levels)

    # Iterate the probabilities directly; the original indexed with `level`
    # and ended the loop with a dead `level += 1` (overwritten by the `for`).
    for signal_probability in signal_probabilities:
        signal_probability_percentage = signal_probability * 100

        print(f'PU Generator - Processing signal probability:  {signal_probability_percentage}%\n')

        x = _base_data(number_of_data, pedestal)
        pu_indexes = _pileup_indexes(signal_probability, number_of_data)

        if signal_probability > 0:
            for i in range(int(signal_probability * number_of_data)):
                x = _apply_pileup_indexes_when_tilecal(i, pu_indexes, x)

        # Formatting data: one row per event, one column per channel sample.
        data = np.reshape(x, (TILECAL_NUMBER_OF_CHANNELS, number_of_events))
        data = np.transpose(data)

        # Stores Noise Data
        folder_name = f'{base_folder}/prob_{signal_probability_percentage}'
        base_file_name = f'noise_prob_{signal_probability_percentage}'
        file_helper.save_file('tile_' + base_file_name, folder_name, data)

        # Stores mixed signal and amplitude (same folder as the noise data).
        base_file_name = f'signal_prob_{signal_probability_percentage}'
        A = np.zeros(number_of_events)  # Amplitude
        for i in range(number_of_events):
            A[i] = np.random.exponential(signal_mean)  # Simulating true Amplitude
            data[i, :] = data[i, :] + np.multiply(A[i], pulse_helper.get_jitter_pulse())

        file_helper.save_file('tile_' + base_file_name, folder_name, data)
        file_helper.save_file('tile_A_' + base_file_name, folder_name, A)
                               [
                                   -0.0046605, 1.2195139, 0.0973187, 1.4662556,
                                   -1.0468390, 0.7239156, 0.5695964
                               ],
                               [
                                   0.7419353, -0.1169670, 0.8664425, 0.4642078,
                                   0.2907896, -1.3489066, 0.3598890
                               ]])

    amplitude = np.zeros(qtd_for_testing)
    signal_testing = np.zeros((qtd_for_testing, dimension))

    for i in range(0, qtd_for_testing):
        amplitude[i] = TEN_BITS_ADC_VALUE * np.random.random(1)
        signal_testing[i, :] = pedestal + np.random.randn(1, dimension) + \
            np.multiply(amplitude[i], pulse_helper.get_jitter_pulse())

    amplitude = [
        775.902, 10.351, 771.456, 1019.714, 512.243, 396.846, 65.751, 68.398,
        439.107, 913.696
    ]

    signal_testing = np.matrix(
        [[
            0.236776, 11.798491, 352.442790, 777.724908, 436.837438,
            115.284537, 31.993758
        ],
         [
             -0.868811, -0.044646, 3.415924, 9.414231, 6.257210, 2.728463,
             -0.782330
         ],
def mf_calculation(number_of_data, pedestal, probs, training_percentage=50):
    """Matched-filter amplitude estimation over simulated pile-up data.

    For each probability in ``probs``: loads the stored amplitudes and mixed
    signals, whitens the data with the training-noise covariance, projects
    onto PCA components fit on synthetic pure pulses, applies the matched
    filter (deterministic ID term + stochastic IR term) and saves the
    estimated amplitudes plus the error versus the true amplitudes.

    Args:
        number_of_data: total number of events available per probability.
        pedestal: noise baseline; subtracted before whitening.
        probs: signal probabilities as fractions (e.g. ``[0.1, 0.5]``).
        training_percentage: percentage of events used for training.
    """
    TEN_BITS_ADC_VALUE = 1023
    # base_folder = 'results/simulated/pileup_data'  # Normal data
    base_folder = 'results/simulated/base_data'  # Pileup data
    dimension = 7
    qtd_for_training = int(number_of_data / ((100 / training_percentage)))
    qtd_for_testing = number_of_data - qtd_for_training

    # For printing and files, probability must be in %.
    probs = np.array(probs) * 100
    for prob in probs:
        print(f'MF - Processing signal probability:  {prob}%\n')
        # Normal data
        # amplitude_file_name = f'{base_folder}/{qtd_for_training}_events/amplitude.txt'
        # signal_testing_file_name = f'{base_folder}/{qtd_for_training}_events/signal_testing.txt'

        # Pileup data
        amplitude_file_name = f'{base_folder}/prob_{prob}/{qtd_for_training}_events/tile_A_signal_prob_{prob}.txt'
        signal_testing_file_name = f'{base_folder}/prob_{prob}/{qtd_for_training}_events/tile_signal_prob_{prob}.txt'
        result_prefix = f'pileup_prob_{prob}_'

        amplitude = pd.read_csv(amplitude_file_name, sep=" ", header=None)
        signal_testing = pd.read_csv(signal_testing_file_name, sep=" ", header=None)

        noise = pd.DataFrame(pedestal + np.random.randn(number_of_data, dimension))
        # Getting data from boundaries
        noise_training = noise[:qtd_for_training][:]
        # Fix: the testing partition must start where training ends.  The
        # original sliced from qtd_for_testing, which only coincides with
        # qtd_for_training at training_percentage == 50 (overlap/gap otherwise).
        noise_testing = noise[qtd_for_training:][:]

        # Branqueamento (whitening): W = D^(-1/2) V^T from the eigen
        # decomposition of the training-noise covariance.
        noise_train_cov = noise_training.cov()

        [D, V] = LA.eigh(noise_train_cov)

        # Apply before diag to avoid inf value due to the 0 negative exponentiation
        D = D**(-.5)

        # eig returns D as an array, we need to transform it into a diagonal matrix
        D = pd.DataFrame(np.diag(D))
        V = pd.DataFrame(V)

        W = pd.DataFrame(D.dot(V.transpose()))

        W_t = W.transpose()

        # PCA Part: components are fit on whitened synthetic pure pulses.
        pure_signal = np.zeros((qtd_for_testing, dimension))
        for i in range(0, qtd_for_testing):
            pure_signal[i, :] = TEN_BITS_ADC_VALUE * np.random.randn(1) * pulse_helper.get_jitter_pulse()

        pure_signal = pd.DataFrame(pure_signal)

        n_pca_components = dimension
        pca = PCA(n_components=n_pca_components)
        coeff = pd.DataFrame(pca.fit(pure_signal.dot(W_t)).components_)
        coeff_t = coeff.transpose()
        Y = pca.explained_variance_.T

        # stochastic filter params
        # ddof=1 to use Sampled data variance -> N-1
        variance = noise_training[:][3].var()
        reference_pulse = pd.DataFrame([0.0000, 0.0172, 0.4524, 1.0000, 0.5633, 0.1493, 0.0424])
        bleached_reference_pulse = reference_pulse.T.dot(W_t)

        optimal_reference_pulse = bleached_reference_pulse.dot(coeff_t[:][:n_pca_components])

        optimal_noise = ((noise_testing - pedestal).dot(W_t)).dot(coeff_t[:][:n_pca_components])
        optimal_signal = ((signal_testing - pedestal).dot(W_t)).dot(coeff_t[:][:n_pca_components])

        No = variance * 2
        h1 = np.zeros((dimension, dimension))
        h2 = np.zeros((dimension, dimension))

        # h1/h2 accumulate rank-1 outer products of the PCA components,
        # weighted by the component variances Y.
        for i in range(0, n_pca_components):
            h1 = h1 + (Y[i] / (Y[i] + variance)) * (coeff_t[:][i].values.reshape(1, dimension) * coeff_t[:][i].values.reshape(dimension, 1))
            h2 = h2 + (1.0 / (Y[i] + variance)) * (coeff_t[:][i].values.reshape(1, dimension) * coeff_t[:][i].values.reshape(dimension, 1))

        IR_noise = np.zeros((len(noise_testing), 1))
        IR_signal = np.zeros((len(signal_testing), 1))

        # IR: quadratic (stochastic) term of the matched filter.
        for ev in range(0, len(noise_testing)):
            IR_noise[ev] = (1.0 / No) * ((
                        (optimal_noise.values[ev][:].dot((coeff[:][:n_pca_components])))
                        .dot(h1).dot(
                            (optimal_noise.values[ev][:].dot(coeff[:][:n_pca_components]))
                        ).transpose()
                    ).transpose())

        for ev in range(0, len(signal_testing)):
            IR_signal[ev] = (1.0 / No) * ((
                        (optimal_signal.values[ev][:].dot((coeff[:][:n_pca_components])))
                        .dot(h1).dot(
                            (optimal_signal.values[ev][:].dot(coeff[:][:n_pca_components]))
                        ).transpose()
                    ).transpose())

        # ID: deterministic term (projection against the reference pulse).
        ID_noise = np.zeros((len(noise_testing), 1))
        ID_signal = np.zeros((len(signal_testing), 1))
        for ev in range(0, len(noise_testing)):
            ID_noise[ev] = ((optimal_reference_pulse.dot(coeff[:][:n_pca_components]))
                            .dot(h2).dot(
                                (optimal_noise.values[ev][:].dot(coeff[:][:n_pca_components]))
                                .transpose()
                                )
                            )

        for ev in range(0, len(signal_testing)):
            ID_signal[ev] = ((optimal_reference_pulse.dot(coeff[:][:n_pca_components]))
                             .dot(h2).dot(
                                (optimal_signal.values[ev][:].dot(coeff[:][:n_pca_components]))
                                .transpose()
                                )
                             )
        # Matched Filter estimatives
        estimated_noise = ID_noise + IR_noise
        estimated_signal = ID_signal + IR_signal
        print('Almost...\n')

        # TODO: These variables were not being used - kept commented out for
        # reference (dead computation removed).
        # Amplitude estimative
        # b1 = coeff[:][:n_pca_components].transpose().dot(coeff_t[:][:n_pca_components])
        # # DAQUI PARA BAIXO B2 E B3 NAO BATEM DEVIDO A ALGUMAS LINHAS DE COEFF
        # b2 = (1.0 / No) * (
        #     coeff_t[:][:n_pca_components].transpose().dot(h1)
        #     .dot(coeff[:][:n_pca_components])
        # )
        # b3 = (optimal_reference_pulse.dot(coeff[:][:n_pca_components])).dot(h2).dot(coeff[:][:n_pca_components])

        amp_noise = np.zeros((len(noise_testing), 1))
        amp_signal = np.zeros((len(signal_testing), 1))

        # Quadratic coefficients for the amplitude estimate:
        # amp = (-b + sqrt(b^2 + 4*a*estimated)) / (2*a)
        a = (1.0 / No) * (
                    (optimal_reference_pulse.dot(coeff[:][:n_pca_components])).dot(h1)
                    .dot((optimal_reference_pulse.dot(coeff[:][:n_pca_components])).transpose())
                )
        b = (optimal_reference_pulse.dot(coeff[:][:n_pca_components])).dot(h2).dot((optimal_reference_pulse.dot(coeff[:][:n_pca_components])).transpose())

        cs = 0  # count of clipped (negative-discriminant) signal events
        cr = 0  # count of clipped noise events
        for i in range(0, len(signal_testing)):
            ra = b * b + 4 * a * estimated_signal[i]
            if ra.values < 0:
                ra = 0
                cs = cs + 1
            # signal amplitute using MF filter output
            amp_signal[i] = (-b + np.sqrt(ra)) / (2 * a)

        for i in range(0, len(noise_testing)):
            ra = b * b + 4 * a * estimated_noise[i]
            if ra.values < 0:
                ra = 0
                cr = cr + 1
            amp_noise[i] = (-b + np.sqrt(ra)) / (2 * a)

        amp_signal = pd.DataFrame(amp_signal)
        amp_error = amp_signal.values - amplitude.values

        file_helper.save_file(result_prefix + 'amp_signal', 'matched_filter', amp_signal)
        file_helper.save_file(result_prefix + 'amp_noise', 'matched_filter', amp_noise)
        file_helper.save_file(result_prefix + 'amp_error', 'matched_filter', amp_error)

        print('Finished!')
def mf_calculation(amplitude_mean,
                   noise_mean,
                   tile_partition,
                   training_percentage=50,
                   sufix=''):
    """Extended matched-filter (E-MF) amplitude estimation on hybrid data.

    Loads true amplitudes, mixed signals and real noise for the given TileCal
    partition, whitens with the training-noise covariance, projects onto PCA
    components fit on synthetic pure pulses, runs the matched filter
    (deterministic ID term + stochastic IR term) and saves the estimated
    amplitudes plus the error versus the true amplitudes.

    NOTE(review): this file also defines another ``mf_calculation`` — if both
    really live in the same module, the later definition shadows the earlier
    one at import time; confirm they belong to separate modules.

    Args:
        amplitude_mean: amplitude mean used when the hybrid signals were
            generated (selects the results folder).
        noise_mean: noise mean ("mu") selecting the input data files.
        tile_partition: partition name used to locate the real-noise file.
        training_percentage: percentage of events used for training.
        sufix: optional suffix (sic) appended to input/output file names.
    """
    # TEN_BITS_ADC_VALUE = 1023
    ADC_VALUE = 5
    DIMENSION = 7

    print(
        f'E-MF - Processing signal for amp{amplitude_mean} and mu {noise_mean}{sufix}\n'
    )

    # Real data
    base_folder = f'results/hybrid/amplitude_mean{amplitude_mean}'
    data_folder = f'{base_folder}/base_data/mu{noise_mean}'
    amplitude_file_name = f'{data_folder}/tile_A{sufix}.txt'
    signal_file_name = f'{data_folder}/tile_signal{sufix}.txt'
    real_noise_file_name = f'data/{tile_partition}/{tile_partition}mu{noise_mean}_no_ped{sufix}.txt'

    # Only columns 3..9 carry the 7 samples used here (earlier columns are
    # presumably metadata — confirm against the file layout).
    real_noises = pd.read_csv(real_noise_file_name,
                              sep=" ",
                              usecols=(3, 4, 5, 6, 7, 8, 9),
                              header=None)

    number_of_data = len(real_noises)
    qtd_for_training = int(number_of_data / ((100 / training_percentage)))
    qtd_for_testing = number_of_data - qtd_for_training

    # Getting data from boundaries
    amplitude = pd.read_csv(amplitude_file_name, sep=" ",
                            header=None)[:qtd_for_testing]
    signal_testing = pd.read_csv(signal_file_name, sep=" ",
                                 header=None)[:qtd_for_testing][:]
    noise_testing = real_noises[:qtd_for_testing][:]  # test with 1st % part
    noise_training = real_noises[qtd_for_training:][:]  # train with 2nd % part

    print(f'Training with {len(noise_training)} events')
    print(f'Testing with {len(noise_testing)} events')
    print(f'Length of amplitudes {len(amplitude)}')
    print(f'Length of signals {len(signal_testing)}\n')
    # Branqueamento (whitening): W = D^(-1/2) V^T from the eigen
    # decomposition of the training-noise covariance.
    noise_train_cov = noise_training.cov()

    [D, V] = LA.eigh(noise_train_cov)

    # Apply before diag to avoid inf value due to the 0 negative exponentiation
    D = D**(-.5)

    # eig returns D as an array, we need to transform it into a diagonal matrix
    D = pd.DataFrame(np.diag(D))
    V = pd.DataFrame(V)

    W = pd.DataFrame(D.dot(V.transpose()))

    W_t = W.transpose()

    # PCA Part: components are fit on whitened synthetic pure pulses.
    pure_signal = np.zeros((qtd_for_testing, DIMENSION))
    for i in range(0, qtd_for_testing):
        jitter_pulse, _ = pulse_helper.get_jitter_pulse()
        pure_signal[i, :] = ADC_VALUE * np.random.rand(1) * jitter_pulse

    pure_signal = pd.DataFrame(pure_signal)

    n_pca_components = DIMENSION
    pca = PCA(n_components=n_pca_components)
    coeff = pd.DataFrame(pca.fit(pure_signal.dot(W_t)).components_)
    coeff_t = coeff.transpose()
    # NOTE(review): this uses explained_variance_ratio_ (unitless fractions)
    # while the simulated-data implementation uses explained_variance_
    # (absolute variances); Y is later summed with `variance`, so the two
    # choices are not equivalent — confirm which one is intended.
    Y = pca.explained_variance_ratio_.T

    # stochastic filter params
    # ddof=1 to use Sampled data variance -> N-1
    variance = noise_training[:][3].var()
    reference_pulse = pd.DataFrame(
        [0.0000, 0.0172, 0.4524, 1.0000, 0.5633, 0.1493, 0.0424])
    bleached_reference_pulse = reference_pulse.T.dot(W_t)

    optimal_reference_pulse = bleached_reference_pulse.dot(
        coeff_t[:][:n_pca_components])

    # Project whitened noise/signal onto the PCA basis (no pedestal
    # subtraction here — the noise file is already "_no_ped").
    optimal_noise = pd.DataFrame(
        (np.dot(noise_testing, W_t)).dot(coeff_t[:][:n_pca_components]))
    optimal_signal = (signal_testing.dot(W_t)).dot(
        coeff_t[:][:n_pca_components])

    No = variance * 2
    h1 = np.zeros((DIMENSION, DIMENSION))
    h2 = np.zeros((DIMENSION, DIMENSION))

    # h1/h2 accumulate rank-1 outer products of the PCA components,
    # weighted by the component variances Y.
    for i in range(0, n_pca_components):
        h1 = h1 + (Y[i] / (Y[i] + variance)) * (coeff_t[:][i].values.reshape(
            1, DIMENSION) * coeff_t[:][i].values.reshape(DIMENSION, 1))
        h2 = h2 + (1.0 / (Y[i] + variance)) * (coeff_t[:][i].values.reshape(
            1, DIMENSION) * coeff_t[:][i].values.reshape(DIMENSION, 1))

    IR_noise = np.zeros((len(noise_testing), 1))
    IR_signal = np.zeros((len(signal_testing), 1))

    # IR: quadratic (stochastic) term of the matched filter.
    for ev in range(0, len(noise_testing)):
        IR_noise[ev] = (1.0 / No) * (((optimal_noise.values[ev][:].dot(
            (coeff[:][:n_pca_components]))).dot(h1).dot(
                (optimal_noise.values[ev][:].dot(
                    coeff[:][:n_pca_components]))).transpose()).transpose())

    for ev in range(0, len(signal_testing)):
        IR_signal[ev] = (1.0 / No) * (((optimal_signal.values[ev][:].dot(
            (coeff[:][:n_pca_components]))).dot(h1).dot(
                (optimal_signal.values[ev][:].dot(
                    coeff[:][:n_pca_components]))).transpose()).transpose())

    # ID: deterministic term (projection against the reference pulse).
    ID_noise = np.zeros((len(noise_testing), 1))
    ID_signal = np.zeros((len(signal_testing), 1))
    for ev in range(0, len(noise_testing)):
        ID_noise[ev] = ((optimal_reference_pulse.dot(
            coeff[:][:n_pca_components])).dot(h2).dot(
                (optimal_noise.values[ev][:].dot(
                    coeff[:][:n_pca_components])).transpose()))

    for ev in range(0, len(signal_testing)):
        ID_signal[ev] = ((optimal_reference_pulse.dot(
            coeff[:][:n_pca_components])).dot(h2).dot(
                (optimal_signal.values[ev][:].dot(
                    coeff[:][:n_pca_components])).transpose()))
    # Matched Filter estimatives
    estimated_noise = ID_noise + IR_noise
    estimated_signal = ID_signal + IR_signal
    print('Almost...\n')

    # TODO: These variables are not being used - Investigate
    # Amplitude estimative
    # b1 = coeff[:][:n_pca_components].transpose().dot(coeff_t[:][:n_pca_components])
    # # DAQUI PARA BAIXO B2 E B3 NAO BATEM DEVIDO A ALGUMAS LINHAS DE COEFF
    # b2 = (1.0 / No) * (
    #     coeff_t[:][:n_pca_components].transpose().dot(h1)
    #     .dot(coeff[:][:n_pca_components])
    # )
    # b3 = (optimal_reference_pulse.dot(coeff[:][:n_pca_components])).dot(h2).dot(coeff[:][:n_pca_components])

    amp_noise = np.zeros((len(noise_testing), 1))
    amp_signal = np.zeros((len(signal_testing), 1))

    # Quadratic coefficients for the amplitude estimate:
    # amp = (-b + sqrt(b^2 + 4*a*estimated)) / (2*a)
    a = (1.0 / No) * (
        (optimal_reference_pulse.dot(coeff[:][:n_pca_components])).dot(h1).dot(
            (optimal_reference_pulse.dot(
                coeff[:][:n_pca_components])).transpose()))
    b = (optimal_reference_pulse.dot(coeff[:][:n_pca_components])).dot(h2).dot(
        (optimal_reference_pulse.dot(coeff[:][:n_pca_components])).transpose())

    cs = 0  # count of clipped (negative-discriminant) signal events
    cr = 0  # count of clipped noise events
    for i in range(0, len(signal_testing)):
        ra = b * b + 4 * a * estimated_signal[i]
        if ra.values < 0:
            ra = 0
            cs = cs + 1
        # signal amplitute using MF filter output
        amp_signal[i] = (-b + np.sqrt(ra)) / (2 * a)

    for i in range(0, len(noise_testing)):
        ra = b * b + 4 * a * estimated_noise[i]
        if ra.values < 0:
            ra = 0
            cr = cr + 1
        amp_noise[i] = (-b + np.sqrt(ra)) / (2 * a)

    amp_signal = pd.DataFrame(amp_signal)
    amp_error = amp_signal.values - amplitude.values

    folder_name = f'{base_folder}/E_MF/mu{noise_mean}'
    file_helper.save_file_in(f'mf_amp_signal{sufix}', folder_name, amp_signal)
    file_helper.save_file_in(f'mf_amp_noise{sufix}', folder_name, amp_noise)
    file_helper.save_file_in(f'mf_amp_error{sufix}', folder_name, amp_error)

    print('Finished!')
        if is_noise:
            if TILECAL:
                file_helper.save_tile_noise(signal_probability, data, dataMean,
                                            dataStd)
            else:
                file_helper.save_noise(signal_probability, data, dataMean,
                                       dataStd)
        else:
            A = np.zeros(number_of_events)  # Amplitude
            for i in range(0, number_of_events):
                A[i] = np.random.exponential(signal_mean)

                if TILECAL:
                    data[i, :] = data[i, :] + np.multiply(
                        A[i], pulse_helper.get_jitter_pulse())
                else:
                    # NOT TESTED!!!
                    print('NOT TESTED!!!')
                    data[i, :] = data[i, :] + np.multiply(
                        A[i], pulse_helper.get_pulse_paper_COF())

            if TILECAL:
                file_helper.save_tile_data(signal_probability, data, dataMean,
                                           dataStd, A)
            else:
                file_helper.save_data(signal_probability, data, dataMean,
                                      dataStd, A)

    # from pudb import set_trace; set_trace()