Code example #1
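The snippets below are excerpts from a larger code base and do not repeat their imports. A plausible shared preamble (an assumption, not part of the original listing) would be:

import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

Project-local helpers such as generator_nsb, generator_flash, correlate_pixels, time_resolution_flash, charge_flash and the loss_* custom losses are assumed to be defined elsewhere in the same project.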
def continue_train_cnn(run_name):
    # toy parameters
    n_sample_init = 20
    pe_rate_mhz = (0, 200)
    bin_size_ns = 0.5
    sampling_rate_mhz = 250
    amplitude_gain = 5.
    noise_lsb = (0.5, 1.5)  # 1.05
    sigma_smooth_pe_ns = 1.
    baseline = 0
    relative_gain_std = 0.1

    # training parameters
    steps_per_epoch = 100  # one step feeds one batch of events
    batch_size = 400  # number of waveforms per batch
    epochs = 5
    # model
    model = tf.keras.models.load_model('./Model/' + run_name + '.h5',
                                       custom_objects={
                                           'loss_all': loss_all,
                                           'loss_cumulative': loss_cumulative,
                                           'loss_chi2': loss_chi2,
                                           'loss_continuity': loss_continuity
                                       })

    n_sample = model.input_shape[1]
    tb_cb = tf.keras.callbacks.TensorBoard(log_dir='./Graph/' + run_name + 'r',
                                           batch_size=batch_size)
    # note: the checkpoint overwrites the original './Model/<run_name>.h5';
    # the retrained model is also saved under '<run_name>r.h5' at the end
    cp_callback = tf.keras.callbacks.ModelCheckpoint('./Model/' + run_name +
                                                     '.h5',
                                                     verbose=1)
    # data generation for training
    generator = generator_nsb(n_event=1,
                              batch_size=batch_size,
                              n_sample=n_sample + n_sample_init,
                              n_sample_init=n_sample_init,
                              pe_rate_mhz=pe_rate_mhz,
                              bin_size_ns=bin_size_ns,
                              sampling_rate_mhz=sampling_rate_mhz,
                              amplitude_gain=amplitude_gain,
                              noise_lsb=noise_lsb,
                              sigma_smooth_pe_ns=sigma_smooth_pe_ns,
                              baseline=baseline,
                              relative_gain_std=relative_gain_std)
    # training
    model.fit_generator(
        generator=generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        callbacks=[tb_cb, cp_callback],
    )
    model.save('./Model/' + run_name + 'r.h5')
    print('done training ' + run_name + 'r')
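A minimal usage sketch, assuming a checkpoint './Model/<run_name>.h5' trained with the same custom losses already exists ('my_cnn' is a hypothetical name):

run_name = 'my_cnn'  # hypothetical: an existing model in ./Model/
continue_train_cnn(run_name)
# the retrained model './Model/my_cnnr.h5' can then be inspected, e.g. with
# toy_nsb_prediction from code example #3: toy_nsb_prediction(run_name + 'r')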
Code example #2
def generator_coherent_pixels(n_event=None,
                              batch_size=1,
                              n_sample=90,
                              n_sample_init=20,
                              coherant_rate_mhz=10,
                              uncoherant_rate_mhz=90,
                              coherant_noise_lsb=0.1,
                              uncoherant_noise_lsb=0.9,
                              n_pixel=2,
                              bin_size_ns=0.5,
                              sampling_rate_mhz=250.,
                              amplitude_gain=5.0):
    generator_coher = generator_nsb(n_event=n_event,
                                    batch_size=batch_size,
                                    n_sample=n_sample,
                                    n_sample_init=n_sample_init,
                                    pe_rate_mhz=coherant_rate_mhz,
                                    bin_size_ns=bin_size_ns,
                                    sampling_rate_mhz=sampling_rate_mhz,
                                    amplitude_gain=amplitude_gain,
                                    noise_lsb=coherant_noise_lsb)
    generators_uncoher = []
    for pixel in range(n_pixel):
        generator_uncoher = generator_nsb(n_event=n_event,
                                          batch_size=batch_size,
                                          n_sample=n_sample,
                                          n_sample_init=n_sample_init,
                                          pe_rate_mhz=uncoherant_rate_mhz,
                                          bin_size_ns=bin_size_ns,
                                          sampling_rate_mhz=sampling_rate_mhz,
                                          amplitude_gain=amplitude_gain,
                                          noise_lsb=uncoherant_noise_lsb)
        generators_uncoher.append(generator_uncoher)
    generator_pixels = correlate_pixels(generator_coher,
                                        *generators_uncoher,
                                        delay_sample=0)
    return generator_pixels
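A hedged usage sketch; the exact structure of the yielded batches depends on correlate_pixels, which is defined elsewhere in the project:

gen = generator_coherent_pixels(n_event=1, batch_size=4, n_pixel=2)
batch = next(gen)  # one batch of waveforms with coherent + per-pixel noise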
Code example #3
def toy_nsb_prediction(model_name,
                       pe_rate_mhz=30,
                       sampling_rate_mhz=250,
                       batch_size=400,
                       noise_lsb=1.05,
                       bin_size_ns=0.5,
                       n_sample=90,
                       sigma_smooth_pe_ns=0.,
                       baseline=0.,
                       relative_gain_std=0.1,
                       shift_proba_bin=0):
    # toy parameters
    n_sample_init = 50
    amplitude_gain = 5.

    generator = generator_nsb(n_event=None,
                              batch_size=batch_size,
                              n_sample=n_sample + n_sample_init,
                              n_sample_init=n_sample_init,
                              pe_rate_mhz=pe_rate_mhz,
                              bin_size_ns=bin_size_ns,
                              sampling_rate_mhz=sampling_rate_mhz,
                              amplitude_gain=amplitude_gain,
                              noise_lsb=noise_lsb,
                              sigma_smooth_pe_ns=sigma_smooth_pe_ns,
                              baseline=baseline,
                              relative_gain_std=relative_gain_std,
                              shift_proba_bin=shift_proba_bin)
    waveform, pe = next(generator)

    model = tf.keras.models.load_model('./Model/' + model_name + '.h5',
                                       custom_objects={
                                           'loss_all': loss_all,
                                           'loss_cumulative': loss_cumulative,
                                           'loss_chi2': loss_chi2,
                                           'loss_continuity': loss_continuity
                                       })
    model.compile(
        optimizer=tf.keras.optimizers.Adam(1e-3),
        loss=loss_all,
        metrics=[loss_cumulative, loss_chi2, loss_continuity]
    )
    print('model ' + model_name + ' is loaded')
    predict_pe = model.predict(waveform)
    loss = model.evaluate(x=waveform, y=pe)
    print('loss=', loss)
    return waveform, pe, predict_pe
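A quick-look sketch built on the returned arrays ('my_model' is a hypothetical model name; waveform, pe and predict_pe are assumed to be 2-D arrays of shape (batch, bins)):

waveform, pe, predict_pe = toy_nsb_prediction('my_model')
plt.plot(pe[0], label='pe truth')  # first waveform of the batch
plt.plot(predict_pe[0], label='pe predicted')
plt.legend()
plt.show()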
Code example #4
def generator_rnn(rnn_input_size=30, rnn_output_size=8, **kwargs):
    gen_nsb = generator_nsb(**kwargs)
    for waveform_batch, proba_batch in gen_nsb:
        batch_size, num_sample = waveform_batch.shape
        num_input_per_waveform = num_sample - rnn_input_size + 1
        waveform_batch_rnn = np.zeros(
            [batch_size, num_input_per_waveform, rnn_input_size])
        proba_batch_rnn = np.zeros(
            [batch_size, num_input_per_waveform, rnn_output_size])
        for iteration in range(num_input_per_waveform):
            indexes_wf = range(iteration, iteration + rnn_input_size)
            waveform_batch_rnn[:, iteration, :] = waveform_batch[:, indexes_wf]
            indexes_pb = range(iteration * rnn_output_size,
                               (iteration + 1) * rnn_output_size)
            proba_batch_rnn[:, iteration, :] = proba_batch[:, indexes_pb]
        yield waveform_batch_rnn, proba_batch_rnn
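A usage sketch: all extra keyword arguments are forwarded to generator_nsb, so the number of RNN windows depends on how generator_nsb treats n_sample and n_sample_init:

gen = generator_rnn(rnn_input_size=30, rnn_output_size=8,
                    n_event=1, batch_size=4, n_sample=90, n_sample_init=20)
x, y = next(gen)
print(x.shape)  # (batch, n_windows, 30): one 30-sample slice per RNN step
print(y.shape)  # (batch, n_windows, 8): 8 probability bins per step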
Code example #5
def plot_resolution_flash(model_name,
                          filename=None,
                          n_pe_flashes=(1, 2, 5, 10, 20, 50, 100, 200, 500,
                                        1000),
                          noise_lsb=1,
                          nsb_rates_mhz=(40, ),
                          batch_size=400,
                          time_resolution_windows_ns=(16, ),
                          charge_resolution_windows_ns=(28, ),
                          bin_flash=80,
                          bin_size_ns=0.5,
                          shift_proba_bin=0):
    from cycler import cycler
    from matplotlib import cm

    jet = cm.get_cmap('jet')
    title = 'model ' + model_name[:20] + ', ' + str(batch_size) + \
            ' flashes per light level, noise ' + str(noise_lsb) + ' LSB'
    n_nsb_rate = len(nsb_rates_mhz)
    n_flash_pe = len(n_pe_flashes)
    n_windows_time_resol = len(time_resolution_windows_ns)
    n_windows_charge_resol = len(charge_resolution_windows_ns)
    if n_windows_time_resol > 4 or n_windows_charge_resol > 4:
        raise ValueError('Only up to 4 windows can be plotted')
    time_bias = np.zeros([n_nsb_rate, n_flash_pe, n_windows_time_resol])
    time_resolution = np.zeros_like(time_bias)
    charge_bias = np.zeros([n_nsb_rate, n_flash_pe, n_windows_charge_resol])
    charge_resolution = np.zeros_like(charge_bias)
    model = tf.keras.models.load_model('./Model/' + model_name + '.h5',
                                       custom_objects={
                                           'loss_all': loss_all,
                                           'loss_cumulative': loss_cumulative,
                                           'loss_chi2': loss_chi2,
                                           'loss_continuity': loss_continuity
                                       })
    for i, n_pe_flash in enumerate(n_pe_flashes):
        print(n_pe_flash, 'pe flashes')
        gen_flash = generator_flash(n_event=1,
                                    batch_size=batch_size,
                                    n_sample=4320,
                                    bin_flash=bin_flash,
                                    n_pe_flash=(n_pe_flash, n_pe_flash),
                                    bin_size_ns=bin_size_ns,
                                    sampling_rate_mhz=250,
                                    amplitude_gain=5.,
                                    noise_lsb=noise_lsb,
                                    shift_proba_bin=shift_proba_bin)
        waveform_flash, pe_truth_flash = next(gen_flash)
        for r, nsb_rate_mhz in enumerate(nsb_rates_mhz):
            gen_nsb = generator_nsb(n_event=1,
                                    batch_size=batch_size,
                                    n_sample=4340,
                                    n_sample_init=20,
                                    pe_rate_mhz=nsb_rate_mhz,
                                    bin_size_ns=bin_size_ns,
                                    sampling_rate_mhz=250,
                                    amplitude_gain=5.,
                                    noise_lsb=0,
                                    sigma_smooth_pe_ns=0.,
                                    shift_proba_bin=shift_proba_bin)
            waveform_nsb, pe_truth_nsb = next(gen_nsb)
            predict_pe = model.predict(waveform_flash + waveform_nsb)
            for t in range(n_windows_time_resol):
                delta_time_ns = time_resolution_windows_ns[t] / 2
                t_bias, t_resol = time_resolution_flash(
                    predict_pe=predict_pe,
                    time_flash=bin_flash * bin_size_ns,
                    bin_size_ns=bin_size_ns,
                    delta_time_ns=delta_time_ns)
                time_bias[r, i, t] = t_bias
                time_resolution[r, i, t] = t_resol
            for c in range(n_windows_charge_resol):
                delta_time_ns = charge_resolution_windows_ns[c] / 2
                charge_pred = charge_flash(
                    predict_pe=predict_pe,
                    time_flash=(bin_flash + shift_proba_bin) * bin_size_ns,
                    bin_size_ns=bin_size_ns,
                    delta_time_ns=delta_time_ns)
                charge_true = charge_flash(
                    predict_pe=pe_truth_flash + pe_truth_nsb,
                    time_flash=(bin_flash + shift_proba_bin) * bin_size_ns,
                    bin_size_ns=bin_size_ns,
                    delta_time_ns=delta_time_ns)
                charge_bias[r, i, c] = np.nanmean(charge_pred - charge_true) /\
                                       np.mean(charge_true)
                charge_resolution[r, i, c] = np.nanstd(
                    charge_pred - charge_true) / np.mean(charge_true)
    fig, axes = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
    axes[0].set_title('time resolution\n' + title)
    lines_rate = []
    legend_rate = []
    lines_window = []
    legend_window = []
    cc = (cycler(color=jet(np.linspace(0, 1, n_nsb_rate))) *
          cycler(linestyle=['-', '--', '-.', ':'][:n_windows_time_resol]))
    axes[0].set_prop_cycle(cc)
    axes[1].set_prop_cycle(cc)
    for r, nsb_rate_mhz in enumerate(nsb_rates_mhz):
        for t in range(n_windows_time_resol):
            l, = axes[0].semilogx(n_pe_flashes, time_bias[r, :, t])
            axes[1].loglog(n_pe_flashes, time_resolution[r, :, t])
            if r == 0:
                label = 'window ' + str(time_resolution_windows_ns[t]) + ' ns'
                lines_window.append(l)
                legend_window.append(label)
            if t == 0:
                label = 'nsb rate ' + str(nsb_rate_mhz) + ' MHz'
                lines_rate.append(l)
                legend_rate.append(label)
    axes[1].set_xlabel('# photo-electrons per flash')
    axes[0].set_ylabel('bias [ns]')
    axes[1].set_ylabel('time resolution [ns]')
    axes[1].set_ylim([1e-3, 10])
    axes[0].legend(lines_rate, legend_rate)
    axes[1].legend(lines_window, legend_window)
    plt.tight_layout()
    if filename is None:
        plt.show()
    else:
        saved = 'plots/time_resolution_' + filename
        plt.savefig(saved)
        print(saved, 'saved')
    plt.close(fig)

    fig, axes = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
    lines_rate = []
    legend_rate = []
    lines_window = []
    legend_window = []
    cc = (cycler(color=jet(np.linspace(0, 1, n_nsb_rate))) *
          cycler(linestyle=['-', '--', '-.', ':'][:n_windows_charge_resol]))
    axes[0].set_prop_cycle(cc)
    axes[1].set_prop_cycle(cc)
    axes[0].set_title('charge resolution\n' + title)
    for r, nsb_rate_mhz in enumerate(nsb_rates_mhz):
        for c in range(n_windows_charge_resol):
            label = 'nsb rate ' + str(nsb_rate_mhz) + ' MHz, window ' + \
                    str(charge_resolution_windows_ns[c]) + ' ns'
            l, = axes[0].semilogx(n_pe_flashes,
                                  charge_bias[r, :, c] * 100,
                                  label=label)
            axes[1].semilogx(n_pe_flashes, charge_resolution[r, :, c] * 100)
            if r == 0:
                label = 'window ' + str(
                    charge_resolution_windows_ns[c]) + ' ns'
                lines_window.append(l)
                legend_window.append(label)
            if c == 0:
                label = 'nsb rate ' + str(nsb_rate_mhz) + ' MHz'
                lines_rate.append(l)
                legend_rate.append(label)
    axes[1].set_xlabel('# photo-electrons per flash')
    axes[0].set_ylabel('bias [%]')
    axes[1].set_ylabel('charge resolution [%]')
    # axes[1].set_ylim([0.05, 5])
    axes[0].legend(lines_rate, legend_rate)
    axes[1].legend(lines_window, legend_window)
    plt.tight_layout()
    if filename is None:
        plt.show()
    else:
        saved = 'plots/charge_resolution_' + filename
        plt.savefig(saved)
        print(saved, 'saved')
    plt.close(fig)
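A hypothetical call ('my_model' is a placeholder name): with filename=None the figures are shown interactively; otherwise they are written to 'plots/time_resolution_<filename>' and 'plots/charge_resolution_<filename>', so the 'plots' directory must exist:

plot_resolution_flash('my_model',
                      filename='my_model.png',
                      nsb_rates_mhz=(40, 100),
                      time_resolution_windows_ns=(8, 16),
                      charge_resolution_windows_ns=(28,))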
Code example #6
    def train(self,
              run_name,
              lr=5e-4,
              n_sample_init=50,
              batch_size=10,
              shift_proba_bin=64,
              sigma_smooth_pe_ns=2.,
              steps_per_epoch=200,
              epochs=100):
        """
        train the CNN.
        :param run_name: name of the model
        :param lr: learning rate
        :param n_sample_init: parameter of the waveform generator used for
        training.
        :param batch_size: number of waveforms per batch
        :param shift_proba_bin: how many bins the photo-electron probabilities
        are shifted
        :param sigma_smooth_pe_ns: the pe truth (integers) is smoothed by a
        Gaussian of the given standard deviation. No smoothing is done if it is
        set to 0.
        :param steps_per_epoch: number of batch processed for each epoch
        :param epochs: number of epoch used in the training.
        """
        # model compilation
        print("compile model...")
        self.model.compile(optimizer=tf.keras.optimizers.Adam(lr,
                                                              amsgrad=True),
                           loss=self.loss_all,
                           metrics=[
                               self.loss_cumulative, self.loss_chi2,
                               self.loss_continuity
                           ])
        print("model compiled, number of parameters:",
              self.model.count_params())

        # data generation for training
        generator = generator_nsb(n_event=None,
                                  batch_size=batch_size,
                                  n_sample=self.n_sample + n_sample_init,
                                  n_sample_init=n_sample_init,
                                  pe_rate_mhz=(5, 400),
                                  bin_size_ns=0.5,
                                  sampling_rate_mhz=250,
                                  amplitude_gain=5.,
                                  noise_lsb=(0.5, 1.5),
                                  sigma_smooth_pe_ns=sigma_smooth_pe_ns,
                                  baseline=0,
                                  relative_gain_std=0.05,
                                  shift_proba_bin=shift_proba_bin,
                                  dtype=np.float64)

        # set up callbacks
        tb_callback = tf.keras.callbacks.TensorBoard(log_dir='./Graph/' +
                                                     run_name,
                                                     batch_size=batch_size)
        cp_callback = tf.keras.callbacks.ModelCheckpoint('./Model/' +
                                                         run_name + '.h5',
                                                         verbose=1)

        # training
        self.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            callbacks=[tb_callback, cp_callback],
        )
Code example #7
def train_cnn():
    # training parameters
    steps_per_epoch = 100  # one step feeds one batch of events
    batch_size = 400  # number of waveforms per batch
    epochs = 10
    lr = 1e-3  # 1e-4

    # toy parameters
    n_sample_init = 20
    pe_rate_mhz = (0, 200)
    bin_size_ns = 0.5
    sampling_rate_mhz = 250
    amplitude_gain = 5.
    noise_lsb = 0  # 1.05
    sigma_smooth_pe_ns = 0.25
    baseline = 0

    # model definition
    n_sample = 90
    n_filer1 = 4
    kernel_size = 10
    n_filer2 = 8
    n_filer3 = 8
    padding = "same"  # same
    model = tf.keras.Sequential([
        tf.keras.layers.Reshape([n_sample, 1],
                                input_shape=[n_sample],
                                name="input_reshape"),
        tf.keras.layers.Conv1D(filters=n_filer1,
                               kernel_size=kernel_size,
                               strides=1,
                               padding=padding,
                               name="conv1",
                               activation="relu"),
        tf.keras.layers.Conv1D(filters=n_filer2,
                               kernel_size=kernel_size,
                               strides=1,
                               padding=padding,
                               name="conv2",
                               activation="relu"),
        tf.keras.layers.Conv1D(filters=n_filer3,
                               kernel_size=kernel_size,
                               strides=1,
                               padding=padding,
                               name="conv3",
                               activation="relu"),
        tf.keras.layers.Reshape([n_sample * n_filer3, 1], name="reshape"),
        tf.keras.layers.ZeroPadding1D(4),
        tf.keras.layers.LocallyConnected1D(1, 9, name="LC"),
        # tf.keras.layers.Dense(units=n_bin, name="dense"),
        tf.keras.layers.Flatten(),
        tf.keras.layers.ReLU(negative_slope=0, threshold=0, trainable=False)
    ])
    # model compilation
    model.compile(
        optimizer=tf.keras.optimizers.Adam(lr),
        loss=loss_all,  # loss_all, loss_chi2
        metrics=[loss_cumulative, loss_chi2, loss_continuity]  # 'accuracy'
    )
    print("number of parameters:", model.count_params())

    # data generation for training
    generator = generator_nsb(n_event=None,
                              batch_size=batch_size,
                              n_sample=n_sample + n_sample_init,
                              n_sample_init=n_sample_init,
                              pe_rate_mhz=pe_rate_mhz,
                              bin_size_ns=bin_size_ns,
                              sampling_rate_mhz=sampling_rate_mhz,
                              amplitude_gain=amplitude_gain,
                              noise_lsb=noise_lsb,
                              sigma_smooth_pe_ns=sigma_smooth_pe_ns,
                              baseline=baseline)

    # training
    run = 0
    run_name = 'conv_filter' + str(n_filer1) + str(n_filer2) + str(n_filer3) + \
               '_kernel' + str(kernel_size) + '_lr' + str(lr)
    # run_name += '_dense'
    run_name += '_LCpos'
    if np.size(pe_rate_mhz) > 1:
        run_name += '_rate' + str(pe_rate_mhz[0]) + '-' + \
                    str(pe_rate_mhz[1])
    else:
        run_name += '_rate' + str(pe_rate_mhz)
    if sigma_smooth_pe_ns > 0:
        run_name += '_smooth' + str(sigma_smooth_pe_ns)
    if np.size(noise_lsb) > 1:
        run_name += '_noise' + str(noise_lsb[0]) + '-' + \
                    str(noise_lsb[1])
    else:
        run_name += '_noise' + str(noise_lsb)
    while os.path.exists('./Graph/' + run_name + '_run' + str(run)):
        run += 1
    tbCallBack = tf.keras.callbacks.TensorBoard(log_dir='./Graph/' + run_name +
                                                '_run' + str(run),
                                                batch_size=batch_size)

    model.fit_generator(
        generator=generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        callbacks=[tbCallBack],
    )
    model.save('./Model/' + run_name + '_run' + str(run) + '.h5')
    print('done training ' + run_name + '_run' + str(run))
Code example #8
def train_cnn(lr=5e-4,
              n_sample_init=50,
              batch_size=10,
              shift_proba_bin=64,
              sigma_smooth_pe_ns=2.):

    initializer = tf.keras.initializers.Orthogonal()
    # model definition
    model = tf.keras.Sequential([
        tf.keras.layers.Reshape((4320, 1, 1), input_shape=(4320, )),
        tf.keras.layers.Conv2D(filters=16,
                               kernel_size=(32, 1),
                               padding="same",
                               kernel_initializer=initializer),
        tf.keras.layers.ReLU(negative_slope=.1, max_value=None),
        tf.keras.layers.UpSampling2D(size=(2, 1)),
        tf.keras.layers.Conv2D(filters=16,
                               kernel_size=(64, 1),
                               padding="same",
                               kernel_initializer=initializer),
        tf.keras.layers.ReLU(negative_slope=.1, max_value=None),
        tf.keras.layers.UpSampling2D(size=(2, 1)),
        tf.keras.layers.Conv2D(filters=8,
                               kernel_size=(128, 1),
                               padding="same",
                               kernel_initializer=initializer),
        tf.keras.layers.ReLU(negative_slope=.1, max_value=None),
        tf.keras.layers.UpSampling2D(size=(2, 1)),
        tf.keras.layers.Conv2D(filters=8,
                               kernel_size=(256, 1),
                               padding="same",
                               kernel_initializer=initializer),
        tf.keras.layers.ReLU(negative_slope=.1, max_value=None),
        tf.keras.layers.Conv2D(filters=4,
                               kernel_size=(128, 1),
                               padding="same",
                               kernel_initializer=initializer),
        tf.keras.layers.ReLU(negative_slope=.1, max_value=None),
        tf.keras.layers.Conv2D(filters=4,
                               kernel_size=(64, 1),
                               padding="same",
                               kernel_initializer=initializer),
        tf.keras.layers.ReLU(negative_slope=.1, max_value=None),
        tf.keras.layers.Conv2D(filters=2,
                               kernel_size=(32, 1),
                               padding="same",
                               kernel_initializer=initializer),
        tf.keras.layers.ReLU(negative_slope=.1, max_value=None),
        tf.keras.layers.Conv2D(filters=2,
                               kernel_size=(8, 1),
                               padding="same",
                               kernel_initializer=initializer),
        tf.keras.layers.ReLU(negative_slope=.1, max_value=None),
        tf.keras.layers.Conv2D(filters=1,
                               kernel_size=(1, 1),
                               padding="same",
                               kernel_initializer=initializer),
        tf.keras.layers.ReLU(negative_slope=.1, max_value=None),
        tf.keras.layers.Conv2D(filters=1,
                               kernel_size=(1, 1),
                               padding="same",
                               kernel_initializer=initializer),
        tf.keras.layers.ReLU(negative_slope=1e-6, threshold=0,
                             trainable=False),
        tf.keras.layers.Flatten(),
    ])
    run_name = 'C32x16_U2_C64x16_U2_C128x8_U2_C256x8_C128x4_C64x4_C32x2_C8x2_C1x1_C1x1_ns0.1_shift' + str(
        shift_proba_bin) + '_all1-50-10lr' + str(lr) + "smooth" + str(
            sigma_smooth_pe_ns) + '_amsgrad'
    n_sample = model.input_shape[1]

    # model compilation
    model.compile(
        optimizer=tf.keras.optimizers.Adam(lr, amsgrad=True),
        loss=loss_all,
        metrics=[loss_cumulative, loss_chi2, loss_continuity]
    )
    print("number of parameters:", model.count_params())

    # data generation for training
    generator = generator_nsb(n_event=None,
                              batch_size=batch_size,
                              n_sample=n_sample + n_sample_init,
                              n_sample_init=n_sample_init,
                              pe_rate_mhz=(5, 400),
                              bin_size_ns=0.5,
                              sampling_rate_mhz=250,
                              amplitude_gain=5.,
                              noise_lsb=(0.5, 1.5),
                              sigma_smooth_pe_ns=sigma_smooth_pe_ns,
                              baseline=0,
                              relative_gain_std=0.05,
                              shift_proba_bin=shift_proba_bin,
                              dtype=np.float64)

    # training
    run = 0
    while os.path.exists('./Graph/' + run_name + '_run' + str(run)):
        run += 1
    run_name += '_run' + str(run)

    tbCallBack = tf.keras.callbacks.TensorBoard(log_dir='./Graph/' + run_name,
                                                batch_size=batch_size)
    cp_callback = tf.keras.callbacks.ModelCheckpoint('./Model/' + run_name +
                                                     '.h5',
                                                     verbose=1)

    steps_per_epoch = 200  # one step feeds one batch of events
    epochs = 100

    try:
        model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            callbacks=[tbCallBack, cp_callback],
        )
    finally:
        # the model is saved by the ModelCheckpoint callback
        print('done training ' + run_name)
    # return outside the finally block, so exceptions raised during training
    # (e.g. KeyboardInterrupt) are not silently swallowed
    return run_name
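A hypothetical end-to-end workflow combining this example with code example #1: train a fresh model (checkpointed by ModelCheckpoint under './Model/<run_name>.h5'), then resume training from that checkpoint:

run_name = train_cnn(lr=5e-4, batch_size=10)
continue_train_cnn(run_name)  # loads './Model/' + run_name + '.h5'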