Code example #1
    def functional_model_advanced_old(self, loss_fn):
        # the first LBN layer performs the Lorentz boost
        inputs = tf.keras.Input(shape=(4, 4))
        input_shape = (4, 4)
        LBN_output_features = ["E", "px", "py", "pz"]
        x = LBNLayer(input_shape,
                     n_particles=4,
                     boost_mode=LBN.PAIRS,
                     features=LBN_output_features)(inputs)
        x = tf.keras.layers.Dense(64, activation="relu")(x)
        boosted = tf.keras.layers.Dense(16)(x)

        model1 = tf.keras.Model(inputs=inputs, outputs=boosted)
        #model1.compile(optimizer='adam', loss=loss_fn, metrics=['mae'])

        #second layer does the cross-product
        LBN_output_features = [
            "E", "px", "py", "pz", "m", "pair_dy", "pair_cos"
        ]  #should have a cross-product feature in here
        x = tf.keras.layers.Reshape((4, 4))(boosted)
        x = LBNLayer((4, 4),
                     n_particles=4,
                     boost_mode=LBN.PAIRS,
                     features=LBN_output_features,
                     name='LBN2')(x)
        x = tf.keras.layers.Dense(64, activation="relu")(x)
        lambdas = tf.keras.layers.Dense(1)(x)

        model2 = tf.keras.Model(inputs=inputs, outputs=lambdas)
        model2.compile(optimizer='adam', loss=loss_fn, metrics=['mae'])
        return model2
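The builder above expects a (batch, 4, 4) array of four-vectors, one (E, px, py, pz) row per particle. A minimal sketch of how such dummy input could be generated and pushed through an equivalent functional model; the make_four_vectors helper, the batch size, and the momentum/mass ranges are illustrative assumptions, not part of the project, and the sketch presumes the standalone lbn package (providing LBN and LBNLayer) plus TensorFlow 2.x.

import numpy as np
import tensorflow as tf
from lbn import LBN, LBNLayer  # assumes the standalone lbn package is installed

def make_four_vectors(n_events, n_particles=4, seed=0):
    # build physically valid (E, px, py, pz) vectors so the boosts stay well defined
    rng = np.random.RandomState(seed)
    p = rng.uniform(-50., 50., size=(n_events, n_particles, 3)).astype(np.float32)
    m = rng.uniform(0.1, 5., size=(n_events, n_particles, 1)).astype(np.float32)
    E = np.sqrt(m**2 + np.sum(p**2, axis=-1, keepdims=True))
    return np.concatenate([E, p], axis=-1)

inputs = tf.keras.Input(shape=(4, 4))
x = LBNLayer((4, 4), n_particles=4, boost_mode=LBN.PAIRS,
             features=["E", "px", "py", "pz"])(inputs)
x = tf.keras.layers.Dense(64, activation="relu")(x)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="mean_squared_error", metrics=["mae"])

vectors = make_four_vectors(8)
print(model(vectors).shape)  # expected: (8, 1)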
Code example #2
File: test.py Project: Nollde/LBN
    def test_keras_layer(self):
        l = LBNLayer(10,
                     boost_mode=LBN.PAIRS,
                     features=self.feature_set,
                     seed=123)
        self.assertIsInstance(l.lbn, LBN)

        # build a custom model
        class Model(tf.keras.models.Model):
            def __init__(self):
                super(Model, self).__init__()

                init = tf.keras.initializers.RandomNormal(mean=0.,
                                                          stddev=0.1,
                                                          seed=123)

                self.lbn = l
                self.dense = tf.keras.layers.Dense(1024,
                                                   activation="elu",
                                                   kernel_initializer=init)
                self.softmax = tf.keras.layers.Dense(2,
                                                     activation="softmax",
                                                     kernel_initializer=init)

            def call(self, *args, **kwargs):
                return self.softmax(self.dense(self.lbn(*args, **kwargs)))

        model = Model()
        output = model(self.vectors_t).numpy()

        self.assertAlmostEqual(output[0, 0], 0 if PY3 else 1, 5)
        self.assertAlmostEqual(output[0, 1], 1 if PY3 else 0, 5)
        self.assertAlmostEqual(output[1, 0], 0 if PY3 else 1, 5)
        self.assertAlmostEqual(output[1, 1], 1 if PY3 else 0, 5)
Code example #3
 def lbn_model(self, loss_fn):
     input_shape = (4, 4)
     LBN_output_features = [
         "E", "px", "py", "pz", "m", "pair_dy", "pair_cos"
     ]
     model = tf.keras.models.Sequential()
     model.add(
         LBNLayer(input_shape,
                  n_particles=4,
                  boost_mode=LBN.PAIRS,
                  features=LBN_output_features))
     model.add(BatchNormalization())
     model.add(
         tf.keras.layers.Dense(300,
                               kernel_initializer='normal',
                               activation='relu'))
     model.add(
         tf.keras.layers.Dense(300,
                               kernel_initializer='normal',
                               activation='relu'))
     model.add(tf.keras.layers.Dense(1))
     if not loss_fn:
         loss_fn = 'mean_squared_error'
     model.compile(optimizer='adam', loss=loss_fn)
     # model.compile(optimizer='adam', loss=custom_mse)
     return model
Code example #4
File: test.py Project: riga/LBN
    def test_keras_saving(self):
        lbnlayer = LBNLayer(self.vectors.shape,
                            n_particles=10,
                            boost_mode=LBN.PAIRS,
                            features=self.feature_set,
                            seed=123)
        self.assertIsInstance(lbnlayer.lbn, LBN)

        # build a custom model
        input_tensor = tf.keras.Input(shape=self.vectors.shape[1:])
        out_tensor = lbnlayer(input_tensor)
        model = tf.keras.Model(input_tensor, out_tensor)

        tmp_model_path = "tmp_model.h5"
        try:
            model.save(tmp_model_path)
        except:
            print("An error occoured during saving")
            raise

        try:
            tf.keras.models.load_model(tmp_model_path,
                                       custom_objects={"LBNLayer": LBNLayer})
        except:
            print("An Exception occoured during loading")
            raise

        self.assertEqual(os.path.isfile(tmp_model_path), True)

        try:
            os.remove(tmp_model_path)
        except OSError:
            pass
Code example #5
File: test.py Project: riga/LBN
    def test_keras_layer(self):
        ext = tf.Variable([[1, 2], [3, 4]], dtype=tf.float32)
        l = LBNLayer(self.vectors_aux_t.shape,
                     n_particles=10,
                     boost_mode=LBN.PAIRS,
                     features=self.feature_set,
                     external_features=ext,
                     seed=123)
        self.assertIsInstance(l.lbn, LBN)

        # build a custom model
        class Model(tf.keras.models.Model):
            def __init__(self):
                super(Model, self).__init__()

                init = tf.keras.initializers.RandomNormal(mean=0.,
                                                          stddev=0.1,
                                                          seed=123)

                self.lbn = l
                self.dense = tf.keras.layers.Dense(1024,
                                                   activation="elu",
                                                   kernel_initializer=init)
                self.softmax = tf.keras.layers.Dense(2,
                                                     activation="softmax",
                                                     kernel_initializer=init)

            def call(self, *args, **kwargs):
                return self.softmax(self.dense(self.lbn(*args, **kwargs)))

        model = Model()
        output = model(self.vectors_aux_t).numpy()

        self.assertEqual(output.shape, (2, 2))
Code example #6
        def __init__(self, lbn_layer, *args, **kwargs):
            super(DummyModel, self).__init__(*args, **kwargs)

            l = lbn_layer.lbn
            self.lbn_layer = LBNLayer(
                n_particles=l.n_particles,
                n_restframes=l.n_restframes,
                boost_mode=l.boost_mode,
                particle_weights=lbn_layer.particle_weights,
                restframe_weights=lbn_layer.restframe_weights,
                features=lbn_layer.feature_names,
            )
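The snippet above is only the constructor of a wrapper model. A minimal sketch of what the enclosing class might look like; the class name is taken from the call to super, while the imports, docstring, and call method are assumptions added for illustration.

import tensorflow as tf
from lbn import LBNLayer  # assumes the standalone lbn package


class DummyModel(tf.keras.models.Model):
    """Rebuilds an LBNLayer from an existing, already-built layer."""

    def __init__(self, lbn_layer, *args, **kwargs):
        super(DummyModel, self).__init__(*args, **kwargs)

        l = lbn_layer.lbn
        self.lbn_layer = LBNLayer(
            n_particles=l.n_particles,
            n_restframes=l.n_restframes,
            boost_mode=l.boost_mode,
            particle_weights=lbn_layer.particle_weights,
            restframe_weights=lbn_layer.restframe_weights,
            features=lbn_layer.feature_names,
        )

    def call(self, inputs):
        # assumed forward pass: run the four-vectors through the re-created LBN layer
        return self.lbn_layer(inputs)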
Code example #7
    def __init__(self,
                 n_constituents,
                 n_targets,
                 params,
                 hidden,
                 fr_activation=0,
                 fo_activation=0,
                 fc_activation=0,
                 De=8,
                 Do=8,
                 sum_O=True,
                 debug=False):
        super(LEIA, self).__init__()

        # initialize the LBN layer for preprocessing
        self.lbn = LBNLayer(n_particles=n_constituents,
                            n_restframes=n_constituents,
                            boost_mode='pairs')

        self.hidden = int(hidden)
        self.P = params
        self.N = self.lbn.lbn.n_out
        self.Nr = self.N * (self.N - 1)
        self.Dr = 0
        self.De = De
        self.Dx = 0
        self.Do = Do
        self.n_targets = n_targets
        self.fr_activation = fr_activation
        self.fo_activation = fo_activation
        self.fc_activation = fc_activation
        self.assign_matrices()
        self.Ra = tf.ones([self.Dr, self.Nr])
        self.fr1 = layers.Dense(self.hidden)           # , input_shape=(2 * self.P + self.Dr,)
        self.fr2 = layers.Dense(int(self.hidden / 2))  # , input_shape=(self.hidden,)
        self.fr3 = layers.Dense(self.De)               # , input_shape=(int(self.hidden/2),)

        self.fo1 = layers.Dense(self.hidden)           # , input_shape=(self.P + self.Dx + (2 * self.De),)
        self.fo2 = layers.Dense(int(self.hidden / 2))  # , input_shape=(self.hidden,)
        self.fo3 = layers.Dense(self.Do)               # , input_shape=(int(self.hidden/2),)

        self.fc1 = layers.Dense(hidden)
        self.fc2 = layers.Dense(int(hidden / 2))
        self.fc3 = layers.Dense(self.n_targets)
        self.sum_O = sum_O
        self.debug = debug
Code example #8
    def functional_model(self, loss_fn):
        inputs = tf.keras.Input(shape=(4, 4))
        input_shape = (4, 4)
        LBN_output_features = [
            "E", "px", "py", "pz", "m", "pair_dy", "pair_cos"
        ]
        x = LBNLayer(input_shape,
                     n_particles=4,
                     boost_mode=LBN.PAIRS,
                     features=LBN_output_features)(inputs)
        x = tf.keras.layers.Dense(300, activation="relu")(x)
        x = tf.keras.layers.Dense(300, activation="relu")(x)
        outputs = tf.keras.layers.Dense(1)(x)

        model = tf.keras.Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer='adam', loss=loss_fn, metrics=['mae'])
        return model
Code example #9
File: test.py Project: riga/LBN
    def test_keras_layer_graph_connection(self):
        l = LBNLayer((10, 4),
                     n_particles=10,
                     boost_mode=LBN.PAIRS,
                     features=self.feature_set,
                     seed=123)
        self.assertIsInstance(l.lbn, LBN)

        # build a custom model
        class Model(tf.keras.models.Model):
            def __init__(self):
                super(Model, self).__init__()

                init = tf.keras.initializers.RandomNormal(mean=0.,
                                                          stddev=0.1,
                                                          seed=123)

                self.lbn = l
                self.dense = tf.keras.layers.Dense(1024,
                                                   activation="elu",
                                                   kernel_initializer=init)
                self.softmax = tf.keras.layers.Dense(2,
                                                     activation="softmax",
                                                     kernel_initializer=init)

            def call(self, *args, **kwargs):
                return self.softmax(self.dense(self.lbn(*args, **kwargs)))

        model = Model()

        x1 = tf.Variable(create_four_vectors((2, 10)), dtype=tf.float32)
        x2 = tf.Variable(create_four_vectors((2, 10)), dtype=tf.float32)

        with tf.GradientTape(persistent=True) as g:
            y1 = model(x1)
            y2 = model(x2)

        # ensure gradients are computed properly and not across objects
        self.assertIsNotNone(g.gradient(y1, x1))
        self.assertIsNotNone(g.gradient(y2, x2))
        self.assertIsNone(g.gradient(y2, x1))
        self.assertIsNone(g.gradient(y1, x2))
Code example #10
    def createModelLBN(self, user_hyperparameters={}, _weightsDir=''):
        """make lbn model"""

        print("++ Setting hyperparameters...")
        for hp in self.hyperparameters.keys():
            if hp in user_hyperparameters.keys():
                self.hyperparameters[hp] = user_hyperparameters[hp]

            print("{} = {}".format(hp, self.hyperparameters[hp]))

        #init = tf.keras.initializers.RandomNormal(mean=0., stddev=0.1, seed=123)

        features = ["E", "pt", "eta", "phi", "m", "pair_dr"]
        lbn_layer = LBNLayer(n_particles=self.hyperparameters['nLBNParticles'],
                             boost_mode="pairs",
                             features=features)

        metrics = [
            tf.keras.metrics.categorical_accuracy,
            tf.keras.metrics.AUC(name='auc'),
        ]

        l2_reg = tf.keras.regularizers.l2(1e-4)

        dense_kwargs_IML = dict(
            activation="selu",
            kernel_initializer=tf.keras.initializers.lecun_normal(),
            kernel_regularizer=l2_reg,
        )

        dense_kwargs = dict(
            activation=self.hyperparameters['hiddenActivation'],
            kernel_initializer=tf.keras.initializers.lecun_normal(),
            kernel_regularizer=l2_reg,
        )

        _model = tf.keras.models.Sequential()

        #_model.add(LBNLayer(5, boost_mode=LBN.PAIRS, features=features))
        _model.add(lbn_layer)
        _model.add(tf.keras.layers.BatchNormalization(axis=1))

        _model.add(
            tf.keras.layers.Dense(
                self.hyperparameters['nodesInFirstHiddenLayer'],
                **dense_kwargs))
        _model.add(
            tf.keras.layers.Dense(
                self.hyperparameters['nodesInSecondHiddenLayer'],
                **dense_kwargs))

        #self.model.add(tf.keras.layers.Dense(750, activation='relu'))#, kernel_regularizer=l2_reg))
        #self.model.add(tf.keras.layers.Dense(256, activation='relu'))
        #self.model.add(tf.keras.layers.Dropout(0.2))

        #self.model.add(tf.keras.layers.Dense(128, activation='relu'))
        #self.model.add(tf.keras.layers.Dense(64, activation='relu'))
        #self.model.add(tf.keras.layers.Dense(32, activation='relu'))

        _model.add(
            tf.keras.layers.Dense(
                2,
                activation=self.hyperparameters['outputActivation'],
                kernel_regularizer=l2_reg))

        _model.compile(loss=self.hyperparameters['lossFunction'],
                       optimizer='adam',
                       metrics=metrics)

        if _weightsDir != '':

            local_dir = os.path.join(topDir, "lbn", "models", _weightsDir)
            modelfile = os.path.join(local_dir, _weightsDir) + '.hdf5'
            print("++ loading model from {}".format(modelfile))
            # <-- FIXME: this does not check whether the file exists

            _model.predict(np.empty([1, 32]))
            _model.load_weights(modelfile)

        return _model
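A minimal sketch of how the FIXME above could be addressed by guarding the weight loading with an existence check; it is not standalone code but a drop-in for the last two lines of the `if _weightsDir != '':` block, reusing the _model and modelfile variables and the dummy shape already defined in the method.

            if not os.path.isfile(modelfile):
                raise FileNotFoundError("weights file not found: {}".format(modelfile))

            # dummy forward pass (as above) builds the layers so the saved weights can be assigned
            _model.predict(np.empty([1, 32]))
            _model.load_weights(modelfile)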
Code example #11
    def functional_model_advanced(self, loss_fn):
        # the first LBN layer performs the Lorentz boost
        inputs = tf.keras.Input(shape=(4, 4))
        input_shape = (4, 4)
        LBN_output_features = ["E", "px", "py", "pz"]
        x = LBNLayer(input_shape,
                     n_particles=4,
                     boost_mode=LBN.PAIRS,
                     features=LBN_output_features)(inputs)
        #x = tf.keras.layers.Dense(64, activation="relu")(x) #not sure whether it's better to add in extra Dense layers or not
        boosted = tf.keras.layers.Dense(16)(x)

        #second layer A calculates y_1_2 and y_1_1 using Dense
        y = tf.keras.layers.Dense(64, activation='relu')(boosted)
        #y = tf.keras.layers.Dense(64, activation="relu")(y) #not sure whether it's better to add in extra Dense layers or not
        y = tf.keras.layers.Dense(2)(y)

        #second layer B does y perp (the cross-product) using formulas
        boosted_4by4 = tf.keras.layers.Reshape((4, 4))(boosted)
        lambda_plus = tf.math.l2_normalize(tf.linalg.cross(
            boosted_4by4[:, 2, 1:], boosted_4by4[:, 0, 1:]),
                                           axis=1)
        lambda_minus = tf.math.l2_normalize(tf.linalg.cross(
            boosted_4by4[:, 3, 1:], boosted_4by4[:, 1, 1:]),
                                            axis=1)

        # third layer concatenates the ys and lambdas
        y_and_lambdas = tf.keras.layers.Concatenate()(
            [y, lambda_plus, lambda_minus])
        #print('THE SHAPE IS', y_and_lambdas.shape)
        #print(type(y_and_lambdas))

        # fourth layer A calculates phi CP unshifted and y_t using Dense
        phi_y = tf.keras.layers.Dense(64, activation='relu')(y_and_lambdas)
        #phi_y = tf.keras.layers.Dense(64, activation="relu")(phi_y) #not sure whether it's better to add in extra Dense layers or not
        phi_cp_un = tf.keras.layers.Dense(2)(phi_y)

        # fourth layer B calculates O star (cross-product) using formulas
        O = tf.math.reduce_sum(tf.math.multiply(
            tf.linalg.cross(lambda_plus, lambda_minus), boosted_4by4[:, 1,
                                                                     1:]),
                               axis=1)
        O = tf.keras.layers.Reshape((1, ))(O)

        #print('SHAPES COMING:')
        #print(lambda_plus.shape)
        #print(lambda_minus.shape)
        #print(boosted_4by4[:,1,1:].shape)
        #print(phi_cp_un.shape)
        #print(O.shape)

        # fifth layer concatenates phi_cp_un and O, plus one more dense layer to combine them
        phi_O = tf.keras.layers.Concatenate()([phi_cp_un, O])
        #phi_O = tf.keras.layers.Dense(64, activation="relu")(phi_O) #not sure whether it's better to add in extra Dense layers or not
        phi_O_finished = tf.keras.layers.Dense(64, activation='relu')(phi_O)

        outputs = tf.keras.layers.Dense(1)(phi_O_finished)

        model = tf.keras.Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer='adam', loss=loss_fn, metrics=['mae'])
        return model
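The lambda_plus / lambda_minus and O steps above are plain TensorFlow tensor math rather than Keras layers. A small standalone sketch, with a random stand-in for the boosted (batch, 4, 4) block, shows what these expressions compute and which shapes they produce; the batch size and dummy data are assumptions.

import numpy as np
import tensorflow as tf

# stand-in for the "boosted" block above: 8 events, 4 four-vectors stored as (E, px, py, pz)
boosted_4by4 = tf.constant(np.random.normal(size=(8, 4, 4)), dtype=tf.float32)

# unit-normalised cross products of the spatial components (columns 1:) of two particles
lambda_plus = tf.math.l2_normalize(
    tf.linalg.cross(boosted_4by4[:, 2, 1:], boosted_4by4[:, 0, 1:]), axis=1)
lambda_minus = tf.math.l2_normalize(
    tf.linalg.cross(boosted_4by4[:, 3, 1:], boosted_4by4[:, 1, 1:]), axis=1)

# triple product used for O: (lambda_plus x lambda_minus) . p of particle 1
O = tf.reduce_sum(tf.linalg.cross(lambda_plus, lambda_minus) * boosted_4by4[:, 1, 1:], axis=1)

print(lambda_plus.shape, lambda_minus.shape, O.shape)  # (8, 3) (8, 3) (8,)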
Code example #12
File: Model.py Project: gsaha009/HHbbWWAnalysis
def NeuralNetGeneratorModel(x_train, y_train, x_val, y_val, params):
    """
    Keras model for the Neural Network, used to scan the hyperparameter space by Talos
    Uses the generator rather than the input data (which are dummies)
    """
    # Scaler #
    with open(parameters.scaler_path,
              'rb') as handle:  # Import scaler that was created before
        scaler = pickle.load(handle)

    # Design network #

    # Left branch : classic inputs -> Preprocess -> onehot
    inputs_numeric = []
    means = []
    variances = []
    inputs_all = []
    encoded_all = []
    for idx in range(x_train.shape[1]):
        inpName = parameters.inputs[idx].replace('$', '').replace(' ', '').replace('_', '')
        input_layer = tf.keras.Input(shape=(1, ), name=inpName)
        # Categorical inputs #
        if parameters.mask_op[idx]:
            operation = getattr(Operations, parameters.operations[idx])()
            encoded_all.append(operation(input_layer))
        # Numerical inputs #
        else:
            inputs_numeric.append(input_layer)
            means.append(scaler.mean_[idx])
            variances.append(scaler.var_[idx])
        inputs_all.append(input_layer)

    # Concatenate all numerical inputs #
    if int(tf_version[1]) < 4:
        normalizer = preprocessing.Normalization(name='Normalization')
        x_dummy = np.ones((10, len(means)))
        # Needs a dummy to call the adapt method before setting the weights
        normalizer.adapt(x_dummy)
        normalizer.set_weights([np.array(means), np.array(variances)])
    else:
        normalizer = preprocessing.Normalization(mean=means,
                                                 variance=variances,
                                                 name='Normalization')
    encoded_all.append(
        normalizer(tf.keras.layers.concatenate(inputs_numeric,
                                               name='Numerics')))

    if len(encoded_all) > 1:
        all_features = tf.keras.layers.concatenate(encoded_all,
                                                   axis=-1,
                                                   name="Features")
    else:
        all_features = encoded_all[0]

    # Right branch : LBN
    lbn_input_shape = (len(parameters.LBN_inputs) // 4, 4)
    input_lbn_Layer = Input(shape=lbn_input_shape, name='LBN_inputs')
    lbn_layer = LBNLayer(
        lbn_input_shape,
        n_particles=max(params['n_particles'],
                        1),  # Hack so that 0 does not trigger error
        boost_mode=LBN.PAIRS,
        features=["E", "px", "py", "pz", "pt", "p", "m", "pair_cos"],
        name='LBN')(input_lbn_Layer)
    batchnorm = tf.keras.layers.BatchNormalization(name='batchnorm')(lbn_layer)

    # Concatenation of left and right #
    concatenate = tf.keras.layers.Concatenate(axis=-1)(
        [all_features, batchnorm])
    L1 = Dense(params['first_neuron'],
               activation=params['activation'],
               kernel_regularizer=l2(params['l2']))(
                   concatenate if params['n_particles'] > 0 else all_features)
    hidden = hidden_layers(params, 1, batch_normalization=True).API(L1)
    out = Dense(y_train.shape[1],
                activation=params['output_activation'],
                name='out')(hidden)

    # Tensorboard logs #
    #    path_board = os.path.join(parameters.main_path,"TensorBoard")
    #    suffix = 0
    #    while(os.path.exists(os.path.join(path_board,"Run_"+str(suffix)))):
    #        suffix += 1
    #    path_board = os.path.join(path_board,"Run_"+str(suffix))
    #    os.makedirs(path_board)
    #    logging.info("TensorBoard log dir is at %s"%path_board)

    # Callbacks #
    # Early stopping to stop learning if val_loss plateau for too long #
    early_stopping = EarlyStopping(**parameters.early_stopping_params)
    # Reduce learning rate in case of plateau #
    reduceLR = ReduceLROnPlateau(**parameters.reduceLR_params)
    # Custom loss function plot for debugging #
    loss_history = LossHistory()
    # Tensorboard for checking live the loss curve #
    #    board = TensorBoard(log_dir=path_board,
    #                        histogram_freq=1,
    #                        batch_size=params['batch_size'],
    #                        write_graph=True,
    #                        write_grads=True,
    #                        write_images=True)
    #    Callback_list = [loss_history,early_stopping,reduceLR,board]
    Callback_list = [loss_history, early_stopping, reduceLR]

    # Compile #
    if 'resume' not in params:  # Normal learning
        # Define model #
        model_inputs = [inputs_all]
        if params['n_particles'] > 0:
            model_inputs.append(input_lbn_Layer)
        model = Model(inputs=model_inputs, outputs=[out])
        initial_epoch = 0
    else:  # a model has to be imported and resumes training
        #custom_objects =  {'PreprocessLayer': PreprocessLayer,'OneHot': OneHot.OneHot}
        logging.info("Loaded model %s" % params['resume'])
        a = Restore(params['resume'],
                    custom_objects=custom_objects,
                    method='h5')
        model = a.model
        initial_epoch = params['initial_epoch']

    model.compile(optimizer=Adam(lr=params['lr']),
                  loss=params['loss_function'],
                  metrics=[
                      tf.keras.metrics.CategoricalAccuracy(),
                      tf.keras.metrics.AUC(multi_label=True),
                      tf.keras.metrics.Precision(),
                      tf.keras.metrics.Recall()
                  ])
    model.summary()

    # Generator #
    training_generator = DataGenerator(
        path=parameters.config,
        inputs=parameters.inputs,
        outputs=parameters.outputs,
        inputsLBN=parameters.LBN_inputs if params['n_particles'] > 0 else None,
        cut=parameters.cut,
        weight=parameters.weight,
        batch_size=params['batch_size'],
        state_set='training',
        model_idx=params['model_idx'] if parameters.crossvalidation else None)
    validation_generator = DataGenerator(
        path=parameters.config,
        inputs=parameters.inputs,
        outputs=parameters.outputs,
        inputsLBN=parameters.LBN_inputs if params['n_particles'] > 0 else None,
        cut=parameters.cut,
        weight=parameters.weight,
        batch_size=params['batch_size'],
        state_set='validation',
        model_idx=params['model_idx'] if parameters.crossvalidation else None)

    # Some verbose logging #
    logging.info("Will use %d workers" % parameters.workers)
    logging.warning("Tensorflow location " + tf.__file__)
    if len(tf.config.experimental.list_physical_devices('XLA_GPU')) > 0:
        logging.info("GPU detected")
    #logging.warning(K.tensorflow_backend._get_available_gpus())
    # Fit #
    history = model.fit_generator(
        generator=training_generator,           # Training data from generator instance
        validation_data=validation_generator,   # Validation data from generator instance
        epochs=params['epochs'],                # Number of epochs
        verbose=1,
        max_queue_size=parameters.workers * 2,  # Length of batch queue
        callbacks=Callback_list,                # Callbacks
        initial_epoch=initial_epoch,            # In case of resumed training will be different from 0
        workers=parameters.workers,             # Number of threads for batch generation (0 : all in same)
        shuffle=True,                           # Shuffle order at each epoch
        use_multiprocessing=True)               # Needs to be turned on for queuing batches

    # Plot history #
    PlotHistory(loss_history)

    return history, model
Code example #13
File: Model.py Project: gsaha009/HHbbWWAnalysis
def NeuralNetModel(x_train, y_train, x_val, y_val, params):
    """
    Keras model for the Neural Network, used to scan the hyperparameter space by Talos
    Uses the data provided as inputs
    """
    # Split y = [target,weight], Talos does not leave room for the weight so had to be included in one of the arrays
    w_train = y_train[:, -1]
    w_val = y_val[:, -1]
    y_train = y_train[:, :-1]
    y_val = y_val[:, :-1]

    x_train_lbn = x_train[:, -len(parameters.LBN_inputs):].reshape(
        -1, 4,
        len(parameters.LBN_inputs) // 4)
    x_train = x_train[:, :-len(parameters.LBN_inputs)]

    x_val_lbn = x_val[:, -len(parameters.LBN_inputs):].reshape(
        -1, 4,
        len(parameters.LBN_inputs) // 4)
    x_val = x_val[:, :-len(parameters.LBN_inputs)]

    # Scaler #
    with open(parameters.scaler_path,
              'rb') as handle:  # Import scaler that was created before
        scaler = pickle.load(handle)

    # Design network #

    # Left branch : classic inputs -> Preprocess -> onehot
    inputs_numeric = []
    means = []
    variances = []
    inputs_all = []
    encoded_all = []
    for idx in range(x_train.shape[1]):
        inpName = parameters.inputs[idx].replace('$', '')
        input_layer = tf.keras.Input(shape=(1, ), name=inpName)
        # Categorical inputs #
        if parameters.mask_op[idx]:
            operation = getattr(Operations, parameters.operations[idx])()
            encoded_all.append(operation(input_layer))
        # Numerical inputs #
        else:
            inputs_numeric.append(input_layer)
            means.append(scaler.mean_[idx])
            variances.append(scaler.var_[idx])
        inputs_all.append(input_layer)

    # Concatenate all numerical inputs #
    if int(tf_version[1]) < 4:
        normalizer = preprocessing.Normalization(name='Normalization')
        x_dummy = np.ones((10, len(means)))
        # Needs a dummy to call the adapt method before setting the weights
        normalizer.adapt(x_dummy)
        normalizer.set_weights([np.array(means), np.array(variances)])
    else:
        normalizer = preprocessing.Normalization(mean=means,
                                                 variance=variances,
                                                 name='Normalization')
    encoded_all.append(
        normalizer(tf.keras.layers.concatenate(inputs_numeric,
                                               name='Numerics')))

    if len(encoded_all) > 1:
        all_features = tf.keras.layers.concatenate(encoded_all,
                                                   axis=-1,
                                                   name="Features")
    else:
        all_features = encoded_all[0]

    # Right branch : LBN
    input_lbn_Layer = Input(shape=x_train_lbn.shape[1:], name='LBN_inputs')
    lbn_layer = LBNLayer(
        x_train_lbn.shape[1:],
        n_particles=max(params['n_particles'],
                        1),  # Hack so that 0 does not trigger error
        boost_mode=LBN.PAIRS,
        features=["E", "px", "py", "pz", "pt", "p", "m", "pair_cos"],
        name='LBN')(input_lbn_Layer)
    batchnorm = tf.keras.layers.BatchNormalization(name='batchnorm')(lbn_layer)

    # Concatenation of left and right #
    concatenate = tf.keras.layers.Concatenate(axis=-1)(
        [all_features, batchnorm])
    L1 = Dense(params['first_neuron'],
               activation=params['activation'],
               kernel_regularizer=l2(params['l2']))(
                   concatenate if params['n_particles'] > 0 else all_features)
    hidden = hidden_layers(params, 1, batch_normalization=True).API(L1)
    out = Dense(y_train.shape[1],
                activation=params['output_activation'],
                name='out')(hidden)

    # Check preprocessing #
    preprocess = Model(inputs=inputs_numeric, outputs=encoded_all[-1])
    x_numeric = x_train[:, [not m for m in parameters.mask_op]]
    out_preprocess = preprocess.predict(np.hsplit(x_numeric,
                                                  x_numeric.shape[1]),
                                        batch_size=params['batch_size'])
    mean_scale = np.mean(out_preprocess)
    std_scale = np.std(out_preprocess)
    # Check that scaling is correct to 1%
    if abs(mean_scale) > 0.01 or abs((std_scale - 1) / std_scale) > 0.1:
        logging.warning(
            "Something is wrong with the preprocessing layer (mean = %0.6f, std = %0.6f), maybe you loaded an incorrect scaler"
            % (mean_scale, std_scale))

    # Tensorboard logs #
    #path_board = os.path.join(parameters.main_path,"TensorBoard")
    #suffix = 0
    #while(os.path.exists(os.path.join(path_board,"Run_"+str(suffix)))):
    #    suffix += 1
    #path_board = os.path.join(path_board,"Run_"+str(suffix))
    #os.makedirs(path_board)
    #logging.info("TensorBoard log dir is at %s"%path_board)

    # Callbacks #
    # Early stopping to stop learning if val_loss plateau for too long #
    early_stopping = EarlyStopping(**parameters.early_stopping_params)
    # Reduce learning rate in case of plateau #
    reduceLR = ReduceLROnPlateau(**parameters.reduceLR_params)
    # Custom loss function plot for debugging #
    loss_history = LossHistory()
    # Tensorboard for checking live the loss curve #
    #board = TensorBoard(log_dir=path_board,
    #                    histogram_freq=1,
    #                    batch_size=params['batch_size'],
    #                    write_graph=True,
    #                    write_grads=True,
    #                    write_images=True)
    Callback_list = [loss_history, early_stopping, reduceLR]

    # Compile #
    if 'resume' not in params:  # Normal learning
        # Define model #
        model_inputs = [inputs_all]
        if params['n_particles'] > 0:
            model_inputs.append(input_lbn_Layer)
        model = Model(inputs=model_inputs, outputs=[out])
        initial_epoch = 0
    else:  # a model has to be imported and resumes training
        #custom_objects =  {'PreprocessLayer': PreprocessLayer,'OneHot': OneHot.OneHot}
        logging.info("Loaded model %s" % params['resume'])
        a = Restore(params['resume'],
                    custom_objects=custom_objects,
                    method='h5')
        model = a.model
        initial_epoch = params['initial_epoch']

    model.compile(optimizer=Adam(lr=params['lr']),
                  loss=params['loss_function'],
                  metrics=[
                      tf.keras.metrics.CategoricalAccuracy(),
                      tf.keras.metrics.AUC(multi_label=True),
                      tf.keras.metrics.Precision(),
                      tf.keras.metrics.Recall()
                  ])
    model.summary()
    fit_inputs = np.hsplit(x_train, x_train.shape[1])
    fit_val = (np.hsplit(x_val, x_val.shape[1]), y_val, w_val)
    if params['n_particles'] > 0:
        fit_inputs.append(x_train_lbn)
        fit_val[0].append(x_val_lbn)
    # Fit #
    history = model.fit(x=fit_inputs,
                        y=y_train,
                        sample_weight=w_train,
                        epochs=params['epochs'],
                        batch_size=params['batch_size'],
                        verbose=1,
                        validation_data=fit_val,
                        callbacks=Callback_list)

    # Plot history #
    PlotHistory(loss_history, params)

    return history, model