Example #1
    def generate(
            self, V: Variat, input_features_layer: DenseFeatures
    ) -> Generator[Model, None, None]:

        model = tf.keras.Sequential([
            input_features_layer,
            layers.Dense(158,
                         activation='selu',
                         kernel_initializer=initializers.lecun_normal()),
            layers.Dropout(0.2),
            layers.Dense(168,
                         activation='swish',
                         kernel_initializer=initializers.GlorotNormal()),
            layers.Dropout(0.2),
            layers.Dense(178,
                         activation='swish',
                         kernel_initializer=initializers.GlorotNormal()),
            layers.Dropout(0.2),
            layers.Dense(188,
                         activation='selu',
                         kernel_initializer=initializers.lecun_normal()),
            layers.Dropout(0.2),
            layers.Dense(1,
                         activation="sigmoid",
                         kernel_initializer=initializers.lecun_normal())
        ])

        yield model
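
Example #1 pairs 'selu' activations with initializers.lecun_normal(), the combination recommended for self-normalizing networks (LeCun-normal weights keep pre-activations roughly zero-mean/unit-variance, which SELU assumes). A minimal, self-contained sketch of that pairing; the layer sizes and the make_selu_model name are illustrative, not taken from the example above:

import tensorflow as tf
from tensorflow.keras import layers, initializers

def make_selu_model(input_dim):
    # SELU layers are conventionally paired with lecun_normal weights and
    # AlphaDropout, which preserves the self-normalizing property.
    model = tf.keras.Sequential([
        tf.keras.Input(shape=(input_dim,)),
        layers.Dense(64, activation='selu',
                     kernel_initializer=initializers.lecun_normal()),
        layers.AlphaDropout(0.1),
        layers.Dense(1, activation='sigmoid',
                     kernel_initializer=initializers.lecun_normal()),
    ])
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model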
Example #2
def resnet_blocks(input_res, kernel, name):
    gen_initializer = lecun_normal()
    in_res_1 = ReflectPadding3D(padding=1)(input_res)
    out_res_1 = Conv3D(kernel,
                       3,
                       strides=1,
                       kernel_initializer=gen_initializer,
                       use_bias=False,
                       name=name + '_conv_a',
                       data_format='channels_first')(in_res_1)
    out_res_1 = InstanceNormalization3D(name=name + '_isnorm_a')(out_res_1)
    out_res_1 = Activation('relu')(out_res_1)

    in_res_2 = ReflectPadding3D(padding=1)(out_res_1)
    out_res_2 = Conv3D(kernel,
                       3,
                       strides=1,
                       kernel_initializer=gen_initializer,
                       use_bias=False,
                       name=name + '_conv_b',
                       data_format='channels_first')(in_res_2)
    out_res_2 = InstanceNormalization3D(name=name + '_isnorm_b')(out_res_2)

    out_res = Add()([out_res_2, input_res])
    return out_res
Example #3
    def __init__(self, model_features):
        super(cnn, self).__init__()
        """ Define here the layers used during the forward-pass
            of the neural network.
        """
        l2_regularization_scale = model_features.l2_regularization_scale
        dropout_probability = model_features.dropout_probability
        nodes_layer_1 = model_features.nodes_layer_1
        nodes_layer_2 = model_features.nodes_layer_2
        input_shape = (-1, 1024, 1)
        number_filters = model_features.number_filters
        kernel_size = model_features.kernel_size
        self.batch_size = model_features.batch_size
        self.scaler = model_features.scaler
        # Convolution layers.
        self.cnn_layer1 = tf.layers.Conv1D(filters=number_filters[0],
                                           kernel_size=kernel_size[0],
                                           strides=1,
                                           padding='valid',
                                           activation='relu')
        self.max_pool1 = tf.layers.MaxPooling1D(pool_size=2,
                                                strides=2,
                                                padding='valid')
        self.cnn_layer2 = tf.layers.Conv1D(filters=number_filters[1],
                                           kernel_size=kernel_size[1],
                                           strides=1,
                                           padding='valid',
                                           activation='relu')
        self.max_pool2 = tf.layers.MaxPooling1D(pool_size=2,
                                                strides=2,
                                                padding='valid')

        # Fully connected layers.
        self.flatten1 = tf.layers.Flatten()
        self.dropout1 = tf.layers.Dropout(dropout_probability)
        self.dense_layer1 = tf.layers.Dense(nodes_layer_1,
                                            kernel_initializer=lecun_normal(),
                                            activation=tf.nn.relu)
        self.dropout2 = tf.layers.Dropout(dropout_probability)
        self.output_layer = tf.layers.Dense(57,
                                            kernel_initializer=lecun_normal(),
                                            activation=None)
Example #4
def get_compiled_mlp_model(num_users,
                           num_items,
                           learning_rate=0.001,
                           layers_num=[20, 10],
                           reg_layers=[0, 0]):
    assert len(layers_num) == len(reg_layers)
    num_layer = len(layers_num)  # Number of layers in the MLP
    # Input variables
    user_input = layers.Input(shape=(1, ), dtype='int32', name='user_input')
    item_input = layers.Input(shape=(1, ), dtype='int32', name='item_input')

    user_embedding = layers.Embedding(input_dim=num_users,
                                      output_dim=int(layers_num[0] / 2),
                                      name='user_embedding',
                                      embeddings_regularizer=regularizers.l2(
                                          reg_layers[0]),
                                      input_length=1)
    item_embedding = layers.Embedding(input_dim=num_items,
                                      output_dim=int(layers_num[0] / 2),
                                      name='item_embedding',
                                      embeddings_regularizer=regularizers.l2(
                                          reg_layers[0]),
                                      input_length=1)

    # Crucial to flatten an embedding vector!
    user_latent = layers.Flatten()(user_embedding(user_input))
    item_latent = layers.Flatten()(item_embedding(item_input))

    # The 0-th layer is the concatenation of embedding layers
    vector = layers.concatenate([user_latent, item_latent])

    # MLP layers
    for idx in range(1, num_layer):
        layer = layers.Dense(layers_num[idx],
                             kernel_regularizer=regularizers.l2(
                                 reg_layers[idx]),
                             activation='relu',
                             name='layer%d' % idx)
        vector = layer(vector)

    # Final prediction layer
    prediction = layers.Dense(1,
                              activation='sigmoid',
                              kernel_initializer=initializers.lecun_normal(),
                              name='prediction')(vector)

    model_mlp = models.Model(inputs=[user_input, item_input],
                             outputs=prediction)
    model_mlp.compile(optimizer=optimizers.Adam(lr=learning_rate,
                                                clipnorm=0.5),
                      loss='binary_crossentropy')

    return model_mlp
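
A hypothetical call sketch for get_compiled_mlp_model above, assuming numpy is available and a TensorFlow/Keras version in which the example itself runs (it uses the legacy lr= optimizer argument); the user/item counts and the dummy data are illustrative only:

import numpy as np

model = get_compiled_mlp_model(num_users=1000, num_items=500,
                               layers_num=[32, 16, 8], reg_layers=[0, 0, 0])
# Each input is a batch of integer ids with shape (batch, 1).
users = np.random.randint(0, 1000, size=(256, 1)).astype('int32')
items = np.random.randint(0, 500, size=(256, 1)).astype('int32')
labels = np.random.randint(0, 2, size=(256, 1)).astype('float32')
model.fit([users, items], labels, epochs=1, batch_size=64)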
Example #5
    def __init__(self,
                 repr_length,
                 none_initializer=initializers.zeros,
                 kernel_initializer=initializers.lecun_normal(),
                 bias_initializer=initializers.zeros):
        super(OptionCase, self).__init__()

        self.repr_length = repr_length
        self.none_initializer = none_initializer
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer

        self.supports_masking = False
        self.input_spec = InputSpec(min_ndim=2)
Example #6
def Classifier_Net(x1, x2):

    dense_1 = Dense(128, kernel_initializer=initializers.he_normal())
    dense_2 = Dense(128, kernel_initializer=initializers.he_normal())
    dense_final = Dense(1,
                        kernel_initializer=initializers.lecun_normal(),
                        activation='sigmoid')

    x = Add()([dense_1(x1), dense_2(x2)])
    x = Activation('relu')(BatchNormalization()(x))
    x = dense_final(Dropout(0.4)(x))
    # x = dense_final(x)

    return x
Example #7
def get_model():
    dmf_num_layer = len(userlayers)  #Number of layers in the DMF
    mlp_num_layer = len(layers)  #Number of layers in the MLP

    # Input variables
    user_rating = Input(shape=(num_items, ), dtype='int32', name='user_input')
    item_rating = Input(shape=(num_users, ), dtype='int32', name='item_input')

    # DMF part
    userlayer = Dense(userlayers[0], activation="linear", name='user_layer0')
    itemlayer = Dense(itemlayers[0], activation="linear", name='item_layer0')
    dmf_user_latent = userlayer(user_rating)
    dmf_item_latent = itemlayer(item_rating)
    for idx in range(1, dmf_num_layer):
        userlayer = Dense(userlayers[idx],
                          activation='relu',
                          name='user_layer%d' % idx)
        itemlayer = Dense(itemlayers[idx],
                          activation='relu',
                          name='item_layer%d' % idx)
        dmf_user_latent = userlayer(dmf_user_latent)
        dmf_item_latent = itemlayer(dmf_item_latent)
    dmf_vector = multiply([dmf_user_latent, dmf_item_latent])

    # MLP part
    MLP_Embedding_User = Dense(layers[0] // 2,
                               activation="linear",
                               name='user_embedding')
    MLP_Embedding_Item = Dense(layers[0] // 2,
                               activation="linear",
                               name='item_embedding')
    mlp_user_latent = MLP_Embedding_User(user_rating)
    mlp_item_latent = MLP_Embedding_Item(item_rating)
    mlp_vector = concatenate([mlp_user_latent, mlp_item_latent])
    for idx in range(1, mlp_num_layer):
        layer = Dense(layers[idx], activation='relu', name="layer%d" % idx)
        mlp_vector = layer(mlp_vector)

    # Concatenate DMF and MLP parts
    predict_vector = concatenate([dmf_vector, mlp_vector])

    # Final prediction layer
    prediction = Dense(1,
                       activation='sigmoid',
                       kernel_initializer=initializers.lecun_normal(),
                       name="prediction")(predict_vector)

    model_ = Model(inputs=[user_rating, item_rating], outputs=prediction)

    return model_
Example #8
def create_prm_initializer(prm):

    if prm['initializer'] is None:
        prm['initializer_func'] = None

    if prm['initializer'] == 'glorot_normal':
        prm['initializer_func'] = glorot_normal()

    if prm['initializer'] == 'lecun_uniform':
        prm['initializer_func'] = lecun_uniform()

    if prm['initializer'] == 'lecun_normal':
        prm['initializer_func'] = lecun_normal()

    return (prm)
Example #9
def gated_classifier_model(Xv, Xp):
    mlp_phr = Dense(300, kernel_initializer=initializers.he_normal())
    mlp_vis = Dense(300, kernel_initializer=initializers.he_normal())
    final_mlp = Dense(1,
                      kernel_initializer=initializers.lecun_normal(),
                      activation='sigmoid',
                      name='final')

    g_phr, g_vis = multimodal_gate_model(Xv, Xp)
    h_phr = Activation('tanh')(BatchNormalization()(mlp_phr(Xp)))
    h_vis = Activation('tanh')(BatchNormalization()(mlp_vis(Xv)))

    h = Add()([Multiply()([g_phr, h_phr]), Multiply()([g_vis, h_vis])])
    h = final_mlp(Dropout(0.4)(h))
    h = Flatten()(h)
    return h
Example #10
    def create_str_to_initialiser_converter(self):
        """Creates a dictionary which converts strings to initialiser"""
        str_to_initialiser_converter = {
            "glorot_normal": initializers.glorot_normal,
            "glorot_uniform": initializers.glorot_uniform,
            "xavier_normal": initializers.glorot_normal,
            "xavier_uniform": initializers.glorot_uniform,
            "xavier": initializers.glorot_uniform,
            "he_normal": initializers.he_normal(),
            "he_uniform": initializers.he_uniform(),
            "lecun_normal": initializers.lecun_normal(),
            "lecun_uniform": initializers.lecun_uniform(),
            "truncated_normal": initializers.TruncatedNormal,
            "variance_scaling": initializers.VarianceScaling,
            "default": initializers.glorot_uniform
        }
        return str_to_initialiser_converter
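
The converter above stores initializer instances for some keys (e.g. "he_normal", "lecun_normal") and bare classes or factories for others (e.g. "truncated_normal", "glorot_uniform"), so callers have to handle both cases. A hedged sketch of one way to normalise the lookup; the resolve_initialiser helper is hypothetical and not part of the original class:

from tensorflow.keras import initializers

def resolve_initialiser(converter, name):
    # Return an initializer instance whether the table stores an instance
    # (e.g. lecun_normal()) or a class/factory (e.g. TruncatedNormal).
    entry = converter.get(name, converter["default"])
    if isinstance(entry, initializers.Initializer):
        return entry
    return entry()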
Example #11
def classifier_model(f1, f2):
    mlp_1 = Dense(128,
                  kernel_initializer=initializers.he_normal(),
                  name='mlp_1')
    mlp_2 = Dense(128,
                  kernel_initializer=initializers.he_normal(),
                  name='mlp_2')
    final_mlp = Dense(1,
                      kernel_initializer=initializers.lecun_normal(),
                      activation='sigmoid',
                      name='final')

    final_fuse = Add()([mlp_1(f1), mlp_2(f2)])

    h = Activation('relu')(BatchNormalization()(final_fuse))
    h = final_mlp(Dropout(0.4)(h))
    h = Flatten()(h)
    return h
Example #12
def get_compiled_gmf_model(num_users,
                           num_items,
                           latent_dim=8,
                           learning_rate=0.001,
                           reg=[0, 0]):
    # Input variables
    user_input = layers.Input(shape=(1, ), dtype='int32', name='user_input')
    item_input = layers.Input(shape=(1, ), dtype='int32', name='item_input')

    user_embedding = layers.Embedding(
        input_dim=num_users,
        output_dim=latent_dim,
        embeddings_initializer=initializers.RandomNormal(),
        embeddings_regularizer=regularizers.l2(reg[0]),
        input_length=1,
        name='user_embedding')
    item_embedding = layers.Embedding(
        input_dim=num_items,
        output_dim=latent_dim,
        embeddings_initializer=initializers.RandomNormal(),
        embeddings_regularizer=regularizers.l2(reg[1]),
        input_length=1,
        name='item_embedding')

    # Crucial to flatten an embedding vector!
    user_latent = layers.Flatten()(user_embedding(user_input))
    item_latent = layers.Flatten()(item_embedding(item_input))

    # Element-wise product of user and item embeddings
    predict_vector = layers.multiply([user_latent, item_latent])
    prediction = layers.Dense(1,
                              activation='sigmoid',
                              kernel_initializer=initializers.lecun_normal(),
                              name='prediction')(predict_vector)

    model_gmf = models.Model(inputs=[user_input, item_input],
                             outputs=prediction)
    model_gmf.compile(optimizer=optimizers.Adam(lr=learning_rate,
                                                clipnorm=0.5),
                      loss='binary_crossentropy')
    #model_gmf.compile(optimizer=Accoptimizers.Adam(lr=learning_rate, clipnorm=0.5), loss='binary_crossentropy')

    return model_gmf
Example #13
    def __init__(self,
                 ncomp=10,
                 nfeat=154,
                 nunits=50,
                 kernel="RBF",
                 sigma=1.,
                 quasiRandom=True,
                 cosOnly=False,
                 inputd=None,
                 outputd=None,
                 nsteps=200,
                 weights=None):

        self.ncomp = ncomp  # number of mixture components
        self.nunits = nunits  # number of output units for LSTM
        self.inputd = inputd  # dimensionality of the input
        self.nsteps = nsteps  # number of time steps in the input sequence
        self.outputd = outputd  # dimensionality of the output
        self.weights = weights  # sample weights
        self.nfeat = nfeat  # number of RFFs
        self.quasiRandom = quasiRandom
        self.cosOnly = cosOnly
        self.sigma = sigma * np.ones(self.nunits)
        self.kernel = kernel
        self.rff_tf = RFF_TF(self.nfeat, self.nunits, self.sigma, self.cosOnly,
                             self.quasiRandom, self.kernel)

        # Set for GPU use
        config = tf.ConfigProto(device_count={'CPU': 2, 'GPU': 2})
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)

        def elu_modif(x, a=1.):
            e = 1e-15
            return ELU(alpha=a)(x) + 1. + e

        # Note: The output size will be (outputd + 2) * ncomp

        # For reference, not used at the moment
        def log_sum_exp(x, axis=None):
            """Log-sum-exp trick implementation"""
            x_max = K.max(x, axis=axis, keepdims=True)
            return K.log(K.sum(K.exp(x - x_max), axis=axis,
                               keepdims=True)) + x_max

        def tril_matrix(elements):
            tfd = tfp.distributions
            tril_m = tfd.fill_triangular(elements)
            # matrix_set_diag returns a new tensor, so keep the result.
            tril_m = tf.matrix_set_diag(tril_m,
                                        tf.exp(tf.matrix_diag_part(tril_m)))
            return tril_m

        def mean_log_Gaussian_like(y_true, parameters):
            # This version uses tensorflow_probability
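            # Per mixture component, `parameters` packs: outputd means,
            # outputd * (outputd + 1) / 2 lower-triangular scale entries, and
            # one mixing weight, which is what the reshape below unpacks.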
            components = K.reshape(parameters, [
                -1, self.outputd +
                int(0.5 * (self.outputd + 1) * self.outputd) + 1, self.ncomp
            ])
            mu = components[:, :self.outputd, :]
            mu = K.reshape(mu, [-1, self.ncomp, self.outputd])
            sigma = components[:, self.outputd:self.outputd +
                               int(0.5 * (self.outputd + 1) * self.outputd), :]
            sigma = K.reshape(
                sigma,
                [-1, self.ncomp,
                 int(0.5 * (self.outputd + 1) * self.outputd)])
            alpha = components[:, -1, :]

            # alpha = K.softmax(K.clip(alpha,1e-8,1.))
            alpha = K.clip(alpha, 1e-8, 1.)

            tfd = tfp.distributions
            mix = tfd.MixtureSameFamily(
                mixture_distribution=tfd.Categorical(probs=alpha),
                components_distribution=tfd.MultivariateNormalTriL(
                    loc=mu, scale_tril=tril_matrix(sigma)))

            log_gauss = mix.log_prob(y_true)
            res = -K.mean(log_gauss)
            return res

        # This returns a tensor
        inputs = Input(shape=(None, self.inputd))

        # Initializer with a particular seed
        initializer = lecun_normal(seed=1)

        # Add the LSTM layer
        nn = CuDNNLSTM(self.nunits)(inputs)

        # Computes the random Fourier features
        rff = Lambda(self.rff_tf.toFeatures)(nn)

        FC_mus = Dense(units=self.outputd * self.ncomp,
                       activation='linear',
                       kernel_initializer=initializer,
                       name='FC_mus')(rff)
        FC_sigmas_d = Dense(units=self.outputd * self.ncomp,
                            activation='linear',
                            kernel_initializer=initializer,
                            name='FC_sigmas_d')(
                                rff)  # K.exp, W_regularizer=l2(1e-3)
        FC_sigmas = Dense(
            units=int(0.5 * (self.outputd - 1) * self.outputd * self.ncomp),
            activation='linear',
            kernel_initializer=initializer,
            name='FC_sigmas')(rff)  # K.exp, W_regularizer=l2(1e-3)
        FC_alphas = Dense(units=self.ncomp,
                          activation='softmax',
                          kernel_initializer=initializer,
                          name='FC_alphas')(rff)

        output = concatenate([FC_mus, FC_sigmas_d, FC_sigmas, FC_alphas],
                             axis=1)
        self.model = Model(inputs=inputs, outputs=output)

        # Note: Replace 'rmsprop' by 'adam' depending on your needs.
        self.model.compile('adam', loss=mean_log_Gaussian_like)
Example #14
    def __init__(self,
                 ncomp=10,
                 nhidden=2,
                 nunits=[50, 24, 24],
                 inputd=None,
                 outputd=None,
                 nsteps=200,
                 weights=None):

        self.ncomp = ncomp  # number of mixture components
        self.nhidden = nhidden  # number of hidden layers
        self.nunits = nunits  # number of units per hidden layer (integer or array)
        self.inputd = inputd  # dimensionality of the input
        self.nsteps = nsteps  # number of time steps in the input sequence
        self.outputd = outputd  # dimensionality of the output
        self.weights = weights  # sample weights

        # Set for GPU use
        config = tf.ConfigProto(device_count={'CPU': 2, 'GPU': 2})
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)

        def elu_modif(x, a=1.):
            e = 1e-15
            return ELU(alpha=a)(x) + 1. + e

        # Note: The output size will be (outputd + 2) * ncomp

        # For reference, not used at the moment
        def log_sum_exp(x, axis=None):
            """Log-sum-exp trick implementation"""
            x_max = K.max(x, axis=axis, keepdims=True)
            return K.log(K.sum(K.exp(x - x_max), axis=axis,
                               keepdims=True)) + x_max

        def tril_matrix(elements):
            tfd = tfp.distributions
            tril_m = tfd.fill_triangular(elements)
            # matrix_set_diag returns a new tensor, so keep the result.
            tril_m = tf.matrix_set_diag(tril_m,
                                        tf.exp(tf.matrix_diag_part(tril_m)))
            return tril_m

        def mean_log_Gaussian_like(y_true, parameters):
            # This version uses tensorflow_probability
            components = K.reshape(parameters, [
                -1, self.outputd +
                int(0.5 * (self.outputd + 1) * self.outputd) + 1, self.ncomp
            ])
            mu = components[:, :self.outputd, :]
            mu = K.reshape(mu, [-1, self.ncomp, self.outputd])
            sigma = components[:, self.outputd:self.outputd +
                               int(0.5 * (self.outputd + 1) * self.outputd), :]
            sigma = K.reshape(
                sigma,
                [-1, self.ncomp,
                 int(0.5 * (self.outputd + 1) * self.outputd)])
            alpha = components[:, -1, :]

            # alpha = K.softmax(K.clip(alpha,1e-8,1.))
            alpha = K.clip(alpha, 1e-8, 1.)

            tfd = tfp.distributions
            mix = tfd.MixtureSameFamily(
                mixture_distribution=tfd.Categorical(probs=alpha),
                components_distribution=tfd.MultivariateNormalTriL(
                    loc=mu, scale_tril=tril_matrix(sigma)))

            log_gauss = mix.log_prob(y_true)
            res = -K.mean(log_gauss)
            return res

        # This returns a tensor
        inputs = Input(shape=(self.nsteps, self.inputd))

        # Initializer with a particular seed
        initializer = lecun_normal(seed=1)

        # Add the LSTM layer
        # nn = CuDNNLSTM(self.nunits[0])(inputs)
        nn = LSTM(self.nunits[0])(inputs)

        # a layer instance is callable on a tensor, and returns a tensor
        # nn = Dense(self.nunits[0], activation='tanh', kernel_initializer=initializer)(nn)
        # nn = Dropout(0.05)(nn)

        # for i in range(self.nhidden - 2):
        #     nn = Dense(self.nunits[i + 1], activation='tanh', kernel_initializer=initializer)(nn)
        #     # nn = Dropout(0.05)(nn)

        FC_mus = Dense(units=self.outputd * self.ncomp,
                       activation='linear',
                       kernel_initializer=initializer,
                       name='FC_mus')(nn)
        FC_sigmas_d = Dense(units=self.outputd * self.ncomp,
                            activation='linear',
                            kernel_initializer=initializer,
                            name='FC_sigmas_d')(
                                nn)  # K.exp, W_regularizer=l2(1e-3)
        FC_sigmas = Dense(
            units=int(0.5 * (self.outputd - 1) * self.outputd * self.ncomp),
            activation='linear',
            kernel_initializer=initializer,
            name='FC_sigmas')(nn)  # K.exp, W_regularizer=l2(1e-3)
        FC_alphas = Dense(units=self.ncomp,
                          activation='softmax',
                          kernel_initializer=initializer,
                          name='FC_alphas')(nn)

        output = concatenate([FC_mus, FC_sigmas_d, FC_sigmas, FC_alphas],
                             axis=1)
        self.model = Model(inputs=inputs, outputs=output)

        # Note: Replace 'rmsprop' by 'adam' depending on your needs.
        self.model.compile('adam', loss=mean_log_Gaussian_like)

        self.model.summary()
Example #15
    def __init__(self,
                 ncomp=10,
                 nfeat=500,
                 inputd=None,
                 cosOnly=False,
                 kernel="RBF",
                 sigma=1,
                 quasiRandom=True,
                 outputd=None,
                 weights=None):

        self.ncomp = ncomp  # number of mixture components
        self.nfeat = nfeat  # number of features
        self.inputd = inputd  # dimensionality of the input
        self.outputd = outputd  # dimensionality of the output
        self.quasiRandom = quasiRandom
        self.cosOnly = cosOnly
        self.sigma = sigma * np.ones(self.inputd)
        self.kernel = kernel
        self.rff = RFF(self.nfeat, self.inputd, self.sigma, self.cosOnly,
                       self.quasiRandom, self.kernel)
        self.weights = weights  # weight function
        self.scaler = StandardScaler()

        # self.scaler = MinMaxScaler()
        # self.scaler = MaxAbsScaler()
        # self.scaler = Normalizer(norm='l2')
        # self.scaler = QuantileTransformer(output_distribution='normal', random_state=0)
        # Set up for CPU; the GPU is only used for LSTM training.
        config = tf.ConfigProto(device_count={'CPU': 12, 'GPU': 0})
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)

        def elu_modif(x, a=1.):
            e = 1e-15
            return ELU(alpha=a)(x) + 1. + e

        # Note: The output size will be (outputd + 2) * ncomp

        # For reference, not used at the moment
        def log_sum_exp(x, axis=None):
            """Log-sum-exp trick implementation"""
            x_max = K.max(x, axis=axis, keepdims=True)
            return K.log(K.sum(K.exp(x - x_max), axis=axis,
                               keepdims=True)) + x_max

        def tril_matrix(elements):
            tfd = tfp.distributions
            tril_m = tfd.fill_triangular(elements)
            # matrix_set_diag returns a new tensor, so keep the result.
            tril_m = tf.matrix_set_diag(tril_m,
                                        tf.exp(tf.matrix_diag_part(tril_m)))
            return tril_m

        def mean_log_Gaussian_like(y_true, parameters):
            # This version uses tensorflow_probability
            components = K.reshape(parameters, [
                -1, self.outputd +
                int(0.5 * (self.outputd + 1) * self.outputd) + 1, self.ncomp
            ])
            mu = components[:, :self.outputd, :]
            mu = K.reshape(mu, [-1, self.ncomp, self.outputd])
            sigma = components[:, self.outputd:self.outputd +
                               int(0.5 * (self.outputd + 1) * self.outputd), :]
            sigma = K.reshape(
                sigma,
                [-1, self.ncomp,
                 int(0.5 * (self.outputd + 1) * self.outputd)])
            alpha = components[:, -1, :]

            # alpha = K.softmax(K.clip(alpha,1e-8,1.))
            alpha = K.clip(alpha, 1e-8, 1.)
            tfd = tfp.distributions
            mix = tfd.MixtureSameFamily(
                mixture_distribution=tfd.Categorical(probs=alpha),
                components_distribution=tfd.MultivariateNormalTriL(
                    loc=mu, scale_tril=tril_matrix(sigma)))

            log_gauss = mix.log_prob(y_true)
            res = -K.mean(log_gauss)
            return res

        # This returns a tensor
        inputs = Input(shape=(self.nfeat, ))

        # Initializer with a particular seed
        # initializer = RandomUniform(minval=0., maxval=1.)
        initializer = lecun_normal(seed=1)

        # Note: Replace 'rmsprop' by 'adam' depending on your needs.
        FC_mus = Dense(units=self.outputd * self.ncomp,
                       activation='linear',
                       kernel_initializer=initializer,
                       name='FC_mus')(inputs)
        FC_sigmas_d = Dense(units=self.outputd * self.ncomp,
                            activation='linear',
                            kernel_initializer='Ones',
                            kernel_regularizer=l2(10.),
                            name='FC_sigmas_d')(
                                inputs)  # K.exp, W_regularizer=l2(1e-3)
        FC_sigmas = Dense(
            units=int(0.5 * (self.outputd - 1) * self.outputd * self.ncomp),
            activation='linear',
            kernel_initializer='Ones',
            kernel_regularizer=l2(1.),
            name='FC_sigmas')(inputs)  # K.exp, W_regularizer=l2(1e-3)
        FC_alphas = Dense(units=self.ncomp,
                          activation='softmax',
                          kernel_initializer='Ones',
                          name='FC_alphas')(inputs)

        # Adding a small constant to the main diagonal to avoid numerical problems
        output = concatenate(
            [FC_mus, FC_sigmas_d + 1e-4, FC_sigmas, FC_alphas], axis=1)
        self.model = Model(inputs=inputs, outputs=output)
        self.model.compile('adam', loss=mean_log_Gaussian_like)
Example #16
    def __init__(self,
                 ncomp=10,
                 nhidden=2,
                 nunits=[24, 24],
                 inputd=None,
                 outputd=None,
                 weights=None):

        self.ncomp = ncomp  # number of mixture components
        self.nhidden = nhidden  # number of hidden layers
        self.nunits = nunits  # number of units per hidden layer (integer or array)
        self.inputd = inputd  # dimensionality of the input
        self.outputd = outputd  # dimensionality of the output
        self.weights = weights  # sample weights

        # This returns a tensor
        inputs = Input(shape=(self.inputd, ))

        # Initializer with a particular seed
        initializer = lecun_normal(seed=1)

        # a layer instance is callable on a tensor, and returns a tensor
        nn = Dense(self.nunits[0],
                   activation='tanh',
                   kernel_initializer=initializer)(inputs)
        # nn = Dropout(0.05)(nn)

        for i in range(self.nhidden - 2):
            nn = Dense(self.nunits[i + 1],
                       activation='tanh',
                       kernel_initializer=initializer)(nn)
            # nn = Dropout(0.05)(nn)

        FC_mus = Dense(units=self.outputd * self.ncomp,
                       activation='linear',
                       kernel_initializer=initializer,
                       name='FC_mus')(nn)
        FC_sigmas_d = Dense(units=self.outputd * self.ncomp,
                            activation='linear',
                            kernel_initializer=initializer,
                            name='FC_sigmas_d')(
                                nn)  # K.exp, W_regularizer=l2(1e-3)
        FC_sigmas = Dense(
            units=int(0.5 * (self.outputd - 1) * self.outputd * self.ncomp),
            activation='linear',
            kernel_initializer=initializer,
            name='FC_sigmas')(nn)  # K.exp, W_regularizer=l2(1e-3)
        FC_alphas = Dense(units=self.ncomp,
                          activation='softmax',
                          kernel_initializer=initializer,
                          name='FC_alphas')(nn)

        # Adding a small constant to the main diagonal to avoid numerical problems
        output = concatenate(
            [FC_mus, FC_sigmas_d + 1e-4, FC_sigmas, FC_alphas], axis=1)
        self.model = Model(inputs=inputs, outputs=output)

        # Note: Replace 'rmsprop' by 'adam' depending on your needs.
        self.model.compile('adam',
                           loss=MDNN.mean_log_gaussian_loss(
                               self.outputd, self.ncomp))
Example #17
    def __init__(self, model_features):
        super(cnn, self).__init__()
        """ Define here the layers used during the forward-pass
            of the neural network.
        """
        self.l2_regularization_scale = model_features.l2_regularization_scale
        dropout_probability = model_features.dropout_probability
        self.batch_size = model_features.batch_size
        self.dense_nodes = model_features.dense_nodes
        regularizer = tf.contrib.layers.l2_regularizer(
            scale=self.l2_regularization_scale)
        number_filters = model_features.number_filters
        kernel_length = model_features.kernel_length
        kernel_strides = model_features.kernel_strides
        pool_size = model_features.pool_size
        pool_strides = model_features.pool_strides
        self.scaler = model_features.scaler
        cnn_activation_function = model_features.cnn_activation_function
        dnn_activation_function = model_features.dnn_activation_function
        output_size = model_features.output_size
        cnn_trainable = model_features.cnn_trainable

        # ###################
        # ##### 1D Conv #####
        # ###################
        self.cnn_1d = {}
        for filter_index in range(len(kernel_length[0])):
            self.cnn_1d[str(filter_index)] = tf.layers.Conv1D(
                filters=number_filters[0],
                kernel_size=kernel_length[0][filter_index],
                strides=kernel_strides[0][filter_index],
                padding='same',
                activation=cnn_activation_function,
                kernel_initializer=tf.initializers.he_normal(),
                trainable=cnn_trainable)

        # ###################
        # ##### 2D Conv #####
        # ###################
        self.cnn_2d = {}
        self.cnn_2d['0'] = tf.layers.Conv2D(
            filters=number_filters[1],
            kernel_size=(kernel_length[1],
                         number_filters[0] * len(kernel_length[0])),
            strides=kernel_strides[1],
            padding='valid',
            activation=cnn_activation_function,
            kernel_initializer=tf.initializers.he_normal(),
            trainable=cnn_trainable)

        self.max_pooling2d = {}
        self.max_pooling2d['0'] = tf.layers.MaxPooling2D(
            pool_size=pool_size[1], strides=pool_strides[1], padding='same')

        # #################
        # ##### Dense #####
        # #################

        self.flatten = tf.layers.Flatten()

        self.dense_layers = {}
        self.dropout_layers = {}
        for layer, nodes in enumerate(self.dense_nodes):
            self.dense_layers[str(layer)] = tf.layers.Dense(
                nodes,
                activation=dnn_activation_function,
                kernel_initializer=lecun_normal(),
                kernel_regularizer=regularizer)
            self.dropout_layers[str(layer)] = tf.layers.Dropout(
                dropout_probability)
        self.output_layer = tf.layers.Dense(output_size, activation=None)
Example #18
def get_compiled_neumf_model(num_users,
                             num_items,
                             learning_rate=0.001,
                             mf_dim=10,
                             layers_num=[10],
                             reg_layers=[0],
                             reg_mf=0):
    assert len(layers_num) == len(reg_layers)
    num_layer = len(layers_num)  #Number of layers in the MLP
    # Input variables
    user_input = layers.Input(shape=(1, ), dtype='int32', name='user_input')
    item_input = layers.Input(shape=(1, ), dtype='int32', name='item_input')

    # Embedding layer
    mf_embedding_user = layers.Embedding(
        input_dim=num_users,
        output_dim=mf_dim,
        name='mf_embedding_user',
        embeddings_initializer=initializers.RandomNormal(),
        embeddings_regularizer=regularizers.l2(reg_mf),
        input_length=1)
    mf_embedding_item = layers.Embedding(
        input_dim=num_items,
        output_dim=mf_dim,
        name='mf_embedding_item',
        embeddings_initializer=initializers.RandomNormal(),
        embeddings_regularizer=regularizers.l2(reg_mf),
        input_length=1)

    mlp_embedding_user = layers.Embedding(
        input_dim=num_users,
        output_dim=int(layers_num[0] / 2),
        name="mlp_embedding_user",
        embeddings_initializer=initializers.RandomNormal(),
        embeddings_regularizer=regularizers.l2(reg_layers[0]),
        input_length=1)
    mlp_embedding_item = layers.Embedding(
        input_dim=num_items,
        output_dim=int(layers_num[0] / 2),
        name='mlp_embedding_item',
        embeddings_initializer=initializers.RandomNormal(),
        embeddings_regularizer=regularizers.l2(reg_layers[0]),
        input_length=1)

    # MF part
    mf_user_latent = layers.Flatten()(mf_embedding_user(user_input))
    mf_item_latent = layers.Flatten()(mf_embedding_item(item_input))
    mf_vector = layers.multiply([mf_user_latent, mf_item_latent])

    # MLP part
    mlp_user_latent = layers.Flatten()(mlp_embedding_user(user_input))
    mlp_item_latent = layers.Flatten()(mlp_embedding_item(item_input))
    mlp_vector = layers.concatenate([mlp_user_latent, mlp_item_latent])
    for idx in range(1, num_layer):
        layer = layers.Dense(layers_num[idx],
                             kernel_regularizer=regularizers.l2(
                                 reg_layers[idx]),
                             activation='relu',
                             name="layer%d" % idx)
        mlp_vector = layer(mlp_vector)

    # Concatenate MF and MLP parts
    predict_vector = layers.concatenate([mf_vector, mlp_vector])

    # Final prediction layer
    prediction = layers.Dense(1,
                              activation='sigmoid',
                              kernel_initializer=initializers.lecun_normal(),
                              name="prediction")(predict_vector)

    model_nuemf = models.Model(inputs=[user_input, item_input],
                               outputs=prediction)
    model_nuemf.compile(optimizer=optimizers.Adam(lr=learning_rate,
                                                  clipnorm=0.5),
                        loss='binary_crossentropy')

    return model_nuemf
Example #19
    def get_test_and_validation(csv_processed, train_size, test_size):
        train, test = train_test_split(csv_processed,
                                       train_size=train_size,
                                       test_size=test_size)
        batch_size = 892
        train_ds = DataframeHelper.df_to_dataset(train, batch_size=batch_size)
        val_ds = DataframeHelper.df_to_dataset(test, batch_size=batch_size)
        return train_ds, val_ds

    feature_layer = tf.keras.layers.DenseFeatures(
        create_input_features_layer())
    model = tf.keras.Sequential([
        feature_layer,
        layers.Dense(158,
                     activation='selu',
                     kernel_initializer=initializers.lecun_normal()),
        layers.Dropout(0.2),
        layers.Dense(168,
                     activation='swish',
                     kernel_initializer=initializers.GlorotNormal()),
        layers.Dropout(0.2),
        layers.Dense(178,
                     activation='swish',
                     kernel_initializer=initializers.GlorotNormal()),
        layers.Dropout(0.2),
        layers.Dense(188,
                     activation='selu',
                     kernel_initializer=initializers.lecun_normal()),
        layers.Dropout(0.2),
        layers.Dense(1,
                     activation="sigmoid",
Example #20
def segsrgan_generator_block(name: str, shape: tuple, kernel: int):
    gen_initializer = lecun_normal()
    inputs = Input(shape=(1, shape[0], shape[1], shape[2]))

    # Representation
    gennet = ReflectPadding3D(padding=3)(inputs)
    gennet = Conv3D(kernel,
                    7,
                    strides=1,
                    kernel_initializer=gen_initializer,
                    use_bias=False,
                    name=name + '_gen_conv1',
                    data_format='channels_first')(gennet)
    gennet = InstanceNormalization3D(name=name + '_gen_isnorm_conv1')(gennet)
    gennet = Activation('relu')(gennet)

    # Downsampling 1
    gennet = ReflectPadding3D(padding=1)(gennet)
    gennet = Conv3D(kernel * 2,
                    3,
                    strides=2,
                    kernel_initializer=gen_initializer,
                    use_bias=False,
                    name=name + '_gen_conv2',
                    data_format='channels_first')(gennet)
    gennet = InstanceNormalization3D(name=name + '_gen_isnorm_conv2')(gennet)
    gennet = Activation('relu')(gennet)

    # Downsampling 2
    gennet = ReflectPadding3D(padding=1)(gennet)
    gennet = Conv3D(kernel * 4,
                    3,
                    strides=2,
                    kernel_initializer=gen_initializer,
                    use_bias=False,
                    name=name + '_gen_conv3',
                    data_format='channels_first')(gennet)
    gennet = InstanceNormalization3D(name=name + '_gen_isnorm_conv3')(gennet)
    gennet = Activation('relu')(gennet)

    # Resnet blocks: 6 blocks, each with kernel * 4 filters (e.g. 8 * 4 = 32)
    gennet = resnet_blocks(gennet, kernel * 4, name=name + '_gen_block1')
    gennet = resnet_blocks(gennet, kernel * 4, name=name + '_gen_block2')
    gennet = resnet_blocks(gennet, kernel * 4, name=name + '_gen_block3')
    gennet = resnet_blocks(gennet, kernel * 4, name=name + '_gen_block4')
    gennet = resnet_blocks(gennet, kernel * 4, name=name + '_gen_block5')
    gennet = resnet_blocks(gennet, kernel * 4, name=name + '_gen_block6')

    # Upsampling 1
    gennet = UpSampling3D(size=(2, 2, 2), data_format='channels_first')(gennet)
    gennet = ReflectPadding3D(padding=1)(gennet)
    gennet = Conv3D(kernel * 2,
                    3,
                    strides=1,
                    kernel_initializer=gen_initializer,
                    use_bias=False,
                    name=name + '_gen_deconv1',
                    data_format='channels_first')(gennet)
    gennet = InstanceNormalization3D(name=name + '_gen_isnorm_deconv1')(gennet)
    gennet = Activation('relu')(gennet)

    # Upsampling 2
    gennet = UpSampling3D(size=(2, 2, 2), data_format='channels_first')(gennet)
    gennet = ReflectPadding3D(padding=1)(gennet)
    gennet = Conv3D(kernel,
                    3,
                    strides=1,
                    kernel_initializer=gen_initializer,
                    use_bias=False,
                    name=name + '_gen_deconv2',
                    data_format='channels_first')(gennet)
    gennet = InstanceNormalization3D(name=name + '_gen_isnorm_deconv2')(gennet)
    gennet = Activation('relu')(gennet)

    # Reconstruction
    gennet = ReflectPadding3D(padding=3)(gennet)
    gennet = Conv3D(1,
                    7,
                    strides=1,
                    kernel_initializer=gen_initializer,
                    use_bias=False,
                    name=name + '_gen_1conv',
                    data_format='channels_first')(gennet)

    predictions = gennet

    model = Model(inputs=inputs, outputs=predictions, name=name)
    return model