Example #1
from tensorflow.keras import initializers

def conv3x3(x, filters=64,
            strides=1, use_bias=False,
            kernel_initializer=initializers.VarianceScaling(
                scale=2.0, mode='fan_out'),
            name=None):
    # 3x3 convolution with He-style (scale=2.0, fan_out) initialization;
    # delegates to convnxn (see Example #5).
    return convnxn(x, filters, 3, strides, use_bias,
                   kernel_initializer, name)
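A minimal smoke test, assuming TensorFlow 2.x and that the convnxn helper from Example #5 is in scope; the layer name 'stem_conv' is an illustrative choice, not from the source.

import tensorflow as tf

inputs = tf.keras.Input(shape=(32, 32, 3))
outputs = conv3x3(inputs, filters=16, strides=2, name='stem_conv')
model = tf.keras.Model(inputs, outputs)
print(model.output_shape)  # (None, 16, 16, 16): 'same' padding halves spatial dims at stride 2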
Example #2
    def get_model(self):
        # Assumed imports (the enclosing class and module header are not shown):
        #   import tensorflow as tf
        #   from tensorflow.keras import initializers
        #   from tensorflow.keras.layers import Input, Embedding, Dense, concatenate
        #   from tensorflow.keras.models import Model
        #   from tensorflow.keras.regularizers import l2

        # Input layer
        user_input = Input(shape=(1,), dtype='int32', name='user_input')
        item_input = Input(shape=(1,), dtype='int32', name='item_input')
        text_input = Input(shape=(self.sim_feature_size,), dtype='float32', name='text_input')

        # Embedding layer
        MF_Embedding_User = Embedding(input_dim=self.num_users, output_dim=self.mf_embedding_dim, name='mf_embedding_user',
                                      embeddings_initializer=initializers.VarianceScaling(scale=0.01, distribution='normal'),
                                      embeddings_regularizer=l2(0.01), input_length=1)
        MF_Embedding_Item = Embedding(input_dim=self.num_items, output_dim=self.mf_embedding_dim, name='mf_embedding_item',
                                      embeddings_initializer=initializers.VarianceScaling(scale=0.01, distribution='normal'),
                                      embeddings_regularizer=l2(0.01), input_length=1)
        # MF part: Flatten squeezes the (1, dim) embedding output down to (dim,)
        mf_user_latent = tf.keras.layers.Flatten()(MF_Embedding_User(user_input))
        mf_item_latent = tf.keras.layers.Flatten()(MF_Embedding_Item(item_input))
        # Concatenate the user/item latents (classic MF would take their element-wise product instead)
        mf_vector = concatenate([mf_user_latent, mf_item_latent])

        for idx in range(len(self.mf_fc_unit_nums)):  # learn non-linear user-item interactions
            layer = Dense(self.mf_fc_unit_nums[idx], activation='relu', name="layer%d" % idx)
            mf_vector = layer(mf_vector)

        # Text part
        # text_input = Dense(10, activation='relu', kernel_regularizer=l2(0.01))(text_input)  # optionally pre-process the similarity features with an extra MLP

        # Concatenate the MF and text parts
        predict_vector = concatenate([mf_vector, text_input])

        for idx in range(len(self.final_MLP_layers)):  # MLP on top of the merged vector
            layer = Dense(self.final_MLP_layers[idx], activation='relu')  # name="layer%d" % idx
            predict_vector = layer(predict_vector)

        predict_vector = tf.keras.layers.Dropout(0.5)(predict_vector)  # dropout against overfitting

        # Final prediction layer
        if new_Para.param.final_activation == 'softmax':
            predict_vector = Dense(2, activation='softmax', name="prediction")(predict_vector)
        elif new_Para.param.final_activation == 'sigmoid':
            predict_vector = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name="prediction")(predict_vector)

        model = Model(inputs=[user_input, item_input, text_input], outputs=predict_vector)
        return model
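A hedged wiring sketch: the enclosing class and the new_Para config object are not shown, so the hyperparameter values and the SimpleNamespace stand-ins below are invented for illustration, and the method above is treated as a plain function with the namespace playing the role of self.

import types

# Hypothetical global config mirroring what the method reads.
new_Para = types.SimpleNamespace(param=types.SimpleNamespace(final_activation='sigmoid'))

cfg = types.SimpleNamespace(
    num_users=1000, num_items=500, mf_embedding_dim=16,
    mf_fc_unit_nums=[64, 32], sim_feature_size=25, final_MLP_layers=[32, 16])

model = get_model(cfg)  # cfg stands in for self
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()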
Example #3
from tensorflow.keras import initializers, regularizers
from tensorflow.keras.layers import Activation, BatchNormalization, Convolution2D

def conv2d_bn(x, n_filter, n_row, n_col, padding='same', stride=(1, 1), use_bias=False):
    # Conv -> BatchNorm -> ELU block with L2 weight decay and Lecun-style
    # (scale=1.0, fan_in, normal) initialization.
    x = Convolution2D(n_filter, (n_row, n_col), strides=stride, padding=padding, use_bias=use_bias,
                      kernel_regularizer=regularizers.l2(0.0004),
                      kernel_initializer=initializers.VarianceScaling(scale=1.0, mode='fan_in',
                                                                      distribution='normal',
                                                                      seed=None))(x)
    x = BatchNormalization()(x)
    x = Activation('elu')(x)  # Activation layer instead of raw tf.nn.elu keeps the model serializable
    return x
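A short sketch (assuming the imports above) chaining two conv2d_bn blocks; the shapes in the comments follow from 'same' padding.

import tensorflow as tf

inputs = tf.keras.Input(shape=(64, 64, 3))
x = conv2d_bn(inputs, 32, 3, 3, stride=(2, 2))  # -> (None, 32, 32, 32)
x = conv2d_bn(x, 64, 3, 3)                      # -> (None, 32, 32, 64)
model = tf.keras.Model(inputs, x)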
Example #4
    def build(self, input_shape):
        batch_size, input_dim, input_atoms = input_shape

        # Transformation kernel from input capsules to output capsules; the
        # small scale (0.1) keeps the initial capsule activations modest.
        self.kernel = self.add_weight(name="kernel", initializer=initializers.VarianceScaling(scale=0.1),
                                      shape=[*self.kernel_size, input_atoms, self.output_dim * self.output_atoms])
        # One bias per (output capsule, atom), initialized to a small constant.
        self.bias = self.add_weight(name="bias", initializer=initializers.Constant(value=0.1),
                                    shape=[self.output_dim, self.output_atoms])

        self.input_dim = input_dim
        self.input_atoms = input_atoms
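The rest of the capsule layer is not shown; as a standalone illustration of the scale=0.1 choice, this sketch compares the initializer against the Keras default (scale=1.0). The scale parameter multiplies the variance, so the standard deviation shrinks by a factor of sqrt(10).

import tensorflow as tf
from tensorflow.keras import initializers

init_small = initializers.VarianceScaling(scale=0.1)    # as in the snippet
init_default = initializers.VarianceScaling(scale=1.0)  # Keras default scale

w_small = init_small(shape=(3, 3, 8, 16))
w_default = init_default(shape=(3, 3, 8, 16))
print(float(tf.math.reduce_std(w_small)))    # roughly 1/sqrt(10) of the default
print(float(tf.math.reduce_std(w_default)))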
Example #5
from tensorflow.keras import initializers, layers

def convnxn(x, filters=64, kernel_size=3,
            strides=1, use_bias=False,
            kernel_initializer=initializers.VarianceScaling(
                scale=2.0, mode='fan_out'),
            name=None):
    # Generic n-by-n convolution with 'same' padding and He-style
    # (scale=2.0, fan_out) initialization.
    return layers.Conv2D(
        filters=filters, kernel_size=kernel_size,
        strides=strides, padding='SAME',
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        name=name)(x)
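A usage sketch, assuming TensorFlow 2.x: convnxn is the shared primitive behind conv3x3 in Example #1, and a 1x1 projection is just kernel_size=1. The layer names are illustrative.

import tensorflow as tf

inputs = tf.keras.Input(shape=(56, 56, 64))
x = convnxn(inputs, filters=128, kernel_size=1, name='proj_1x1')    # channel projection
x = convnxn(x, filters=128, kernel_size=3, strides=2, name='down')  # spatial downsampling
print(tf.keras.Model(inputs, x).output_shape)  # (None, 28, 28, 128)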
Example #6
    def build(self, input_shape):
        """Create the trainable layer weights (the embedding dictionary) here.

        The layer's __call__ method runs build automatically the first time
        the layer is called.
        """
        # Assumes: import tensorflow as tf; from tensorflow.keras import initializers as init
        input_shape = tf.TensorShape(input_shape)
        if input_shape.rank != 3:
            # expected shape: (num_fts, batch_size, input_dense_unit)
            raise ValueError("The input tensor must have rank 3")
        num_var = input_shape[0]
        shape = tf.TensorShape(
            [num_var, self.embedding_dim, self.num_embeddings])
        # Uniform VarianceScaling spreads the initial codebook entries evenly.
        initializer = init.VarianceScaling(distribution='uniform')
        self.embeddings = self.add_weight(name='embeddings',
                                          shape=shape,
                                          initializer=initializer,
                                          trainable=True)
        # Make sure to call `build` at the end, or set self.built = True.
        super(VectorQuantizer, self).build(input_shape)
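The rest of VectorQuantizer (its call method and constructor) is not shown; this hypothetical reduction keeps just enough of the same build logic to show the resulting codebook shape.

import tensorflow as tf
from tensorflow.keras import initializers as init

class DemoVQ(tf.keras.layers.Layer):
    """Hypothetical cut-down layer reusing the build pattern above."""

    def __init__(self, embedding_dim=8, num_embeddings=32, **kwargs):
        super().__init__(**kwargs)
        self.embedding_dim = embedding_dim
        self.num_embeddings = num_embeddings

    def build(self, input_shape):
        num_var = tf.TensorShape(input_shape)[0]
        self.embeddings = self.add_weight(
            name='embeddings',
            shape=[num_var, self.embedding_dim, self.num_embeddings],
            initializer=init.VarianceScaling(distribution='uniform'))
        super().build(input_shape)

layer = DemoVQ()
layer.build(tf.TensorShape([4, 16, 8]))  # (num_fts, batch_size, input_dense_unit)
print(layer.embeddings.shape)            # (4, 8, 32)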
Example #7
    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        if input_shape.rank != 3:
            raise ValueError("The input tensor must have rank 3")
        num_var = input_shape[0]
        shape = tf.TensorShape(
            [num_var, self.embedding_dim, self.num_embeddings])
        initializer = init.VarianceScaling(distribution='uniform')
        self.embeddings = self.add_weight(name='embeddings',
                                          shape=shape,
                                          initializer=initializer)
        # Exponential-moving-average statistics for the codebook; these are
        # updated manually rather than by the optimizer.
        self.ema_cluster_size = self.add_weight(
            name='ema_cluster_size',
            shape=[num_var, self.num_embeddings],
            initializer=init.get('zeros'))
        # Start the EMA weights at the current codebook values.
        self.ema_w = self.add_weight(name='ema_dw', shape=shape)
        self.ema_w.assign(self.embeddings.read_value())
        super(VectorQuantizerEMA, self).build(input_shape)
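The call() that consumes these buffers is not shown. Under the standard VQ-VAE EMA scheme (an assumption here, following Sonnet's VectorQuantizerEMA and ignoring the leading num_var axis for clarity), the buffers would be updated each step roughly as follows.

import tensorflow as tf

def ema_codebook_update(embeddings, ema_cluster_size, ema_w,
                        flat_inputs, encodings, decay=0.99, epsilon=1e-5):
    # Hypothetical sketch: flat_inputs is (N, dim), encodings is the (N, K)
    # one-hot assignment of each input to its nearest codebook entry.
    ema_cluster_size.assign(
        decay * ema_cluster_size + (1 - decay) * tf.reduce_sum(encodings, axis=0))
    dw = tf.matmul(flat_inputs, encodings, transpose_a=True)  # (dim, K)
    ema_w.assign(decay * ema_w + (1 - decay) * dw)
    # Laplace smoothing avoids division by zero for unused codes.
    n = tf.reduce_sum(ema_cluster_size)
    k = tf.cast(tf.shape(ema_cluster_size)[-1], ema_cluster_size.dtype)
    cluster_size = (ema_cluster_size + epsilon) / (n + k * epsilon) * n
    embeddings.assign(ema_w / tf.expand_dims(cluster_size, 0))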
Example #8
    def build(self, input_shape):
        # input_shape is expected to arrive as a tf.TensorShape
        if input_shape.rank == 6:  # hidden-variable structure: collapse axis 1 for broadcasting
            ls = input_shape.as_list()
            ls[1] = 1
            input_shape = tf.TensorShape(ls)
        last_dim = input_shape[-1]

        # Build a custom initializer: scale the variance by the number of
        # potentials (n_pots) so the effective gain survives combining them;
        # "he" in the configured name selects fan_in with a doubled scale,
        # anything else falls back to fan_avg.
        n_pots = tf.reduce_prod(input_shape[:-2]).numpy()
        scale, mode = (2 * n_pots,
                       "fan_in") if "he" in self.kernel_initializer else (
                           n_pots, "fan_avg")
        distribution = "uniform" if "uniform" in self.kernel_initializer else "truncated_normal"
        self.kernel_initializer = init.VarianceScaling(scale, mode,
                                                       distribution)

        self.kernel = self.add_weight(name='kernel',
                                      shape=input_shape[:-3].concatenate(
                                          [1, last_dim, self.units]),
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      dtype=self.dtype,
                                      trainable=True)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=input_shape[:-3].concatenate(
                                            [1, 1, self.units]),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        dtype=self.dtype,
                                        trainable=True)
        else:
            self.bias = None
        self.built = True
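A standalone check of the initializer-derivation logic above, with hypothetical values: for a configured name of "he_uniform" and n_pots = 4, the branch yields VarianceScaling(8, 'fan_in', 'uniform').

from tensorflow.keras import initializers as init

kernel_initializer = "he_uniform"  # hypothetical configured name
n_pots = 4                         # product of the potential axes

scale, mode = (2 * n_pots, "fan_in") if "he" in kernel_initializer else (n_pots, "fan_avg")
distribution = "uniform" if "uniform" in kernel_initializer else "truncated_normal"
custom_init = init.VarianceScaling(scale, mode, distribution)
print(custom_init.get_config())
# {'scale': 8, 'mode': 'fan_in', 'distribution': 'uniform', 'seed': None}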