Example #1
    def build(self, input_shape):

        if not isinstance(input_shape, list) or len(input_shape) < 2:
            raise ValueError(
                'A `Field-Wise Bi-Interaction` layer should be called '
                'on a list of at least 2 inputs')

        self.num_fields = len(input_shape)
        embedding_size = input_shape[0][-1]

        self.kernel_mf = self.add_weight(
            name='kernel_mf',
            shape=(int(self.num_fields * (self.num_fields - 1) / 2), 1),
            initializer=tf.keras.initializers.Ones(),
            regularizer=None,
            trainable=True)

        self.kernel_fm = self.add_weight(
            name='kernel_fm',
            shape=(self.num_fields, 1),
            initializer=tf.keras.initializers.Constant(value=0.5),
            regularizer=None,
            trainable=True)
        if self.use_bias:
            self.bias_mf = self.add_weight(name='bias_mf',
                                           shape=(embedding_size,),
                                           initializer=Zeros())
            self.bias_fm = self.add_weight(name='bias_fm',
                                           shape=(embedding_size,),
                                           initializer=Zeros())

        super(FieldWiseBiInteraction,
              self).build(input_shape)  # Be sure to call this somewhere!
Example #2
 def init_from_pretrained_ssd(self, weights_path):
     self.ssd.load_weights(weights_path, by_name=True)
     for layer in self.ssd.layers:
         if layer in self.classification_headers or layer in self.regression_headers:
             if type(layer) == Sequential:
                 for sub_layer in layer.layers:
                     w = sub_layer.get_weights()
                     if w:
                         if len(w) == 2:  # Layer has bias
                             new_weights = []
                             new_weights.append(glorot_uniform()(w[0].shape))
                             new_weights.append(Zeros()(w[1].shape))
                         elif len(w) == 4:
                             new_weights = []
                             new_weights.append(Ones()(w[0].shape))
                             new_weights.append(Zeros()(w[1].shape))
                             new_weights.append(Zeros()(w[2].shape))
                             new_weights.append(Ones()(w[3].shape))
                         else:
                             new_weights = [glorot_uniform()(w[0].shape)]
                         sub_layer.set_weights(new_weights)
             else:
                 w = layer.get_weights()
                 if len(w) > 1:  # Layer has bias
                     new_weights = []
                     new_weights.append(glorot_uniform()(w[0].shape))
                     new_weights.append(Zeros()(w[1].shape))
                 else:
                     new_weights = [glorot_uniform()(w[0].shape)]
                 layer.set_weights(new_weights)
Example #3
def get_linear_logit(features,
                     feature_columns,
                     units=1,
                     use_bias=False,
                     seed=1024,
                     prefix='linear',
                     l2_reg=0):
    """
    features: OrderedDict in {key: Input} form
    feature_columns: the feature definitions (after conversion to Input layers)
    """
    linear_feature_columns = copy(feature_columns)
    for i in range(len(linear_feature_columns)):
        if isinstance(linear_feature_columns[i], SparseFeat):
            linear_feature_columns[i] = linear_feature_columns[i]._replace(
                embedding_dim=1, embeddings_initializer=Zeros())
        if isinstance(linear_feature_columns[i], VarLenSparseFeat):
            linear_feature_columns[i] = linear_feature_columns[i]._replace(
                sparsefeat=linear_feature_columns[i].sparsefeat._replace(
                    embedding_dim=1, embeddings_initializer=Zeros()))

    linear_emb_list = [
        input_from_feature_columns(features,
                                   linear_feature_columns,
                                   l2_reg,
                                   seed,
                                   prefix=prefix + str(i))[0]
        for i in range(units)
    ]
    _, dense_input_list = input_from_feature_columns(features,
                                                     linear_feature_columns,
                                                     l2_reg,
                                                     seed,
                                                     prefix=prefix)

    linear_logit_list = []
    for i in range(units):

        if len(linear_emb_list[i]) > 0 and len(dense_input_list) > 0:
            sparse_input = concat_func(linear_emb_list[i])
            dense_input = concat_func(dense_input_list)
            linear_logit = Linear(l2_reg, mode=2, use_bias=use_bias,
                                  seed=seed)([sparse_input, dense_input])
        elif len(linear_emb_list[i]) > 0:
            sparse_input = concat_func(linear_emb_list[i])
            linear_logit = Linear(l2_reg, mode=0, use_bias=use_bias,
                                  seed=seed)(sparse_input)
        elif len(dense_input_list) > 0:
            dense_input = concat_func(dense_input_list)
            linear_logit = Linear(l2_reg, mode=1, use_bias=use_bias,
                                  seed=seed)(dense_input)
        else:
            # raise NotImplementedError
            return add_func([])
        linear_logit_list.append(linear_logit)

    return concat_func(linear_logit_list)
Example #4
def get_linear_logit(features,
                     feature_columns,
                     units=1,
                     use_bias=False,
                     seed=1024,
                     prefix='linear',
                     l2_reg=0):
    # units is the number of linear logits: the linear logit can be computed in several subspaces (with different weights) and the results concatenated to increase model capacity
    linear_feature_columns = copy(feature_columns)
    for i in range(len(linear_feature_columns)):
        # Linear part for (single- and multi-valued) sparse features: implemented here as length-1 embeddings, which is equivalent to a per-category weight (rather than feeding one-hot inputs!)
        if isinstance(linear_feature_columns[i], SparseFeat):
            linear_feature_columns[i] = linear_feature_columns[i]._replace(
                embedding_dim=1, embeddings_initializer=Zeros())
        if isinstance(linear_feature_columns[i], VarLenSparseFeat):
            linear_feature_columns[i] = linear_feature_columns[i]._replace(
                sparsefeat=linear_feature_columns[i].sparsefeat._replace(
                    embedding_dim=1, embeddings_initializer=Zeros()))

    linear_emb_list = [
        input_from_feature_columns(features,
                                   linear_feature_columns,
                                   l2_reg,
                                   seed,
                                   prefix=prefix + str(i))[0]
        for i in range(units)
    ]
    _, dense_input_list = input_from_feature_columns(features,
                                                     linear_feature_columns,
                                                     l2_reg,
                                                     seed,
                                                     prefix=prefix)

    linear_logit_list = []
    for i in range(units):  # one logit per subspace

        if len(linear_emb_list[i]) > 0 and len(dense_input_list) > 0:
            sparse_input = concat_func(linear_emb_list[i])
            dense_input = concat_func(dense_input_list)
            linear_logit = Linear(l2_reg, mode=2, use_bias=use_bias,
                                  seed=seed)([sparse_input, dense_input])
        elif len(linear_emb_list[i]) > 0:
            sparse_input = concat_func(linear_emb_list[i])
            linear_logit = Linear(l2_reg, mode=0, use_bias=use_bias,
                                  seed=seed)(sparse_input)
        elif len(dense_input_list) > 0:
            dense_input = concat_func(dense_input_list)
            linear_logit = Linear(l2_reg, mode=1, use_bias=use_bias,
                                  seed=seed)(dense_input)
        else:
            # raise NotImplementedError
            return add_func([])
        linear_logit_list.append(linear_logit)

    return concat_func(linear_logit_list)  # concatenate
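
The comments above rely on the fact that a length-1 embedding lookup is the same as multiplying a one-hot vector by a weight vector. A minimal standalone sketch with hypothetical sizes (TensorFlow 2.x assumed):

import tensorflow as tf

# Hypothetical vocabulary size and ids, for illustration only.
vocab_size = 5
ids = tf.constant([1, 3])

emb = tf.keras.layers.Embedding(vocab_size, 1)   # one scalar weight per category
lookup = tf.squeeze(emb(ids), axis=-1)           # (batch,)

w = emb.embeddings                               # (vocab_size, 1) weight table
one_hot = tf.one_hot(ids, vocab_size)            # (batch, vocab_size)
matmul = tf.squeeze(tf.matmul(one_hot, w), axis=-1)

# Both paths give the same per-category weight, so the linear part can be
# built from embedding_dim=1 lookups instead of explicit one-hot inputs.
tf.debugging.assert_near(lookup, matmul)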
Example #5
 def build(self, input_shape):
     self.bn = tf.keras.layers.BatchNormalization(
         axis=self.axis, epsilon=self.epsilon, center=False, scale=False)
     self.alphas = self.add_weight(shape=(input_shape[-1],), initializer=Zeros(),
                                   dtype=tf.float32, name='dice_alpha')  # name='alpha_'+self.name
     super(Dice, self).build(input_shape)  # Be sure to call this somewhere!
     self.uses_learning_phase = True
Example #6
 def build(self, input_shape):
     self.alphas = self.add_weight(shape=(input_shape[-1], ),
                                   initializer=Zeros(),
                                   dtype=tf.float32,
                                   name=self.name +
                                   'dice_alpha')  # name='alpha_'+self.name
     super(Dice, self).build(input_shape)  # Be sure to call this somewhere!
Example #7
    def build(self, input_shape):
        # here input_shape is the shape of the `inputs` argument the first time call() is run
        if len(input_shape) != 3:
            raise ValueError(
                "Unexpected inputs dimensions %d, expect to be 3 dimensions" %
                (len(input_shape)))

        embedding_size = int(input_shape[-1])

        self.attention_W = self.add_weight(
            shape=(embedding_size, self.attention_factor),
            initializer=glorot_normal(seed=self.seed),
            regularizer=l2(self.l2_reg_w),
            name="attention_W")
        self.attention_b = self.add_weight(shape=(self.attention_factor, ),
                                           initializer=Zeros(),
                                           name="attention_b")
        self.projection_h = self.add_weight(
            shape=(self.attention_factor, 1),
            initializer=glorot_normal(seed=self.seed),
            name="projection_h")
        self.projection_p = self.add_weight(
            shape=(embedding_size, 1),
            initializer=glorot_normal(seed=self.seed),
            name="projection_p")

        self.dropout = tf.keras.layers.Dropout(self.dropout_rate,
                                               seed=self.seed)

        self.tensordot = tf.keras.layers.Lambda(
            lambda x: tf.tensordot(x[0], x[1], axes=(-1, 0)))

        # Be sure to call this somewhere!
        super(AFMLayer, self).build(input_shape)
Example #8
    def build(self, input_shape):
        input_size = input_shape[-1]
        hidden_units = [int(input_size)] + list(self.hidden_units)
        self.kernels = [
            self.add_weight(name='kernel' + str(i),
                            shape=(hidden_units[i], hidden_units[i + 1]),
                            initializer=glorot_normal(seed=self.seed),
                            regularizer=l2(self.l2_reg),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        self.bias = [
            self.add_weight(name='bias' + str(i),
                            shape=(self.hidden_units[i], ),
                            initializer=Zeros(),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        if self.use_bn:
            self.bn_layers = [
                tf.keras.layers.BatchNormalization()
                for _ in range(len(self.hidden_units))
            ]

        self.dropout_layers = [
            tf.keras.layers.Dropout(self.dropout_rate, seed=self.seed + i)
            for i in range(len(self.hidden_units))
        ]

        self.activation_layers = [tf.keras.layers.Activation(self.activation) \
                                  for _ in range(len(self.hidden_units))]

        super(DNN, self).build(input_shape)
Example #9
    def build(self, input_shape):

        if self.use_bias:
            self.global_bias = self.add_weight(
                shape=(1,), initializer=Zeros(), name="global_bias")

        super(PredictionLayer, self).build(input_shape)
Example #10
    def build(self, input_shape):
        input_size = input_shape[-1]
        hidden_units = [int(input_size)] + list(self.hidden_units)

        # range(len(self.hidden_units))

        self.kernels = []
        self.bias = []

        for i in range(len(self.hidden_units)):
            kernel = self.add_weight(
                name='kernel' + str(i),
                shape=(hidden_units[i], hidden_units[i + 1]),
                initializer=glorot_normal(seed=self.seed),
                regularizer=l2(self.l2_reg),
                trainable=True
            )
            bias = self.add_weight(
                name='bias' + str(i),
                shape=(self.hidden_units[i],),
                initializer=Zeros(),
                trainable=True
            )

            self.kernels.append(kernel)
            self.bias.append(bias)

        if self.use_bn:
            # Use a comprehension, not list multiplication: [layer] * n would
            # reuse a single BatchNormalization object at every position.
            self.bn_layers = [tf.keras.layers.BatchNormalization()
                              for _ in range(len(self.hidden_units))]

        # Dropout requires a rate; self.dropout_rate is assumed to be set in __init__.
        self.dropout_layers = [tf.keras.layers.Dropout(self.dropout_rate, seed=self.seed + i)
                               for i in range(len(self.hidden_units))]

        self.activation_layers = [activation_layer(self.activation)
                                  for _ in range(len(self.hidden_units))]

        super(DNN, self).build(input_shape)  # Be sure to call this somewhere! (does this pass information along?)
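
The comprehensions above matter: multiplying a list literal, as in [tf.keras.layers.BatchNormalization()] * n, repeats a reference to one layer object, so every position would share the same weights and statistics. A quick standalone check:

import tensorflow as tf

shared = [tf.keras.layers.BatchNormalization()] * 3                    # three references, one layer
independent = [tf.keras.layers.BatchNormalization() for _ in range(3)]

print(shared[0] is shared[1])            # True  -> weights and statistics would be shared
print(independent[0] is independent[1])  # False -> each position gets its own layer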
Example #11
    def build(self, input_shape):

        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `LocalActivationUnit` layer should be called '
                             'on a list of 2 inputs')

        if len(input_shape[0]) != 3 or len(input_shape[1]) != 3:
            raise ValueError(
                "Unexpected inputs dimensions %d and %d, expect to be 3 dimensions"
                % (len(input_shape[0]), len(input_shape[1])))

        if input_shape[0][-1] != input_shape[1][-1] or input_shape[0][1] != 1:
            raise ValueError(
                'A `LocalActivationUnit` layer requires '
                'inputs of a two inputs with shape (None,1,embedding_size) and (None,T,embedding_size)'
                'Got different shapes: %s,%s' %
                (input_shape[0], input_shape[1]))
        size = 4 * \
               int(input_shape[0][-1]
                   ) if len(self.hidden_units) == 0 else self.hidden_units[-1]
        self.kernel = self.add_weight(
            shape=(size, 1),
            initializer=glorot_normal(seed=self.seed),
            name="kernel")
        self.bias = self.add_weight(shape=(1, ),
                                    initializer=Zeros(),
                                    name="bias")
        #self.dnn = DNN(self.hidden_units, self.activation, self.l2_reg,
        #               self.dropout_rate, self.use_bn, seed=self.seed)
        super(LocalActivationUnit,
              self).build(input_shape)  # Be sure to call this somewhere!
Example #12
    def build(self, input_shape):

        X, A = input_shape
        embedding_size = int(X[-1])
        self.weight = self.add_weight(
            name='weight',
            shape=[embedding_size, self.att_embedding_size * self.head_num],
            dtype=tf.float32,
            regularizer=l2(self.l2_reg),
            initializer=tf.keras.initializers.glorot_uniform())
        self.att_self_weight = self.add_weight(
            name='att_self_weight',
            shape=[1, self.head_num, self.att_embedding_size],
            dtype=tf.float32,
            regularizer=l2(self.l2_reg),
            initializer=tf.keras.initializers.glorot_uniform())
        self.att_neighs_weight = self.add_weight(
            name='att_neighs_weight',
            shape=[1, self.head_num, self.att_embedding_size],
            dtype=tf.float32,
            regularizer=l2(self.l2_reg),
            initializer=tf.keras.initializers.glorot_uniform())

        if self.use_bias:
            self.bias_weight = self.add_weight(
                name='bias',
                shape=[1, self.head_num, self.att_embedding_size],
                dtype=tf.float32,
                initializer=Zeros())
        self.in_dropout = Dropout(self.dropout_rate)
        self.feat_dropout = Dropout(self.dropout_rate, )
        self.att_dropout = Dropout(self.dropout_rate, )
        # Be sure to call this somewhere!
        super(GATLayer, self).build(input_shape)
Example #13
    def build(self, input_shapes):

        if self.feature_less:
            input_dim = int(input_shapes[0][-1])
        else:
            assert len(input_shapes) == 2
            features_shape = input_shapes[0]

            input_dim = int(features_shape[-1])

        self.kernel = self.add_weight(
            shape=(input_dim, self.units),
            initializer=glorot_uniform(seed=self.seed),
            regularizer=l2(self.l2_reg),
            name='kernel',
        )
        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.units, ),
                initializer=Zeros(),
                name='bias',
            )

        self.dropout = Dropout(self.dropout_rate, seed=self.seed)

        self.built = True
Example #14
    def build(self, input_shape):
        # if len(self.hidden_units) == 0:
        #     raise ValueError("hidden_units is empty")
        input_size = input_shape[-1]
        hidden_units = [int(input_size)] + list(self.hidden_units)
        self.kernels = [self.add_weight(name='kernel' + str(i),
                                        shape=(hidden_units[i], hidden_units[i + 1]),
                                        initializer=glorot_normal(seed=self.seed),
                                        regularizer=l2(self.l2_reg),
                                        trainable=True) for i in range(len(self.hidden_units))]
        self.bias = [self.add_weight(name='bias' + str(i),
                                     shape=(self.hidden_units[i],),
                                     initializer=Zeros(),
                                     trainable=True) for i in range(len(self.hidden_units))]
        if self.use_bn:
            self.bn_layers = [tf.keras.layers.BatchNormalization() for _ in range(len(self.hidden_units))]

        self.dropout_layers = [tf.keras.layers.Dropout(self.dropout_rate, seed=self.seed + i) for i in range(len(self.hidden_units))]

        self.activation_layers = [activation_layer(self.activation) for _ in range(len(self.hidden_units))]

        if self.output_activation:
            self.activation_layers[-1] = activation_layer(self.output_activation)

        super(DNN, self).build(input_shape)  # Be sure to call this somewhere!
Example #15
    def build(self, input_shape):

        if self.use_bias:
            self.global_bias = self.add_weight(
                shape=(self.num_class,), initializer=Zeros(), name="global_bias")

        # Be sure to call this somewhere!
        super(PredictionLayer, self).build(input_shape)
Example #16
def get_linear_logit(features,
                     linear_feature_columns,
                     units=1,
                     use_bias=False,
                     seed=1024,
                     prefix='linear',
                     l2_reg=0):

    for i in range(len(linear_feature_columns)):
        if linear_feature_columns[i]['feat_cat'] == 'sparse':
            linear_feature_columns[i]['embedding_dim'] = 3
            linear_feature_columns[i]['embeddings_initializer'] = Zeros()

    linear_emb_list = [
        input_from_feature_columns(features,
                                   linear_feature_columns,
                                   l2_reg,
                                   seed,
                                   prefix=prefix + str(i))[0]
        for i in range(units)
    ]
    _, dense_input_list = input_from_feature_columns(features,
                                                     linear_feature_columns,
                                                     l2_reg,
                                                     seed,
                                                     prefix=prefix)

    linear_logit_list = []
    for i in range(units):

        if len(linear_emb_list[i]) > 0 and len(dense_input_list) > 0:
            sparse_input = concat_func(linear_emb_list[i])
            dense_input = concat_func(dense_input_list)
            linear_logit = Linear(l2_reg, mode=2, use_bias=use_bias,
                                  seed=seed)([sparse_input, dense_input])
        elif len(linear_emb_list[i]) > 0:
            sparse_input = concat_func(linear_emb_list[i])
            linear_logit = Linear(l2_reg, mode=0, use_bias=use_bias,
                                  seed=seed)(sparse_input)
        elif len(dense_input_list) > 0:
            dense_input = concat_func(dense_input_list)
            linear_logit = Linear(l2_reg, mode=1, use_bias=use_bias,
                                  seed=seed)(dense_input)
        else:
            # raise NotImplementedError
            return add_func([])
        linear_logit_list.append(linear_logit)

    return concat_func(linear_logit_list)
Example #17
 def build(self, input_shape):
     self.gamma = self.add_weight(name='gamma',
                                  shape=input_shape[-1:],
                                  initializer=Ones(),
                                  trainable=True)
     self.beta = self.add_weight(name='beta',
                                 shape=input_shape[-1:],
                                 initializer=Zeros(),
                                 trainable=True)
     super(LayerNormalization, self).build(input_shape)
Example #18
    def build(self, input_shape):

        if len(input_shape) != 2:
            raise ValueError(
                "Unexpected inputs dimensions %d, expect to be 2 dimensions" %
                (len(input_shape), ))

        input_dimension = int(input_shape[-1])
        # Initialize expert weights (number of input features * number of units per expert * number of experts)
        self.expert_kernels = self.add_weight(
            name='expert_kernel',
            shape=(input_dimension, self.units, self.num_experts),
            initializer=glorot_normal(seed=self.seed),
            regularizer=l2(self.l2_reg),
            trainable=True)

        # Initialize expert bias (number of units per expert * number of experts)
        self.expert_bias = self.add_weight(name='expert_bias',
                                           shape=(self.units,
                                                  self.num_experts),
                                           initializer=Zeros(),
                                           trainable=True)

        # Initialize gate weights (number of input features * number of experts * number of tasks)
        self.gate_kernels = [
            self.add_weight(name='gate_kernel_task_{}'.format(i),
                            shape=(input_dimension, self.num_experts),
                            initializer=glorot_normal(seed=self.seed),
                            regularizer=l2(self.l2_reg),
                            trainable=True) for i in range(self.num_tasks)
        ]

        # Initialize gate bias (number of experts * number of tasks)
        self.gate_bias = [
            self.add_weight(name='gate_bias_task_{}'.format(i),
                            shape=(self.num_experts, ),
                            initializer=Zeros(),
                            trainable=True) for i in range(self.num_tasks)
        ]
        # Be sure to call this somewhere!
        super(MMoE, self).build(input_shape)
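
The comments above pin down the weight shapes; the forward computation is not part of this snippet, but a rough sketch of the standard MMoE combination those shapes imply (illustrative sizes, a single task, not this layer's actual call()):

import tensorflow as tf

# Illustrative sizes only: batch=2, input_dim=8, units=4, num_experts=3.
x = tf.random.normal([2, 8])
expert_kernels = tf.random.normal([8, 4, 3])
expert_bias = tf.zeros([4, 3])
gate_kernel = tf.random.normal([8, 3])           # one gate per task
gate_bias = tf.zeros([3])

# Experts: contract the feature axis -> (batch, units, num_experts).
expert_out = tf.nn.relu(tf.tensordot(x, expert_kernels, axes=1) + expert_bias)

# Gate for one task: softmax over experts -> (batch, num_experts).
gate = tf.nn.softmax(tf.matmul(x, gate_kernel) + gate_bias)

# Task output: gate-weighted sum of expert outputs -> (batch, units).
task_out = tf.einsum('bue,be->bu', expert_out, gate)
print(task_out.shape)   # (2, 4)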
Example #19
 def build(self, input_shape):
     self.neighbor_weights = self.add_weight(
         shape=(self.input_dim, self.units),
         initializer=glorot_uniform(seed=self.seed),
         regularizer=l2(self.l2_reg),
         name="neighbor_weights")
     if self.use_bias:
         self.bias = self.add_weight(shape=(self.units,),
                                     initializer=Zeros(),
                                     name="bias_weight")
     self.dropout = Dropout(self.dropout_rate)
     self.built = True
Example #20
    def build(self, input_shape):
        input_size = input_shape[-1]
        hidden_units = [int(input_size)] + list(self.hidden_size)
        self.kernels = [self.add_weight(name='kernel' + str(i),
                                        shape=(hidden_units[i], hidden_units[i+1]),
                                        initializer=glorot_normal(seed=self.seed),
                                        regularizer=l2(self.l2_reg),
                                        trainable=True) for i in range(len(self.hidden_size))]
        self.bias = [self.add_weight(name='bias' + str(i),
                                     shape=(self.hidden_size[i],),
                                     initializer=Zeros(),
                                     trainable=True) for i in range(len(self.hidden_size))]

        super(MLP, self).build(input_shape)  # Be sure to call this somewhere!
Example #21
    def build(self, input_shape):

        if not isinstance(input_shape, list) or len(input_shape) < 2:
            raise ValueError('A `AttentionalFM` layer should be called '
                             'on a list of at least 2 inputs')

        shape_set = set()
        reduced_input_shape = [shape.as_list() for shape in input_shape]
        for i in range(len(input_shape)):
            shape_set.add(tuple(reduced_input_shape[i]))

        if len(shape_set) > 1:
            raise ValueError('A `AttentionalFM` layer requires '
                             'inputs with same shapes '
                             'Got different shapes: %s' % (shape_set))

        if len(input_shape[0]) != 3 or input_shape[0][1] != 1:
            raise ValueError('A `AttentionalFM` layer requires '
                             'inputs of a list with same shape tensor like\
                             (None, 1, embedding_size)'
                             'Got different shapes: %s' % (input_shape[0]))

        embedding_size = int(input_shape[0][-1])

        self.attention_W = self.add_weight(
            shape=(embedding_size, self.attention_factor),
            initializer=glorot_normal(seed=self.seed),
            regularizer=l2(self.l2_reg_w),
            name="attention_W")
        self.attention_b = self.add_weight(shape=(self.attention_factor, ),
                                           initializer=Zeros(),
                                           name="attention_b")
        self.projection_h = self.add_weight(
            shape=(self.attention_factor, 1),
            initializer=glorot_normal(seed=self.seed),
            name="projection_h")
        self.projection_p = self.add_weight(
            shape=(embedding_size, 1),
            initializer=glorot_normal(seed=self.seed),
            name="projection_p")
        self.dropout = tf.keras.layers.Dropout(self.dropout_rate,
                                               seed=self.seed)

        self.tensordot = tf.keras.layers.Lambda(
            lambda x: tf.tensordot(x[0], x[1], axes=(-1, 0)))

        # Be sure to call this somewhere!
        super(AFMLayer, self).build(input_shape)
Example #22
    def build(self, input_shape):

        if len(input_shape) != 2:
            raise ValueError("Unexpected inputs dimensions %d, expect to be 2 dimensions" % (len(input_shape),))

        dim = input_shape[-1]
        self.kernels = [self.add_weight(name='kernel' + str(i),
                                        shape=(dim, 1),
                                        initializer=glorot_normal(seed=self.seed),
                                        regularizer=l2(self.l2_reg),
                                        trainable=True) for i in range(self.layer_num)]
        self.bias = [self.add_weight(name='bias' + str(i),
                                     shape=(dim, 1),
                                     initializer=Zeros(),
                                     trainable=True) for i in range(self.layer_num)]
        super(CrossNet, self).build(input_shape)  # Be sure to call this somewhere!
Example #23
    def build(self, input_shape):
        self.dense_layers = [
            Dense(self.input_dim,
                  activation="relu",
                  use_bias=True,
                  kernel_regularizer=l2(self.l2_reg))
        ]
        self.neighbor_weights = self.add_weight(
            shape=(self.input_dim * 2, self.output_dim),
            initializer=glorot_uniform(seed=self.seed),
            regularizer=l2(self.l2_reg),
            name="neighbor_weights")
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.output_dim, ),
                                        initializer=Zeros(),
                                        name="bias_weight")

        self.built = True
Example #24
    def build(self, input_shape):

        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `LocalActivationUnit` layer should be called '
                             'on a list of 2 inputs')

        if len(input_shape[0]) != 3 or len(input_shape[1]) != 3:
            raise ValueError(
                "Unexpected inputs dimensions %d and %d, expect to be 3 dimensions"
                % (len(input_shape[0]), len(input_shape[1])))

        if input_shape[0][-1] != input_shape[1][-1] or input_shape[0][1] != 1:
            raise ValueError(
                'A `LocalActivationUnit` layer requires '
                'inputs of a two inputs with shape (None,1,embedding_size) and (None,T,embedding_size)'
                'Got different shapes: %s,%s' %
                (input_shape[0], input_shape[1]))
        size = 4 * \
               int(input_shape[0][-1]
                   ) if len(self.hidden_units) == 0 else self.hidden_units[-1]
        self.kernel = self.add_weight(
            shape=(size, 1),
            initializer=glorot_normal(seed=self.seed),
            name="kernel")
        self.bias = self.add_weight(shape=(1, ),
                                    initializer=Zeros(),
                                    name="bias")
        self.dnn = DNN(self.hidden_units,
                       self.activation,
                       self.l2_reg,
                       self.dropout_rate,
                       self.use_bn,
                       seed=self.seed)

        self.dense = tf.keras.layers.Lambda(lambda x: tf.nn.bias_add(
            tf.tensordot(x[0], x[1], axes=(-1, 0)), x[2]))
        # Difference between tensordot and matmul:
        #   1. matmul(a, b) can only contract the last axis of a with the
        #      second-to-last axis of b.
        #   2. tensordot lets you choose the axes: it effectively moves the chosen
        #      axis to the end and then performs a matmul.
        # att_out is contracted with self.kernel via tensordot;
        # with axes (-1, 0) this is exactly the same as a matmul.

        super(LocalActivationUnit,
              self).build(input_shape)  # Be sure to call this somewhere!
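
A small standalone check of the tensordot/matmul relationship described in the comments above (illustrative shapes, TensorFlow 2.x assumed):

import tensorflow as tf

att_out = tf.random.normal([2, 5, 4])    # (batch, T, size) - illustrative shapes
kernel = tf.random.normal([4, 1])        # (size, 1)

# tensordot with axes=(-1, 0) contracts the last axis of att_out
# with the first axis of kernel -> (2, 5, 1).
a = tf.tensordot(att_out, kernel, axes=(-1, 0))

# Same result via matmul: flatten the leading axes, multiply, restore them.
b = tf.reshape(tf.matmul(tf.reshape(att_out, [-1, 4]), kernel), [2, 5, 1])

tf.debugging.assert_near(a, b)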
Example #25
    def build(self, input_shape):

        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `LocalActivationUnit` layer should be called '
                             'on a list of 2 inputs')

        if len(input_shape[0]) != 3 or len(input_shape[1]) != 3:
            raise ValueError(
                "Unexpected inputs dimensions %d and %d, expect to be 3 dimensions"
                % (len(input_shape[0]), len(input_shape[1])))

        if input_shape[0][-1] != input_shape[1][-1] or input_shape[0][1] != 1:
            raise ValueError(
                'A `LocalActivationUnit` layer requires '
                'inputs of a two inputs with shape (None,1,embedding_size) and (None,T,embedding_size)'
                'Got different shapes: %s,%s' %
                (input_shape[0], input_shape[1]))
        # Why 4x the embedding size when hidden_units is empty? Because the DNN is skipped and this layer is connected directly after the concatenated (raw + product + difference) features.
        size = 4 * \
               int(input_shape[0][-1]
                   ) if len(self.hidden_units) == 0 else self.hidden_units[-1]
        self.kernel = self.add_weight(
            shape=(size, 1),
            initializer=glorot_normal(seed=self.seed),
            name="kernel")  # add_weight()
        self.bias = self.add_weight(shape=(1, ),
                                    initializer=Zeros(),
                                    name="bias")
        self.dnn = DNN(self.hidden_units,
                       self.activation,
                       self.l2_reg,
                       self.dropout_rate,
                       self.use_bn,
                       seed=self.seed)
        # The layer after the DNN: it directly outputs the attention weight.
        self.dense = tf.keras.layers.Lambda(lambda x: tf.nn.bias_add(
            tf.tensordot(x[0], x[1], axes=(-1, 0)), x[2]))

        super(LocalActivationUnit,
              self).build(input_shape)  # Be sure to call this somewhere!
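
Where the factor of 4 in size comes from, per the comment above: the activation unit's input is typically the concatenation of the query, the keys, their difference, and their element-wise product, so the final projection sees 4 * embedding_size features. A sketch with illustrative shapes (not this layer's actual call()):

import tensorflow as tf

embedding_size, T = 8, 5                          # illustrative sizes only
query = tf.random.normal([2, 1, embedding_size])  # (batch, 1, E)
keys = tf.random.normal([2, T, embedding_size])   # (batch, T, E)

queries = tf.tile(query, [1, T, 1])               # repeat the query over the T keys
att_input = tf.concat([queries, keys, queries - keys, queries * keys], axis=-1)

print(att_input.shape[-1] == 4 * embedding_size)  # True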
Example #26
    def build(self, input_shape):
        if len(input_shape) != 3:
            raise ValueError("Unexpected inputs dimensions {}, expect to be 3 dimensions".format(len(input_shape)))

        self.field_nums = [input_shape[1].value]
        for i, size in enumerate(self.layer_size):
            self.filters.append(self.add_weight(name='filter' + str(i),
                                                shape=[1, self.field_nums[-1] * self.field_nums[0], size],
                                                dtype=tf.float32,
                                                regularizer=l2(self.l2_reg)))
            self.bias.append(self.add_weight(name='bias' + str(i), shape=[size], dtype=tf.float32,
                                             initializer=Zeros()))
            if self.split_half:
                if i != len(self.layer_size) - 1 and size % 2 > 0:
                    raise ValueError(
                        "layer_size must be even number except for the last layer when split_half=True")

                self.field_nums.append(size // 2)
            else:
                self.field_nums.append(size)
        self.activation_layers = [Activation(self.activation) for _ in self.layer_size]
        super().build(input_shape)  # Be sure to call this somewhere!
Example #27
    def build(self, input_shape):
        input_size = input_shape[-1]
        hidden_units = [int(input_size)] + list(self.hidden_units)
        self.kernels = [
            self.add_weight(name='kernel_{}'.format(i),
                            shape=(hidden_units[i], hidden_units[i + 1]),
                            initializer=glorot_uniform(seed=self.seed),
                            regularizer=l2(self.l2_reg),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        self.bias = [
            self.add_weight(name='bias_{}'.format(i),
                            shape=(self.hidden_units[i], ),
                            initializer=Zeros(),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        if self.use_bn:
            self.bn_layers = [
                BatchNormalization() for _ in range(len(self.hidden_units))
            ]

        self.dropout_layers = [
            Dropout(self.dropout_rate, seed=self.seed + i)
            for i in range(len(self.hidden_units))
        ]

        activations = {
            "leaky_relu": tf.keras.layers.LeakyReLU(),
            "swish": tf.nn.swish
        }
        self.activation_layer = activations[
            self.activation] if self.activation in activations else Activation(
                self.activation)

        super().build(input_shape)  # Be sure to call this somewhere!
Example #28
    def build(self, input_shape):
        # if len(self.hidden_units) == 0:
        #     raise ValueError("hidden_units is empty")
        input_size = input_shape[-1]
        hidden_units = [int(input_size)] + list(self.hidden_units)
        self.kernels = [
            self.add_weight(name='kernel' + str(i),
                            shape=(hidden_units[i], hidden_units[i + 1]),
                            initializer=glorot_normal(seed=self.seed),
                            regularizer=l2(self.l2_reg),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        self.bias = [
            self.add_weight(name='bias' + str(i),
                            shape=(self.hidden_units[i], ),
                            initializer=Zeros(),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        if self.use_bn:
            self.bn_layers = [
                tf.keras.layers.BatchNormalization()
                for _ in range(len(self.hidden_units))
            ]

        # AlphaDropout: 1. keeps the mean and variance unchanged  2. also preserves the self-normalizing property
        self.dropout_layers = [
            tf.keras.layers.AlphaDropout(self.dropout_rate, seed=self.seed + i)
            for i in range(len(self.hidden_units))
        ]

        self.activation_layers = [
            tf.keras.layers.Activation(self.activation)
            for _ in range(len(self.hidden_units))
        ]
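
On the AlphaDropout comment above: unlike plain Dropout, AlphaDropout targets SELU networks and keeps the activations' mean and variance roughly where they were, which preserves self-normalization. A small standalone illustration (TensorFlow 2.x assumed):

import tensorflow as tf

x = tf.random.normal([10000, 64])                 # roughly standard-normal activations
y = tf.keras.layers.AlphaDropout(0.3)(x, training=True)

# Mean and variance stay close to the input's; plain Dropout would shift them.
print(float(tf.reduce_mean(y)), float(tf.math.reduce_variance(y)))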
Example #29
    def build(self, input_shape):
        # if len(self.hidden_units) == 0:
        #     raise ValueError("hidden_units is empty")
        input_size = input_shape[-1]
        print(input_size, self.udg_embedding_size)
        hidden_units = [int(input_size) - self.udg_embedding_size] + list(
            self.hidden_units)
        udg_units = []
        for i, x in enumerate(hidden_units):
            tmp = []
            for j in range(self.udg_embedding_layer):
                if j == 0:
                    tmp.append(self.udg_embedding_size)
                else:
                    tmp.append(x)
            udg_units.append(tmp)
        print(udg_units)
        #udg_units = [[128,384,384],[128,200,200],[128,80,80]]
        self.udg_kernels = [
            self.add_weight(name='udg_kernel' + str(i) + '_' + str(j),
                            shape=(udg_units[i][j], hidden_units[i]),
                            initializer=glorot_normal(seed=self.seed),
                            regularizer=l2(self.l2_reg),
                            trainable=True) for i in range(len(udg_units))
            for j in range(3)
        ]
        self.udg_bias = [
            self.add_weight(name='udg_bias' + str(i) + '_' + str(j),
                            shape=(hidden_units[i], ),
                            initializer=glorot_normal(seed=self.seed),
                            regularizer=l2(self.l2_reg),
                            trainable=True) for i in range(len(udg_units))
            for j in range(3)
        ]
        self.kernels = [
            self.add_weight(name='kernel' + str(i),
                            shape=(hidden_units[i], hidden_units[i + 1]),
                            initializer=glorot_normal(seed=self.seed),
                            regularizer=l2(self.l2_reg),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        self.bias = [
            self.add_weight(name='bias' + str(i),
                            shape=(self.hidden_units[i], ),
                            initializer=Zeros(),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        if self.use_bn:
            self.bn_layers = [
                tf.keras.layers.BatchNormalization()
                for _ in range(len(self.hidden_units))
            ]

        self.dropout_layers = [
            tf.keras.layers.Dropout(self.dropout_rate, seed=self.seed + i)
            for i in range(len(self.hidden_units))
        ]

        self.udg_activation_layers = []
        for i, x in enumerate(hidden_units):
            for j in range(self.udg_embedding_layer):
                if j != self.udg_embedding_layer - 1:
                    self.udg_activation_layers.append(activation_layer('relu'))
                else:
                    self.udg_activation_layers.append(
                        activation_layer('sigmoid'))

        self.activation_layers = [
            activation_layer(self.activation)
            for _ in range(len(self.hidden_units))
        ]

        super(DNN_UDG,
              self).build(input_shape)  # Be sure to call this somewhere!
Example #30
    def build(self, input_shape):
        assert len(
            input_shape
        ) >= 3, "The input Tensor should have shape=[None, input_num_capsule, input_dim_capsule]"
        self.input_num_capsule = int(input_shape[1])
        self.input_dim_capsule = int(input_shape[2])

        # transmission matrix
        self.reweight_W = self.add_weight(
            shape=[self.input_dim_capsule, self.num_capsule, self.dim_capsule],
            initializer=glorot_uniform(seed=self.seed),
            name='reweight_W')

        self.num_fields = self.num_capsule
        self.kernel_mf = self.add_weight(
            name='kernel_mf',
            shape=(int(self.num_fields * (self.num_fields - 1) / 2), 1),
            initializer=tf.keras.initializers.Ones(),
            regularizer=None,
            trainable=True)

        self.kernel_fm = self.add_weight(
            name='kernel_fm',
            shape=(self.num_fields, 1),
            initializer=tf.keras.initializers.Constant(value=0.5),
            regularizer=None,
            trainable=True)

        # self-attention
        self.kernel_highint = self.add_weight(
            name='kernel_highint',
            shape=(self.num_fields, 1),
            initializer=tf.keras.initializers.Constant(value=0.5),
            regularizer=None,
            trainable=True)

        self.self_attention_factor = self.dim_capsule
        self.self_attention_layer = 1
        self.head_num = 2
        # embedding_size=self.self_attention_factor * self.head_num
        embedding_size = self.dim_capsule
        self.bias_mf = self.add_weight(name='bias_mf',
                                       shape=(embedding_size,),
                                       initializer=Zeros())
        self.bias_fm = self.add_weight(name='bias_fm',
                                       shape=(embedding_size,),
                                       initializer=Zeros())

        self.routing_init = self.add_weight(
            name="routing_init",
            shape=(self.num_capsule, self.input_num_capsule),
            initializer=tf.keras.initializers.TruncatedNormal(seed=self.seed,
                                                              stddev=10))

        self.bias_highint = self.add_weight(name='bias_highint',
                                            shape=(self.self_attention_factor *
                                                   self.head_num,),
                                            initializer=Zeros())
        self.built = True
        # Be sure to call this somewhere!
        super(CapsuleLayer, self).build(input_shape)