Example #1
    def __init__(self, bias=-3, **kwargs):
        super(HighwayNetStep, self).__init__(**kwargs)
        self.bias = initializers.Constant(value=bias)

        self.multiply1 = Multiply()
        self.multiply2 = Multiply()
        self.add = Add()
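Only the constructor survives in this excerpt; below is a hedged sketch of how the full subclassed layer might look. The build and call bodies are assumptions following the standard highway formulation, not the original code.

# Hypothetical completion of HighwayNetStep (build/call bodies assumed).
import tensorflow as tf
from tensorflow.keras import initializers
from tensorflow.keras.layers import Layer, Dense, Multiply, Add


class HighwayNetStep(Layer):
    def __init__(self, bias=-3, **kwargs):
        super(HighwayNetStep, self).__init__(**kwargs)
        self.bias = initializers.Constant(value=bias)
        self.multiply1 = Multiply()
        self.multiply2 = Multiply()
        self.add = Add()

    def build(self, input_shape):
        dim = input_shape[-1]
        # transform gate T(x); the negative bias favors carrying the input
        self.gate = Dense(dim, activation='sigmoid', bias_initializer=self.bias)
        self.transform = Dense(dim, activation='relu')
        super(HighwayNetStep, self).build(input_shape)

    def call(self, x):
        t = self.gate(x)                      # T(x)
        h = self.transform(x)                 # H(x)
        carry = 1.0 - t                       # C(x) = 1 - T(x)
        return self.add([self.multiply1([t, h]),
                         self.multiply2([carry, x])])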
Example #2
def highway(value, activation="tanh", transform_gate_bias=-1.0):
    dim = K.int_shape(value)[-1]
    transform_gate_bias_initializer = initializers.Constant(
        transform_gate_bias)
    transform_gate = Dense(
        units=dim, bias_initializer=transform_gate_bias_initializer)(value)
    transform_gate = Activation("sigmoid")(transform_gate)
    carry_gate = Lambda(lambda x: 1.0 - x,
                        output_shape=(dim, ))(transform_gate)
    transformed_data = Dense(units=dim)(value)
    transformed_data = Activation(activation)(transformed_data)
    transformed_gated = Multiply()([transform_gate, transformed_data])
    identity_gated = Multiply()([carry_gate, value])
    value = Add()([transformed_gated, identity_gated])
    return value
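A minimal usage sketch for the highway() helper above, assuming the usual Keras functional imports; the feature width of 128 is an arbitrary choice:

# Hypothetical call site for highway(); 128 units is an assumption.
from tensorflow.keras import Input, Model

inputs = Input(shape=(128,))
outputs = highway(inputs, activation='tanh', transform_gate_bias=-1.0)
model = Model(inputs, outputs)   # output width equals input width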
Example #3
    def build_model(self):
        in_id = Input(shape=(None, ), name='input_word_ids', dtype=tf.int32)
        in_mask = Input(shape=(None, ), name='input_mask', dtype=tf.int32)
        in_segment = Input(shape=(None, ),
                           name='input_type_ids',
                           dtype=tf.int32)
        in_valid_positions = Input(shape=(None, self.slots_num),
                                   name='valid_positions')
        bert_inputs = [in_id, in_mask, in_segment]
        inputs = bert_inputs + [in_valid_positions]

        if self.is_bert:
            name = 'BertLayer'
        else:
            name = 'AlbertLayer'
        bert_pooled_output, bert_sequence_output = hub.KerasLayer(
            self.bert_hub_path, trainable=True, name=name)(bert_inputs)

        intents_fc = Dense(self.intents_num,
                           activation='softmax',
                           name='intent_classifier')(bert_pooled_output)

        slots_output = TimeDistributed(
            Dense(self.slots_num, activation='softmax'))(bert_sequence_output)
        slots_output = Multiply(name='slots_tagger')(
            [slots_output, in_valid_positions])

        self.model = Model(inputs=inputs, outputs=[slots_output, intents_fc])
Example #4
def space_attention_block(input, filters, kernel_size):
    output_trunk = input

    x = Conv3D(filters=filters, kernel_size=kernel_size, padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(5e-4))(input)
    x = BatchNormalization(axis=-1)(x)
    x = Activation('relu')(x)

    x_1 = Conv3D(filters, kernel_size=kernel_size, strides=(2, 2, 1), padding='same')(x)
    x_1 = Activation('relu')(x_1)

    x_2 = Conv3D(filters * 2, kernel_size=kernel_size, strides=(2, 2, 1), padding='same')(x_1)
    x_2 = Activation('relu')(x_2)

    x_3 = Conv3DTranspose(filters=filters, kernel_size=kernel_size, strides=(2, 2, 1), padding='same')(x_2)
    x_3 = Activation('relu')(x_3)

    x_4 = Conv3DTranspose(filters=filters, kernel_size=kernel_size, strides=(2, 2, 1), padding='same')(x_3)
    x_4 = Activation('sigmoid')(x_4)
    # x_4 = Activation('relu')(x_4)

    output = Multiply()([x_4, x])

    # output = add([output_trunk, x_4])

    # output = Lambda(lambda x: x + 1)(x_4)
    # output = Multiply()([output, output_trunk])

    x_add = add([output, output_trunk])

    return x_add
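A quick sketch of calling the block above; note that filters must match the input channel count for the final add. All shapes below are assumptions:

# Hypothetical call site for space_attention_block().
from tensorflow.keras import Input, Model

video = Input(shape=(32, 32, 8, 16))   # (H, W, D, channels), assumed
out = space_attention_block(video, filters=16, kernel_size=(3, 3, 3))
model = Model(video, out)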
Example #5
    def build_model(self):
        in_id = Input(shape=(None, ), name='input_ids')
        in_mask = Input(shape=(None, ), name='input_masks')
        in_segment = Input(shape=(None, ), name='segment_ids')
        in_valid_positions = Input(shape=(None, self.slots_num),
                                   name='valid_positions')
        bert_inputs = [in_id, in_mask, in_segment, in_valid_positions]

        if self.is_bert:
            bert_pooled_output, bert_sequence_output = BertLayer(
                n_fine_tune_layers=self.num_bert_fine_tune_layers,
                bert_path=self.bert_hub_path,
                pooling='mean',
                name='BertLayer')(bert_inputs)
        else:
            bert_pooled_output, bert_sequence_output = AlbertLayer(
                fine_tune=True
                if self.num_bert_fine_tune_layers > 0 else False,
                albert_path=self.bert_hub_path,
                pooling='mean',
                name='AlbertLayer')(bert_inputs)

        intents_fc = Dense(self.intents_num,
                           activation='softmax',
                           name='intent_classifier')(bert_pooled_output)

        slots_output = TimeDistributed(
            Dense(self.slots_num, activation='softmax'))(bert_sequence_output)
        slots_output = Multiply(name='slots_tagger')(
            [slots_output, in_valid_positions])

        self.model = Model(inputs=bert_inputs,
                           outputs=[slots_output, intents_fc])
Example #6
    def build_model(self):

        in_id = Input(shape=(None, ), name='input_ids')
        in_mask = Input(shape=(None, ), name='input_masks')
        in_segment = Input(shape=(None, ), name='segment_ids')
        in_valid_positions = Input(shape=(None, self.slots_num),
                                   name='valid_positions')
        bert_inputs = [in_id, in_mask, in_segment, in_valid_positions]

        # the output of trained Bert
        bert_pooled_output, bert_sequence_output = BertLayer(
            n_fine_tune_layers=self.num_bert_fine_tune_layers,
            name='BertLayer')(bert_inputs)

        # add the additional layer for intent classification and slot filling
        intents_drop = Dropout(rate=0.1)(bert_pooled_output)
        intents_fc = Dense(self.intents_num,
                           activation='softmax',
                           name='intent_classifier')(intents_drop)

        slots_drop = Dropout(rate=0.1)(bert_sequence_output)
        slots_output = TimeDistributed(
            Dense(self.slots_num, activation='softmax'))(slots_drop)
        slots_output = Multiply(name='slots_tagger')(
            [slots_output, in_valid_positions])

        self.model = Model(inputs=bert_inputs,
                           outputs=[slots_output, intents_fc])
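        These joint intent/slot models are usually compiled with one loss per named output; a hedged sketch follows (the loss choices are assumptions, not from the source):

        # Hypothetical compile step; output names match the layers above.
        self.model.compile(optimizer='adam',
                           loss={'slots_tagger': 'categorical_crossentropy',
                                 'intent_classifier': 'categorical_crossentropy'},
                           metrics=['accuracy'])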
Example #7
 def __attention_3d_block(self, _lstm_output, _time_steps) -> Layer:
     """https://github.com/philipperemy/keras-attention-mechanism/blob/master/attention_lstm.py
     """
     att = Permute((2, 1))(_lstm_output)
     att = Reshape((_lstm_output.shape[2].value, _time_steps))(att)
     att = Dense(_time_steps, activation='softmax')(att)
     att_probs = Permute((2, 1), name='attention_vec')(att)
     return Multiply(name='attention_mul')([_lstm_output, att_probs])
Example #8
 def se_net(in_block, depth):
     x = GlobalAveragePooling2D()(in_block)
     x = Dense(depth // 16,
               activation='relu',
               kernel_initializer=default_init,
               bias_initializer='zeros')(x)
     x = Dense(depth, activation='sigmoid',
               kernel_regularizer=l2_reg)(x)
     return Multiply()([in_block, x])
Example #9
 def f(inputs):
     squeeze = GlobalAveragePooling2D()(inputs)
     out_dim = squeeze.get_shape().as_list()[-1]
     excitation = Dense(units=out_dim // ratio)(squeeze)  # integer division: Dense units must be an int
     excitation = Activation("relu")(excitation)
     excitation = Dense(units=out_dim)(excitation)
     excitation = Activation("sigmoid")(excitation)
     excitation = Reshape([1, 1, out_dim])(excitation)
     scale = Multiply()([inputs, excitation])
     return scale
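For reference, a self-contained version of the same squeeze-and-excitation pattern used by the closure above; the layer sizes are arbitrary assumptions:

# Standalone squeeze-and-excitation sketch (TF2/Keras).
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import (Activation, Dense,
                                     GlobalAveragePooling2D, Multiply, Reshape)

def se_block(inputs, ratio=16):
    channels = inputs.shape[-1]
    x = GlobalAveragePooling2D()(inputs)      # squeeze: (B, C)
    x = Dense(channels // ratio)(x)
    x = Activation('relu')(x)
    x = Dense(channels)(x)
    x = Activation('sigmoid')(x)              # per-channel gates in (0, 1)
    x = Reshape((1, 1, channels))(x)          # broadcast over H and W
    return Multiply()([inputs, x])            # excite: rescale channels

inp = Input(shape=(32, 32, 64))               # assumed feature-map shape
model = Model(inp, se_block(inp))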
Example #10
def relation_attention_module(glob_fts, fts):
    attention_weights = []
    for i in range(len(fts)):
        fts[i] = Concatenate()([fts[i], glob_fts])
        weight = attention_extractor(fts[i])
        fts[i] = Multiply()([fts[i], weight])
        attention_weights.append(weight)
    total_weights = Add()(attention_weights)
    numerator = Add()(
        fts)  # numerator of fraction in definition of P_ran (see paper)
    final_representation = Lambda(lambda_divide)([numerator, total_weights])
    return final_representation
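The attention_extractor and lambda_divide helpers used above (and again in Example #12) are not shown in the excerpt; here are plausible sketches consistent with how they are called, not the original definitions:

# Hypothetical helper definitions; the originals are not in the excerpt.
from tensorflow.keras.layers import Dense

def attention_extractor(features):
    # one sigmoid attention weight per sample, as the Multiply() usage implies
    return Dense(1, activation='sigmoid')(features)

def lambda_divide(tensors):
    numerator, total_weights = tensors
    return numerator / total_weights   # weighted average: divide by summed weights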
Example #11
    def get_categories_new_average_feature_extracter(self):
        """
        Feature extractor for categories_feature: unordered and very short. Tentatively the
        average of each category word's embedding; revise if it causes problems! Do not use
        add; instead sum along one axis and divide by each item's tag count.
        :return:
        """
        # The previous version only works when the embedding of padding index 0 is also 0.
        if self.categories_feature_extracter is None:
            categories_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
            categories_size_input_reciprocal = Input(shape=(1,), dtype='float32')
            categories_size_input = Lambda(lambda x: 1 / x)(categories_size_input_reciprocal)  # the tag count; cannot be used directly!!

            # size=len(np.nonzero(categories_input))  # word count; nonzero cannot be used on a tensor!
            embedding_layer = self.get_embedding_layer()
            embedded_results = embedding_layer(categories_input)  # each sample becomes 2D: (samples, sequence_length, output_dim)

            # slice and sum
            def slide_sum(paras):
                _embedded_results = paras[0]
                _categories_size_input = paras[1]

                def fn(elements):
                    _embedded_results_ = elements[0]
                    _categories_size_input_ = K.cast(K.squeeze(elements[1], axis=0), tf.int32)
                    # exact cause unknown; bug: From merging shape 0 with other shapes. for 'model_2_2/map/while/strided_slice/stack_1' (op: 'Pack') with input shapes: [1], [].
                    if len(_categories_size_input_.shape) == 1:
                        _categories_size_input_ = K.cast(K.squeeze(_categories_size_input_, axis=0), tf.int32)
                    # print('_embedded_results_1',_embedded_results_)
                    # print('_categories_size_input_', _categories_size_input_)

                    # def slice2D(x, index):
                    #     return x[150-index:, :]
                    # embedded_results_ = Lambda(slice2D, arguments={'index':_categories_size_input_})(_embedded_results_)  # slice, 2D

                    _embedded_results_ = _embedded_results_[MAX_SEQUENCE_LENGTH - _categories_size_input_:, :]  # slice, 2D
                    # print('_embedded_results_2',_embedded_results_)
                    _embedded_results_ = Lambda(lambda x: K.sum(x, axis=0))(_embedded_results_)
                    # print('_embedded_results_3', _embedded_results_)

                    return _embedded_results_

                return K.map_fn(fn, (_embedded_results, _categories_size_input), dtype=(tf.float32))

            embedded_results = Lambda(slide_sum)([embedded_results, categories_size_input])
            # print('after reduce_sum:', embedded_results)
            embedded_results = Multiply()([embedded_results, categories_size_input_reciprocal])
            # print('after divide, embedded_results:', embedded_results)
            self.categories_feature_extracter = Model(
                inputs=[categories_input, categories_size_input_reciprocal],
                outputs=[embedded_results], name='categories_feature_extracter')

            # print('build new_average_feature_extracter,done!')
        return self.categories_feature_extracter
Example #12
def self_attention_module(crops, CNN):
    attention_weights = []
    for i in range(len(crops)):
        crops[i] = CNN(crops[i])
        weight = attention_extractor(crops[i])
        crops[i] = Multiply()([crops[i], weight])
        attention_weights.append(weight)
    total_weights = Add()(attention_weights)
    numerator = Add()(
        crops)  # numerator of fraction in definition of F_m (see paper)
    global_feature_representation = Lambda(lambda_divide)(
        [numerator, total_weights])
    return global_feature_representation
Example #13
    def squeeze_excitation_layer(self, input_x, out_dim, ratio, layer_name):
        with tf.name_scope(layer_name):

            squeeze = GlobalAveragePooling3D()(input_x)

            excitation = Dense(units=out_dim // ratio)(squeeze)  # integer division: Dense units must be an int
            excitation = Activation("relu")(excitation)
            excitation = Dense(units=out_dim)(excitation)
            excitation = Activation("sigmoid")(excitation)
            excitation = Reshape([1, 1, 1, out_dim])(excitation)
            scale = Multiply()([input_x, excitation])

            return scale
Example #14
    def get_model(self):
        num_layer = len(self.layers)  # Number of layers in the MLP
        # Input variables
        user_input = Input(shape=(1,), dtype='int32', name='user_input')
        item_input = Input(shape=(1,), dtype='int32', name='item_input')

        # Embedding layer
        MF_Embedding_User = Embedding(input_dim=self.num_users, output_dim=self.mf_embedding_dim, name='mf_embedding_user',
                                      embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                      embeddings_regularizer=l2(self.reg_mf), input_length=1) #

        MF_Embedding_Item = Embedding(input_dim=self.num_items, output_dim=self.mf_embedding_dim, name='mf_embedding_item',
                                      embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                      embeddings_regularizer=l2(self.reg_mf), input_length=1) #

        MLP_Embedding_User = Embedding(input_dim=self.num_users, output_dim=int(self.mf_fc_unit_nums[0] / 2), name="mlp_embedding_user",
                                       embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                       embeddings_regularizer=l2(self.reg_layers[0]), input_length=1) #

        MLP_Embedding_Item = Embedding(input_dim=self.num_items, output_dim=int(self.mf_fc_unit_nums[0] / 2), name='mlp_embedding_item',
                                       embeddings_initializer=initializers.VarianceScaling(scale=0.01,distribution='normal'),
                                       embeddings_regularizer=l2(self.reg_layers[0]), input_length=1) #

        # MF part
        mf_user_latent = tf.keras.layers.Flatten()(MF_Embedding_User(user_input))
        mf_item_latent = tf.keras.layers.Flatten()(MF_Embedding_Item(item_input))
        #   mf_vector = merge([mf_user_latent, mf_item_latent], mode='mul')  # element-wise multiply
        mf_vector=Multiply()([mf_user_latent, mf_item_latent])

        # MLP part
        mlp_user_latent = tf.keras.layers.Flatten()(MLP_Embedding_User(user_input))
        mlp_item_latent = tf.keras.layers.Flatten()(MLP_Embedding_Item(item_input))
        #   mlp_vector = merge([mlp_user_latent, mlp_item_latent], mode='concat')
        mlp_vector = Concatenate()([mlp_user_latent, mlp_item_latent])

        for idx in range(1, num_layer):
            layer = Dense(self.mf_fc_unit_nums[idx],  activation='relu', name="layer%d" % idx) # kernel_regularizer=l2(reg_layers[idx]),
            mlp_vector = layer(mlp_vector)

        # Concatenate MF and MLP parts
        # mf_vector = Lambda(lambda x: x * alpha)(mf_vector)
        # mlp_vector = Lambda(lambda x : x * (1-alpha))(mlp_vector)
        #   predict_vector = merge([mf_vector, mlp_vector], mode='concat')
        predict_vector = Concatenate()([mf_vector, mlp_vector])

        # Final prediction layer
        prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name="prediction")(predict_vector)

        model = Model(inputs=[user_input, item_input], outputs=prediction)
        return model
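A hedged sketch of compiling and fitting the NeuMF-style model above; the optimizer, loss, and array names are assumptions:

# Hypothetical training call; 'recommender' is an instance of the class
# above, and user_ids/item_ids/labels are placeholder numpy arrays.
model = recommender.get_model()
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit([user_ids, item_ids], labels, batch_size=256, epochs=10)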
Example #15
def ESMM(dnn_feature_columns, tower_dnn_hidden_units=(256, 128, 64), l2_reg_embedding=0.00001, l2_reg_dnn=0,
         seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task_types=('binary', 'binary'),
         task_names=('ctr', 'ctcvr')):
    """Instantiates the Entire Space Multi-Task Model architecture.

    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param tower_dnn_hidden_units:  list,list of positive integer or empty list, the layer number and units in each layer of task DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector.
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN.
    :param seed: integer ,to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
    :param task_types:  str, indicating the loss of each tasks, ``"binary"`` for  binary logloss or  ``"regression"`` for regression loss.
    :param task_names: list of str, indicating the predict target of each tasks. default value is ['ctr', 'ctcvr']

    :return: A Keras model instance.
    """
    if len(task_names) != 2:
        raise ValueError("the length of task_names must be equal to 2")

    for task_type in task_types:
        if task_type != 'binary':
            raise ValueError("task must be binary in ESMM, {} is illegal".format(task_type))

    features = build_input_features(dnn_feature_columns)
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
                                                                         l2_reg_embedding, seed)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    ctr_output = DNN(tower_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(
        dnn_input)
    cvr_output = DNN(tower_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(
        dnn_input)

    ctr_logit = Dense(1, use_bias=False)(ctr_output)
    cvr_logit = Dense(1, use_bias=False)(cvr_output)

    ctr_pred = PredictionLayer('binary', name=task_names[0])(ctr_logit)
    cvr_pred = PredictionLayer('binary')(cvr_logit)

    ctcvr_pred = Multiply(name=task_names[1])([ctr_pred, cvr_pred])  # CTCVR = CTR * CVR

    model = Model(inputs=inputs_list, outputs=[ctr_pred, ctcvr_pred])
    return model
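Because ESMM returns both the ctr and ctcvr heads, training typically supplies one label set per output; a sketch with assumed data names:

# Hypothetical training call for ESMM; inputs and label arrays are placeholders.
model = ESMM(dnn_feature_columns)
model.compile(optimizer='adam',
              loss=['binary_crossentropy', 'binary_crossentropy'])
# ctr head is supervised with clicks; ctcvr head with click-and-conversion
model.fit(train_model_input, [click_labels, click_and_convert_labels],
          batch_size=256, epochs=1)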
Example #16
    def __init__(self, num_filters):
        super(SubAttentionBlock, self).__init__()

        # self.gating = gating_signal(256)
        self.att_conv1 = Conv2D(num_filters, (2, 2), strides=(2, 2))
        self.att_conv2 = Conv2D(num_filters, (1, 1), use_bias=True)

        self.att_add = Add()

        self.att_relu = Activation('relu')
        self.att_conv3 = Conv2D(1, (1, 1), use_bias=True)
        self.att_sigm = Activation('sigmoid')
        self.att_multiply = Multiply()

        self.att_conv4 = Conv2D(int(num_filters / 2), (1, 1))
        self.att_batch_norm = BatchNormalization()
Example #17
    def create(self):
        """ Creates MoE model.

        Returns
        -------
        model: Model
            A Mixture of Experts model
        """

        inputs = Input(shape=self.input_shape)
        if self.units is not None:
            gate_activations = Dense(
                self.units, kernel_regularizer=self.kernel_regularizer)(inputs)
            gate_activations = Dense(
                self.num_classes * (self.num_experts + 1),
                kernel_regularizer=self.kernel_regularizer)(gate_activations)
        else:
            gate_activations = Dense(
                self.num_classes * (self.num_experts + 1),
                kernel_regularizer=self.kernel_regularizer)(inputs)

        expert_activations = Dense(
            self.num_classes * self.num_experts,
            kernel_regularizer=self.kernel_regularizer)(inputs)

        # Batch x #Labels x (num_experts + 1)
        gate_reshaped = Reshape(
            (self.num_classes, self.num_experts + 1))(gate_activations)
        gating_distribution = Activation('softmax')(gate_reshaped)

        # Batch x #Labels x num_experts
        expert_reshaped = Reshape(
            (self.num_classes, self.num_experts))(expert_activations)
        expert_distribution = Activation('sigmoid')(expert_reshaped)

        slice_gating = Lambda(lambda x: x[:, :, :self.num_experts])(
            gating_distribution)
        probs = Multiply()([slice_gating, expert_distribution])

        outputs = Lambda(lambda x: K.sum(x, axis=2))(probs)  # K = keras.backend; the builtin sum has no axis argument
        model = Model(inputs, outputs)

        if self.summary:
            model.summary()

        return model
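A hedged sketch of driving the create() method above; the wrapper class name and constructor signature are assumptions, since only the method is shown:

# Hypothetical call site; MoE is a stand-in name for the enclosing class.
moe = MoE(input_shape=(1024,), num_classes=10, num_experts=4,
          units=512, kernel_regularizer=None, summary=True)
model = moe.create()
model.compile(optimizer='adam', loss='binary_crossentropy')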
Example #18
 def f(input_):
     residual = input_
     h = Convolution1D(n_atrous_filters,
                       atrous_filter_size,
                       padding='same',
                       dilation_rate=atrous_rate)(input_)
     tanh_out = Activation('tanh')(h)
     s = Convolution1D(n_atrous_filters,
                       atrous_filter_size,
                       padding='same',
                       dilation_rate=atrous_rate)(input_)
     sigmoid_out = Activation('sigmoid')(s)
     merged = Multiply()([tanh_out, sigmoid_out])
     skip_out = Convolution1D(1, 1, activation='relu',
                              padding='same')(merged)
     out = Add()([skip_out, residual])
     return out, skip_out
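A self-contained sketch of stacking such gated blocks WaveNet-style, summing the skip connections; the dilation schedule and widths are assumptions:

# Standalone gated-residual stack in the style of the closure above.
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Activation, Add, Convolution1D, Multiply

def gated_block(x, filters, kernel_size, rate):
    t = Convolution1D(filters, kernel_size, padding='same',
                      dilation_rate=rate)(x)
    s = Convolution1D(filters, kernel_size, padding='same',
                      dilation_rate=rate)(x)
    gated = Multiply()([Activation('tanh')(t), Activation('sigmoid')(s)])
    skip = Convolution1D(filters, 1, padding='same')(gated)
    return Add()([x, skip]), skip        # residual out, skip out

inp = Input(shape=(None, 16))            # (timesteps, channels), assumed
x, skips = inp, []
for rate in (1, 2, 4, 8):                # assumed dilation schedule
    x, skip = gated_block(x, filters=16, kernel_size=2, rate=rate)
    skips.append(skip)
out = Activation('relu')(Add()(skips))
model = Model(inp, out)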
Example #19
def MES(input_list):  #Motion estimation
    delta_c, I_c = Coarse_flow(input_list, upscale_factor=4)

    input_list.append(delta_c)
    input_list.append(I_c)

    delta_f = Fine_flow(input_list, upscale_factor=2)

    delta = Add()([delta_c, delta_f])
    delta_x = Multiply()([input_list[1], delta])
    delta_x = Add()([
        tf.expand_dims(delta_x[:, :, :, 0], -1),
        tf.expand_dims(delta_x[:, :, :, 1], -1)
    ])

    I_MES = Add()([input_list[1], delta_x])

    return I_MES
Example #20
def squeeze_and_excitation_layer(lst_layer, r=16, activation='relu'):
    """
	Squeeze and excitation layer.
	:param lst_layer: keras layer. Layer to append the SE block to.
	:param r: int. Ratio to squeeze the number of channels as defined in the original paper (default: 16)
	:param activation: str. Name of non-linear activation to use. ReLU is the default one.
	:return: keras layer. Next layer (output)
	"""
    num_channels = int(lst_layer.get_shape()[-1])
    gap = GlobalAveragePooling2D()(lst_layer)
    reduct = Dense(num_channels // r,
                   activation=activation,
                   kernel_initializer='orthogonal')(gap)
    expand = Dense(num_channels,
                   activation='sigmoid',
                   kernel_initializer='orthogonal')(reduct)
    return Multiply()(
        [lst_layer,
         expand])  # Broadcast adds dimensions at the beginning of expand
Example #21
def Coarse_flow(input_list, upscale_factor):
    input_shape = Concatenate()(input_list)

    conv2d_0 = Conv2D(filters=24,
                      kernel_size=(5, 5),
                      strides=(2, 2),
                      padding="same",
                      activation="relu")(input_shape)
    conv2d_1 = Conv2D(filters=24,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding="same",
                      activation="relu")(conv2d_0)
    conv2d_2 = Conv2D(filters=24,
                      kernel_size=(5, 5),
                      strides=(2, 2),
                      padding="same",
                      activation="relu")(conv2d_1)
    conv2d_3 = Conv2D(filters=24,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding="same",
                      activation="relu")(conv2d_2)
    conv2d_4 = Conv2D(filters=32,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding="same",
                      activation="tanh")(conv2d_3)

    pixel_shuffle = Lambda(lambda z: tf.nn.depth_to_space(z, upscale_factor))(
        conv2d_4)

    delta_x = Multiply()([input_list[1], pixel_shuffle])
    delta_x = Add()([
        tf.expand_dims(delta_x[:, :, :, 0], -1),
        tf.expand_dims(delta_x[:, :, :, 1], -1)
    ])

    I_coarse = Add()([input_list[1], delta_x])

    return pixel_shuffle, I_coarse
Example #22
    def build_model(self):
        in_id = Input(shape=(None, ), name='input_word_ids', dtype=tf.int32)
        in_mask = Input(shape=(None, ), name='input_mask', dtype=tf.int32)
        in_valid_positions = Input(shape=(None, self.slots_num),
                                   name='valid_positions')
        bert_inputs = [in_id, in_mask]
        inputs = bert_inputs + [in_valid_positions]

        bert_sequence_output = self.trans_model(bert_inputs)[0]
        #        bert_pooled_output = Lambda(function=lambda x: tf.keras.backend.mean(x, axis=1))(bert_sequence_output)
        bert_pooled_output = GlobalAveragePooling1D()(bert_sequence_output)

        intents_fc = Dense(self.intents_num,
                           activation='softmax',
                           name='intent_classifier')(bert_pooled_output)

        slots_output = TimeDistributed(
            Dense(self.slots_num, activation='softmax'))(bert_sequence_output)
        slots_output = Multiply(name='slots_tagger')(
            [slots_output, in_valid_positions])

        self.model = Model(inputs=inputs, outputs=[slots_output, intents_fc])
Example #23
    def build_model(self):
        in_id = Input(shape=(None, ), name='input_word_ids', dtype=tf.int32)
        in_mask = Input(shape=(None, ), name='input_mask', dtype=tf.int32)
        in_segment = Input(shape=(None, ),
                           name='input_type_ids',
                           dtype=tf.int32)
        in_valid_positions = Input(shape=(None, self.slots_num),
                                   name='valid_positions')
        bert_inputs = [in_id, in_mask, in_segment]
        inputs = bert_inputs + [in_valid_positions]

        bert_sequence_output, bert_pooled_output = self.trans_model(
            bert_inputs)

        intents_fc = Dense(self.intents_num,
                           activation='softmax',
                           name='intent_classifier')(bert_pooled_output)

        slots_output = TimeDistributed(
            Dense(self.slots_num, activation='softmax'))(bert_sequence_output)
        slots_output = Multiply(name='slots_tagger')(
            [slots_output, in_valid_positions])

        self.model = Model(inputs=inputs, outputs=[slots_output, intents_fc])
Example #24
def iterative_net_depth(batch_size):

    # Image pair input branch
    # 256x192x(3*2)
    image_pair = Input(batch_shape=(batch_size, 192, 256, 6), name='image_pair')


    # Input(batch_shape=(batch_size, h, w, c))
    layer_1a = Conv2D(32, kernel_size=(9, 1), strides=(
        2, 1), activation='relu', padding='same', input_shape=(192, 256, 6))(image_pair)
    layer_1b = Conv2D(32, kernel_size=(1, 9), strides=(
        1, 2), activation='relu', padding='same')(layer_1a)

    # 128x96x32
    layer_2a = Conv2D(32, kernel_size=(7, 1), strides=(
        2, 1), activation='relu', padding='same')(layer_1b)
    layer_2b = Conv2D(32, kernel_size=(1, 7), strides=(
        1, 2), activation='relu', padding='same')(layer_2a)

    # Optical flow and conf input branch
    optical_flow_input = Input(batch_shape=(batch_size, 48, 64, 2), name='optical_flow_input')
    optical_flow_conf_input = Input(batch_shape=(batch_size, 48, 64, 2), name='confidence_input')
    prev_motion = Input(batch_shape=(batch_size, 6), name='prev_motion_input')

    depth_from_flow_and_motion = Lambda(helpers.depth_from_flow_and_motion)([optical_flow_input, prev_motion])

    second_image = Lambda(lambda x: slice(
        x, (0, 0, 0, 3), (-1, -1, -1, 3)))(image_pair)

    warped_2nd_image = Lambda(helpers.warped_image_from_flow)(
        [second_image, optical_flow_input])

    # Concatenate warped 2nd image, optical flow, optical flow confidence
    # and depth from flow and motion

    flow_conf_warped2nd_depth_merge = concatenate(
        [depth_from_flow_and_motion, optical_flow_input, optical_flow_conf_input, warped_2nd_image])

    layer_warped_input_a = Conv2D(32, kernel_size=(3, 1), strides=(
        1, 1), activation='relu', padding='same')(flow_conf_warped2nd_depth_merge)
    # layer_warped_input_b is fed forward later
    layer_warped_input_b = Conv2D(32, kernel_size=(1, 3), strides=(
        1, 1), activation='relu', padding='same')(layer_warped_input_a)
    # Concatenate input branches

    concat_input = concatenate([layer_warped_input_b, layer_2b])

    layer3a = Conv2D(64, kernel_size=(3, 1), strides=(
        1, 1), activation='relu', padding='same')(concat_input)
    # layer3b is fed forward later
    layer3b = Conv2D(64, kernel_size=(1, 3), strides=(
        1, 1), activation='relu', padding='same')(layer3a)

    layer4a = Conv2D(128, kernel_size=(5, 1), strides=(
        2, 1), activation='relu', padding='same')(layer3b)
    layer4b = Conv2D(128, kernel_size=(1, 5), strides=(
        1, 2), activation='relu', padding='same')(layer4a)

    layer5a = Conv2D(128, kernel_size=(3, 1), strides=(
        1, 1), activation='relu', padding='same')(layer4b)
    # layer5b is fed forward later
    layer5b = Conv2D(128, kernel_size=(1, 3), strides=(
        1, 1), activation='relu', padding='same')(layer5a)

    layer6a = Conv2D(256, kernel_size=(5, 1), strides=(
        2, 1), activation='relu', padding='same')(layer5b)
    layer6b = Conv2D(256, kernel_size=(1, 5), strides=(
        1, 2), activation='relu', padding='same')(layer6a)

    # 16x12x256

    layer7a = Conv2D(256, kernel_size=(3, 1), strides=(
        1, 1), activation='relu', padding='same')(layer6b)
    # layer7b is fed forward later
    layer7b = Conv2D(256, kernel_size=(1, 3), strides=(
        1, 1), activation='relu', padding='same')(layer7a)

    # 16x12x256

    layer8a = Conv2D(512, kernel_size=(3, 1), strides=(
        2, 1), activation='relu', padding='same')(layer7b)
    layer8b = Conv2D(512, kernel_size=(1, 3), strides=(
        1, 2), activation='relu', padding='same')(layer8a)

    layer9a = Conv2D(512, kernel_size=(3, 1), strides=(
        1, 1), activation='relu', padding='same')(layer8b)
    # layer9b is fed forward later
    layer9b = Conv2D(512, kernel_size=(1, 3), strides=(
        1, 1), activation='relu', padding='same')(layer9a)

    # End of encoder part
    # ------------------------------

    # Fully connected motion branch begins here
    #
    layer_motion1 = Conv2D(24, kernel_size=(3, 3), strides=(
        1, 1), activation='relu', padding='same')(layer9b)

    flattened_motion_layer = Flatten()(layer_motion1)

    layer_motion2 = Dense(1024)(flattened_motion_layer)

    layer_motion3 = Dense(128)(layer_motion2)

    layer_motion4 = Dense(7)(layer_motion3)

    motion_output = Lambda(lambda x: slice(x, (0, 0), (-1, 6)), name='motion_output')(layer_motion4)

    scale = Lambda(lambda x: slice(x, (0, 6), (-1, -1)))(layer_motion4)

    # End of motion branch
    # ------------------------------

    upconv1 = Conv2DTranspose(256, kernel_size=(4, 4), strides=(
        2, 2),  padding='same', activation='relu')(layer9b)

    upconv1_merge = concatenate([upconv1, layer7b])  # changed

    upconv2 = Conv2DTranspose(128, kernel_size=(4, 4), strides=(
        2, 2),  padding='same', activation='relu')(upconv1)

    upconv2_merge = concatenate([upconv2, layer5b])

    upconv3 = Conv2DTranspose(64, kernel_size=(4, 4), strides=(
        2, 2),  padding='same', activation='relu')(upconv2_merge)

    upconv3_merge = concatenate([upconv3, layer3b])

    layer10 = Conv2D(24, kernel_size=(3, 3), strides=(
        1, 1), activation='relu', padding='same')(upconv3_merge)

    layer11 = Conv2D(4, kernel_size=(3, 3), strides=(
        1, 1), activation=None, padding='same')(layer10)

    # Output :D

    depth = Lambda(lambda x: slice(
        x, (0, 0, 0, 0), (-1, -1, -1, 1)))(layer11)

    scale = Lambda(lambda x: expand_dims(x, 1))(scale)
    scale = Lambda(lambda x: expand_dims(x, 1))(scale)

    #scale = expand_dims(scale, 1)

    depth_output = Multiply(name='depth_output')([depth, scale])

    normals_output = Lambda(lambda x: slice(
        x, (0, 0, 0, 1), (-1, -1, -1, -1)), name='normals_output')(layer11)

    iterative_motion_depth_normal = Model(inputs=[image_pair, optical_flow_input, optical_flow_conf_input, prev_motion],
                                          outputs=[motion_output, depth_output, normals_output])

    return iterative_motion_depth_normal
Example #25
def test_delete_channels_merge_others(channel_index, data_format):
    layer_test_helper_merge_2d(Add(), channel_index, data_format)
    layer_test_helper_merge_2d(Multiply(), channel_index, data_format)
    layer_test_helper_merge_2d(Average(), channel_index, data_format)
    layer_test_helper_merge_2d(Maximum(), channel_index, data_format)
Example #26
def network():
    inputs = layers.Input(shape=(cfg().norm_h, cfg().norm_w, 3))
    inputs_depth = layers.Input(shape=(cfg().norm_h, cfg().norm_w, 3))

    print("")
    print("")
    print("")
    print("")
    print("")
    print("")
    print("inputs = ", inputs.shape)
    print("inputs_depth = ", inputs_depth.shape)
    print("")
    print("")
    print("")
    print("")
    print("")

    ######################################### 1 ######################################
    x = _conv_block(inputs, 32, (3, 3), strides=(2, 2))
    x = _inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1)
    x = _inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2)
    x = _inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3)
    print("x = ", x)

    ##################################################################################
    ######################################### 2 ######################################
    y = _conv_block(inputs_depth, 32, (3, 3), strides=(2, 2))
    y = _inverted_residual_block(y, 16, (3, 3), t=1, strides=1, n=1)
    y = _inverted_residual_block(y, 24, (3, 3), t=6, strides=2, n=2)
    y = _inverted_residual_block(y, 32, (3, 3), t=6, strides=2, n=3)
    print("y = ", y)

    ##################################################################################
    ################################### COMBINE ######################################
    # Concat
    #xy = layers.Concatenate()([x, y])
    # Kali
    xy = Multiply()([x, y])
    ##################################################################################

    ############################## EARLY FUSION ######################################
    xy = _inverted_residual_block(xy, 64, (3, 3), t=6, strides=2, n=4)
    xy = _inverted_residual_block(xy, 96, (3, 3), t=6, strides=1, n=3)
    xy = _inverted_residual_block(xy, 160, (3, 3), t=6, strides=2, n=3)
    xy = _inverted_residual_block(xy, 320, (3, 3), t=6, strides=1, n=1)

    xy = _conv_block(xy, 1280, (1, 1), strides=(1, 1))
    xy = layers.GlobalAveragePooling2D()(xy)
    xy = layers.Reshape((1, 1, 1280))(xy)
    xy = layers.Dropout(0.3, name='Dropout')(xy)

    # Dimensions branch
    dimensions = layers.Conv2D(3, (1, 1), padding='same', name='d_conv')(xy)
    dimensions = layers.Reshape((3, ), name='dimensions')(dimensions)

    # Orientation branch
    orientation = layers.Conv2D(4, (1, 1), padding='same', name='o_conv')(xy)
    orientation = layers.Reshape((cfg().bin, -1))(orientation)
    orientation = layers.Lambda(l2_normalize, name='orientation')(orientation)

    # Confidence branch
    confidence = layers.Conv2D(cfg().bin, (1, 1),
                               padding='same',
                               name='c_conv')(xy)
    confidence = layers.Activation('softmax', name='softmax')(confidence)
    confidence = layers.Reshape((2, ), name='confidence')(confidence)

    # Build model
    model = tf.keras.Model([inputs, inputs_depth],
                           [dimensions, orientation, confidence])
    model.summary()

    return model
Example #27
    def get_model(self):
        if not self.model:
            mashup_id_input = Input(shape=(1, ),
                                    dtype='int32',
                                    name='mashup_id_input')
            api_id_input = Input(shape=(1, ),
                                 dtype='int32',
                                 name='api_id_input')
            inputs = [mashup_id_input, api_id_input]

            user_text_vec = self.user_text_feature_extractor()(mashup_id_input)
            item_text_vec = self.item_text_feature_extractor()(api_id_input)
            user_tag_vec = self.user_tag_feature_extractor()(mashup_id_input)
            item_tag_vec = self.item_tag_feature_extractor()(api_id_input)
            feature_list = [
                user_text_vec, item_text_vec, user_tag_vec, item_tag_vec
            ]

            if self.old_new == 'LR_PNCF':  # old scenario: a GMF-style two-tower model
                x = Concatenate(name='user_concatenate')(
                    [user_text_vec, user_tag_vec])
                y = Concatenate(name='item_concatenate')(
                    [item_text_vec, item_tag_vec])
                output = Multiply()([x, y])
                predict_result = Dense(1,
                                       activation='sigmoid',
                                       use_bias=False,
                                       kernel_initializer='lecun_uniform',
                                       name="prediction")(output)  # 参数学习权重,非线性
                self.model = Model(inputs=inputs,
                                   outputs=[predict_result],
                                   name='predict_model')
                return self.model

            elif self.old_new == 'new' and self.CI_handle_slt_apis_mode:
                # the already-selected services
                mashup_slt_apis_input = Input(
                    shape=(new_Para.param.slt_item_num, ),
                    dtype='int32',
                    name='slt_api_ids_input')
                mashup_slt_apis_input_3D = Reshape(
                    (new_Para.param.slt_item_num, 1))(mashup_slt_apis_input)
                # mashup_slt_apis_num_input = Input(shape=(1,), dtype='int32', name='mashup_slt_apis_num_input')
                inputs.append(mashup_slt_apis_input)
                mask = Lambda(lambda x: K.not_equal(x, self.all_api_num))(
                    mashup_slt_apis_input)  # (?, 3) !!!

                # selected services directly reuse item_feature_extractor
                slt_text_vec_list, slt_tag_vec_list = [], []
                for i in range(new_Para.param.slt_item_num):
                    x = Lambda(slice, arguments={'index': i})(
                        mashup_slt_apis_input_3D)  # (?,1,1)
                    x = Reshape((1, ))(x)
                    temp_item_text_vec = self.item_text_feature_extractor()(x)
                    temp_item_tag_vec = self.item_tag_feature_extractor()(x)
                    slt_text_vec_list.append(temp_item_text_vec)
                    slt_tag_vec_list.append(temp_item_tag_vec)

                if self.CI_handle_slt_apis_mode in ('attention', 'average'):
                    # text and tag each use their own attention block
                    slt_text_vec_list = [
                        Reshape((1, new_Para.param.embedding_dim))(key_2D)
                        for key_2D in slt_text_vec_list
                    ]
                    slt_tag_vec_list = [
                        Reshape((1, new_Para.param.embedding_dim))(key_2D)
                        for key_2D in slt_tag_vec_list
                    ]  # adds one dimension, e.g. [None,50]->[None,1,50]
                    text_keys_embs = Concatenate(axis=1)(
                        slt_text_vec_list)  # [?,3,50]
                    tag_keys_embs = Concatenate(axis=1)(
                        slt_tag_vec_list)  # [?,3,50]

                    if self.CI_handle_slt_apis_mode == 'attention':
                        query_item_text_vec = Lambda(
                            lambda x: tf.expand_dims(x, axis=1))(
                                item_text_vec)  # (?, 50)->(?, 1, 50)
                        query_item_tag_vec = Lambda(
                            lambda x: tf.expand_dims(x, axis=1))(item_tag_vec)
                        # pool the history into a single vector
                        text_hist = AttentionSequencePoolingLayer(
                            supports_masking=True)(
                                [query_item_text_vec, text_keys_embs],
                                mask=mask)
                        tag_hist = AttentionSequencePoolingLayer(
                            supports_masking=True)(
                                [query_item_tag_vec, tag_keys_embs], mask=mask)

                    else:  # 'average'
                        text_hist = SequencePoolingLayer(
                            'mean', supports_masking=True)(text_keys_embs,
                                                           mask=mask)
                        tag_hist = SequencePoolingLayer('mean',
                                                        supports_masking=True)(
                                                            tag_keys_embs,
                                                            mask=mask)

                    text_hist = Lambda(lambda x: tf.squeeze(x, axis=1))(
                        text_hist)  # (?, 1, 50)->(?, 50)
                    tag_hist = Lambda(lambda x: tf.squeeze(x, axis=1))(
                        tag_hist)

                elif self.CI_handle_slt_apis_mode == 'full_concate':
                    text_hist = Concatenate(axis=1)(
                        slt_text_vec_list)  # [?,150]
                    tag_hist = Concatenate(axis=1)(slt_tag_vec_list)  # [?,150]
                else:
                    raise ValueError('wrong CI_handle_slt_apis_mode!')

                feature_list.extend([text_hist, tag_hist])

            else:  # covers the new model without selected-service handling, as well as the old model
                pass
            feature_list = list(map(NoMask(),
                                    feature_list))  # DNN does not support masking, so the mask cannot be propagated further
            all_features = Concatenate(
                name='all_content_concatenate')(feature_list)

            output = DNN(self.content_fc_unit_nums[:-1])(all_features)
            output = Dense(self.content_fc_unit_nums[-1],
                           activation='relu',
                           kernel_regularizer=l2(new_Para.param.l2_reg),
                           name='text_tag_feature_extracter')(output)

            # output layer
            if new_Para.param.final_activation == 'softmax':
                predict_result = Dense(2,
                                       activation='softmax',
                                       name="prediction")(output)
            elif new_Para.param.final_activation == 'sigmoid':
                predict_result = Dense(1,
                                       activation='sigmoid',
                                       kernel_initializer='lecun_uniform',
                                       name="prediction")(output)

            # Model
            # if self.IfUniteNI:
            #     inputs.append(user_NI_input)
            self.model = Model(inputs=inputs,
                               outputs=[predict_result],
                               name='predict_model')

            for layer in self.model.layers:
                print(layer.name)
            print('built CI model, done!')
        return self.model
Example #28
def attention_block(input,
                    input_channels=None,
                    output_channels=None,
                    encoder_depth=1):
    """
    attention block
    https://arxiv.org/abs/1704.06904
    """

    p = 1
    t = 2
    r = 1

    if input_channels is None:
        input_channels = input.get_shape()[-1].value
    if output_channels is None:
        output_channels = input_channels

    # First Residual Block
    for i in range(p):
        input = residual_block(input)

    # Trunk Branch
    output_trunk = input
    for i in range(t):
        output_trunk = residual_block(output_trunk)

    # Soft Mask Branch

    ## encoder
    ### first down sampling
    output_soft_mask = MaxPool2D(padding='same')(input)  # 32x32
    for i in range(r):
        output_soft_mask = residual_block(output_soft_mask)

    skip_connections = []
    for i in range(encoder_depth - 1):

        ## skip connections
        output_skip_connection = residual_block(output_soft_mask)
        skip_connections.append(output_skip_connection)
        # print ('skip shape:', output_skip_connection.get_shape())

        ## down sampling
        output_soft_mask = MaxPool2D(padding='same')(output_soft_mask)
        for _ in range(r):
            output_soft_mask = residual_block(output_soft_mask)

            ## decoder
    skip_connections = list(reversed(skip_connections))
    for i in range(encoder_depth - 1):
        ## upsampling
        for _ in range(r):
            output_soft_mask = residual_block(output_soft_mask)
        output_soft_mask = UpSampling2D()(output_soft_mask)
        ## skip connections
        output_soft_mask = Add()([output_soft_mask, skip_connections[i]])

    ### last upsampling
    for i in range(r):
        output_soft_mask = residual_block(output_soft_mask)
    output_soft_mask = UpSampling2D()(output_soft_mask)

    ## Output
    output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask)
    output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask)
    output_soft_mask = Activation('sigmoid')(output_soft_mask)

    # Attention: (1 + output_soft_mask) * output_trunk
    output = Lambda(lambda x: x + 1)(output_soft_mask)
    output = Multiply()([output, output_trunk])  #

    # Last Residual Block
    for i in range(p):
        output = residual_block(output)

    return output
Example #29
        kl_batch = -.5 * backend.sum(
            1 + log_var - backend.square(mu) - backend.exp(log_var), axis=-1)

        self.add_loss(backend.mean(kl_batch), inputs=inputs)

        return inputs


# Loss Function #
# y_true - True labels #
# y_pred - Predicted labels #
def nll(y_true, y_pred):
    return backend.sum(backend.binary_crossentropy(y_true, y_pred), axis=-1)


# Encoder #
x = Input(shape=(original_dim, ))
h = Dense(intermediate_dim, activation='relu')(x)

z_mu = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)

(z_mu, z_log_var) = KLDivergenceLayer()([z_mu, z_log_var])

# Reparameterization trick #
z_sigma = Lambda(lambda t: backend.exp(.5 * t))(z_log_var)

eps = Input(tensor=backend.random_normal(
    stddev=epsilon_std, shape=(backend.shape(x)[0], latent_dim)))
z_eps = Multiply()([z_sigma, eps])
z = Add()([z_mu, z_eps])
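The excerpt ends at the sampled code z; a hedged sketch of the decoder and training step that would typically follow. The layer sizes reuse the snippet's own variables; Sequential/Model/Dense are assumed imported, and everything else is an assumption:

# Hypothetical continuation: decode z back to the input space and train
# with the nll loss defined above.
decoder = Sequential([
    Dense(intermediate_dim, input_dim=latent_dim, activation='relu'),
    Dense(original_dim, activation='sigmoid')
])
x_pred = decoder(z)

vae = Model(inputs=[x, eps], outputs=x_pred)
vae.compile(optimizer='rmsprop', loss=nll)   # KL term was added via add_loss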
Example #30
def Att(att_dim, inputs, name):
    V = inputs
    QK = Dense(att_dim, use_bias=False)(inputs)
    QK = Activation("softmax", name=name)(QK)
    MV = Multiply()([V, QK])
    return MV
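Finally, a minimal sketch of applying an attention helper like Att to a recurrent encoder; all shapes and layer sizes are assumptions:

# Hypothetical call site for Att(); shapes are placeholders.
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import LSTM

inp = Input(shape=(20, 8))                       # 20 timesteps, 8 features
h = LSTM(64, return_sequences=True)(inp)         # (B, 20, 64)
attended = Att(64, h, name='attention_weights')  # elementwise V * softmax(QK)
model = Model(inp, attended)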