Beispiel #1
0
def not_equal(f, other):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A python number or a tensor or a functional object.

    # Returns
        A Functional.
    """
    validate_functional(f)

    inputs = f.inputs.copy()
    # Pick the comparison once; a functional `other` is compared pairwise
    # against f's outputs, anything else is compared element-wise directly.
    if is_functional(other):
        inputs += to_list(other.inputs)
        compare = lambda x: K.cast_to_floatx(K.not_equal(x[0], x[1]))
    else:
        _warn_for_ndarray(other)
        compare = lambda x: K.cast_to_floatx(K.not_equal(x, other))

    # One Lambda layer per output of f.
    lmbd = [
        Lambda(compare, name=graph_unique_name("not_equal"))
        for _ in f.outputs
    ]

    Functional = f.get_class()
    return Functional(inputs=unique_tensors(inputs),
                      outputs=_apply_operation(lmbd, f, other),
                      layers=lmbd)
Beispiel #2
0
def accuracy_ignore_padding(y_true, y_pred):
    """Accuracy over positions whose true class is not 0 (treated as padding)."""
    predicted_classes = backend.argmax(y_pred, axis=-1)
    true_classes = backend.argmax(y_true, axis=-1)
    non_pad = backend.not_equal(true_classes, 0)

    # Compare predictions to targets only at the non-padding positions.
    matches = backend.equal(
        backend.array_ops.boolean_mask(predicted_classes, non_pad),
        backend.array_ops.boolean_mask(true_classes, non_pad))

    return backend.mean(matches)
Beispiel #3
0
    def call(self, inputs, **kwargs):
        """Embed each utterance group and average over non-padded entries.

        # assumes `inputs` is rank 4: (batch, n_groups, frames, features),
        # zero-padded along the group axis — TODO confirm with callers.
        """
        inputs_shape = K.shape(inputs)

        # 1 where a group contains any non-zero value, 0 for all-zero
        # (padding) groups; the keepdims/squeeze dance keeps a trailing
        # length-1 axis so the mask broadcasts over the embedding dim.
        mask = K.cast(K.squeeze(K.any(K.not_equal(inputs, 0.),
                                      axis=(-2, -1),
                                      keepdims=True),
                                axis=-1),
                      dtype=inputs.dtype)

        # Collapse batch and group axes so the parent network sees
        # (batch * n_groups, frames, features).
        inputs_to_lstm = K.reshape(inputs,
                                   (-1, inputs.shape[-2], inputs.shape[-1]))

        inputs_embed = super(InferenceSpeakerEmbedding,
                             self).call(inputs_to_lstm)

        # Restore the (batch, n_groups, embedding_dim) layout.
        inputs_embed = K.reshape(
            inputs_embed,
            (inputs_shape[0], inputs_shape[1], inputs_embed.shape[-1]))

        # Zero out embeddings of padded groups before averaging.
        inputs_embed = inputs_embed * mask

        # Number of real (non-padded) groups per batch element.
        n = K.sum(mask, axis=1)

        # Mean embedding over the real groups only.
        inputs_embed = K.sum(inputs_embed, axis=1) / n

        return inputs_embed
Beispiel #4
0
def masked_vgg_loss(y_true, y_pred):
    """Perceptual (VGG feature-space) loss that ignores masked pixels.

    Pixels of y_true equal to [-1, -1, 1] are zeroed in both tensors before
    feature extraction; the result is scaled by 0.006.
    """
    mask_value = K.constant([[[-1.0, -1.0, 1.0]]])
    valid = K.cast(K.not_equal(y_true, mask_value), K.floatx())
    pred_features = features_extractor(preprocess_vgg(valid * y_pred))
    true_features = features_extractor(preprocess_vgg(valid * y_true))
    return 0.006 * K.mean(K.square(pred_features - true_features), axis=-1)
Beispiel #5
0
def fractional_accuracy(y_true, y_pred):
    """Fraction of positions where the predicted class matches the true one."""
    true_cls = K.argmax(y_true, axis=-1)
    pred_cls = K.argmax(y_pred, axis=-1)
    hits = K.mean(K.sum(K.cast(K.equal(true_cls, pred_cls), tf.float32),
                        axis=-1))
    misses = K.mean(K.sum(K.cast(K.not_equal(true_cls, pred_cls), tf.float32),
                          axis=-1))
    return hits / (hits + misses)
def masked_mse(y_true, y_pred):
    """Mean squared error restricted to entries where y_true != 0.

    The denominator is clamped to at least 1 to avoid division by zero when
    a row is entirely masked.
    """
    valid = K.cast(K.not_equal(y_true, 0), K.floatx())
    squared_error = K.square(valid * (y_true - y_pred))
    denom = K.maximum(K.sum(valid, axis=-1), 1)
    return K.sum(squared_error, axis=-1) / denom
def mask_acc(y_true, y_pred):
    """Categorical accuracy that ignores positions whose true class is 0."""
    true_cls = K.argmax(y_true, axis=-1)
    pred_cls = K.argmax(y_pred, axis=-1)

    # 1 at positions that count, 0 at ignored (class-0) positions.
    keep = K.cast(K.not_equal(true_cls, 0), "int32")
    hits = keep * K.cast(K.equal(true_cls, pred_cls), "int32")
    # Clamp the denominator so an all-ignored batch does not divide by zero.
    return K.sum(hits) / K.maximum(K.sum(keep), 1)
Beispiel #8
0
def getMask(inputs):
    """Build a (batch, length, length) float attention mask from a padded
    integer sequence batch: 1 where both positions are non-padding (!= 0)."""
    valid = k.not_equal(inputs, 0)          # padding (0) -> False
    rows = k.expand_dims(valid, 1)          # (batch, 1, length)
    cols = tf.transpose(rows, [0, 2, 1])    # (batch, length, 1)
    # Pairwise AND: a cell is valid only when both its row and column
    # positions are valid.
    pair_mask = rows & cols                 # (batch, length, length)
    return tf.cast(pair_mask, dtype=tf.float32)
Beispiel #9
0
def top_k_accuracy(y_true, y_pred, mask_value=-1, k=5):
    """Top-K accuracy with masking.

    Positions where `y_true` equals `mask_value` are dropped before the
    top-k comparison.

    # Arguments
        y_true: Sparse (integer) labels with a trailing length-1 axis.
        y_pred: Class-score predictions.
        mask_value: Label value marking positions to ignore.
        k: Number of top predictions that count as a hit.

    # Returns
        Accuracy tensor; 0 when the masked result is NaN (empty batch).
    """
    mask = K.squeeze(K.not_equal(y_true, mask_value), axis=-1)
    acc = sparse_top_k_categorical_accuracy(tf.boolean_mask(y_true, mask),
                                            tf.boolean_mask(y_pred, mask),
                                            k=k)
    # `acc` is NaN when everything was masked out; report 0 instead.
    # tf.math.is_nan replaces the deprecated tf.is_nan alias (removed in
    # TF 2.x; tf.math.is_nan exists throughout TF 1.x as well).
    acc_filtered = tf.cond(tf.math.is_nan(acc),
                           lambda: tf.constant(0, tf.float32),
                           lambda: acc)
    return acc_filtered
Beispiel #10
0
    def call(self, inputs, **kwargs):
        """Embed both elements of an utterance-group pair in one shared batch.

        # assumes each pair element is rank 4:
        # (batch, n_groups, frames, features), zero-padded along the group
        # axis — TODO confirm with callers.
        """
        pair1, pair2 = inputs

        pair1_shape, pair2_shape = K.shape(pair1), K.shape(pair2)

        # 1 where a group contains any non-zero value, 0 for all-zero
        # (padding) groups; trailing length-1 axis kept for broadcasting.
        pair1_mask = K.cast(K.squeeze(K.any(K.not_equal(pair1, 0.),
                                            axis=(-2, -1),
                                            keepdims=True),
                                      axis=-1),
                            dtype=pair1.dtype)
        pair2_mask = K.cast(K.squeeze(K.any(K.not_equal(pair2, 0.),
                                            axis=(-2, -1),
                                            keepdims=True),
                                      axis=-1),
                            dtype=pair2.dtype)

        # Collapse batch and group axes so the parent network sees
        # (batch * n_groups, frames, features).
        pair1_to_lstm = K.reshape(pair1,
                                  (-1, pair1.shape[-2], pair1.shape[-1]))
        pair2_to_lstm = K.reshape(pair2,
                                  (-1, pair2.shape[-2], pair2.shape[-1]))

        # Run both halves through the parent network as a single batch.
        batch = K.concatenate([pair1_to_lstm, pair2_to_lstm], axis=0)

        embedded = super(TestSpeakerEmbedding, self).call(batch)

        # Split the joint batch back into the two halves.
        pair1_embed = embedded[:K.shape(pair1_to_lstm)[0]]
        pair2_embed = embedded[K.shape(pair1_to_lstm)[0]:]

        # Restore the (batch, n_groups, embedding_dim) layout.
        pair1_embed = K.reshape(pair1_embed,
                                (pair1_shape[0], pair1_shape[1], -1))
        pair2_embed = K.reshape(pair2_embed,
                                (pair2_shape[0], pair2_shape[1], -1))

        # Zero out embeddings of padded groups before averaging.
        pair1_embed = pair1_embed * pair1_mask
        pair2_embed = pair2_embed * pair2_mask

        # Number of real (non-padded) groups per batch element.
        pair1_n = K.sum(pair1_mask, axis=1)
        pair2_n = K.sum(pair2_mask, axis=1)

        # Mean embedding over the real groups only.
        pair1_embed = K.sum(pair1_embed, axis=1) / pair1_n
        pair2_embed = K.sum(pair2_embed, axis=1) / pair2_n

        return pair1_embed, pair2_embed
Beispiel #11
0
def accuracy(y_true, y_pred, mask_value=-1):
    """Sparse categorical accuracy with masking.

    Positions whose label equals `mask_value` are dropped before scoring;
    when everything is masked out the result is 0 rather than an error.
    """
    keep = K.squeeze(K.not_equal(y_true, mask_value), axis=-1)
    acc = sparse_categorical_accuracy(tf.boolean_mask(y_true, keep),
                                      tf.boolean_mask(y_pred, keep))
    is_empty = tf.equal(tf.shape(acc)[0], 0)
    return tf.cond(is_empty, lambda: tf.constant(0, tf.float32), lambda: acc)
Beispiel #12
0
def not_equal(f, other, tol=None):
    """Element-wise comparison applied to the `Functional` objects.

    # Arguments
        f: Functional object.
        other: A python number or a tensor or a functional object.
        tol: (float) If you need a tolerance measure.

    # Returns
        A Functional.
    """
    validate_functional(f)
    assert isinstance(
        tol, (type(None), float)), 'Expected a floating value for `tol`.'

    inputs = f.inputs.copy()
    # Choose the comparison once: exact inequality when no tolerance is
    # given, otherwise |a - b| > tol.
    if is_functional(other):
        inputs += to_list(other.inputs)
        if tol is None:
            compare = lambda x: K.cast_to_floatx(K.not_equal(x[0], x[1]))
        else:
            compare = lambda x: K.cast_to_floatx(
                K.greater(K.abs(x[0] - x[1]), tol))
    else:
        _warn_for_ndarray(other)
        if tol is None:
            compare = lambda x: K.cast_to_floatx(K.not_equal(x, other))
        else:
            compare = lambda x: K.cast_to_floatx(
                K.greater(K.abs(x - other), tol))

    # One Lambda layer per output of f.
    lmbd = [Lambda(compare, name=graph_unique_name("not_equal"))
            for _ in f.outputs]

    Functional = f.get_class()
    return Functional(inputs=unique_tensors(inputs),
                      outputs=_apply_operation(lmbd, f, other),
                      layers=lmbd)
Beispiel #13
0
def masked_se(y_true, y_pred):
    """
    Function to define the masked squared error
    :param y_true: true label
    :param y_pred: predicted label
    :return: masked squared error
    """
    # Entries where y_true == 0 are masked out of the error.
    valid = K.cast(K.not_equal(y_true, 0), K.floatx())
    return K.sum(K.square(valid * (y_true - y_pred)), axis=-1)
def mbce(y_true, y_pred):
    """ Balanced sigmoid cross-entropy loss with masking """
    # Labels of -1 mark entries to exclude from the loss.
    valid = K.cast(K.not_equal(y_true, -1.0), dtype=np.float32)
    n_valid = K.sum(valid, axis=1)
    # Global positive/negative counts drive the class-balance weight.
    n_pos = K.sum(K.cast(K.equal(y_true, 1.0), dtype=np.float32), axis=None)
    n_neg = K.sum(K.cast(K.equal(y_true, 0.0), dtype=np.float32), axis=None)
    pos_weight = 1.0 - n_pos / n_neg
    weighted = valid * tf.nn.weighted_cross_entropy_with_logits(
        targets=y_true, logits=y_pred, pos_weight=pos_weight)
    per_sample = K.sum(weighted, axis=1) / n_valid
    return K.mean(per_sample, axis=-1)
Beispiel #15
0
def masked_rmse_clip(y_true, y_pred):
    """
    Function to define the masked root mean squared error with clipping
    :param y_true: true label
    :param y_pred: predicted label
    :return: masked root mean squared error with clipping
    """
    valid = K.cast(K.not_equal(y_true, 0), K.floatx())
    # Predictions are clipped into the rating range [1, 5].
    clipped_pred = K.clip(y_pred, 1, 5)
    squared_error = K.square(valid * (y_true - clipped_pred))
    # Denominator clamped to at least 1 to avoid division by zero.
    denom = K.maximum(K.sum(valid, axis=-1), 1)
    return K.sqrt(K.sum(squared_error, axis=-1) / denom)
Beispiel #16
0
    def call(self, x, mask=None):
        '''Masked mean-pooling over the sequence axis; `mask` comes from the previous layer.'''
        '''# using 'mask' you can access the mask passed from the previous layer'''
        # x [batch_size, seq_len, embedding_size]
        if self.supports_masking:
            # mask [batch_size, seq_len]
            if mask is None:
                # No incoming mask: treat a timestep as valid when any of its
                # embedding components is non-zero, then average over the
                # valid timesteps only.
                mask = K.any(K.not_equal(x, 0), -1)  # [batch_size, seq_len]
                mask = K.cast(mask, K.floatx())
                return K.sum(x, axis=1) / K.sum(mask, axis=1, keepdims=True)

            if mask is not None:
                mask = K.cast(mask, K.floatx())
                # Broadcast the mask across the embedding dimension:
                # [batch_size, embedding_size, seq_len]
                # NOTE: x.shape[-1].value is TF1-style Dimension access.
                mask = K.repeat(mask, x.shape[-1].value)
                # [batch_size, seq_len, embedding_size]
                mask = tf.transpose(mask, [0, 2, 1])
                x = x * mask
                return K.sum(x, axis=1) / K.sum(mask, axis=1)
        # NOTE(review): implicitly returns None when self.supports_masking
        # is False — confirm the layer is always built with masking enabled.
Beispiel #17
0
    def classification_loss(self, y_true, y_pred):
        """Focal classification loss over anchors, skipping 'ignore' anchors.

        The last channel of y_true holds the anchor state
        (-1 = ignore, 0 = background, 1 = object); the rest are the labels.
        """
        # TODO: try weighted_categorical_crossentropy
        labels = y_true[..., :-1]
        anchor_state = y_true[..., -1]

        # Keep only anchors that are not marked "ignore".
        keep = tf.where(K.not_equal(anchor_state, -1))
        kept_labels = tf.gather_nd(labels, keep)
        kept_preds = tf.gather_nd(y_pred, keep)

        loss = focal(kept_labels, kept_preds, alpha=self.alpha,
                     gamma=self.gamma)

        # Normalize by the number of positive anchors, at least 1.
        positives = tf.where(K.equal(anchor_state, 1))
        normalizer = K.cast(K.shape(positives)[0], K.floatx())
        normalizer = K.maximum(K.cast_to_floatx(1.0), normalizer)

        return K.sum(loss) / normalizer
Beispiel #18
0
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
    """
    # Drop the trailing length-1 axis -> [batch, anchors].
    match = tf.squeeze(rpn_match, -1)
    # Map the -1/+1 match values to class ids 0/1.
    anchor_class = K.cast(K.equal(match, 1), tf.int32)
    # Neutral anchors (match == 0) are excluded from the loss.
    contributing = tf.where(K.not_equal(match, 0))
    logits = tf.gather_nd(rpn_class_logits, contributing)
    targets = tf.gather_nd(anchor_class, contributing)
    # Cross entropy over contributing anchors; 0 when none contribute.
    loss = K.sparse_categorical_crossentropy(target=targets,
                                             output=logits,
                                             from_logits=True)
    return K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
Beispiel #19
0
    def get_model(self):
        """Build (once), cache and return the NI (implicit-interaction) model.

        Inputs: mashup feature vector + api id; when `old_new == 'new'` and
        `NI_handle_slt_apis_mode` is set, an input of already-selected api
        ids is added and pooled according to that mode ('attention',
        'average' or 'full_concate').
        """
        if not self.model:
            mashup_fea_input = Input(shape=(self.num_feat, ),
                                     dtype='float32',
                                     name='NI_mashup_fea_input')  # (None,25)
            api_id_input = Input(shape=(1, ),
                                 dtype='int32',
                                 name='NI_api_id_input')
            inputs = [mashup_fea_input, api_id_input]

            self.prepare()
            api_implict_embs = self.api_implict_emb_layer(
                api_id_input)  # (None,1,25)
            api_implict_embs_2D = Lambda(lambda x: tf.squeeze(x, axis=1))(
                api_implict_embs)  # (None,25)
            feature_list = [mashup_fea_input, api_implict_embs_2D]

            if self.old_new == 'new' and self.NI_handle_slt_apis_mode:
                mashup_slt_apis_input = Input(
                    shape=(new_Para.param.slt_item_num, ),
                    dtype='int32',
                    name='slt_apis_input')
                inputs.append(mashup_slt_apis_input)
                keys_slt_api_implict_embs = self.api_implict_emb_layer(
                    mashup_slt_apis_input)  # (None,3,25)

                if self.NI_handle_slt_apis_mode in ('attention', 'average'):
                    # presumably padding slots hold id == all_api_num —
                    # verify against the data pipeline.
                    mask = Lambda(lambda x: K.not_equal(x, self.all_api_num))(
                        mashup_slt_apis_input)  # (?, 3) !!!
                    if self.NI_handle_slt_apis_mode == 'attention':
                        slt_api_implict_embs_hist = AttentionSequencePoolingLayer(
                            supports_masking=True)(
                                [api_implict_embs, keys_slt_api_implict_embs],
                                mask=mask)
                    else:  # 'average'
                        slt_api_implict_embs_hist = SequencePoolingLayer(
                            'mean',
                            supports_masking=True)(keys_slt_api_implict_embs,
                                                   mask=mask)
                    slt_api_implict_embs_hist = Lambda(
                        lambda x: tf.squeeze(x, axis=1))(
                            slt_api_implict_embs_hist)  # (?, 1, 25)->(?, 25)
                elif self.NI_handle_slt_apis_mode == 'full_concate':
                    slt_api_implict_embs_hist = Reshape(
                        (new_Para.param.slt_item_num * self.num_feat, ))(
                            keys_slt_api_implict_embs)  # (?,75)
                else:
                    raise ValueError('wrong NI_handle_slt_apis_mode!')
                feature_list.append(slt_api_implict_embs_hist)

            feature_list = list(map(NoMask(),
                                    feature_list))  # the DNN does not support masking, so stop propagating the mask
            all_features = Concatenate(
                name='all_emb_concatenate')(feature_list)

            output = DNN(self.cf_unit_nums[:-1])(all_features)
            output = Dense(self.cf_unit_nums[-1],
                           activation='relu',
                           kernel_regularizer=l2(new_Para.param.l2_reg),
                           name='implict_dense_{}'.format(
                               len(self.cf_unit_nums)))(output)

            # Output layer
            if new_Para.param.final_activation == 'softmax':
                predict_result = Dense(2,
                                       activation='softmax',
                                       name="prediction")(output)
            elif new_Para.param.final_activation == 'sigmoid':
                predict_result = Dense(1,
                                       activation='sigmoid',
                                       kernel_initializer='lecun_uniform',
                                       name="prediction")(output)

            self.model = Model(inputs=inputs,
                               outputs=[predict_result],
                               name='predict_model')

            for layer in self.model.layers:
                print(layer.name)
            # Persist the model's name once, alongside the cached model.
            if not os.path.exists(self.model_name_path):
                with open(self.model_name_path, 'w+') as f:
                    f.write(self.get_name())
        return self.model
Beispiel #20
0
 def compute_mask(self, inputs, mask=None):
     """Mark every element that is not the '--PAD--' token as valid."""
     pad_token = '--PAD--'
     return K.not_equal(inputs, pad_token)
Beispiel #21
0
    def get_model(self):
        """Build (once), cache and return the CI (content) prediction model.

        Combines text and tag feature vectors of a mashup/api pair; in the
        'new' scenario with `CI_handle_slt_apis_mode` set, features of the
        already-selected apis are pooled ('attention', 'average') or
        concatenated ('full_concate') into the feature list.
        """
        if not self.model:
            mashup_id_input = Input(shape=(1, ),
                                    dtype='int32',
                                    name='mashup_id_input')
            api_id_input = Input(shape=(1, ),
                                 dtype='int32',
                                 name='api_id_input')
            inputs = [mashup_id_input, api_id_input]

            user_text_vec = self.user_text_feature_extractor()(mashup_id_input)
            item_text_vec = self.item_text_feature_extractor()(api_id_input)
            user_tag_vec = self.user_tag_feature_extractor()(mashup_id_input)
            item_tag_vec = self.item_tag_feature_extractor()(api_id_input)
            feature_list = [
                user_text_vec, item_text_vec, user_tag_vec, item_tag_vec
            ]

            if self.old_new == 'LR_PNCF':  # old scenario: GMF-style two-tower model
                x = Concatenate(name='user_concatenate')(
                    [user_text_vec, user_tag_vec])
                y = Concatenate(name='item_concatenate')(
                    [item_text_vec, item_tag_vec])
                output = Multiply()([x, y])
                predict_result = Dense(1,
                                       activation='sigmoid',
                                       use_bias=False,
                                       kernel_initializer='lecun_uniform',
                                       name="prediction")(output)  # learned weights, non-linear
                self.model = Model(inputs=inputs,
                                   outputs=[predict_result],
                                   name='predict_model')
                return self.model

            elif self.old_new == 'new' and self.CI_handle_slt_apis_mode:
                # services already selected for the mashup
                mashup_slt_apis_input = Input(
                    shape=(new_Para.param.slt_item_num, ),
                    dtype='int32',
                    name='slt_api_ids_input')
                mashup_slt_apis_input_3D = Reshape(
                    (new_Para.param.slt_item_num, 1))(mashup_slt_apis_input)
                # mashup_slt_apis_num_input = Input(shape=(1,), dtype='int32', name='mashup_slt_apis_num_input')
                inputs.append(mashup_slt_apis_input)
                # presumably padding slots hold id == all_api_num — verify.
                mask = Lambda(lambda x: K.not_equal(x, self.all_api_num))(
                    mashup_slt_apis_input)  # (?, 3) !!!

                # selected services reuse the item feature extractors directly
                slt_text_vec_list, slt_tag_vec_list = [], []
                for i in range(new_Para.param.slt_item_num):
                    x = Lambda(slice, arguments={'index': i})(
                        mashup_slt_apis_input_3D)  # (?,1,1)
                    x = Reshape((1, ))(x)
                    temp_item_text_vec = self.item_text_feature_extractor()(x)
                    temp_item_tag_vec = self.item_tag_feature_extractor()(x)
                    slt_text_vec_list.append(temp_item_text_vec)
                    slt_tag_vec_list.append(temp_item_tag_vec)

                if self.CI_handle_slt_apis_mode in ('attention', 'average'):
                    # text and tag each use their own attention block
                    slt_text_vec_list = [
                        Reshape((1, new_Para.param.embedding_dim))(key_2D)
                        for key_2D in slt_text_vec_list
                    ]
                    slt_tag_vec_list = [
                        Reshape((1, new_Para.param.embedding_dim))(key_2D)
                        for key_2D in slt_tag_vec_list
                    ]  # add one axis, e.g. [None,50]->[None,1,50]
                    text_keys_embs = Concatenate(axis=1)(
                        slt_text_vec_list)  # [?,3,50]
                    tag_keys_embs = Concatenate(axis=1)(
                        slt_tag_vec_list)  # [?,3,50]

                    if self.CI_handle_slt_apis_mode == 'attention':
                        query_item_text_vec = Lambda(
                            lambda x: tf.expand_dims(x, axis=1))(
                                item_text_vec)  # (?, 50)->(?, 1, 50)
                        query_item_tag_vec = Lambda(
                            lambda x: tf.expand_dims(x, axis=1))(item_tag_vec)
                        # pool the history into a single vector
                        text_hist = AttentionSequencePoolingLayer(
                            supports_masking=True)(
                                [query_item_text_vec, text_keys_embs],
                                mask=mask)
                        tag_hist = AttentionSequencePoolingLayer(
                            supports_masking=True)(
                                [query_item_tag_vec, tag_keys_embs], mask=mask)

                    else:  # 'average'
                        text_hist = SequencePoolingLayer(
                            'mean', supports_masking=True)(text_keys_embs,
                                                           mask=mask)
                        tag_hist = SequencePoolingLayer('mean',
                                                        supports_masking=True)(
                                                            tag_keys_embs,
                                                            mask=mask)

                    text_hist = Lambda(lambda x: tf.squeeze(x, axis=1))(
                        text_hist)  # (?, 1, 50)->(?, 50)
                    tag_hist = Lambda(lambda x: tf.squeeze(x, axis=1))(
                        tag_hist)

                elif self.CI_handle_slt_apis_mode == 'full_concate':
                    text_hist = Concatenate(axis=1)(
                        slt_text_vec_list)  # [?,150]
                    tag_hist = Concatenate(axis=1)(slt_tag_vec_list)  # [?,150]
                else:
                    raise ValueError('wrong CI_handle_slt_apis_mode!')

                feature_list.extend([text_hist, tag_hist])

            else:  # new model without selected services, and the old model
                pass
            feature_list = list(map(NoMask(),
                                    feature_list))  # the DNN does not support masking, so stop propagating the mask
            all_features = Concatenate(
                name='all_content_concatenate')(feature_list)

            output = DNN(self.content_fc_unit_nums[:-1])(all_features)
            output = Dense(self.content_fc_unit_nums[-1],
                           activation='relu',
                           kernel_regularizer=l2(new_Para.param.l2_reg),
                           name='text_tag_feature_extracter')(output)

            # Output layer
            if new_Para.param.final_activation == 'softmax':
                predict_result = Dense(2,
                                       activation='softmax',
                                       name="prediction")(output)
            elif new_Para.param.final_activation == 'sigmoid':
                predict_result = Dense(1,
                                       activation='sigmoid',
                                       kernel_initializer='lecun_uniform',
                                       name="prediction")(output)

            # Model
            # if self.IfUniteNI:
            #     inputs.append(user_NI_input)
            self.model = Model(inputs=inputs,
                               outputs=[predict_result],
                               name='predict_model')

            for layer in self.model.layers:
                print(layer.name)
            print('built CI model, done!')
        return self.model
Beispiel #22
0
 def masked_loss_function(y_true, y_pred, mask_value=mask_value):
     """Wrap `loss_function`, zeroing out entries equal to `mask_value`."""
     keep = K.cast(K.not_equal(y_true, mask_value), K.floatx())
     return loss_function(y_true * keep, y_pred * keep)
Beispiel #23
0
        layer.trainable = False

    # CNNの出力
    u = Flatten()(encoder.output)

    # LSTMの初期状態
    h_0 = Dense(hid_dim)(u)
    c_0 = Dense(hid_dim)(u)

    # LSTMの入力
    y = Input(shape=(None, ), dtype='int32')
    y_in = Lambda(lambda x: x[:, :-1])(y)
    y_out = Lambda(lambda x: x[:, 1:])(y)

    # 誤差関数のマスク
    mask = Lambda(lambda x: K.cast(K.not_equal(x, w2i['<pad>']), 'float32'))(
        y_out)

    # 層の定義
    embedding = Embedding(vocab_size, emb_dim)
    lstm = LSTM(hid_dim,
                activation='tanh',
                return_sequences=True,
                return_state=True)
    dense = Dense(vocab_size)
    softmax = Activation('softmax')

    # 順伝播
    y_emb = embedding(y_in)
    h, _, _ = lstm(y_emb, initial_state=[h_0, c_0])  # 第2,3戻り値(最終ステップのh, c)は無視
    h = dense(h)
Beispiel #24
0
    def get_model(self):
        """Build (once), cache and return the prediction model combining
        mashup/api text features, tag features and the api implicit
        embedding; in the 'new' scenario the same features of the
        already-selected apis are attention-pooled against the candidate api.
        """
        if not self.model:
            mashup_id_input = Input(shape=(1, ),
                                    dtype='int32',
                                    name='mashup_id_input')
            api_id_input = Input(shape=(1, ),
                                 dtype='int32',
                                 name='api_id_input')
            inputs = [mashup_id_input, api_id_input]

            mashup_text_fea = self.mid2text_fea_layer(
                mashup_id_input)  # (None,1,25)
            api_text_fea = self.aid2text_fea_layer(api_id_input)  # (None,1,25)

            mashup_tag_fea = self.mid2tag_fea_layer(
                mashup_id_input)  # (None,1,25)
            api_tag_fea = self.aid2tag_fea_layer(api_id_input)  # (None,1,25)

            api_implict_emb = self.api_implict_emb_layer(
                api_id_input)  # (None,1,25)

            feature_list = [
                mashup_text_fea, api_text_fea, mashup_tag_fea, api_tag_fea,
                api_implict_emb
            ]

            if self.new_old == 'new' and new_Para.param.need_slt_apis:
                mashup_slt_apis_input = Input(
                    shape=(new_Para.param.slt_item_num, ),
                    dtype='int32',
                    name='slt_api_ids_input')
                inputs.append(mashup_slt_apis_input)

                keys_slt_api_text_feas = self.aid2text_fea_layer(
                    mashup_slt_apis_input)  # (None,3,25)
                keys_slt_api_tag_feas = self.aid2tag_fea_layer(
                    mashup_slt_apis_input)  # (None,3,25)
                keys_slt_api_implict_embs = self.api_implict_emb_layer(
                    mashup_slt_apis_input)  # (None,3,25)

                # presumably padding slots hold id == all_api_num — verify.
                mask = Lambda(lambda x: K.not_equal(x, self.all_api_num))(
                    mashup_slt_apis_input)  # (?, 3) !!!

                # query_api_text_vec = Lambda(lambda x: tf.expand_dims(x, axis=1))(api_text_fea)  # (?, 50)->(?, 1, 50)
                # query_api_tag_vec = Lambda(lambda x: tf.expand_dims(x, axis=1))(api_tag_fea)
                # query_api_implict_emb = Lambda(lambda x: tf.expand_dims(x, axis=1))(api_implict_emb)

                # pool the history into a single vector  ->(?, 1, 50)
                text_hist = AttentionSequencePoolingLayer(
                    supports_masking=True)(
                        [api_text_fea, keys_slt_api_text_feas], mask=mask)
                tag_hist = AttentionSequencePoolingLayer(
                    supports_masking=True)(
                        [api_tag_fea, keys_slt_api_tag_feas], mask=mask)
                implict_emb_hist = AttentionSequencePoolingLayer(
                    supports_masking=True)(
                        [api_implict_emb, keys_slt_api_implict_embs],
                        mask=mask)

                feature_list = [
                    mashup_text_fea, api_text_fea, text_hist, mashup_tag_fea,
                    api_tag_fea, tag_hist, api_implict_emb, implict_emb_hist
                ]
                feature_list = list(map(NoMask(),
                                        feature_list))  # the DNN does not support masking, so stop propagating the mask

            all_features = Concatenate(
                name='all_content_concatenate')(feature_list)
            all_features = Lambda(lambda x: tf.squeeze(x, axis=1))(
                all_features)

            output = DNN(self.predict_fc_unit_nums[:-1])(all_features)
            output = Dense(self.predict_fc_unit_nums[-1],
                           activation='relu',
                           kernel_regularizer=l2(
                               new_Para.param.l2_reg))(output)

            # Output layer
            if new_Para.param.final_activation == 'softmax':
                predict_result = Dense(2,
                                       activation='softmax',
                                       name="prediction")(output)
            elif new_Para.param.final_activation == 'sigmoid':
                predict_result = Dense(1,
                                       activation='sigmoid',
                                       kernel_initializer='lecun_uniform',
                                       name="prediction")(output)

            self.model = Model(inputs=inputs,
                               outputs=[predict_result],
                               name='predict_model')
        return self.model