Example #1
def aucMetric(true, pred):

    #normalize both tensors to the [0, 1] range
    true = (true - K.min(true)) / (K.max(true) - K.min(true))
    pred = (pred - K.min(pred)) / (K.max(pred) - K.min(pred))

    #we want strictly 1D arrays - cannot have (batch, 1), for instance
    true = K.flatten(true)
    pred = K.flatten(pred)

    #total number of elements in this batch
    totalCount = K.shape(true)[0]

    #sorting the prediction values in descending order
    values, indices = tf.nn.top_k(pred, k=totalCount)
    #sorting the ground truth values based on the predictions above
    sortedTrue = K.gather(true, indices)

    #the ground-truth negatives (already sorted above)
    negatives = 1 - sortedTrue

    #the true positive count per threshold
    TPCurve = K.cumsum(sortedTrue)

    #area under the curve
    auc = K.sum(TPCurve * negatives)

    #normalizing the result between 0 and 1
    totalCount = K.cast(totalCount, K.floatx())
    positiveCount = K.sum(true)
    negativeCount = totalCount - positiveCount
    totalArea = positiveCount * negativeCount
    return auc / totalArea
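
A quick sanity check of the metric (a sketch, assuming TensorFlow 2.x in eager mode with the function above in scope): 8 of the 9 positive/negative pairs below are ranked correctly, so the expected AUC is 8/9 ≈ 0.889. Note the metric runs per batch, so it only approximates the epoch-level AUC.

import tensorflow as tf
from tensorflow.keras import backend as K

# labels and scores chosen so that exactly one pair is ranked incorrectly
true = tf.constant([0., 0., 1., 1., 0., 1.])
pred = tf.constant([0.1, 0.4, 0.35, 0.8, 0.2, 0.7])
print(float(aucMetric(true, pred)))  # ~0.8889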
Example #2
    def to_predict_extractive_dataset(
        self, data: pd.DataFrame, **kwargs
    ) -> Tuple[Union[tf.Tensor, list], list, list, list, Tuple[list, list, list, list]]:
        """
        format pandas.DataFrame to create dataset for ExtractiveQA predict
        :param data: dataframe with question, context, title and id
        :param to_interpret: bool (passed via **kwargs), whether the data is for BaseInterpret input
        :return: context, id, title, last index of each context, (DOI, authors, url, date)
        :rtype: Union[tf.Tensor, list], list, list, list, tuple
        """
        
        self.to_predict_dataset(data)
        data = data.copy()
        data['context'] = data.apply(lambda x: self.to_context(x.question, x.context, "extractive"), axis=1)
        data['end_context'] = K.cumsum(data.apply(lambda x:len(x.context), axis=1)).numpy()
        context = self.tok_ext.batch_encode_plus(
            data.context.explode().to_list(),
            truncation = True,
            return_tensors = 'tf',
            max_length = self.extract_length,
            padding = "max_length")['input_ids']
        
        _id, title, end_context, doi, authors, url, date = list(
            data[['id', 'title', 'end_context', 'DOI', 'authors', 'URL', 'publication_date']]
            .to_dict('list').values())
        del data

        if kwargs.get('to_interpret', False):
            context = list(map(lambda x:K.reshape(x, (1, self.extract_length)), context))
        
        return context, _id, title, end_context, (doi, authors, url, date)
Example #3
    def Mask(self, inputs, seq_len, mode="add"):
        """Mask operation used in multi-head self attention

        Args:
            inputs (obj): input tensors to be masked.
            seq_len (obj): sequence length of inputs.
            mode (str): mode of mask, "add" or "mul".

        Returns:
            obj: tensors after masking.
        """

        if seq_len is None:
            return inputs
        else:
            mask = K.one_hot(indices=seq_len[:, 0],
                             num_classes=K.shape(inputs)[1])
            mask = 1 - K.cumsum(mask, axis=1)

            for _ in range(len(inputs.shape) - 2):
                mask = K.expand_dims(mask, 2)

            if mode == "mul":
                return inputs * mask
            elif mode == "add":
                return inputs - (1 - mask) * 1e12
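
The trick here is that 1 - cumsum(one_hot(len)) yields a step mask that is 1 before position len and 0 from it onward. A minimal check (a sketch, assuming TensorFlow 2.x in eager mode):

import tensorflow as tf
from tensorflow.keras import backend as K

# For an actual length of 3 out of 5 positions:
# one_hot(3, 5)   -> [0, 0, 0, 1, 0]
# cumsum(...)     -> [0, 0, 0, 1, 1]
# 1 - cumsum(...) -> [1, 1, 1, 0, 0]
seq_len = tf.constant([[3]])
mask = 1 - K.cumsum(K.one_hot(seq_len[:, 0], 5), axis=1)
print(mask.numpy())  # [[1. 1. 1. 0. 0.]]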
Example #4
def cumsoftmax(x, mode='l2r'):
    """Apply softmax first, then cumsum.
    The cumsum distinguishes two modes: left-to-right ('l2r') and right-to-left ('r2l').
    """
    axis = K.ndim(x) - 1
    if mode == 'l2r':
        x = K.softmax(x, axis=axis)
        x = K.cumsum(x, axis=axis)
        return x
    elif mode == 'r2l':
        x = x[..., ::-1]
        x = K.softmax(x, axis=axis)
        x = K.cumsum(x, axis=axis)
        return x[..., ::-1]
    else:
        return x
Example #5
    def call(self, inputs, states, constants, training=None):
        (tgt_curr_input, tgt_pos_input,
         dec_mask), dec_output = states[:3], list(states[3:])
        enc_output, enc_mask = constants

        time = K.max(tgt_pos_input)
        col_mask = K.cast(K.equal(K.cumsum(K.ones_like(dec_mask), axis=1),
                                  time),
                          dtype='int32')
        dec_mask = dec_mask + col_mask

        tgt_emb = self.o_word_emb(tgt_curr_input)
        if self.pos_emb:
            tgt_emb = tgt_emb + self.pos_emb(tgt_pos_input, pos_input=True)

        x = tgt_emb
        xs = []
        cc = K.cast(K.expand_dims(col_mask), dtype='float32')
        for i, dec_layer in enumerate(self.decoder.layers):
            dec_last_state = dec_output[i] * (1 - cc) + tf.einsum(
                'ijk,ilj->ilk', x, cc)
            x, _, _ = dec_layer(x,
                                enc_output,
                                dec_mask,
                                enc_mask,
                                dec_last_state=dec_last_state)
            xs.append(dec_last_state)

        ff_output = self.target_layer(x)
        out = K.cast(K.argmax(ff_output, -1), dtype='int32')
        return out, [out, tgt_pos_input + 1, dec_mask] + xs
Example #6
def crps(y_true, y_pred):
    y_pred = K.cumsum(y_pred, axis=1)
    ym = K.cast(K.reshape(K.argmax(y_true, axis=1) - 99, (-1, 1)),
                dtype='int32')
    n = K.arange(-99, 100)
    step = K.cast(K.greater_equal(n - ym, 0), dtype='float32')
    return K.mean(K.sum(K.square(y_pred - step), axis=1)) / 199
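
For reference, y_true is a one-hot vector over the 199 values n = -99..99, and K.cumsum turns the predicted distribution into a CDF that is compared against the step-function CDF of the true value. A small sketch of that step target (assuming NumPy):

import numpy as np

# Step-function CDF target for a true value y: 0 before y, 1 from y onward.
y = 3
n = np.arange(-99, 100)
step = (n >= y).astype(np.float32)
print(int(step.sum()))  # 97 ones, covering positions 3..99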
Example #7
 def to_predict_abstractive_dataset(self, data):
     """
     format pandas.DataFrame to create dataset for AbstractiveQA predict
     :param data: dataframe with question, context, title and id
     :return: context, id, title, last index of each context, DOI, authors and date
     :rtype: tf.Tensor, list, list, list, list, list, list
     """
     
     self.to_predict_dataset(data)
     data = data.copy()
     data['context'] = data.apply(lambda x: self.to_context(x.question, x.context, True), axis=1)
     data['end_context'] = K.cumsum(data.apply(lambda x:len(x.context), axis=1)).numpy()
     context = self.tok_abs.batch_encode_plus(
         data.context.explode().to_list(),
         truncation = True,
         return_tensors = 'tf',
         max_length = self.encoder_length,
         padding = "max_length")['input_ids']
     
     _id, title, end_context, doi, authors, date = list(
         data[['id', 'title', 'end_context', 'DOI', 'authors', 'publication_date']]
         .to_dict('list').values())
     del data
     
     return context, _id, title, end_context, doi, authors, date
Example #8
    def attention(self, pre_q, pre_k, out_seq_len: int, d_model: int,
                  training=None):
        """
        Calculates the output of the attention once the affine transformations
        of the inputs are done. Here are the shapes of the arguments:
        :param pre_q: (batch_size, q_seq_len, num_heads, d_model // num_heads)
        :param pre_k: (batch_size, k_seq_len, num_heads, d_model // num_heads)
        :param out_seq_len: the length of the output sequence
        :param d_model: dimensionality of the model (by the paper)
        :param training: Passed by Keras. Should not be defined manually.
          Optional scalar tensor indicating if we're in training
          or inference phase.
        """
        q_seq_len = K.int_shape(pre_q)[-3]
        k_seq_len = K.int_shape(pre_k)[-3]
        q_flattened = K.reshape(pre_q, (-1, q_seq_len, d_model))
        k_flattened = K.reshape(pre_k, (-1, k_seq_len, d_model))

        # projected_k (batch, seq, head, dim)
        projected_k = K.reshape(
            K.dot(k_flattened, self.k_projection),
            (-1, k_seq_len, self.num_heads, d_model//self.num_heads))

        # {q,k}_assignments (batch, seq, head)
        q_assignments = K.softmax(
            K.dot(q_flattened, self.q_assign_weights) + self.q_assign_bias)
        k_assignments = K.softmax(
            K.dot(k_flattened, self.k_assign_weights) + self.k_assign_bias)

        # k_head_contributions (batch, seq, head, dim)
        k_head_contributions = projected_k*K.expand_dims(k_assignments, -1)
        if self.use_masking:
            k_normalization = K.clip(K.cumsum(k_assignments, -2), 1e-3, np.inf)
            # agglomerated_values (batch, seq, head, embedding)
            agglomerated_values = (K.cumsum(k_head_contributions, -3)/
                                   K.expand_dims(k_normalization, -1))
        else:
            # agglomerated_values (batch, 1, head, embedding)
            k_normalization = K.clip(K.sum(k_assignments, -2, keepdims=True), 1e-3, np.inf)
            agglomerated_values = (K.sum(k_head_contributions, -3, keepdims=True)/
                                   K.expand_dims(k_normalization, -1))

        result = K.expand_dims(q_assignments, -1)*agglomerated_values
        result = K.reshape(result, (-1, q_seq_len, d_model))
        result = self.apply_dropout_if_needed(result, training=training)
        return K.dot(result, self.output_weights)
Example #9
def GetSubMask(s):
    '''
    shape: [B, Q, K], lower triangle because the i-th row should have i 1s.
    '''
    len_s = tf.shape(s)[1]
    bs = tf.shape(s)[:1]
    mask = K.cumsum(tf.eye(len_s, batch_shape=bs), 1)
    return mask
Example #10
def get_model():
    seq_inp = KL.Input(shape=(14, 2))

    x = KL.Conv1D(8, 3, activation="relu")(seq_inp)
    x = KL.Conv1D(32, 3, activation="relu", strides=3)(x)
    x = KL.Conv1D(128, 3, activation="relu")(x)

    x = KL.Flatten()(x)

    out1 = KL.Dense(3, activation="relu")(x)
    out1 = KL.Lambda(lambda x: K.cumsum(x, axis=1), name=targets[0])(out1)

    out2 = KL.Dense(3, activation="relu")(x)
    out2 = KL.Lambda(lambda x: K.cumsum(x, axis=1), name=targets[1])(out2)

    model = Model(inputs=seq_inp, outputs=[out1, out2])
    return model
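
Since the Dense heads use ReLU, their outputs are non-negative, and the cumsum along axis 1 makes each 3-vector non-decreasing, presumably to enforce ordered targets. A minimal check of that property (a sketch, assuming TensorFlow 2.x in eager mode):

import tensorflow as tf
from tensorflow.keras import backend as K

# The cumulative sum of non-negative values never decreases.
relu_out = tf.constant([[0.5, 0.0, 1.2]])
print(K.cumsum(relu_out, axis=1).numpy())  # [[0.5 0.5 1.7]]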
Example #11
def positions_func(inputs, pad=0):
    """
    A layer filling i-th column of a 2D tensor with
    1+ln(1+i) when it contains a meaningful symbol
    and with 0 when it contains PAD
    """
    position_inputs = K.cumsum(K.ones_like(inputs, dtype="float32"), axis=1)
    position_inputs *= K.cast(K.not_equal(inputs, pad), "float32")
    return K.log(1.0 + position_inputs)
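
A quick check of the behavior (a sketch, assuming TensorFlow 2.x in eager mode and the function above in scope): positions are counted from 1 and PAD columns stay 0.

import tensorflow as tf

# cumsum(ones) -> [1, 2, 3]; PAD mask -> [1, 1, 0]; log(1 + .) -> [ln 2, ln 3, 0]
inputs = tf.constant([[7, 4, 0]])
print(positions_func(inputs).numpy())  # [[0.6931 1.0986 0.]]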
Example #12
 def call(self, x):
     y_embed = K.cumsum(K.ones_like(x[:, :, :, 0]), 1)
     x_embed = K.cumsum(K.ones_like(x[:, :, :, 0]), 2)
     if self.normalize:
         eps = 1e-6
         y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
         x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
     dim_t = K.arange(self.num_pos_feats, dtype='float32')
     dim = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
     pos_x = x_embed[:, :, :, None] / dim
     pos_y = y_embed[:, :, :, None] / dim
     pos_x = K.stack((K.sin(pos_x[:, :, :, 0::2]), K.cos(pos_x[:, :, :, 1::2])), axis=4)
     b, h, w, d, c = K.int_shape(pos_x)
     pos_x = K.reshape(pos_x, (-1, h, w, d*c))
     pos_y = K.stack((K.sin(pos_y[:, :, :, 0::2]), K.cos(pos_y[:, :, :, 1::2])), axis=4)
     pos_y = K.reshape(pos_y, (-1, h, w, d*c))
     pos = K.permute_dimensions(K.concatenate((pos_y, pos_x), axis=3), (0, 3, 1, 2))
     return pos
Example #13
def get_decoder_mask(self_attn_inputs):
    """Returns causal mask to apply for self-attention layer.

    Args:
    self_attn_inputs: Inputs to self attention layer to determine mask shape
    """
    len_s = tf.shape(self_attn_inputs)[1]
    bs = tf.shape(self_attn_inputs)[:1]
    mask = K.cumsum(tf.eye(len_s, batch_shape=bs), 1)
    return mask
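
The cumulative sum of an identity matrix along the row axis is exactly a lower-triangular causal mask: row i has ones in columns 0..i. A minimal check (a sketch, assuming TensorFlow 2.x in eager mode):

import tensorflow as tf
from tensorflow.keras import backend as K

mask = K.cumsum(tf.eye(4, batch_shape=[2]), axis=1)
print(mask[0].numpy())
# [[1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [1. 1. 1. 1.]]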
Example #14
def cumax(x, axis=-1):
    """Cumulative sum of softmax activation.
    # Arguments
        x: Input tensor.
        axis: Integer, axis along which the operation is applied.
    # Returns
        Tensor, the cumulative sum of the softmax of `x` along `axis`
        (monotonically non-decreasing, ending at 1).
    """

    return K.cumsum(activations.softmax(x, axis), axis)
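
The result rises monotonically and ends at 1 along the chosen axis, which is what ordered-neuron style gates rely on. A quick check (a sketch, assuming TensorFlow 2.x in eager mode, with cumax above in scope and activations imported from tensorflow.keras):

import tensorflow as tf

# softmax([1, 2, 3]) ≈ [0.090, 0.245, 0.665]; its cumsum ≈ [0.090, 0.335, 1.0]
x = tf.constant([[1.0, 2.0, 3.0]])
print(cumax(x).numpy())  # [[0.090 0.335 1.   ]]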
Example #15
 def Mask(self, inputs, seq_len, mode='mul'):  # mask inputs according to the actual lengths given in seq_len
     if seq_len is None:
         return inputs
     else:
         mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[1])
         mask = 1 - K.cumsum(mask, 1)
         for _ in range(len(inputs.shape) - 2):
             mask = K.expand_dims(mask, 2)
         if mode == 'mul':
             return inputs * mask
         if mode == 'add':
             return inputs - (1 - mask) * 1e12
Example #16
 def Mask(self, inputs, seq_len, mode="mul"):
     if seq_len is None:
         return inputs
     else:
         mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[1])
         mask = 1 - K.cumsum(mask, 1)
         for _ in range(len(inputs.shape) - 2):
             mask = K.expand_dims(mask, 2)
         if mode == "mul":
             return inputs * mask
         if mode == "add":
             return inputs - (1 - mask) * 1e12
Example #17
 def compute_copy_loss(self, inputs, mask=None):
     _, y_mask, _, y_true, y_pred = inputs
     y_mask = tf.cast(y_mask, y_pred.dtype)
     y_true = tf.cast(y_true, y_pred.dtype)
     y_mask = K.cumsum(y_mask[:, ::-1], axis=1)[:, ::-1]
     y_mask = K.cast(K.greater(y_mask, 0.5), K.floatx())
     y_mask = y_mask[:, 1:]  # mask flags, shifted back by one position
     y_pred = y_pred[:, :-1]  # predicted sequence, offset by one position
     y_true = y_true[:, :-1]  # target sequence, offset by one position
     loss = K.sparse_categorical_crossentropy(y_true, y_pred)
     loss = K.sum(loss * y_mask) / K.sum(y_mask)
     return loss
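
The reversed cumsum extends a sparse mask so that every position up to the last valid one is kept, e.g. [1, 0, 1, 0, 0] becomes [1, 1, 1, 0, 0]. A minimal check (a sketch, assuming TensorFlow 2.x in eager mode):

import tensorflow as tf
from tensorflow.keras import backend as K

y_mask = tf.constant([[1., 0., 1., 0., 0.]])
m = K.cumsum(y_mask[:, ::-1], axis=1)[:, ::-1]
print(K.cast(K.greater(m, 0.5), K.floatx()).numpy())  # [[1. 1. 1. 0. 0.]]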
Example #18
    def negative_hazard_log_likelihood(cs, st, risk):
        # sort cs and risk by st
        sorting_indices = tf.argsort(st)[::-1]
        sorted_cs = tf.gather(cs, sorting_indices) # (?)
        sorted_risk = tf.gather(risk, sorting_indices) # (?)

        hazard_ratio = K.exp(sorted_risk)
        log_risk = K.log(K.cumsum(hazard_ratio))
        uncensored_likelihood = sorted_risk - log_risk
        censored_likelihood = uncensored_likelihood * sorted_cs
        neg_likelihood = -K.sum(censored_likelihood)

        return neg_likelihood
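
This is the negative Cox partial log-likelihood: with subjects sorted by survival time in descending order, K.cumsum(hazard_ratio) accumulates the risk-set sum over all subjects still at risk at each event time, and censored subjects (cs = 0) contribute nothing to the sum. A small numeric sketch (assuming TensorFlow 2.x in eager mode and the function above at callable scope):

import tensorflow as tf

# three subjects: survival times [5, 2, 8], event indicators [1, 0, 1]
cs = tf.constant([1., 0., 1.])
st = tf.constant([5., 2., 8.])
risk = tf.constant([0.3, -0.1, 0.7])
print(float(negative_hazard_log_likelihood(cs, st, risk)))  # ~0.913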
Example #19
 def call(self, inputs):
     (x, src_seq, enc_output), tgt_embs = inputs[:3], inputs[3:]
     enc_mask = K.cast(K.greater(src_seq, 0), 'float32')
     llen = tf.shape(tgt_embs[0])[1]
     col_mask = K.cast(K.equal(
         K.cumsum(K.ones_like(tgt_embs[0], dtype='int32'), axis=1), llen),
                       dtype='float32')
     rs = [x]
     for i, dec_layer in enumerate(self.layers):
         tgt_emb = tgt_embs[i] + x * col_mask
         x, _, _ = dec_layer(x,
                             enc_output,
                             enc_mask=enc_mask,
                             dec_last_state=tgt_emb)
         rs.append(x)
     return rs
Example #20
 def call(self, x):
     if (self.size is None) or (self.mode == 'sum'):
         self.size = int(x.shape[-1])
     batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
     position_j = 1. / K.pow(10000.,
                             2 * K.arange(self.size / 2, dtype='float32')
                             / self.size)
     position_j = K.expand_dims(position_j, 0)
     position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1  # K.arange does not support variable lengths, so positions are generated this way
     position_i = K.expand_dims(position_i, 2)
     position_ij = K.dot(position_i, position_j)
     position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
     if self.mode == 'sum':
         return position_ij + x
     elif self.mode == 'concat':
         return K.concatenate([position_ij, x], 2)
Example #21
        def event_nhll(cs_st_risk):
            event_cs = cs_st_risk[0]  # (?)
            event_st = cs_st_risk[1]  # (?)
            event_risk = cs_st_risk[2]  # (?)

            # sort cs by st
            sorting_indices = tf.argsort(event_st)[::-1]
            sorted_event_cs = tf.gather(event_cs, sorting_indices)  # (?)
            sorted_event_risk = tf.gather(event_risk, sorting_indices)  # (?)

            hazard_ratio = K.exp(sorted_event_risk)
            log_risk = K.log(K.cumsum(hazard_ratio))
            uncensored_likelihood = sorted_event_risk - log_risk
            censored_likelihood = uncensored_likelihood * sorted_event_cs
            neg_likelihood = -K.sum(censored_likelihood)

            return neg_likelihood
Example #22
def MyWeightedAvg(inputs, binsize, xmin):
    ones = K.ones_like(inputs[0, :])  # [1, 1, 1, 1....]   (size Nouts)
    idx = K.cumsum(ones)  # [1, 2, 3, 4....]   (size Nouts)
    norm = K.sum(
        inputs, axis=1, keepdims=True
    )  # normalization of all outputs by batch. shape is 1D array of size batch (n.b. keepdims=True is critical!)
    wsum = K.sum(
        idx * inputs, axis=1, keepdims=True
    ) / norm  # array of size batch with weighted avg. of mean in units of bins (n.b. keepdims=True is critical!)
    output = (binsize * (wsum - 0.5)
              ) + xmin  # convert from bins to physical units (shape batch,1)

    return output
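
A quick numeric check (a sketch, assuming TensorFlow 2.x in eager mode and the function above in scope): for a histogram peaked at bin 3 (1-based), the weighted mean of bin indices is (2*1 + 3*2 + 4*1) / 4 = 3.0, which maps to xmin + binsize * (3.0 - 0.5).

import tensorflow as tf

inputs = tf.constant([[0., 1., 2., 1., 0.]])
print(MyWeightedAvg(inputs, binsize=0.1, xmin=1.0).numpy())  # [[1.25]]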
Example #23
def shape_list(x):
    if x.get_shape().dims is None:
        return tf.shape(x)
    static = x.get_shape().as_list()
    shape = tf.shape(x)
    ret = []
    for i in range(len(static)):
        dim = static[i]
        if dim is None:
            dim = shape[i]
        ret.append(dim)
    return ret


x_val = [[1, 7, 6, 8, 0, 0], [6, 7, 5, 0, 0, 0]]
x = tf.constant(x_val)

len_s = tf.shape(x)[1]
bs = tf.shape(x)[:1]

mask = K.cumsum(tf.eye(len_s, batch_shape=bs), 1)
with tf.Session(config=tf.ConfigProto(log_device_placement=True,
                                      allow_soft_placement=False)) as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    mask_val = sess.run(mask)
    print('Result from K.cumsum (no GPU support) = ' + str(mask_val))

ones_part = tf.ones([bs[0], len_s, len_s])
mask = tf.linalg.band_part(ones_part, -1, 0)
with tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                      allow_soft_placement=False)) as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    mask_val = sess.run(mask)
    print('Result from tf.linalg.band_part (GPU supported) = ' + str(mask_val))
Example #24
def GetSubMask(s):
    len_s = tf.shape(s)[1]
    bs = tf.shape(s)[:1]
    mask = K.cumsum(tf.eye(len_s, batch_shape=bs), 1)
    return mask
Example #25
 def get_pos_seq(self, x):
     mask = K.cast(K.not_equal(x, 0), 'int32')
     pos = K.cumsum(K.ones_like(x, 'int32'), 1)
     return pos * mask
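
Position ids count from 1 and are zeroed wherever the token id is 0 (PAD). A minimal check of the two ops (a sketch, assuming TensorFlow 2.x in eager mode):

import tensorflow as tf
from tensorflow.keras import backend as K

# x = [5, 3, 0] -> pos = [1, 2, 0]
x = tf.constant([[5, 3, 0]])
mask = K.cast(K.not_equal(x, 0), 'int32')
pos = K.cumsum(K.ones_like(x, 'int32'), 1)
print((pos * mask).numpy())  # [[1 2 0]]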
Example #26
    def _build_nrms(self):

        hparams = self.hparams

        his_input_title = keras.Input(shape=(hparams.his_size,
                                             hparams.doc_size),
                                      dtype="int32")
        pred_input_title = keras.Input(shape=(hparams.npratio + 1,
                                              hparams.doc_size),
                                       dtype="int32")

        c_input_masks = keras.Input(shape=(hparams.npratio + 1,
                                           hparams.doc_size),
                                    dtype="int32")
        c_segments = keras.Input(shape=(hparams.npratio + 1, hparams.doc_size),
                                 dtype="int32")
        c_length = keras.Input(shape=(hparams.npratio + 1, 1), dtype="int32")
        h_input_masks = keras.Input(shape=(hparams.his_size, hparams.doc_size),
                                    dtype="int32")
        h_segments = keras.Input(shape=(hparams.his_size, hparams.doc_size),
                                 dtype="int32")
        h_length = keras.Input(shape=(hparams.his_size, 1), dtype="int32")
        one_input_masks = keras.Input(shape=(1, hparams.doc_size),
                                      dtype="int32")
        one_segments = keras.Input(shape=(1, hparams.doc_size), dtype="int32")
        one_length = keras.Input(shape=(1, 1), dtype="int32")

        pred_input_title_one = keras.Input(shape=(
            1,
            hparams.doc_size,
        ),
                                           dtype="int32")
        pred_title_one_reshape = K.reshape(pred_input_title_one,
                                           (-1, hparams.doc_size))

        imp_indexes = keras.Input(shape=(1, ), dtype="int32")
        user_indexes = keras.Input(shape=(1, ), dtype="int32")
        all_his = keras.Input(shape=(1, ), dtype="int32")
        all_can = keras.Input(shape=(1, ), dtype="int32")

        embedding_layer = layers.Embedding(
            self.word_size,
            self.word_emb_dim,
            weights=[self.word2vec_embedding],
            trainable=True,
        )

        titleencoder = self._build_newsencoder(embedding_layer)

        newsencoder = titleencoder

        pred_input_title_reshape = K.reshape(pred_input_title,
                                             (-1, hparams.doc_size))
        c_input_masks_reshape = K.reshape(c_input_masks,
                                          (-1, hparams.doc_size))
        c_segments_reshape = K.reshape(c_segments, (-1, hparams.doc_size))
        c_length_reshape = K.reshape(c_length, (-1, 1))

        news_present1 = newsencoder([
            pred_input_title_reshape, c_input_masks_reshape,
            c_segments_reshape, c_length_reshape
        ])
        print("???news_present1: ", news_present1)
        news_present = K.reshape(
            news_present1, (-1, hparams.npratio + 1, news_present1.shape[-1]))

        userencoder = self._build_userencoder(titleencoder)
        user_present = userencoder(
            [his_input_title, h_input_masks, h_segments, h_length, all_his])

        one_input_masks_reshape = K.reshape(one_input_masks,
                                            (-1, hparams.doc_size))
        one_segments_reshape = K.reshape(one_segments, (-1, hparams.doc_size))
        one_length_reshape = K.reshape(one_length, (-1, 1))

        news_present_one = newsencoder([
            pred_title_one_reshape, one_input_masks_reshape,
            one_segments_reshape, one_length_reshape
        ])

        preds = layers.Dot(axes=-1)([news_present, user_present])

        mask = K.one_hot(indices=all_can[:, 0], num_classes=K.shape(preds)[1])
        mask = 1 - K.cumsum(mask, axis=1)
        preds = preds - (1 - mask) * 1e12

        preds = layers.Activation(activation="softmax")(preds)

        pred_one = layers.Dot(axes=-1)([news_present_one, user_present])
        pred_one = layers.Activation(activation="sigmoid")(pred_one)

        model = keras.Model([
            imp_indexes, user_indexes, his_input_title, pred_input_title,
            c_input_masks, c_segments, h_input_masks, h_segments, c_length,
            h_length, all_his, all_can
        ],
                            preds,
                            name="model")
        scorer = keras.Model([
            imp_indexes, user_indexes, his_input_title, pred_input_title_one,
            one_input_masks, one_segments, h_input_masks, h_segments,
            one_length, h_length, all_his, all_can
        ],
                             pred_one,
                             name="score")

        return model, scorer
Example #27
def shape_list(x):
    if x.get_shape().dims is None:
        return tf.shape(x)
    static = x.get_shape().as_list()
    shape = tf.shape(x)
    ret = []
    for i in range(len(static)):
        dim = static[i]
        if dim is None:
            dim = shape[i]
        ret.append(dim)
    return ret


x_val = [[1, 7, 6, 8, 0, 0], [6, 7, 5, 0, 0, 0]]
x = tf.constant(x_val)
pos = K.cumsum(K.ones_like(x, 'int32'), 1)
with tf.Session(config=tf.ConfigProto(log_device_placement=True,
                                      allow_soft_placement=False)) as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    pos_val = sess.run(pos)
    print('Result from K.cumsum (no GPU support) = ' + str(pos_val))

tensor_shape = shape_list(x)
print('length = ' + str(tensor_shape[1]))
pos2 = tf.add(tf.range(tensor_shape[1]), 1)
pos2 = tf.tile(pos2, [tensor_shape[0]])
pos2 = tf.reshape(pos2, tensor_shape)
with tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                      allow_soft_placement=False)) as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    pos2_val = sess.run(pos2)
    print('Result from tf.range (GPU supported) = ' + str(pos2_val))
Example #28
def EMD_loss(y_true, y_pred, axis):
    cdf_ytrue = K.cast(K.cumsum(y_true, axis=axis), dtype=tf.float32)
    cdf_ypred = K.cumsum(y_pred, axis=axis)
    samplewise_emd = K.sqrt(
        K.mean(K.square(cdf_ytrue - cdf_ypred), axis=axis))
    return K.mean(samplewise_emd)
Example #29
 def convert_intervals_to_ration_mask(interval):
     p = K.cumsum(interval, axis=1)
     rations = K.clip(p[:, :, 0] - p[:, :, 1] + interval[:, :, 1], 0, 1)
     return rations
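
Here interval holds one-hot start and end markers in its last axis; the difference of their cumulative sums, plus the end marker itself, recovers the inclusive span mask. A minimal check (a sketch, assuming TensorFlow 2.x in eager mode): start = 1, end = 3 over 5 steps.

import tensorflow as tf
from tensorflow.keras import backend as K

interval = tf.constant([[[0., 0.], [1., 0.], [0., 0.], [0., 1.], [0., 0.]]])
p = K.cumsum(interval, axis=1)
rations = K.clip(p[:, :, 0] - p[:, :, 1] + interval[:, :, 1], 0, 1)
print(rations.numpy())  # [[0. 1. 1. 1. 0.]]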
Example #30
def earth_movers_distance(y_true, y_pred):
    cdf_true = K.cumsum(y_true, axis=-1)
    cdf_pred = K.cumsum(y_pred, axis=-1)
    emd = K.sqrt(K.mean(K.square(cdf_true - cdf_pred), axis=-1))
    return K.mean(emd)
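
A quick numeric check (a sketch, assuming TensorFlow 2.x in eager mode and the function above in scope): shifting all probability mass one bin to the right gives CDFs [0, 1, 1, 1] vs. [0, 0, 1, 1], squared difference [0, 1, 0, 0], mean 0.25, square root 0.5.

import tensorflow as tf

y_true = tf.constant([[0., 1., 0., 0.]])
y_pred = tf.constant([[0., 0., 1., 0.]])
print(float(earth_movers_distance(y_true, y_pred)))  # 0.5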