def is_finished(self, step_idx, source_length, alive_log_probs, finished_scores, finished_in_finished):
        """
            is_finished
        """
        # GNMT-style length penalty evaluated at the maximum decode length:
        # decoding runs at most source_length + 50 steps (see decode_length
        # below), so the base is (source_length + 50 + 5) / 6.
        base_1 = layers.cast(source_length, 'float32') + 55.0
        base_1 /= 6.0
        max_length_penalty = layers.pow(base_1, self.alpha)

        # Best possible score of the most likely alive sequence, after
        # applying the maximum length penalty.
        flat_alive_log_probs = layers.reshape(alive_log_probs, [-1])
        lower_bound_alive_scores_1 = layers.gather(flat_alive_log_probs, [self.get_alive_index])
        lower_bound_alive_scores = lower_bound_alive_scores_1 / max_length_penalty
        
        # Lowest score among finished sequences; unfinished slots contribute
        # 0, and if nothing has finished yet the score is pushed to -INF so
        # the termination condition cannot trigger.
        lowest_score_of_finished_in_finish = layers.reduce_min(finished_scores * finished_in_finished, dim=1)

        finished_in_finished = layers.cast(finished_in_finished, 'bool')
        lowest_score_of_finished_in_finish += \
                        ((1.0 - layers.cast(layers.reduce_any(finished_in_finished, 1), 'float32')) * -INF)
        
        # Stop early once every alive beam is provably worse than the worst
        # finished hypothesis.
        bound_is_met = layers.reduce_all(layers.greater_than(lowest_score_of_finished_in_finish,
                                                             lower_bound_alive_scores))

        # Also stop once the step counter reaches the maximum decode length.
        decode_length = source_length + 50
        length_cond = layers.less_than(x=step_idx, y=decode_length)

        return layers.logical_and(x=layers.logical_not(bound_is_met), y=length_cond)
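
A quick NumPy sketch of the same bound check may help; the numbers below are hypothetical, and -1e9 stands in for -INF:

import numpy as np

alpha, source_length = 0.6, 20.0
# Penalty at the maximum decode length, matching the (length + 55) / 6 base.
max_length_penalty = ((source_length + 55.0) / 6.0) ** alpha
alive_log_probs = np.array([[-3.2, -4.1]])    # hypothetical beam log-probs
finished_scores = np.array([[-0.9, -1.4]])    # hypothetical finished scores
finished_mask = np.array([[1.0, 1.0]])        # 1.0 where a hypothesis finished

lower_bound_alive = alive_log_probs[:, 0] / max_length_penalty
lowest_finished = np.min(finished_scores * finished_mask, axis=1)
lowest_finished += (1.0 - finished_mask.max(axis=1)) * -1e9
bound_is_met = np.all(lowest_finished > lower_bound_alive)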
Example 2
def _push_to_stack(gmr_desc, gmr_pos, gmr_lens, gmr_stack_info):
    """push grammar id in gmr_desc from gmr_pos to gmr_lens to
    gmr_stack. and update step_gmr_pos

    Args:
        gmr_desc (TYPE): NULL
        gmr_pos (TYPE): NULL
        gmr_lens (TYPE): NULL
        gmr_stack_info (tuple): [in/out] (gmr_stack, gmr_stack_pos)

    Returns: tuple (gmr_stack, gmr_stack_pos)

    Raises: NULL
    """
    gmr_stack, gmr_stack_pos = gmr_stack_info
    # Step back one position from gmr_lens, but only for rows that actually
    # have grammar ids (gmr_lens > 0), so gmr_mv_pos points at the last id.
    mv_step = layers.cast(layers.greater_than(gmr_lens,
                                              layers.zeros_like(gmr_lens)),
                          dtype=gmr_lens.dtype)
    gmr_mv_pos = layers.elementwise_sub(gmr_lens, mv_step)

    # Walk gmr_mv_pos backwards toward gmr_pos, pushing one grammar id per
    # iteration for rows that still have ids left.
    cond = layers.reduce_any(layers.greater_than(gmr_mv_pos, gmr_pos))
    while_op = layers.While(cond)
    with while_op.block():
        gmr_ids = nn_utils.batch_gather(gmr_desc, gmr_mv_pos)
        gmr_stack_tmp, gmr_stack_pos_tmp = data_structure.Stack.push(
            gmr_stack_info, gmr_ids, in_place=False)

        mv_cond = layers.greater_than(gmr_mv_pos, gmr_pos)
        gmr_mv_pos_tmp = fluider.elementwise_sub(gmr_mv_pos,
                                                 mv_cond,
                                                 force=True)
        new_gmr_stack, new_gmr_stack_pos = nn_utils.ifelse(
            mv_cond, [gmr_stack_tmp, gmr_stack_pos_tmp],
            [gmr_stack, gmr_stack_pos])
        layers.utils.map_structure(layers.assign,
                                   [new_gmr_stack, new_gmr_stack_pos],
                                   [gmr_stack, gmr_stack_pos])
        layers.assign(gmr_mv_pos_tmp, gmr_mv_pos)
        layers.assign(
            layers.reduce_any(layers.greater_than(gmr_mv_pos, gmr_pos)), cond)
    return gmr_stack, gmr_stack_pos
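
For intuition, here is a plain-Python sketch (my paraphrase, not the original helpers) of what the graph-mode loop computes for a single sample: the ids between gmr_pos and gmr_lens are pushed in reverse order, so the earliest one ends up on top of the stack.

def push_to_stack_py(gmr_desc, gmr_pos, gmr_lens, stack):
    # Push gmr_desc[gmr_lens - 1] down to gmr_desc[gmr_pos + 1], in that order.
    for i in range(gmr_lens - 1, gmr_pos, -1):
        stack.append(gmr_desc[i])
    return stack

# push_to_stack_py([7, 8, 9, 10], 0, 3, []) -> [9, 8]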
Example 3
    def forward(self, pred, target):
        """Triplet loss over all valid (anchor, positive, negative) triplets
        in the batch, on L2-normalized embeddings."""
        target = 1 - target[:, 0]
        batch_size, vector_size = pred.shape[0], pred.shape[1]

        pred = L.l2_normalize(pred, axis=1, epsilon=1e-10)

        # Pairwise Euclidean distances via ||a-b||^2 = ||a||^2 + ||b||^2 - 2a.b
        square_norm = L.reduce_sum(L.square(pred), dim=1)
        dist = L.elementwise_add(-2.0 * L.matmul(pred, pred, transpose_y=True),
                                 square_norm,
                                 axis=0)
        dist = L.elementwise_add(dist, square_norm, axis=1)
        # Clamp tiny negative values from rounding before the sqrt.
        dist = L.elementwise_max(dist, L.zeros_like(dist))
        dist = L.sqrt(dist)

        # Broadcast anchor-positive distances against anchor-negative
        # distances so every (a, p, n) triplet gets a margin term.
        ap_dist = L.reshape(dist, (0, 0, 1))
        an_dist = L.reshape(dist, (0, 1, -1))

        loss = L.expand(ap_dist, (1, 1, batch_size)) - L.expand(
            an_dist, (1, batch_size, 1)) + self.magin

        # Mask out pairs of a sample with itself (the diagonal).
        indice_equal = L.diag(
            L.fill_constant((batch_size, ), dtype='float32', value=1.0))
        indice_not_equal = 1.0 - indice_equal

        broad_matrix = L.expand(L.reshape(target, (-1, 1)),
                                (1, batch_size)) + L.expand(
                                    L.reshape(target, (1, -1)),
                                    (batch_size, 1))

        # pp: pairs whose label sum is 0, i.e. same class as the anchor.
        pp = L.cast(L.equal(broad_matrix, L.zeros_like(broad_matrix)),
                    dtype='float32')
        pp = L.reshape(indice_not_equal * pp, (0, 0, 1))

        # pn: pairs whose label sum is 1, i.e. a different class from the anchor.
        pn = L.cast(L.equal(broad_matrix,
                            L.zeros_like(broad_matrix) + 1),
                    dtype='float32')
        pn = L.reshape(indice_not_equal * pn, (1, 0, -1))

        # Combined mask of valid (anchor, positive, negative) triplets.
        apn = L.expand(pp,
                       (1, 1, batch_size)) * L.expand(pn, (batch_size, 1, 1))

        loss = loss * L.cast(apn, dtype='float32')
        loss = L.elementwise_max(loss, L.zeros_like(loss))

        # Average only over the triplets that actually violate the margin.
        num_tri = L.reduce_sum(
            L.cast(L.greater_than(loss, L.zeros_like(loss)), dtype='float32'))

        loss = L.reduce_sum(loss) * self.loss_weight / (num_tri + 1e-16)

        return loss
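
A small NumPy check (not from the original) of the distance trick used above: the two elementwise_add calls assemble ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b.

import numpy as np

pred = np.random.rand(4, 8).astype('float32')
pred /= np.linalg.norm(pred, axis=1, keepdims=True)   # mimic l2_normalize

square_norm = (pred ** 2).sum(axis=1)
dist_sq = square_norm[:, None] + square_norm[None, :] - 2.0 * pred @ pred.T
dist = np.sqrt(np.maximum(dist_sq, 0.0))              # clamp rounding noise

ref = ((pred[:, None, :] - pred[None, :, :]) ** 2).sum(axis=2)
assert np.allclose(dist_sq, ref, atol=1e-4)           # float32 tolerance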
Example 4
    def pairwise_hinge(self):
        """pairwise model"""
        poi_repr = L.split(self.poi_repr, 2, dim=0)
        pos_repr, neg_repr = poi_repr
        pos_pred = L.cos_sim(self.query_repr, pos_repr)
        neg_pred = L.cos_sim(self.query_repr, neg_repr)

        mode = 'hinge_loss'
        # hinge: max(0, 1 - z); logistic: log(1 + exp(-z)), with z = pos - neg
        if mode == 'hinge_loss':
            theta_z = L.relu(1 + neg_pred - pos_pred)
        elif mode == 'logistic_loss':
            theta_z = L.log(1 + L.exp(neg_pred - pos_pred))
        self.loss = L.reduce_mean(theta_z)
        # Ratio of correctly ordered pairs to inverted ones.
        pos_cnt = L.reduce_sum(L.cast(L.greater_than(pos_pred, neg_pred), dtype="float32"))
        neg_cnt = L.reduce_sum(L.cast(L.less_than(pos_pred, neg_pred), dtype="float32"))
        self.order = pos_cnt / (1e-5 + neg_cnt)
        self.metrics = [self.loss, self.order]
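
For reference, a tiny NumPy sketch (not from the original) of the two branches for a score gap z = pos_pred - neg_pred; both losses shrink as the positive pulls ahead:

import numpy as np

z = np.array([-0.5, 0.0, 0.5, 2.0])
hinge = np.maximum(0.0, 1.0 - z)     # [1.5, 1.0, 0.5, 0.0]
logistic = np.log1p(np.exp(-z))      # ~[0.97, 0.69, 0.47, 0.13]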
Example 5
    def pairwise_loss(self):
        """pairwise model"""
        # TODO: for neg_num neg poi, split num should be (neg_num + 1) on dim 0
        poi_repr = L.split(self.poi_repr, [1 * self.batch_size, self.neg_num * self.batch_size], dim=0)
        pos_repr, neg_repr = poi_repr
        # pos_repr: [-1 x emb_size]; neg_repr: [-1*n x emb_size]
        # Tile the query so each of the n negatives is scored against it.
        prefix_expand = L.reshape(L.expand(self.query_repr, [1, self.neg_num]), [-1, self.hidden_size])
        # size [-1*n x 1]
        neg_pred_n = self.safe_cosine_sim(neg_repr, prefix_expand)
        # size [-1 x 1]
        pos_pred = self.safe_cosine_sim(pos_repr, self.query_repr)
        cost = self.loss_neg_log_of_pos(pos_pred, L.reshape(neg_pred_n, [-1, self.neg_num]), 15)
        self.loss = L.mean(x=cost)
        # Average negative score per query, size [-1 x 1].
        neg_avg = L.reduce_mean(L.reshape(neg_pred_n, [-1, self.neg_num]), dim=1, keep_dim=True)
        pos_cnt = L.reduce_sum(L.cast(L.greater_than(pos_pred, neg_avg), dtype="float32"))
        neg_cnt = L.reduce_sum(L.cast(L.less_than(pos_pred, neg_avg), dtype="float32"))
        # Ratio of queries scored above vs. below their average negative.
        self.order = pos_cnt / (1e-5 + neg_cnt)
        self.metrics = [self.loss, self.order]
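
`loss_neg_log_of_pos` is not shown in this snippet. A common formulation, offered here as an assumption rather than the original code, treats the positive as the target class of a scaled softmax over the positive and its negatives:

import numpy as np

def loss_neg_log_of_pos_sketch(pos_score, neg_scores, gamma=15):
    # pos_score: [batch, 1]; neg_scores: [batch, neg_num]; gamma sharpens
    # the distribution, matching the 15 passed above (an assumption).
    logits = gamma * np.concatenate([pos_score, neg_scores], axis=1)
    logits -= logits.max(axis=1, keepdims=True)       # numerical stability
    probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
    return -np.log(probs[:, 0] + 1e-16)               # NLL of the positive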
Example 6
        def early_finish(alive_log_probs, finished_scores,
                         finished_in_finished):
            max_length_penalty = np.power(((5. + max_len) / 6.), alpha)
            # The best possible score of the most likely alive sequence.
            lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty

            # Now compute the lowest score of a finished sequence in finished.
            # If a sequence isn't finished, its score is multiplied by 0;
            # since scores are all negative, taking the min gives the score
            # of the lowest finished item.
            lowest_score_of_finished_in_finished = layers.reduce_min(
                finished_scores * finished_in_finished, 1)
            # If none of the sequences have finished, the min will be 0, so
            # replace it with negative INF. The score of any alive sequence
            # will be much higher than negative INF, and the termination
            # condition will not be met.
            lowest_score_of_finished_in_finished += (
                1. - layers.reduce_max(finished_in_finished, 1)) * -inf
            bound_is_met = layers.reduce_all(
                layers.greater_than(lowest_score_of_finished_in_finished,
                                    lower_bound_alive_scores))

            return bound_is_met
Example 7
def get_mask(seq, padding_idx=0):
    """Return a [batch, seq_len, 1] float mask: 1.0 where the token id is
    greater than padding_idx, 0.0 elsewhere."""
    pix = layers.unsqueeze(layers.ones_like(seq) * padding_idx, axes=2)
    mask = layers.cast(layers.greater_than(layers.unsqueeze(seq, axes=2), pix), 'float32')
    return mask
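
A minimal usage sketch with hypothetical ids, written in NumPy for readability; real tokens (id > padding_idx) map to 1.0 and padding maps to 0.0:

import numpy as np

padding_idx = 0
seq = np.array([[3, 7, 0, 0],
                [5, 0, 0, 0]])
mask = (seq > padding_idx).astype('float32')[:, :, None]  # same shape as get_mask's output
# mask[:, :, 0] == [[1, 1, 0, 0],
#                   [1, 0, 0, 0]]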