Example #1
def _normalize_attention(attmat):
    # attmat packs [attention_scores, value_matrix, question_mask];
    # `transpose`, `Flag` and `model_param` come from the enclosing scope.
    att = attmat[0]
    mat = attmat[1]
    ques_len = attmat[2]
    if transpose:
        att = K.permute_dimensions(att, (0, 2, 1))
    # Masked 3-D softmax over the last axis.
    e = K.exp(att - K.max(att, axis=-1, keepdims=True))
    g = e * ques_len

    s = K.sum(g, axis=-1, keepdims=True)
    sm_att = g / s

    # Pruning threshold: either the uniform weight 1/len or a fixed k value.
    if Flag:
        if model_param.k_value_ans == -1:
            threshold = 1.0 / K.sum(ques_len, axis=-1, keepdims=True)
        else:
            threshold = model_param.k_value_ans
    else:
        if model_param.k_value_ques == -1:
            threshold = 1.0 / K.sum(ques_len, axis=-1, keepdims=True)
        else:
            threshold = model_param.k_value_ques

    # Zero out attention weights at or below the threshold, then renormalize.
    k_threshold_e = K.switch(K.lesser_equal(sm_att, threshold), 0.0, sm_att)
    new_s = K.clip(K.sum(k_threshold_e, axis=-1, keepdims=True), 0.00001, 1024)
    new_sm_att = k_threshold_e / new_s

    return K.batch_dot(new_sm_att, mat)
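Since `_normalize_attention` consumes a list of tensors, it is presumably applied through a Keras `Lambda` layer; a minimal wiring sketch (the tensor names below are assumptions, not from the original):

    from keras.layers import Lambda

    # Hypothetical usage: att_scores, value_mat and ques_mask are assumed
    # symbolic tensors produced elsewhere in the model.
    attended = Lambda(_normalize_attention)([att_scores, value_mat, ques_mask])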
Example #2
def class_loss_regr_fixed_num(y_true, y_pred):
    # Smooth L1 loss over the class regression targets; num_classes,
    # lambda_cls_regr and epsilon come from the enclosing scope.
    x = y_true[:, :, 4 * num_classes:] - y_pred
    x_abs = K.abs(x)
    x_bool = K.cast(K.lesser_equal(x_abs, 1.0), 'float32')
    return lambda_cls_regr * K.sum(
        y_true[:, :, :4 * num_classes] *
        (x_bool * (0.5 * x * x) + (1 - x_bool) *
         (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4 * num_classes])
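The `K.lesser_equal` mask selects between the quadratic and linear pieces of the smooth L1 loss used in Faster R-CNN: 0.5*x^2 when |x| <= 1, and |x| - 0.5 otherwise. A standalone NumPy sanity check of the same piecewise form (illustrative only):

    import numpy as np

    def smooth_l1(x):
        # 0.5 * x^2 inside [-1, 1], |x| - 0.5 outside -- mirrors the
        # x_bool switch in the Keras expression above.
        x_abs = np.abs(x)
        x_bool = (x_abs <= 1.0).astype('float32')
        return x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5)

    print(smooth_l1(np.array([0.5, 2.0])))  # -> 0.125 and 1.5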
Example #3
def rpn_loss_regr_fixed_num(y_true, y_pred):
    # 'th' is Theano-style channels-first ordering; the else branch is
    # TensorFlow-style channels-last. num_anchors, lambda_rpn_regr and
    # epsilon come from the enclosing scope.
    if K.image_dim_ordering() == 'th':
        x = y_true[:, 4 * num_anchors:, :, :] - y_pred
        x_abs = K.abs(x)
        x_bool = K.cast(K.lesser_equal(x_abs, 1.0), 'float32')
        return lambda_rpn_regr * K.sum(
            y_true[:, :4 * num_anchors, :, :] *
            (x_bool * (0.5 * x * x) + (1 - x_bool) *
             (x_abs - 0.5))) / K.sum(epsilon +
                                     y_true[:, :4 * num_anchors, :, :])
    else:
        x = y_true[:, :, :, 4 * num_anchors:] - y_pred
        x_abs = K.abs(x)
        x_bool = K.cast(K.lesser_equal(x_abs, 1.0), 'float32')
        return lambda_rpn_regr * K.sum(
            y_true[:, :, :, :4 * num_anchors] *
            (x_bool * (0.5 * x * x) + (1 - x_bool) *
             (x_abs - 0.5))) / K.sum(epsilon +
                                     y_true[:, :, :, :4 * num_anchors])
Example #4
def rpn_loss_regr_fixed_num(y_true, y_pred):
    # Smooth L1 RPN regression loss, normalized by a fixed 256 anchors.
    x = y_true[:, 4 * num_anchors:, :, :] - y_pred
    x_abs = K.abs(x)
    x_bool = K.cast(K.lesser_equal(x_abs, 1.0), 'float32')
    return lambda_rpn_regr * K.sum(
        y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / 256.
Example #5
import tensorflow as tf
from keras import backend as K
from keras.activations import softplus  # assumed source of softplus

def softplus_stoch(x):
    # Stochastic activation: emit 1 where a uniform draw falls at or
    # below softplus(x), else 0.
    y = softplus(x)
    shape_x = K.shape(y)
    active_bool = K.lesser_equal(K.random_uniform(shape_x), y)
    res = tf.where(active_bool, K.ones_like(y), K.zeros_like(y))
    return res
Example #6
def stoch_activation_function(x):
    # Bernoulli unit: output 1 where a uniform draw is at or below x, else 0.
    shape_x = K.shape(x)
    active_bool = K.lesser_equal(K.random_uniform(shape_x), x)
    res = tf.where(active_bool, K.ones_like(x), K.zeros_like(x))
    return res
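An activation like this would plug in wherever Keras accepts an activation callable; a minimal sketch (the layer itself is an assumption):

    from keras.layers import Activation

    # Hypothetical usage: Activation accepts a custom callable.
    binary_gate = Activation(stoch_activation_function)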
Example #7
def maxStat(stat):
    # Keep entries at or below 1.0 and zero out the rest; `kb` is the
    # Keras backend alias used in this project.
    return kb.cast(kb.lesser_equal(stat, 1.0), "float32") * stat
Example #8
def class_loss_regr_fixed_num(y_true, y_pred):
    # Single-class variant of the smooth L1 regression loss; num_rois
    # and lambda_cls_regr come from the enclosing scope.
    x = y_true[:, :, 4:] - y_pred
    x_abs = K.abs(x)
    x_bool = K.cast(K.lesser_equal(x_abs, 1.0), 'float32')
    return lambda_cls_regr * K.sum(y_true[:, :, :4] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / num_rois
Example #9
def neighbor(y_true, y_pred, n=2):  ##### NOT WORKING #####
    ''' Trying to do what neighbor_accuracy does later '''
    return K.cast(
        K.lesser_equal(
            K.abs(K.argmax(y_pred, axis=-1) - K.argmax(y_true, axis=-1)), n),
        K.floatx())
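A metric like `neighbor` would normally be registered at compile time; a minimal sketch (`model` and the optimizer/loss choices are assumptions):

    # Hypothetical usage as a Keras metric.
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=[neighbor])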
Example #10
    def get_updates(self, params, constraints, loss):
        # Note: get_updates is called *once* by keras. Its job is to return a set of 'update
        # operations' to any K.variable (e.g. model weights or self.num_games). Updates are applied
        # whenever Keras' train_function is evaluated, i.e. in every batch. Model.fit_on_batch()
        # will trigger exactly one update. All updates use the 'old' value of parameters - there is
        # no dependency on the order of the list of updates.
        self.updates = []
        # Get expressions for gradients of model parameters.
        grads = self.get_gradients(loss, params)
        # Create a set of accumulated gradients, one for each game.
        shapes = [K.get_variable_shape(p) for p in params]
        self.cumulative_gradients = [[K.zeros(shape) for shape in shapes]
                                     for _ in range(self.num_games)]

        def conditional_update(cond, variable, new_value):
            '''Helper function to create updates that only happen when cond is True. Writes to
            self.updates and returns the new variable.

            Note: K.update(x, x) is cheap, but K.update_add(x, K.zeros_like(x)) can be expensive.
            '''
            maybe_new_value = K.switch(cond, new_value, variable)
            self.updates.append(K.update(variable, maybe_new_value))
            return maybe_new_value

        # Update cumulative gradient at index game_idx. This is done by returning an update for all
        # gradients that is a no-op everywhere except for the game_idx'th one. When game_idx is
        # changed by a call to set_current_game(), it will change the gradient that is getting
        # accumulated.
        # new_cumulative_gradients keeps references to the updated variables for use below in
        # updating parameters with the freshly-accumulated gradients.
        new_cumulative_gradients = [[None] * len(cgs)
                                    for cgs in self.cumulative_gradients]
        for i, cgs in enumerate(self.cumulative_gradients):
            for j, (g, cg) in enumerate(zip(grads, cgs)):
                new_gradient = conditional_update(K.equal(self.game_idx, i),
                                                  cg, cg + g)
                new_cumulative_gradients[i][j] = new_gradient

        # Compute the net update to parameters, taking into account the sign of each cumulative
        # gradient.
        net_grads = [K.zeros_like(g) for g in grads]
        for i, cgs in enumerate(new_cumulative_gradients):
            for j, cg in enumerate(cgs):
                net_grads[j] += self.gradient_sign[i] * cg

        # Trigger a full update when all games have finished.
        self.trigger_update = K.lesser_equal(self.running_games, 0)

        # Update model parameters conditional on trigger_update.
        for p, g in zip(params, net_grads):
            new_p = p + g * self.lr
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            conditional_update(self.trigger_update, p, new_p)

        # 'reset' game counter and gradient signs when parameters are updated.
        for sign in self.gradient_sign:
            conditional_update(self.trigger_update, sign, K.variable(0))
        conditional_update(self.trigger_update, self.running_games,
                           K.variable(self.num_games))
        return self.updates
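The comments above reference `set_current_game()`; a hedged sketch of the driver loop this optimizer appears to expect (every name beyond those in the snippet is an assumption):

    # Hypothetical training loop for the accumulate-then-apply scheme.
    for i in range(optimizer.num_games):
        optimizer.set_current_game(i)        # route gradients into game i's accumulator
        model.train_on_batch(states, moves)  # each batch adds to that accumulator
    # Once running_games reaches 0, trigger_update fires and the signed,
    # accumulated gradients are applied to the model parameters.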