Example #1
    def call(self, inputs, mask=None):
        if not isinstance(inputs, list) or len(inputs) <= 1:
            raise TypeError('SpkLifeLongMemory must be called on a list of tensors '
                            '(at least 2). Got: ' + str(inputs))
        # (None(batch), 1), index of speaker
        target_spk_l = inputs[0]
        target_spk_l = K.reshape(target_spk_l, (target_spk_l.shape[0], ))
        if K.dtype(target_spk_l) != 'int32':
            target_spk_l = K.cast(target_spk_l, 'int32')
        # (None(batch), embed_dim)
        spk_vector_l = inputs[1]
        # Update the life-long memory with the learned speaker vector.
        # First, L2-normalize the vector.
        spk_vector_eps = K.switch(K.equal(spk_vector_l, 0.), np.spacing(1), spk_vector_l)  # avoid zero
        spk_vector_eps = K.sqrt(K.sum(spk_vector_eps**2, axis=1))
        spk_vector_eps = spk_vector_eps.dimshuffle((0, 'x'))
        spk_vector = T.true_div(spk_vector_l, K.repeat_elements(spk_vector_eps, self.vec_dim, axis=1))
        # Store speech vector into life-long memory according to the speaker identity.
        life_long_mem = T.inc_subtensor(self.life_long_mem[target_spk_l, :], spk_vector)
        # Normalization for memory
        life_long_mem_eps = K.switch(K.equal(life_long_mem, 0.), np.spacing(1), life_long_mem)  # avoid 0
        life_long_mem_eps = K.sqrt(K.sum(life_long_mem_eps**2, axis=1))
        life_long_mem_eps = life_long_mem_eps.dimshuffle((0, 'x'))
        life_long_mem = T.true_div(life_long_mem, K.repeat_elements(life_long_mem_eps, self.vec_dim, axis=1))

        # (None(batch), spk_size, embed_dim)
        return life_long_mem
Example #2
def rec_L(y_true, y_pred):
	# Recall of the second ("negative") class: TN / (TN + FP), i.e. specificity.
	# s_flow pins column 0 of the one-hot output as the positive class.
	s_flow = K.variable(np.array([1, 0]))
	p = K.cast(K.equal(K.argmax(s_flow, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
	n = K.cast(K.not_equal(K.argmax(s_flow, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
	t = K.cast(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
	f = K.cast(K.not_equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
	tn = t * n
	fp = f * p
	return K.sum(tn) / (K.sum(tn) + K.sum(fp))
Example #3
def rec_S(y_true, y_pred):
	# Recall of the first ("positive") class: TP / (TP + FN), i.e. sensitivity.
	s_flow = K.variable(np.array([1, 0]))
	p = K.cast(K.equal(K.argmax(s_flow, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
	n = K.cast(K.not_equal(K.argmax(s_flow, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
	t = K.cast(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
	f = K.cast(K.not_equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
	tp = t * p
	fn = f * n
	return K.sum(tp) / (K.sum(tp) + K.sum(fn))
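Taken together, rec_S is the sensitivity and rec_L the specificity of a two-class softmax output, with s_flow pinning column 0 as the positive class. A minimal usage sketch (the model itself is hypothetical) passes them to model.compile as custom metrics:

from keras.models import Sequential
from keras.layers import Dense

# Hypothetical two-class classifier; rec_S and rec_L plug in as custom metrics.
model = Sequential([Dense(2, activation='softmax', input_shape=(10,))])
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=[rec_S, rec_L])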
Example #4
    def fn(y_true, y_pred):
        class_id_true = K.argmax(y_true, axis=-1)
        class_id_preds = K.argmax(y_pred, axis=-1)
        # Masking on class_id_true gives per-class recall; mask on
        # class_id_preds instead for per-class precision.
        accuracy_mask = K.cast(K.equal(class_id_true, interesting_class_id), 'int32')

        class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds), 'int32') * accuracy_mask
        class_acc = K.sum(class_acc_tensor) / K.maximum(K.sum(accuracy_mask), 1)
        return class_acc
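fn closes over interesting_class_id, so in its source it is almost certainly returned from a factory. A plausible wrapper, with the name single_class_accuracy assumed rather than taken from the source:

def single_class_accuracy(interesting_class_id):
    # Hypothetical factory around the fn above.
    def fn(y_true, y_pred):
        class_id_true = K.argmax(y_true, axis=-1)
        class_id_preds = K.argmax(y_pred, axis=-1)
        accuracy_mask = K.cast(K.equal(class_id_true, interesting_class_id), 'int32')
        class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds), 'int32') * accuracy_mask
        return K.sum(class_acc_tensor) / K.maximum(K.sum(accuracy_mask), 1)
    return fn

# e.g. model.compile(..., metrics=[single_class_accuracy(5)])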
Example #5
def _sample_weights(y, mask=None):
    """Compute sample weights."""
    if mask is None:
        weights = K.ones_like(y)
    else:
        weights = 1 - K.cast(K.equal(y, mask), K.floatx())
    return weights
Example #6
def time_distributed_masked_max(x, m):
    """
    Computes max along the first (time) dimension.

    In:
        x - input; a 3D tensor
        m - mask

    Masked-out positions are filled with -inf before taking the max;
    rows whose mask is entirely off fall back to 0.0.
    """
    m_value = 0.0  # fallback for fully masked rows
    # place -inf where the mask is off
    tmp = K.switch(K.equal(m, 0.0), -numpy.inf, 0.0)
    x_with_inf = x + K.expand_dims(tmp)
    x_max = K.max(x_with_inf, axis=1)
    r = K.switch(K.equal(x_max, -numpy.inf), m_value, x_max)
    return r
Example #7
def recall_loss(y_true, y_pred):
    '''
    input: y_true (theano Tensor), y_pred (theano Tensor)
    output: recall_loss (scalar tensor)
    '''
    # Cast the boolean matches before averaging, and use K.log (not np.log)
    # so the expression stays symbolic.
    matches = K.cast(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
    return -K.log(K.mean(matches))
Example #8
def _loss_tensor(y_true, y_pred):
    # `squared_hinge` comes from keras.losses in the source project.
    max_val = K.max(y_pred, axis=-2)  # temporal axis!
    max_val = K.repeat(max_val, K.shape(y_pred)[-2])
    # Keep y_pred only where it attains the temporal max; elsewhere fall back
    # to y_true so those positions contribute no loss. Example #23 embeds the
    # same masking in a full weighted hinge loss.
    mask = K.cast(K.equal(max_val, y_pred), K.floatx())
    y_pred = mask * y_pred + (1 - mask) * y_true
    return squared_hinge(y_true, y_pred)
Example #9
    def get_split_averages(input_tensor, input_mask, indices):
        # Splits the input tensor into three parts based on the indices and
        # returns the average of values before the index, the values at the
        # index, and the average of values after the index.
        # (`switch` and `K.lesser` are the source project's Keras-1-era
        # helpers; Keras 2 renamed the backend comparison to `K.less`.)
        # input_tensor: (batch_size, input_length, input_dim)
        # input_mask: (batch_size, input_length)
        # indices: (batch_size, 1)
        # (1, input_length)
        length_range = K.expand_dims(K.arange(K.shape(input_tensor)[1]), dim=0)
        # (batch_size, input_length)
        batched_range = K.repeat_elements(length_range, K.shape(input_tensor)[0], 0)
        tiled_indices = K.repeat_elements(indices, K.shape(input_tensor)[1], 1)  # (batch_size, input_length)
        greater_mask = K.greater(batched_range, tiled_indices)  # (batch_size, input_length)
        lesser_mask = K.lesser(batched_range, tiled_indices)  # (batch_size, input_length)
        equal_mask = K.equal(batched_range, tiled_indices)  # (batch_size, input_length)

        # We also need to mask these masks using the input mask.
        # (batch_size, input_length)
        if input_mask is not None:
            greater_mask = switch(input_mask, greater_mask, K.zeros_like(greater_mask))
            lesser_mask = switch(input_mask, lesser_mask, K.zeros_like(lesser_mask))

        post_sum = K.sum(switch(K.expand_dims(greater_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        pre_sum = K.sum(switch(K.expand_dims(lesser_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        values_at_indices = K.sum(switch(K.expand_dims(equal_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)

        post_normalizer = K.expand_dims(K.sum(greater_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)
        pre_normalizer = K.expand_dims(K.sum(lesser_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)

        return K.cast(pre_sum / pre_normalizer, 'float32'), values_at_indices, K.cast(post_sum / post_normalizer, 'float32')
Example #10
    def build_model(self, p):
        S = Input(p['input_shape'], name='input_state')
        A = Input((1,), name='input_action', dtype='int32')
        R = Input((1,), name='input_reward')
        T = Input((1,), name='input_terminate', dtype='int32')
        NS = Input(p['input_shape'], name='input_next_state')

        self.Q_model = self.build_cnn_model(p)
        self.Q_old_model = self.build_cnn_model(p, False)  # Q hat in paper
        self.Q_old_model.set_weights(self.Q_model.get_weights())  # Q' = Q

        Q_S = self.Q_model(S)  # batch * actions
        Q_NS = disconnected_grad(self.Q_old_model(NS))  # disconnected gradient is not necessary

        y = R + p['discount'] * (1-T) * K.max(Q_NS, axis=1, keepdims=True)  # batch * 1

        action_mask = K.equal(Tht.arange(p['num_actions']).reshape((1, -1)), A.reshape((-1, 1)))  # Tht: the project's theano.tensor alias
        output = K.sum(Q_S * action_mask, axis=1).reshape((-1, 1))
        loss = K.sum((output - y) ** 2)  # sum could also be mean()

        optimizer = adam(p['learning_rate'])
        params = self.Q_model.trainable_weights
        update = optimizer.get_updates(params, [], loss)

        self.training_func = K.function([S, A, R, T, NS], loss, updates=update)
        self.Q_func = K.function([S], Q_S)
Example #11
File: base.py Project: EderSantana/seya
 def get_output(self, train=False):
     X = self.get_input(train)
     if train:
         M = K.max(X, axis=(2, 3), keepdims=True)
         R = K.switch(K.equal(X, M), X, 0.)
         return R
     else:
         return X
Example #12
 def _get_anchor_positive_triplet_mask(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
     # mask1: 1.0 where label(a) == label(p)
     mask1 = K.equal(K.expand_dims(y_true, 0), K.expand_dims(y_true, 1))
     mask1 = K.cast(mask1, K.dtype(pairwise_dist))
     # mask2: 1.0 where a != p (a zero distance marks the same embedding)
     mask2 = K.not_equal(pairwise_dist, 0.0)
     mask2 = K.cast(mask2, K.dtype(pairwise_dist))
     return mask1 * mask2
Example #13
def cat_acc(y, z):
    """Compute categorical accuracy given one-hot matrices."""
    weights = _cat_sample_weights(y)
    _acc = K.cast(K.equal(K.argmax(y, axis=-1),
                          K.argmax(z, axis=-1)),
                  K.floatx())
    _acc = K.sum(_acc * weights) / K.sum(weights)
    return _acc
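cat_acc relies on _cat_sample_weights, which is not shown here; by analogy with _sample_weights in Example #5, it presumably zeroes out padded rows. A guessed sketch, not the source implementation:

def _cat_sample_weights(y, mask=None):
    # Assumed analogue of _sample_weights for one-hot targets: weight 1 for
    # rows with any nonzero entry, 0 for all-zero (padded) rows.
    return K.cast(K.any(K.not_equal(y, 0), axis=-1), K.floatx())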
Example #14
def w_categorical_crossentropyold(y_true, y_pred, weights):
    # `product` comes from itertools in the source project.
    nb_cl = len(weights)
    final_mask = K.zeros_like(y_pred[:, 0])
    y_pred_max = K.max(y_pred, axis=1)  # per-sample max over classes
    y_pred_max = K.reshape(y_pred_max, (K.shape(y_pred)[0], 1))
    y_pred_max_mat = K.cast(K.equal(y_pred, y_pred_max), K.floatx())
    for c_p, c_t in product(range(nb_cl), range(nb_cl)):
        final_mask += (weights[c_t, c_p] * y_pred_max_mat[:, c_p] * y_true[:, c_t])
    # Keras-1-era argument order: (output, target).
    return K.categorical_crossentropy(y_pred, y_true) * final_mask
Example #15
def squeezed_accuracy(y_true, y_pred):
    class_id_true = K.argmax(K.squeeze(y_true, axis=0), axis=-1)
    class_id_preds = K.argmax(K.squeeze(y_pred, axis=0), axis=-1)
    class_acc = K.mean(K.equal(class_id_true, class_id_preds))
    return class_acc
Example #16
 def call(self, x, mask=None):
     # x[0]: (batch_size, input_length, input_dim)
     # x[1]: (batch_size, 1) indices of prepositions
     # Optional: x[2]: (batch_size, input_length - 2)
     assert isinstance(x, list) or isinstance(x, tuple)
     encoded_sentence = x[0]
     prep_indices = K.squeeze(x[1], axis=-1)  #(batch_size,)
     batch_indices = K.arange(K.shape(encoded_sentence)[0])  # (batch_size,)
     if self.with_attachment_probs:
         # We're essentially doing K.argmax(x[2]) here, but argmax is not differentiable!
         head_probs = x[2]
         head_probs_padding = K.zeros_like(x[2])[:, :2]  # (batch_size, 2)
         # (batch_size, input_length)
         padded_head_probs = K.concatenate([head_probs, head_probs_padding])
         # (batch_size, 1)
         max_head_probs = K.expand_dims(K.max(padded_head_probs, axis=1))
         # (batch_size, input_length, 1)
         max_head_prob_indices = K.expand_dims(K.equal(padded_head_probs, max_head_probs))
         # (batch_size, input_length, input_dim)
         masked_head_encoding = K.switch(max_head_prob_indices, encoded_sentence, K.zeros_like(encoded_sentence))
         # (batch_size, input_dim)
         head_encoding = K.sum(masked_head_encoding, axis=1)
     else:
         head_indices = prep_indices - 1  # (batch_size,)
         head_encoding = encoded_sentence[batch_indices, head_indices, :]  # (batch_size, input_dim)
     prep_encoding = encoded_sentence[batch_indices, prep_indices, :]  # (batch_size, input_dim)
     child_encoding = encoded_sentence[batch_indices, prep_indices+1, :]  # (batch_size, input_dim)
     '''
     prep_indices = x[1]
     sentence_mask = mask[0]
     if sentence_mask is not None:
         if K.ndim(sentence_mask) > 2:
             # This means this layer came after a Bidirectional layer. Keras has this bug which
             # concatenates input masks instead of output masks.
             # TODO: Fix Bidirectional instead.
             sentence_mask = K.any(sentence_mask, axis=(-2, -1))
     head_encoding, prep_encoding, child_encoding = self.get_split_averages(encoded_sentence, sentence_mask,
                                                                            prep_indices)
     '''
     head_projection = K.dot(head_encoding, self.proj_head)  # (batch_size, proj_dim)
     prep_projection = K.dot(prep_encoding, self.proj_prep)  # (batch_size, proj_dim)
     child_projection = K.dot(child_encoding, self.proj_child)  # (batch_size, proj_dim)
     #(batch_size, proj_dim)
     if self.composition_type == 'HPCT':
         composed_projection = K.tanh(head_projection + prep_projection + child_projection)
     elif self.composition_type == 'HPC':
         prep_child_projection = K.tanh(prep_projection + child_projection)  # (batch_size, proj_dim)
         composed_projection = K.tanh(head_projection + prep_child_projection)
     else:
         # Composition type in HC
         composed_projection = K.tanh(head_projection + child_projection)
     for hidden_layer in self.hidden_layers:
         composed_projection = K.tanh(K.dot(composed_projection, hidden_layer))  # (batch_size, proj_dim)
     # (batch_size, num_classes)
     class_scores = K.dot(composed_projection, self.scorer)
     label_probabilities = K.softmax(class_scores)
     return label_probabilities
Example #17
def masked_categorical_accuracy(y_true, y_pred, mask):

    y_true = K.argmax(y_true, axis=-1)
    y_pred = K.argmax(y_pred, axis=-1)

    error = K.equal(y_true, y_pred)

    mask_template = T.and_(T.neq(y_true,  mask), T.neq(y_true, 0)).nonzero()

    return K.mean(error[mask_template])
Example #18
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    nb_classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, nb_classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       nb_classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[-1], tf.bool)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))
Example #19
 def call(self, x, mask=None):
     if mask is not None:
         mask = K.cast(mask, K.floatx())
         mask = K.expand_dims(mask, axis=-1)
         s = K.sum(mask, axis=1)
         # K.maximum guards the division when the mask is all zeros; a
         # symbolic K.equal(...) can never be tested with `is None`.
         return K.cast(K.sum(x * mask, axis=1) / K.sqrt(K.maximum(s, K.epsilon())), K.floatx())
     else:
         # len() is undefined on a symbolic tensor, so take the timestep
         # count from K.shape.
         return K.sum(x, axis=1) / K.sqrt(K.cast(K.shape(x)[1], K.floatx()))
Example #20
def one_hot(x):
    '''
    Sparse-ifies 3-dimensional tensor by making the largest value 1 and the rest 0.
    Aka, make one hot.

    Args: 
        x (3d theano tensor)

    Returns:
       3d theano tensor 
    '''
    return K.cast(K.equal(K.max(x, axis=-1, keepdims=True), x), K.floatx())
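A quick sanity check of one_hot with concrete values (K.constant/K.eval assume a backend where constants can be evaluated directly, e.g. TensorFlow); note that ties among maxima all become 1:

x = K.constant([[[0.1, 0.7, 0.2],
                 [0.4, 0.4, 0.2]]])
print(K.eval(one_hot(x)))
# [[[0. 1. 0.]
#   [1. 1. 0.]]]  <- both tied maxima are kept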
Example #21
 def call(self, x, mask=None):
     if mask is not None:
         mask = K.cast(mask, K.floatx())
         mask = K.expand_dims(mask, axis=-1)
         s = K.sum(mask, axis=1)
         # The 1e-10 constant already guards the all-zero-mask case; a
         # symbolic K.equal(...) can never be tested with `is None`.
         return K.cast(K.sum(x * mask, axis=1) / (K.sqrt(s) + K.constant(1e-10, dtype=K.floatx())), K.floatx())
     else:
         return K.mean(x, axis=1)
Example #22
def contingency_table(y, z):
    """Compute contingency table."""
    y = K.round(y)
    z = K.round(z)

    def count_matches(a, b):
        tmp = K.concatenate([a, b])
        return K.sum(K.cast(K.all(tmp, -1), K.floatx()))

    ones = K.ones_like(y)
    zeros = K.zeros_like(y)
    y_ones = K.equal(y, ones)
    y_zeros = K.equal(y, zeros)
    z_ones = K.equal(z, ones)
    z_zeros = K.equal(z, zeros)

    tp = count_matches(y_ones, z_ones)
    tn = count_matches(y_zeros, z_zeros)
    fp = count_matches(y_zeros, z_ones)
    fn = count_matches(y_ones, z_zeros)

    return (tp, tn, fp, fn)
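The four counts make the standard derived metrics one-liners. A minimal sketch; the K.epsilon() terms are added here to avoid division by zero and are not part of the source:

def precision(y, z):
    tp, tn, fp, fn = contingency_table(y, z)
    return tp / (tp + fp + K.epsilon())

def recall(y, z):
    tp, tn, fp, fn = contingency_table(y, z)
    return tp / (tp + fn + K.epsilon())

def f1(y, z):
    p, r = precision(y, z), recall(y, z)
    return 2 * p * r / (p + r + K.epsilon())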
Example #23
 def loss(y_true, y_pred):
     from plasma.conf import conf
     fac = MaxHingeTarget.fac
     #overall_fac = np.prod(np.array(K.shape(y_pred)[1:]).astype(np.float32))
     overall_fac = K.prod(K.cast(K.shape(y_pred)[1:],K.floatx()))
     max_val = K.max(y_pred,axis=-2) #temporal axis!
     max_val1 = K.repeat(max_val,K.shape(y_pred)[-2])
     mask = K.cast(K.equal(max_val1,y_pred),K.floatx())
     y_pred1 = mask * y_pred + (1-mask) * y_true
     weight_mask = K.mean(y_true,axis=-1)
     weight_mask = K.cast(K.greater(weight_mask,0.0),K.floatx()) #positive label!
     weight_mask = fac*weight_mask + (1 - weight_mask)
     #return weight_mask*squared_hinge(y_true,y_pred1)
     return conf['model']['loss_scale_factor']*overall_fac*weight_mask*hinge(y_true,y_pred1)
Example #24
 def _pairwise_distances(self, inputs: List[Tensor]) -> Tensor:
     emb_c, emb_r = inputs
     bs = K.shape(emb_c)[0]
     embeddings = K.concatenate([emb_c, emb_r], 0)
     dot_product = K.dot(embeddings, K.transpose(embeddings))
     square_norm = K.batch_dot(embeddings, embeddings, axes=1)
     # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
     distances = K.transpose(square_norm) - 2.0 * dot_product + square_norm
     distances = K.slice(distances, (0, bs), (bs, bs))
     distances = K.clip(distances, 0.0, None)
     # Add a tiny epsilon where the distance is exactly zero so sqrt has a
     # finite gradient there, then zero those entries out again afterwards.
     mask = K.cast(K.equal(distances, 0.0), K.dtype(distances))
     distances = distances + mask * 1e-16
     distances = K.sqrt(distances)
     distances = distances * (1.0 - mask)
     return distances
Example #25
File: cwrnn.py Project: braingineer/ikelos
    def step(self, x, states):
        prev_output = states[0]
        time_step = states[1]
        B_U = states[2]
        B_W = states[3]
        period = states[4]

        if self.consume_less == 'cpu':
            h = x
        else:
            h = K.dot(x * B_W, self.W) + self.b

        output = self.activation(h + K.dot(prev_output * B_U, self.U))
        output = K.switch(K.equal(time_step % period, 0.), output, prev_output)
        return output, [output, time_step+1]
Example #26
    def step(self, x, states):
        # assert len(states) == 3
        h_tm1 = states[0]
        t = states[1]
        p_tm1 = states[2]
        
        x_t = K.dot(x, self.xh) + self.b

        p = x_t + K.dot(h_tm1, self.hh * self.mask)

        p_t = K.switch(K.equal(t[0] % self.period, 0), p, p_tm1)
        
        h = self.activation(p_t)

        return h, [h, t+1, p_t]
Example #27
def binary_accuracy_with_threshold(y_true, y_pred, y_threshold):
    '''
    :param y_true: binary tensor of shape (nb_samples, input_dim) or (nb_samples, time_steps, input_dim)
    :param y_pred: prediction tensor of the same shape as y_true
    :param y_threshold: threshold tensor; predict 1 only where y_pred is no less than the threshold, otherwise 0
    :return: binary accuracy, a scalar
    '''
    ndim_threshold = K.ndim(y_threshold)  # 0 or 1
    if ndim_threshold == 1:  # expand dims to match the shape of y_true
        ndim_y = K.ndim(y_true)  # 2: (nb_samples, input_dim); or 3: (nb_samples, time_steps, input_dim)
        if ndim_y == 2:
            y_threshold = K.expand_dims(y_threshold, 0)
        elif ndim_y == 3:
            y_threshold = K.expand_dims(y_threshold, 0)
            y_threshold = K.expand_dims(y_threshold, 0)
    # Cast so the comparison below sees matching dtypes.
    y_pred = K.cast(K.greater_equal(y_pred, y_threshold), K.dtype(y_true))
    return K.mean(K.equal(y_true, y_pred))
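A quick sketch of calling it with a per-dimension threshold (values illustrative, evaluated eagerly via K.eval):

y_true = K.constant([[1., 0., 1.]])
y_pred = K.constant([[0.4, 0.6, 0.9]])
y_threshold = K.constant([0.3, 0.5, 0.7])  # one threshold per output dim
print(K.eval(binary_accuracy_with_threshold(y_true, y_pred, y_threshold)))  # ~0.667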
Example #28
        def __call__(self, y_true, y_pred):
            """Computes the number of true positives in a batch.

            # Arguments
                y_true: Tensor, batch_wise labels
                y_pred: Tensor, batch_wise predictions

            # Returns
                The total number of true positives seen this epoch at the
                    completion of the batch.
            """
            y_true = K.cast(y_true, 'int32')
            y_pred = K.cast(K.round(y_pred), 'int32')
            correct_preds = K.cast(K.equal(y_pred, y_true), 'int32')
            true_pos = K.cast(K.sum(correct_preds * y_true), 'int32')
            current_true_pos = self.true_positives * 1
            self.add_update(K.update_add(self.true_positives,
                                         true_pos),
                            inputs=[y_true, y_pred])
            return current_true_pos + true_pos
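The __call__ above is a method of a stateful metric object: self.true_positives is a backend variable, and add_update accumulates into it each batch. A plausible skeleton for the enclosing class (class and attribute names assumed, following the old Keras stateful-metric pattern):

from keras import backend as K
from keras.layers import Layer

class TruePositives(Layer):
    # Hypothetical wrapper; only the state handling is sketched here.
    def __init__(self, name='true_positives', **kwargs):
        super(TruePositives, self).__init__(name=name, **kwargs)
        self.stateful = True  # keep the count across batches within an epoch
        self.true_positives = K.variable(0, dtype='int32')

    def reset_states(self):
        K.set_value(self.true_positives, 0)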
Example #29
        def _dream_step(x, states):
            # input + states
            assert len(states) == 2*self.depth + 1
            x = states[-1]
            x = K.switch(K.equal(x, K.max(x, axis=-1,
                                          keepdims=True)), 1., 0.)
            states = states[:-1]

            h = []
            for i, (h_tm1, c_tm1) in enumerate(zip(states[:-1:2], states[1::2])):
                x, new_states = self.lstms[i].step(x, [h_tm1, c_tm1])
                h.extend(new_states)

            if self.readout:
                h += [self.readout_layer(h[-2])]
                final = h[-1]
            else:
                h += [h[-2]]
                final = h[-2]

            return final, h
Example #30
def binary_accuracy(output_true, output_pred):
    return K.mean(K.equal(output_true, K.round(output_pred)), axis=-1)
Example #31
def accw(y_true, y_pred):
    y_pred = K.clip(y_pred, -1, 1)
    return K.mean(K.equal(y_true, K.round(y_pred)))
Example #32
 def accuracy_round(y_true, y_pred):
     y_true = (y_true + 1) / 2.0
     y_pred = (y_pred + 1) / 2.0
     equal = K.cast(K.equal(K.round(y_true), K.round(y_pred)), K.floatx())
     acc = K.sum(equal) / K.cast(tf.size(equal), K.floatx())
     return acc
Example #33
def binary_accuracy_with_weights(y_true, y_pred):
    return real_sampled_mean(
        K.cast(K.equal(y_true[..., 0], K.round(y_pred[..., 0])), 'float32'),
        y_true[..., 1])
Example #34
def binary_accuracy(y_true, y_pred):
    """
    Calculates the mean accuracy rate across all predictions for binary
    classification problems.
    """
    return K.mean(K.equal(y_true, K.round(y_pred)))
Example #35
def binary_accuracy_with_weights_mt(y_true, y_pred, bin_size):
    return real_sampled_mean(
        K.cast(
            K.equal(y_true[..., 0:bin_size], K.round(y_pred[..., 0:bin_size])),
            'float32'), y_true[..., bin_size:])
Example #36
def class_accuracy(class_label, y_true, y_pred):
    # normalize_y_pred (defined in the source module) one-hots the predictions.
    y_pred = normalize_y_pred(y_pred)
    return K.cast(K.equal(y_true[:, class_label], y_pred[:, class_label]),
                  K.floatx())
Example #37
def true_positive(y_true, y_pred):
    y_pred = normalize_y_pred(y_pred)
    return K.cast(K.equal(y_true + y_pred, 2),
                  K.floatx())
Example #38
def _tp(y_true, y_pred, typecast='float32'):
    good_preds = K.cast(K.equal(y_true, y_pred), typecast)
    true_pos = K.cast(K.sum(good_preds * y_true), typecast)
    return true_pos
Example #39
def _tn(y_true, y_pred, typecast='float32'):
    good_preds = K.cast(K.equal(y_true, y_pred), typecast)
    true_neg = K.cast(K.sum(good_preds * K.cast(K.equal(y_true, 0), typecast)),
                      typecast)
    return true_neg
Example #40
File: loss.py Project: moomou/mlab
    def _binary_accuracy(y_true, y_pred):
        # `dist` is the decision threshold captured from the enclosing factory.
        true = y_true == 1
        pred = y_pred >= dist

        return K.mean(K.equal(true, pred))
Example #41
def acc2(y_true, y_pred):
    return K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
Example #42
def class_true_positive(class_label, y_true, y_pred):
    y_pred = normalize_y_pred(y_pred)
    return K.cast(K.equal(y_true[:, class_label] + y_pred[:, class_label], 2), K.floatx())
Example #43
def get_categorical_accuracy_keras(y_true, y_pred):
    return K.mean(K.equal(K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1)))
Example #44
 def accuracy(y_true, y_pred):
     equal = K.cast(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
     acc = K.sum(equal) / K.cast(tf.size(equal), K.floatx())
     return acc
Example #45
def binary_accuracy_positive_with_weights(y_true, y_pred):
    # we do not use real_sampled_mean, because when y_true[..., 0] == 1.0, then y_true[..., 1] is never == 0
    pos = y_true[..., 0]
    return K.sum(
        K.cast(K.equal(y_true[..., 0], K.round(y_pred[..., 0])), K.floatx()) *
        pos) / K.maximum(K.sum(pos), 0.00001)
Example #46
 def call(self, x, mask=None):
     # Winner-take-all over groups of OneOnX consecutive units: reshape,
     # keep only each group's max, then flatten back. `//` keeps the
     # reshape dimensions integral.
     R = T.reshape(x, (T.shape(x)[0], T.shape(x)[1] // self.OneOnX, self.OneOnX))
     M = K.max(R, axis=2, keepdims=True)
     R = K.switch(K.equal(R, M), R, 0.)
     R = T.reshape(R, (T.shape(x)[0], T.shape(x)[1]))
     return R
Example #47
def my_accuracy(y_true, y_pred):
    # Each sample is weighted by its confidence margin 2*|y_true - 0.5|;
    # the cast keeps the boolean match tensor multipliable.
    return K.mean(2 * K.abs(y_true - 0.5) * K.cast(K.equal(y_true, K.round(y_pred)), K.floatx()),
                  axis=-1)
Example #48
def real_sampled_mean(item, weights):
    real_samples = 1.0 - K.cast(K.equal(weights, 0.0), 'float32')
    divisor = K.sum(real_samples)
    return K.switch(K.equal(divisor, 0.0), 0.0,
                    K.sum(item * real_samples) / divisor)
Example #49
def accuracy(y_true, y_pred):
    num = K.sum(K.cast(K.equal(K.argmax(y_true[:,:,:,1:], axis=-1), K.argmax(y_pred[:,:,:,1:], axis=-1)), dtype="float32") * y_true[:,:,:,0])
    denom = K.sum(y_true[:,:,:,0])
    return num / (denom + 1) # make sure we don't get divide by zero
Example #50
File: utils.py Project: pigtamer/unet-ki67
 def focal_loss_fixed(y_true, y_pred):
     pt_1 = tf.where(K.equal(y_true, 1), y_pred, K.ones_like(y_pred))
     pt_0 = tf.where(K.equal(y_true, 0), y_pred, K.zeros_like(y_pred))
     return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.mean(
         (1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
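alpha and gamma are captured from an enclosing scope here, much like Example #4's closure. A hedged factory sketch; the K.clip line is my addition so K.log never sees an exact 0 or 1, and is not in the source:

import tensorflow as tf
from keras import backend as K

def focal_loss(alpha=0.25, gamma=2.0):
    def focal_loss_fixed(y_true, y_pred):
        # Clip predictions away from {0, 1} to keep the logs finite.
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        pt_1 = tf.where(K.equal(y_true, 1), y_pred, K.ones_like(y_pred))
        pt_0 = tf.where(K.equal(y_true, 0), y_pred, K.zeros_like(y_pred))
        return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.mean(
            (1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
    return focal_loss_fixed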
Example #51
 def call(self, x):
     return K.cast(K.equal(x, self.select_neuron), dtype="float32")
Example #52
def age_group_accuracy(y_true, y_pred):
    # Age buckets: 0-12 -> group 0, 13-14 -> group 1, 15+ -> group 2.
    array = np.array([0] * 13 + [1] * 2 + [2] * 10000000)
    age_to_group = K.variable(value=array, dtype='int32', name='age_to_group')
    ages_true = tf.gather(age_to_group, tf.cast(tf.rint(y_true), tf.int32))
    ages_pred = tf.gather(age_to_group, tf.cast(tf.rint(y_pred), tf.int32))
    return K.mean(K.equal(ages_true, ages_pred), axis=-1)
Example #53
    def call(self, inputs, mask=None):
        """
        Calculate the probability of each answer option.

        Parameters
        ----------
        inputs: List of Tensors
            The inputs to the layer must be passed in as a list to the
            ``call`` function. The inputs expected are a Tensor of
            document indices, a Tensor of document probabilities, and
            a Tensor of options (in that order).
            The documents indices tensor is a 2D tensor of shape
            (batch size, document_length).
            The document probabilities tensor is a 2D Tensor of shape
            (batch size, document_length).
            The options tensor is of shape (batch size, num_options,
            option_length).
        mask: Tensor or None, optional (default=None)
            Tensor of shape (batch size, max number of options) representing
            which options are padding and thus have a 0 in the associated
            mask position.

        Returns
        -------
        options_probabilities : Tensor
            Tensor with shape (batch size, max number of options) with floats,
            where each float is the normalized probability of the option as
            calculated based on ``self.multiword_option_mode``.
        """
        document_indices, document_probabilities, options = inputs
        # This takes `document_indices` from (batch_size, document_length) to
        # (batch_size, num_options, option_length, document_length), with the
        # original indices repeated, so that we can create a mask indicating
        # which options are used in the probability computation. We do the
        # same thing for `document_probabilities` to select the probability
        # values corresponding to the words in the options.
        expanded_indices = K.expand_dims(K.expand_dims(document_indices, 1), 1)
        tiled_indices = K.repeat_elements(K.repeat_elements(
            expanded_indices, K.int_shape(options)[1], axis=1),
                                          K.int_shape(options)[2],
                                          axis=2)

        expanded_probabilities = K.expand_dims(
            K.expand_dims(document_probabilities, 1), 1)
        tiled_probabilities = K.repeat_elements(K.repeat_elements(
            expanded_probabilities, K.int_shape(options)[1], axis=1),
                                                K.int_shape(options)[2],
                                                axis=2)

        expanded_options = K.expand_dims(options, 3)
        tiled_options = K.repeat_elements(expanded_options,
                                          K.int_shape(document_indices)[-1],
                                          axis=3)

        # This generates a binary tensor of the same shape as tiled_options /
        # tiled_indices that indicates if index is option or padding.
        options_words_mask = K.cast(K.equal(tiled_options, tiled_indices),
                                    "float32")

        # This applies a mask to the probabilities to select the
        # indices for probabilities that correspond with option words.
        selected_probabilities = options_words_mask * tiled_probabilities

        # This sums up the probabilities to get the aggregate probability for
        # each option's constituent words.
        options_word_probabilities = K.sum(selected_probabilities, axis=3)

        sum_option_words_probabilities = K.sum(options_word_probabilities,
                                               axis=2)

        if self.multiword_option_mode == "mean":
            # This block figures out how many words (excluding
            # padding) are in each option.
            # Here we generate the mask on the input option.
            option_mask = K.cast(K.not_equal(options, K.zeros_like(options)),
                                 "float32")
            # This tensor stores the number words in each option.
            divisor = K.sum(option_mask, axis=2)
            # If the divisor is zero at a position, we add epsilon to it.
            is_zero_divisor = K.equal(divisor, K.zeros_like(divisor))
            divisor = switch(is_zero_divisor,
                             K.ones_like(divisor) * K.epsilon(), divisor)
        else:
            # Since we're taking the sum, we divide all sums by 1.
            divisor = K.ones_like(sum_option_words_probabilities)

        # Now we divide the sums by the divisor we generated above.
        option_probabilities = sum_option_words_probabilities / divisor
        return option_probabilities
Example #54
def new_binary_accuracy(y_true, y_pred):
    return K.mean(K.equal(y_true[:, :, :, 1:], K.round(y_pred)), axis=-1)
Example #55
from keras import backend as K
Example #56
def soft_acc(y_true, y_pred):
    return K.mean(K.equal(K.round(y_true), K.round(y_pred)))
Example #57
def accuracy(y_true, y_pred):
	'''Compute classification accuracy with a fixed threshold on distances.
	'''
	return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))
Example #58
def percision_bacth(y_true, y_pred):
    # Fraction of predictions equal to 0 in the batch (function name kept
    # as in the source).
    return K.mean(K.cast(K.equal(y_pred, 0), "float32"))


#def serialize(rank_loss):
#    return rank_loss.__name__
Example #59
def _wta(X):
    M = K.max(X, axis=-1, keepdims=True)
    R = K.switch(K.equal(X, M), X, 0.)
    return R
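_wta is a winner-take-all activation: only the per-row maximum along the last axis survives (ties all survive); everything else is zeroed. It drops into a model via a Lambda layer:

from keras.layers import Lambda

wta = Lambda(_wta)  # e.g. model.add(Lambda(_wta)) after a Dense layer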
Example #60
def accuracy(y_true, y_pred, threshold=0.5, eps=EPS):
    # `_sanitize` and EPS come from the source module; `eps` is accepted but
    # unused here.
    y_true, y_pred = _sanitize(y_true, y_pred, threshold=threshold)
    return K.mean(K.equal(y_true, y_pred))