Example 1
    def compute_mask(self, inputs, mask=None):
        if len(inputs[1].shape) == 3:
            output_mask = K.any(K.not_equal(inputs[1], self.mask_value),
                                axis=-1)
        else:
            output_mask = K.not_equal(inputs[1], self.mask_value)
        return output_mask
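A quick check of the two branches on toy inputs, assuming the layer's mask_value is 0: for a 3D mask input, K.any collapses the feature axis so each timestep gets one boolean; for a 2D input the comparison is elementwise.

import tensorflow as tf
from tensorflow.keras import backend as K

mask_value = 0.0
seq3d = tf.constant([[[1.0, 2.0], [0.0, 0.0]]])  # (batch, time, features)
seq2d = tf.constant([[1.0, 0.0]])                # (batch, time)

print(K.any(K.not_equal(seq3d, mask_value), axis=-1).numpy())  # [[ True False]]
print(K.not_equal(seq2d, mask_value).numpy())                  # [[ True False]]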
Example 2
    def __init__(self, src, trg=None, pad=0):
        self.src = src
        self.src_mask = K.expand_dims(K.not_equal(src, pad), axis=-2)

        if trg is not None:
            self.trg = trg[:, :-1]  # without last token of sentence
            self.trg_y = trg[:, 1:]  # without first token of sentence
            self.trg_mask = self.make_std_mask(self.trg, pad)
            self.ntokens = K.sum(
                K.cast(K.not_equal(self.trg_y, pad), dtype='uint8'))
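The snippet calls self.make_std_mask, which is not shown here. A minimal standalone sketch in the spirit of the Annotated Transformer this code adapts, combining the pad mask with a causal mask; the subsequent_mask helper and the band_part trick are assumptions, not the original code.

import tensorflow as tf
from tensorflow.keras import backend as K

def subsequent_mask(size):
    # Lower-triangular causal mask: position i may attend to positions <= i.
    ones = tf.ones((1, size, size))
    return tf.cast(tf.linalg.band_part(ones, -1, 0), tf.bool)

def make_std_mask(trg, pad=0):
    # Hide both padding and future positions; result broadcasts to (batch, len, len).
    pad_mask = K.expand_dims(K.not_equal(trg, pad), axis=-2)  # (batch, 1, len)
    return tf.logical_and(pad_mask, subsequent_mask(tf.shape(trg)[-1]))

# m = make_std_mask(tf.constant([[5, 3, 0]]))  # -> bool tensor of shape (1, 3, 3)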
Example 3
    def __call__(self, y_true, y_pred, sample_weight=None):
        """Automatically balances the weights of the 0/1 activations using
        their counts in y_true.
        """
        num_ones = K.sum(y_true)
        num_zeros = K.sum(K.cast_to_floatx(K.equal(y_true, 0.0)))

        # zero entries are down-weighted so that both classes contribute
        # equally; nonzero entries keep a weight of 1
        sw = K.cast_to_floatx(K.equal(y_true, 0.0)) * (num_ones / num_zeros)
        sw = sw + K.cast_to_floatx(K.not_equal(y_true, 0.0))
        return super().__call__(y_true, y_pred, sample_weight=sw)
Example 4
    def call(self, inputs):
        if len(inputs[1].shape) == 3:
            boolean_mask = K.any(K.not_equal(inputs[1], self.mask_value),
                                 axis=-1,
                                 keepdims=True)
        else:
            boolean_mask = K.expand_dims(
                K.not_equal(inputs[1], self.mask_value))

        return inputs[0] * K.cast(boolean_mask, K.dtype(inputs[0]))
Example 5
    def _get_triplet_mask(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
        # keep (a, p) pairs where label(a) == label(p)
        mask1 = K.expand_dims(K.equal(K.expand_dims(y_true, 0), K.expand_dims(y_true, 1)), 2)
        mask1 = K.cast(mask1, K.dtype(pairwise_dist))
        # exclude a == p (zero pairwise distance)
        mask2 = K.expand_dims(K.not_equal(pairwise_dist, 0.0), 2)
        mask2 = K.cast(mask2, K.dtype(pairwise_dist))
        # keep (a, n) pairs where label(n) != label(a)
        mask3 = K.expand_dims(K.not_equal(K.expand_dims(y_true, 0), K.expand_dims(y_true, 1)), 1)
        mask3 = K.cast(mask3, K.dtype(pairwise_dist))
        return mask1 * mask2 * mask3
Example 6
def exact_matched_accuracy(y_true, y_pred, mask_id):
    true_ids = bk.argmax(y_true, axis=-1)
    pred_ids = bk.argmax(y_pred, axis=-1)

    # 1 for real tokens, 0 where the true label is the mask id
    maskBool = bk.not_equal(true_ids, mask_id)
    maskInt64 = bk.cast(maskBool, 'int64')

    # a sample counts as correct only when every unmasked position matches
    diff = (true_ids - pred_ids) * maskInt64
    matches = bk.cast(bk.not_equal(diff, bk.zeros_like(diff)), 'int64')
    matches = bk.sum(matches, axis=-1)
    matches = bk.cast(bk.equal(matches, bk.zeros_like(matches)), bk.floatx())
    return bk.mean(matches)
Example 7
from tensorflow.keras.backend import (argmax, cast, categorical_crossentropy,
                                      epsilon, floatx, not_equal, sum, switch,
                                      zeros_like)

def masked_loss(y_true, y_pred):
    # positions whose true class is 0 (the padding class) are zeroed out
    max_args = argmax(y_true)
    mask = cast(not_equal(max_args, zeros_like(max_args)), dtype='float32')
    loss = switch(mask,
                  categorical_crossentropy(y_true, y_pred, from_logits=True),
                  zeros_like(mask, dtype=floatx()))
    return sum(loss) / (cast(sum(mask), dtype='float32') + epsilon())
Example 8
    def update_state(self, y_true, y_pred, sample_weight=None):
        # skip counting samples with the label __unknown__ (index 1)
        mask = K.cast(K.not_equal(y_true, 1), K.floatx())
        if self.threshold is None:
            threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True)
            # make sure [0, 0, 0] doesn't become [1, 1, 1]
            # Use abs(x) > eps, instead of x != 0 to check for zero
            y_pred = tf.logical_and(y_pred >= threshold,
                                    tf.abs(y_pred) > 1e-12)
        else:
            y_pred = y_pred > self.threshold
        y_true = K.one_hot(K.cast(K.flatten(y_true), tf.int32),
                           y_pred.shape[1])
        y_true = tf.cast(y_true, self.dtype)
        y_pred = tf.cast(y_pred, self.dtype)

        # skip counting samples where the PAD token is predicted

        def _weighted_sum(val, sample_weight):
            if sample_weight is not None:
                val = tf.math.multiply(val, tf.expand_dims(sample_weight, 1))
            return tf.reduce_sum(val * mask, axis=self.axis)[2:]

        self.true_positives.assign_add(
            _weighted_sum(y_pred * y_true, sample_weight))
        self.false_positives.assign_add(
            _weighted_sum(y_pred * (1 - y_true), sample_weight))
        self.false_negatives.assign_add(
            _weighted_sum((1 - y_pred) * y_true, sample_weight))
        self.weights_intermediate.assign_add(
            _weighted_sum(y_true, sample_weight))
Example 9
def qscore(y_true, y_pred):
    from tensorflow.keras import backend as K
    error = K.cast(
        K.not_equal(K.max(y_true, axis=-1),
                    K.cast(K.argmax(y_pred, axis=-1), K.floatx())), K.floatx())
    error = K.sum(error) / K.sum(K.ones_like(error))
    return -10.0 * 0.434294481 * K.log(error)
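Here 0.434294481 is 1/ln(10), so the return value is -10 * log10(error rate), i.e. a Phred-style Q-score. A quick numeric check:

import numpy as np

# an error rate of 1e-3 corresponds to Q30 on the Phred scale
assert np.isclose(-10.0 * 0.434294481 * np.log(1e-3), 30.0, atol=1e-3)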
Example 10
    def decorator(self, x):

        # Only call layer if there are input spikes. This is to prevent
        # accumulation of bias.
        self.impulse = tf.cond(k.any(k.not_equal(x, 0)), lambda: call(self, x),
                               lambda: k.zeros_like(self.mem))
        return self.update_neurons()
Example 11
    def call(self, inputs, mask=None):
        atoms, bonds, edges = inputs

        # Import dimensions
        num_samples = atoms.shape[0]
        max_atoms = atoms.shape[1]
        num_atom_features = atoms.shape[-1]
        num_bond_features = bonds.shape[-1]

        # Create a matrix that stores, for each atom, its degree
        # atom_degrees = K.sum(K.not_equal(edges, -1), axis=-1, keepdims=True)
        # cast the boolean mask before summing:
        atom_degrees = K.sum(K.cast(K.not_equal(edges, -1), 'float64'),
                             axis=-1,
                             keepdims=True)

        # For each atom, look up the features of its neighbours

        neighbour_atom_features = neighbour_lookup(atoms,
                                                   edges,
                                                   include_self=True)

        # Sum along degree axis to get summed neighbour features
        summed_atom_features = K.sum(neighbour_atom_features, axis=-2)

        # Sum the edge features for each atom
        summed_bond_features = K.sum(bonds, axis=-2)

        # Concatenate the summed atom and bond features
        # summed_features = K.concatenate([summed_atom_features, summed_bond_features], axis=-1)
        # Tensorflow concat:
        summed_features = tf.concat(
            [summed_atom_features, summed_bond_features], axis=-1)

        # For each degree we convolve with a different weight matrix
        new_features_by_degree = []
        for degree in range(self.max_degree):

            # Create mask for this degree
            atom_masks_this_degree = K.cast(K.equal(atom_degrees, degree),
                                            K.floatx())

            # Multiply with hidden merge layer
            #   (use TimeDistributed because we are dealing with 2D input / 3D with batches)
            # Add the Keras shape to let Keras know the dimensions
            summed_features._keras_shape = (None, max_atoms,
                                            num_atom_features +
                                            num_bond_features)
            new_unmasked_features = self.inner_3D_layers[degree](
                summed_features)

            # Do explicit masking because TimeDistributed does not support masking
            new_masked_features = new_unmasked_features * atom_masks_this_degree

            new_features_by_degree.append(new_masked_features)

        # Finally sum the features of all atoms
        new_features = layers.add(new_features_by_degree)

        return new_features
Example 12
    def f(y_true, y_pred):
        mask_true = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        masked_squared_error = K.square(mask_true * (y_true - y_pred))
        # in case mask_true is 0 everywhere, the error would be nan, therefore divide by at least 1
        # this doesn't change anything as where sum(mask_true)==0, sum(masked_squared_error)==0 as well
        masked_mse = K.sum(masked_squared_error, axis=-1) / K.maximum(K.sum(mask_true, axis=-1), 1)
        return masked_mse
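Example 12 shows only the inner function; mask_value comes from an enclosing factory that is not shown. A minimal sketch of that wrapper (the factory name is an assumption):

from tensorflow.keras import backend as K

def masked_mse_loss(mask_value):
    def f(y_true, y_pred):
        mask_true = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        masked_squared_error = K.square(mask_true * (y_true - y_pred))
        return K.sum(masked_squared_error, axis=-1) / K.maximum(
            K.sum(mask_true, axis=-1), 1)
    return f

# model.compile(optimizer='adam', loss=masked_mse_loss(mask_value=-1.0))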
Example 13
def precisionK(y_true, y_pred):
    # works with non binary data as well as binary
    y_true_class = K.argmax(y_true, axis=-1)
    y_pred_class = K.argmax(y_pred, axis=-1)
    TP = K.cast(K.equal(y_true_class, y_pred_class), dtype='int32')
    nonzero_pred = K.cast(K.not_equal(y_pred_class, 0), dtype='int32')
    return K.sum(TP * nonzero_pred) / K.maximum(K.sum(nonzero_pred), 1)
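A quick check on a 3-class toy batch where class 0 plays the role of the ignored class:

import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.constant([[0., 1., 0.], [1., 0., 0.], [0., 0., 1.]])
y_pred = tf.constant([[.1, .8, .1], [.8, .1, .1], [.2, .7, .1]])
# predicted classes: [1, 0, 1]; the class-0 prediction is ignored,
# so precision = 1 correct / 2 nonzero predictions = 0.5
print(precisionK(y_true, y_pred).numpy())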
Example 14
    def loss(self, y_true, y_pred):
        # We always delay import of Keras so that mhcflurry can be imported
        # initially without tensorflow debug output, etc.
        configure_tensorflow()
        from tensorflow.keras import backend as K
        y_true = K.flatten(y_true)
        y_pred = K.flatten(y_pred)

        # Handle (=) inequalities
        diff1 = y_pred - y_true
        diff1 *= K.cast(y_true >= 0.0, "float32")
        diff1 *= K.cast(y_true <= 1.0, "float32")

        # Handle (>) inequalities
        diff2 = y_pred - (y_true - 2.0)
        diff2 *= K.cast(y_true >= 2.0, "float32")
        diff2 *= K.cast(y_true <= 3.0, "float32")
        diff2 *= K.cast(diff2 < 0.0, "float32")

        # Handle (<) inequalities
        diff3 = y_pred - (y_true - 4.0)
        diff3 *= K.cast(y_true >= 4.0, "float32")
        diff3 *= K.cast(diff3 > 0.0, "float32")

        denominator = K.maximum(
            K.sum(K.cast(K.not_equal(y_true, 2.0), "float32"), 0),
            1.0)

        result = (
            K.sum(K.square(diff1)) +
            K.sum(K.square(diff2)) +
            K.sum(K.square(diff3))) / denominator

        return result
Example 15
def inequaility_loss(y_true, y_pred):
    ## adapted from MHCflurry
    from tensorflow.keras import backend as K
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)

    # Handle (=) inequalities
    diff1 = y_pred - y_true
    diff1 *= K.cast(y_true >= 0.0, "float32")
    diff1 *= K.cast(y_true <= 1.0, "float32")

    # Handle (>) inequalities: only penalize predictions below the bound
    diff2 = y_pred - (y_true - 2.0)
    diff2 *= K.cast(y_true >= 2.0, "float32")
    diff2 *= K.cast(y_true <= 3.0, "float32")
    diff2 *= K.cast(diff2 < 0.0, "float32")

    # Handle (<) inequalities
    diff3 = y_pred - (y_true - 4.0)
    diff3 *= K.cast(y_true >= 4.0, "float32")
    diff3 *= K.cast(diff3 > 0.0, "float32")

    denominator = K.maximum(
        K.sum(K.cast(K.not_equal(y_true, 2.0), "float32"), 0),
        1.0)

    result = (
            K.sum(K.square(diff1)) +
            K.sum(K.square(diff2)) +
            K.sum(K.square(diff3))) / denominator

    return result
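Reading the masks back, y_true encodes the measurement type by offset: exact targets live in [0, 1], '>' targets are stored as target + 2, and '<' targets as target + 4. A quick check with the standalone version, where all three constraints are satisfied and the loss is therefore 0:

import tensorflow as tf

y_true = tf.constant([0.4, 2.9, 4.1])    # '=' 0.4, '>' 0.9, '<' 0.1
y_pred = tf.constant([0.4, 0.95, 0.05])
print(inequaility_loss(y_true, y_pred).numpy())  # -> 0.0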
Example 16
    def ignore_accuracy(self, y_true, y_pred):
        # assumes: from keras.backend import argmax, cast, equal, not_equal, sum, maximum
        pred = argmax(y_pred, axis=-1)
        true = argmax(y_true, axis=-1)
        # positions predicted as class 0 (padding) are ignored
        ignore_mask = cast(not_equal(pred, 0), 'int32')
        matches = cast(equal(true, pred), 'int32') * ignore_mask
        accuracy = sum(matches) / maximum(sum(ignore_mask), 1)
        return accuracy
Example 17
from tensorflow.keras.backend import (argmax, cast, equal, floatx, not_equal,
                                      sum, switch, zeros_like)

def masked_accuracy(y_true, y_pred):
    # positions whose true class is 0 (the padding class) score no points
    max_args = argmax(y_true)
    mask = cast(not_equal(max_args, zeros_like(max_args)), dtype='float32')
    points = switch(
        mask,
        cast(equal(argmax(y_true, -1), argmax(y_pred, -1)), dtype='float32'),
        zeros_like(mask, dtype=floatx()))
    return sum(points) / cast(sum(mask), dtype='float32')
Example 18
def ignore_acc(y_true_class, y_pred_class, class_to_ignore=0):
    y_pred_class = K.cast(K.argmax(y_pred_class, axis=-1), 'int32')
    y_true_class = K.cast(y_true_class, 'int32')
    ignore_mask = K.cast(K.not_equal(y_true_class, class_to_ignore), 'int32')
    matches = K.cast(K.equal(y_true_class, y_pred_class),
                     'int32') * ignore_mask
    accuracy = K.sum(matches) / K.maximum(K.sum(ignore_mask), 1)
    return accuracy
Example 19
def GetPadMask(q, k):
    '''Build an attention pad mask of shape [B, Q, K] from the key ids (0 = PAD).'''
    ones = K.expand_dims(K.ones_like(q, 'float32'), -1)
    mask = K.cast(K.expand_dims(K.not_equal(k, 0), 1), 'float32')
    mask = K.batch_dot(ones, mask, axes=[2, 1])
    return mask
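A quick shape check: with one PAD key, every query row gets the same [1, 1, 0] pattern.

import tensorflow as tf

q = tf.constant([[1., 1., 1.]])   # query positions, shape (B, Q)
k = tf.constant([[4., 7., 0.]])   # key token ids, 0 = PAD, shape (B, K)
print(GetPadMask(q, k).numpy())   # shape (1, 3, 3), every row [1., 1., 0.]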
Example 20
def masked_mse(y_true, y_pred):
    # mask selecting the entries whose target is nonzero
    mask_true = K.cast(K.not_equal(y_true, 0), K.floatx())
    # masked squared error
    masked_squared_error = K.square(mask_true * (y_true - y_pred))
    masked_mse = K.sum(masked_squared_error, axis=-1) / K.maximum(
        K.sum(mask_true, axis=-1), 1)
    return masked_mse
Example 21
def get_notnull_indices(tensor):
    # coordinates of the elements that differ from zero
    zero = K.constant(0, dtype=tf.float32)
    where = K.not_equal(tensor, zero)
    indices = tf.where(where)
    return indices
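Usage is straightforward; tf.where returns one row of coordinates per nonzero element:

import tensorflow as tf

t = tf.constant([0.0, 3.0, 0.0, 5.0])
print(get_notnull_indices(t).numpy())  # -> [[1]
                                       #     [3]]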
Example 22
def positions_func(inputs, pad=0):
    """
    A layer filling the i-th column of a 2D tensor with
    ln(1+i) when it contains a meaningful symbol
    and with 0 when it contains PAD
    """
    position_inputs = K.cumsum(K.ones_like(inputs, dtype="float32"), axis=1)
    position_inputs *= K.cast(K.not_equal(inputs, pad), "float32")
    return K.log(1.0 + position_inputs)
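A quick check on a padded row: positions are 1-based, so the outputs are ln 2, ln 3, ln 4 followed by zeros for PAD.

import tensorflow as tf

x = tf.constant([[7, 2, 5, 0, 0]])  # 0 is PAD
print(positions_func(x).numpy())
# -> [[0.693 1.099 1.386 0.    0.   ]]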
Example 23
def loss_cls(y_true, y_pred):
    # entries labelled -1 are excluded from the loss
    condition = K.not_equal(y_true, -1)
    indices = tf.where(condition)

    target = tf.gather_nd(y_true, indices)
    output = tf.gather_nd(y_pred, indices)

    loss = K.binary_crossentropy(target, output)
    return K.mean(loss)
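A quick check: the entry labelled -1 drops out, leaving the mean binary cross-entropy of the other two entries.

import tensorflow as tf

y_true = tf.constant([[1.0, -1.0, 0.0]])
y_pred = tf.constant([[0.9,  0.5, 0.1]])
print(loss_cls(y_true, y_pred).numpy())  # -> ~0.105 (= -ln 0.9)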
Example 24
    def compute_mask(self, inputs, mask=None):
        if self.mode == self.MODE_EXPAND:
            if self.mask_zero:
                output_mask = K.not_equal(inputs, self.mask_zero)
            else:
                output_mask = None
        else:
            output_mask = mask
        return output_mask
Example 25
    def get_pos_seq(self, x):
        mask = K.cast(K.not_equal(x, 0), 'int32')
        # Nok: replace K.cumsum with operations that can run on CUDA
        #      (to support data-parallel multi-GPU training)
        # pos = K.cumsum(K.ones_like(x, 'int32'), 1)
        tensor_shape = shape_list(x)
        pos = tf.add(tf.range(tensor_shape[1]), 1)
        pos = tf.tile(pos, [tensor_shape[0]])
        pos = tf.reshape(pos, tensor_shape)
        return pos * mask
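get_pos_seq relies on a shape_list helper that is not shown; the usual transformer-codebase definition returns static dimensions where known and dynamic ones otherwise. A sketch:

import tensorflow as tf

def shape_list(x):
    # static shape where available, dynamic tf.shape entries otherwise
    static = x.shape.as_list()
    dynamic = tf.shape(x)
    return [dynamic[i] if dim is None else dim for i, dim in enumerate(static)]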
Example 26
    def custom_accuracy(y_true, y_pred):
        y_true_class = K.argmax(y_true, axis=-1)
        y_pred_class = K.argmax(y_pred, axis=-1)

        ignore_mask = K.cast(K.not_equal(y_pred_class, to_ignore), 'int32')
        matches = K.cast(K.equal(y_true_class, y_pred_class),
                         'int32') * ignore_mask
        accuracy = K.sum(matches) / K.maximum(K.sum(ignore_mask), 1)
        return accuracy
Example 27
    def f(y_true, y_pred):
        error = y_true - y_pred
        cond = K.abs(error) < clip_delta
        mask_true = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        masked_squared_error = 0.5 * K.square(mask_true * (y_true - y_pred))
        linear_loss = mask_true * (clip_delta * K.abs(error) - 0.5 *
                                   (clip_delta**2))
        huber_loss = tf.where(cond, masked_squared_error, linear_loss)
        return K.sum(huber_loss) / K.sum(mask_true)
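As in Example 12, f is the inner function of a factory binding mask_value and clip_delta; a minimal sketch of that wrapper (names assumed, with a guard added against an all-masked batch):

import tensorflow as tf
from tensorflow.keras import backend as K

def masked_huber_loss(mask_value=0.0, clip_delta=1.0):
    def f(y_true, y_pred):
        error = y_true - y_pred
        cond = K.abs(error) < clip_delta
        mask_true = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        masked_squared_error = 0.5 * K.square(mask_true * error)
        linear_loss = mask_true * (clip_delta * K.abs(error) -
                                   0.5 * clip_delta ** 2)
        huber_loss = tf.where(cond, masked_squared_error, linear_loss)
        return K.sum(huber_loss) / K.maximum(K.sum(mask_true), 1.0)
    return f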
Example 28
def masked_mse(y, p, mask_val):
    mask = K.cast(K.not_equal(y, mask_val), K.floatx())
    if tf.__version__[0] == '2':
        masked_loss = tf.losses.mse(y * mask, p * mask)
    else:
        mask = K.cast(mask, 'float32')
        masked_loss = K.mean(tf.math.square(p * mask - y * mask), axis=-1)
        # masked_loss = tf.compat.v1.losses.mean_squared_error(y*mask, p*mask)

    return masked_loss
Example 29
def mask_aware_mean(inputs):
    # https://github.com/github/CodeSearchNet/blob/master/src/utils/tfutils.py#L107
    # recreate the masks - all zero rows have been masked
    mask = backend.not_equal(backend.sum(backend.abs(inputs), axis=2, keepdims=True), 0)
    # number of rows that are not all zeros
    num = backend.sum(backend.cast(mask, 'float32'), axis=1, keepdims=False)
    # compute mask-aware mean of inputs
    inputs_mean = backend.sum(inputs, axis=1, keepdims=False) / (num + 1E-8)

    return inputs_mean
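A quick check: the all-zero middle row is treated as padding and excluded from the mean.

import tensorflow as tf

x = tf.constant([[[1.0, 2.0], [0.0, 0.0], [3.0, 4.0]]])
print(mask_aware_mean(x).numpy())  # -> [[2. 3.]] (mean of the two real rows)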
Example 30
    def call(self, inputs, mask=None):
        atoms, bonds, edges = inputs

        # Import dimensions
        num_samples = atoms.shape[0]
        max_atoms = atoms.shape[1]
        num_atom_features = atoms.shape[-1]
        num_bond_features = bonds.shape[-1]

        # Create a matrix that stores, for each atom, its degree, and use it
        #   to create a general atom mask (unused atoms are 0-padded)
        # We have to use the edge vector for this, because in theory, a convolution
        #   could lead to a zero vector for an atom that is present in the molecule
        # atom_degrees = K.sum(K.not_equal(edges, -1), axis=-1, keepdims=True)
        # cast the boolean mask before summing:
        atom_degrees = K.sum(K.cast(K.not_equal(edges, -1), 'float64'),
                             axis=-1,
                             keepdims=True)
        general_atom_mask = K.cast(K.not_equal(atom_degrees, 0), K.floatx())

        # Sum the edge features for each atom
        summed_bond_features = K.sum(bonds, axis=-2)

        # Concatenate the summed atom and bond features
        # atoms_bonds_features = K.concatenate([atoms, summed_bond_features], axis=-1)
        # Tensorflow concat:
        atoms_bonds_features = tf.concat([atoms, summed_bond_features],
                                         axis=-1)

        # Compute fingerprint
        atoms_bonds_features._keras_shape = (None, max_atoms,
                                             num_atom_features +
                                             num_bond_features)
        fingerprint_out_unmasked = self.inner_3D_layer(atoms_bonds_features)

        # Do explicit masking because TimeDistributed does not support masking
        fingerprint_out_masked = fingerprint_out_unmasked * general_atom_mask

        # Sum across all atoms
        final_fp_out = K.sum(fingerprint_out_masked, axis=-2)

        return final_fp_out