Example #1
    def call(self, inputs, **kwargs):
        if not (K.is_tensor(inputs[0]) and K.is_tensor(inputs[1])):
            raise ValueError(
                'The layer must be called with a list of two tensors: '
                '[query_input, self_attn_mask]')
        query_input, self_attn_mask = inputs

        query_shape = K.shape(query_input)
        seq_len, d_model = query_shape[-2], query_shape[-1]
        # The first thing we need to do is to perform affine transformations
        # of the inputs to get the Queries, the Keys and the Values.
        qkv = K.dot(
            K.reshape(query_input, [-1, d_model]),
            self.qkv_weights)  # shape: (batch_size * seq_len, 3 * d_model)
        # splitting the keys, the values and the queries before further
        # processing
        pre_q, pre_k, pre_v = [
            K.reshape(qkv[:, i * d_model:(i + 1) * d_model],
                      (-1, seq_len, self.num_heads, d_model // self.num_heads))
            for i in range(3)
        ]
        attention_out = self.attention(pre_q,
                                       pre_v,
                                       pre_k,
                                       seq_len,
                                       d_model,
                                       self_attn_mask,
                                       training=kwargs.get('training'))
        return attention_out
Example #2
    def build(self, nodes=None, adjacency=None):
        """
        Checks the shapes and builds the GraphWrapper with the given 
        adjacency matrix and nodes. If nodes or adjacency matrix are 
        None, uses respectively self._nodes or self._adjacency to build 
        the object. Useful to automatically re-building the GraphWrapper
        when the nodes or adjacency setter is called. 

        Args: 
            - nodes: a (..., N, F) tensor,
            - adjacency: a (..., N, N) tensor

        Returns 'self'
        """
        if nodes is None and self._nodes is not None:
            nodes = self._nodes
        elif not K.is_tensor(nodes):
            raise ValueError("Nodes must be a tensor.")

        if adjacency is None and self._adjacency is not None:
            adjacency = self._adjacency
        else:
            _adjacency = to_list(adjacency)

            for a in _adjacency:
                if not K.is_tensor(a):
                    raise ValueError("Adjacency must be a tensor.")

        nodes_shape = K.int_shape(nodes)
        if isinstance(adjacency, list):
            adjacency_shape = [K.int_shape(a) for a in adjacency]
        else:
            adjacency_shape = K.int_shape(adjacency)

        # Creating a GraphShape object will handle shape checking
        if self._keras_shape is None:
            self._keras_shape = GraphShape(
                nodes_shape=nodes_shape, adjacency_shape=adjacency_shape)
        else:
            self._keras_shape.build(
                nodes_shape=nodes_shape, adjacency_shape=adjacency_shape)

        self._nodes = nodes
        self._adjacency = adjacency

        self._n_features = nodes_shape[-1]
        self._n_nodes = nodes_shape[-2]

        super(GraphWrapper, self)._clear()
        super(GraphWrapper, self)._extend(
            [self.nodes] + to_list(self.adjacency))

        self._built = True

        return self
Example #3
def test_model_layers_single_inputs(nst_with_pix):
    """Check that intermediate keras model layer has single input"""
    nst = nst_with_pix
    kmodel = nst.kmodel
    assert isinstance(kmodel, Sequential)
    # Check that the input for the whole model is a single tensor
    assert K.is_tensor(kmodel.input)
    assert kmodel.built
    # Check that the intermediate layer has a single input tensor
    layer = kmodel.get_layer('block2_conv2')
    assert K.is_tensor(layer.input)
Example #4
def test_compute_layer_style_cost():
    """Check that calculating layer style cost correctly
    - size of style will be different from one of generated matrix
    - except for number of channels
    """
    # Initializing
    sess = tf.Session()
    # Style input generated randomly
    a_s = np.array([[[[-1.683445, 1.8942857, 4.189092],
                      [1.3846824, 3.8925915, 2.3524866]],
                     [[-1.9202449, 4.6461368, -1.0375276],
                      [4.899456, -7.5360813, 3.4091651]]]],
                   dtype='float32')
    # Create a_g placeholder and its future value
    a_g = K.placeholder(shape=[1, 3, 3, 3])
    ag_value = np.array([[[[-0.39043474, -4.965909, -5.387548],
                           [4.572505, 1.1961036, 5.0099816],
                           [1.7304354, -0.13603461, -0.7514645]],
                          [[-3.0110965, 1.0130516, 7.4561086],
                           [0.51901615, -0.23328066, -0.8221154],
                           [0.69788367, 1.5624137, 0.11127031]],
                          [[3.7990131, -0.5115707, -5.364818],
                           [-4.8868036, -1.1914248, -0.12090659],
                           [7.0109277, -1.2259245, 4.2369]]]])
    j_layer_style = NeuralStyleTransfer._compute_layer_style_cost(
        a_s, a_g, sess)
    assert K.is_tensor(j_layer_style)
    np.testing.assert_allclose(
        sess.run(j_layer_style, feed_dict={a_g: ag_value}), 12.413997)
Example #5
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
    y_pred = K.constant(y_pred) if not K.is_tensor(y_pred) else y_pred
    y_true = K.cast(y_true, y_pred.dtype)
    return K.mean(K.binary_crossentropy(y_true,
                                        y_pred,
                                        from_logits=from_logits),
                  axis=-1)
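A minimal usage sketch for the wrapper above, assuming `import numpy as np`, `from keras import backend as K`, and a TF1-style backend where `K.eval` can run constant graphs:

y_true = np.array([[0., 1.], [1., 0.]])
y_pred = np.array([[0.1, 0.9], [0.8, 0.2]])  # plain array, wrapped by the guard
loss = binary_crossentropy(y_true, y_pred)
print(K.eval(loss))  # per-sample means, roughly [0.105, 0.223]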
Example #6
def cumulative_score(y_true, y_pred, threshold=5):
    # Fraction of samples whose absolute error falls below the threshold.
    if not K.is_tensor(y_pred):
        y_pred = K.constant(y_pred)
    y_true = K.cast(y_true, y_pred.dtype)
    return K.sum(K.cast(K.abs(y_pred - y_true) < threshold,
                        dtype='float32')) / K.cast(K.shape(y_true)[0],
                                                   dtype='float32')
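A quick numeric sketch (same K/np import assumptions as above); two of the three errors fall below the default threshold of 5:

y_true = np.array([10., 20., 30.])
y_pred = np.array([12., 28., 31.])  # absolute errors: 2, 8, 1
print(K.eval(cumulative_score(y_true, y_pred)))  # -> 2/3, about 0.6667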
Example #7
def custom_loss_mean_squared_error(y_true, y_pred):
    global Y_DENOMINATOR
    if not K.is_tensor(y_pred):
        y_pred = K.constant(y_pred)
    y_true = K.cast(y_true, y_pred.dtype)
    return K.mean(K.square(y_pred * Y_DENOMINATOR - y_true * Y_DENOMINATOR),
                  axis=-1)
def accuracy(y_true, y_pred):
    # Drop the last column of both tensors before comparing element-wise.
    y_true = y_true[:, :-1]
    y_pred = y_pred[:, :-1]
    if not K.is_tensor(y_pred):
        y_pred = K.constant(y_pred)
    y_true = K.cast(y_true, y_pred.dtype)
    return K.cast(K.equal(y_true, y_pred), K.floatx())
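A small sketch of the column-trimming behaviour above (same K/np import assumptions): the last feature column is dropped before the element-wise comparison:

y_true = np.array([[1., 2., 9.]])  # the trailing 9. column is ignored
y_pred = K.constant([[1., 0., 5.]])
print(K.eval(accuracy(y_true, y_pred)))  # -> [[1. 0.]]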
Example #9
    def call(self, inputs, **kwargs):
        if not K.is_tensor(inputs):
            raise ValueError(
                'The layer can be called only with one tensor as an argument')
        _, seq_len, d_model = K.int_shape(inputs)

        # Perform affine transformations to get the Queries, the Keys and the Values.
        qkv = K.dot(inputs, self.qkv_weights)  # (-1,seq_len,d_model*3)
        qkv = K.reshape(qkv, [-1, d_model * 3])

        # splitting the keys, the values and the queries.
        pre_q, pre_k, pre_v = [
            K.reshape(qkv[:, i * d_model:(i + 1) * d_model],
                      (-1, seq_len, self.num_heads, d_model // self.num_heads))
            for i in range(3)
        ]

        attention_out = self.attention(pre_q,
                                       pre_v,
                                       pre_k,
                                       seq_len,
                                       d_model,
                                       training=kwargs.get('training'))
        # of shape (-1, seq_len, d_model)
        return attention_out
    def customLoss(y_true, y_pred):
        if not K.is_tensor(y_pred):
            y_pred = K.constant(y_pred)
        y_true = K.cast(y_true, y_pred.dtype)

        # MSE plus a hinge-style penalty (scaled by _beta) that grows when
        # y_pred and y_true tend to fall on opposite sides of 0.5.
        return K.mean(K.square(y_pred - y_true), axis=-1) + K.maximum(
            0.0, _beta * K.mean(-(y_pred - 0.5) * (y_true - 0.5), axis=-1))
Example #11
def mean_squared_relative_error(y_true, y_pred):
    if not K.is_tensor(y_pred):
        y_pred = K.constant(y_pred)
    y_true = K.cast(y_true, y_pred.dtype)
    # Relative error: normalize the residual by |y_true|, clipped away from
    # zero, before squaring (matching mean_squared_error_norm below).
    diff = K.square(
        (y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
    return K.mean(diff, axis=-1)
def mean_squared_error_norm(y_true, y_pred):
    if not K.is_tensor(y_pred):
        y_pred = K.constant(y_pred)
    y_true = K.cast(y_true, y_pred.dtype)
    return K.mean(K.square(
        (y_pred - y_true) / K.clip(K.abs(y_true), K.epsilon(), None)),
                  axis=-1)
Example #13
def log_mean_absolute_error(y_true, y_pred):
    if not K.is_tensor(y_pred):
        y_pred = K.constant(y_pred)
    y_true = K.cast(y_true, y_pred.dtype)
    return -K.mean(
        K.log(1. - K.clip(K.abs(y_pred - y_true), 0, 1. - K.epsilon())),
        axis=-1)
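A small numeric sketch (same K/np import assumptions): absolute errors of 0 and 0.5 give -(log(1.0) + log(0.5)) / 2, about 0.3466:

y_true = np.array([[0.0, 0.0]])
y_pred = np.array([[0.0, 0.5]])
print(K.eval(log_mean_absolute_error(y_true, y_pred)))  # ~0.3466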
Example #14
def to_tensor(x, dtype="int32"):
    """If x is a Tensor return it as is otherwise return a constant tensor of
    type dtype."""
    if K.is_tensor(x):
        return x

    return K.constant(x, dtype=dtype)
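A usage sketch, assuming `from keras import backend as K`: tensors pass through untouched, everything else is wrapped in a constant of the requested dtype:

x1 = to_tensor([1, 2, 3])            # list -> int32 constant tensor
x2 = to_tensor(K.constant([1.0]))    # already a tensor -> returned as-is
print(K.dtype(x1), K.is_tensor(x2))  # 'int32' True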
Example #15
    def call(self, inputs, **kwargs):
        if not K.is_tensor(inputs):
            raise ValueError(
                'The layer can be called only with one tensor as an argument')
        _, seq_len, d_model = K.int_shape(inputs)

        # The first thing we need to do is to perform affine transformations
        # of the inputs to get the Queries, the Keys and the Values.
        qkv = K.dot(inputs, self.qkv_weights)  # (-1,seq_len,d_model*3)
        qkv = K.reshape(qkv, [-1, d_model * 3])

        # splitting the keys, the values and the queries before further
        # processing
        pre_q, pre_k, pre_v = [
            K.reshape(qkv[:, i * d_model:(i + 1) * d_model],
                      (-1, seq_len, self.num_heads, d_model // self.num_heads))
            for i in range(3)
        ]

        attention_out = self.attention_zambaldi(
            pre_q,
            pre_v,
            pre_k,
            seq_len,
            d_model,
            training=kwargs.get('training'))
        # of shape (-1, seq_len, d_model)
        return attention_out
Example #16
def _calc_mean_metric(y_true, y_pred, metric, starting_label=0):
    switcher = {
        'mPrec': precision,
        # TODO 'mAP': average_precision,
        'mf1': f1,
        'mAcc': accuracy,
        'mBacc': balanced_accuracy,
        'mRec': recall,
        'mIOU': iou,
        'mSpec': specificity,
        'mMcc': mcc
    }

    try:
        func = switcher[metric]
    except KeyError:
        raise ValueError('Unknown metric: {}'.format(metric))

    if K.is_tensor(y_true):
        amount_labels = K.int_shape(y_pred)[-1]
        batch_size = K.int_shape(y_pred)[0]
        summation = K.variable(0)
        y_pred = K.round(y_pred)
    else:
        amount_labels = y_pred.shape[-1]
        batch_size = y_pred.shape[0]
        summation = 0

    for batch_num in range(batch_size):
        for label in range(starting_label, amount_labels):
            summation = summation + func(y_true[batch_num, :, :, label],
                                         y_pred[batch_num, :, :, label])

    # Average over the batch items and the labels actually included in the sum.
    return summation / (batch_size * (amount_labels - starting_label))
Example #17
def test(y_true, y_pred):
    if not K.is_tensor(y_pred):  # if y_pred is not a Keras tensor
        y_pred = K.constant(y_pred)  # wrap it in a constant tensor
    # cast y_true to the same dtype as y_pred (outside the if, so it always runs)
    y_true = K.cast(y_true, y_pred.dtype)
    return K.mean(K.square(y_true - y_pred), axis=-1)
Example #18
def hRVF(Y, Yh):
    '''Residual Variance Fraction:
    sum((Yh - Y)^2) / sum(Y^2)
    '''
    if not K.is_tensor(Yh):
        Yh = K.constant(Yh)
    Y = K.cast(Y, Yh.dtype)
    return K.sum(K.square(Yh - Y)) / K.sum(K.square(Y))
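A quick numeric sketch (same K/np import assumptions): a single squared residual of 1 against a total signal energy of 14:

Y = np.array([1., 2., 3.])
Yh = np.array([1., 2., 4.])
print(K.eval(hRVF(Y, Yh)))  # -> 1 / 14, about 0.0714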
Example #19
def loss(y_true, y_pred):
    y_pred = K.constant(y_pred) if not K.is_tensor(y_pred) else y_pred
    y_true = K.cast(y_true, y_pred.dtype)
    # Map targets and predictions from [-1, 1] into [0, 1] before the
    # cross-entropy.
    y_pred = (y_pred + 1.0) / 2.0
    y_true = (y_true + 1.0) / 2.0
    return K.mean(K.binary_crossentropy(y_true, y_pred, from_logits=False),
                  axis=-1)
Example #20
def test_compute_variation_cost(nst_with_pix):
    """Check that the variation cost computation works properly"""
    nst = nst_with_pix
    sess = K.get_session()
    j_var = nst._compute_variation_cost(sess)
    assert K.is_tensor(j_var)
    assert K.shape(j_var).eval(session=sess).shape[0] == 0
    np.testing.assert_allclose(j_var.eval(session=sess), 0.)
Example #21
def relu_loss(y_true, y_pred, threshold=5):
    if not K.is_tensor(y_pred):
        y_pred = K.constant(y_pred)
    y_true = K.cast(y_true, y_pred.dtype)
    # Errors above the threshold contribute a squared-sum penalty on top of
    # the usual MSE.
    cs = K.cast(K.greater(K.abs(y_pred - y_true), threshold), K.floatx())
    cse = cs * K.abs(y_pred - y_true)
    mse = K.mean(K.square(y_pred - y_true), axis=-1)
    return K.sum(cse)**2 + mse
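A worked sketch of relu_loss (same K/np import assumptions): only the error of 12 exceeds the threshold, so the extra term is 12 squared = 144 on top of the MSE of 72.5:

y_true = np.array([0., 10.])
y_pred = np.array([1., 22.])  # absolute errors: 1, 12
print(K.eval(relu_loss(y_true, y_pred)))  # -> 144 + 72.5 = 216.5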
    def weighted_categorical_crossentropy(self, y_true, y_pred):
        # The trailing channels of y_true carry the per-pixel weight map.
        wmap = y_true[:, :, :, self.n_classes:]
        y_true = y_true[:, :, :, :self.n_classes]
        y_pred = K.constant(y_pred) if not K.is_tensor(y_pred) else y_pred
        y_true = K.cast(y_true, y_pred.dtype)
        loss = K.categorical_crossentropy(y_true, y_pred, from_logits=False)
        weighted_loss = loss * wmap
        return K.mean(weighted_loss, axis=-1)
Example #23
def _check_type(array):
    if isinstance(array, np.ndarray):
        type_obj = 'numpy'
    elif K.is_tensor(array):
        type_obj = 'tensorflow'
    else:
        type_obj = 'unknown'
    return type_obj
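A usage sketch (assuming `import numpy as np` and `from keras import backend as K`):

print(_check_type(np.zeros(3)))       # -> 'numpy'
print(_check_type(K.constant([1.])))  # -> 'tensorflow'
print(_check_type([1, 2, 3]))         # -> 'unknown'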
Example #24
    def __hook__(self, x, new_x):
        with K.name_scope(self.name_scope):
            if not K.is_tensor(new_x):
                new_x = K.constant(new_x, dtype=K.dtype(x))

            new_x = K.switch(self.condition, new_x, x)

        return self.__original__(x, new_x)
    def focal_loss(y_true, y_pred):
        # Focal loss: down-weight well-classified examples by (1 - p)^gamma.
        y_pred = K.constant(y_pred) if not K.is_tensor(y_pred) else y_pred
        y_true = K.cast(y_true, y_pred.dtype)

        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())

        return K.sum(-y_true * K.pow(1 - y_pred, gamma) * K.log(y_pred),
                     axis=-1)
Example #26
def test_compute_content_cost(nst_with_pix):
    """Check that calculating content cost correctly"""
    nst = nst_with_pix
    sess = K.get_session()
    j_content = nst._compute_content_cost(nst.input_content, session=sess)
    assert K.is_tensor(j_content)
    assert K.shape(j_content).eval(session=sess).shape[0] == 0
    np.testing.assert_allclose(j_content.eval(session=sess), 0.)
def denormalize_arr(data, param_dict):
    """Denormalizes data after training

    Args:
        data: Numpy array of data to denorm.
        param_dict (dict): Dictionary of parameters used during normalization,
            to be used for denormalizing. Eg, mean, stddev, method, etc.

    Returns:
        data: Numpy array of denormalized data.
    """
    eps = np.finfo('float32').eps
    for key, val in param_dict.items():
        if K.is_tensor(val):
            param_dict[key] = np.array(K.eval(val))
    if param_dict['method'] == 'StandardScaler':
        return data * np.maximum(param_dict['std'], eps) + param_dict['mean']
    elif param_dict['method'] == 'MinMax':
        return data * np.maximum(
            (param_dict['armax'] - param_dict['armin']),
            eps) + param_dict['armin']
    elif param_dict['method'] == 'MaxAbs':
        return data * np.maximum(param_dict['maxabs'], eps)
    elif param_dict['method'] == 'RobustScaler':
        return data * np.maximum(param_dict['iqr'], eps) + param_dict['median']
    elif param_dict['method'] == 'PowerTransform':
        y = data * np.maximum(param_dict['std'], eps) + param_dict['mean']

        def np_yeo_johnson_inverse_transform(x, lmbda):
            """Return inverse-transformed input x following Yeo-Johnson inverse
            transform with parameter lambda. From Scipy
            """
            x_inv = np.zeros_like(x)
            pos = x >= 0
            # when x >= 0
            if np.abs(lmbda) < np.finfo(np.float32).eps:
                x_inv[pos] = np.exp(x[pos]) - 1
            else:  # lmbda != 0
                x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1
            # when x < 0
            if np.abs(lmbda - 2) > np.finfo(np.float32).eps:
                x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1, 1 /
                                           (2 - lmbda))
            else:  # lmbda == 2
                x_inv[~pos] = 1 - np.exp(-x[~pos])
            return x_inv

        if param_dict['lambda'].size > 1:
            for i, l in enumerate(param_dict['lambda']):
                y[:, i] = np_yeo_johnson_inverse_transform(y[:, i], l)
        else:
            y = np_yeo_johnson_inverse_transform(
                y.flatten(), param_dict['lambda']).reshape(y.shape)
        return y
    elif param_dict['method'] is None or param_dict['method'] == 'None':
        return data
    else:
        raise ValueError("Unknown normalization method")
Example #28
    def __hook__(self, x, decrement):
        with K.name_scope(self.name_scope):
            if not K.is_tensor(decrement):
                decrement = K.constant(decrement, dtype=K.dtype(x))

            decrement = K.switch(self.condition, decrement,
                                 K.constant(0, dtype=K.dtype(x)))

        return self.__original__(x, decrement)
    def loss(y_true, y_pred):
        Kweights = K.constant(weights)
        if not K.is_tensor(y_pred):
            y_pred = K.constant(y_pred)
        y_true = K.cast(y_true, y_pred.dtype)

        wcce_loss = K.categorical_crossentropy(y_true, y_pred) * K.sum(
            y_true * Kweights, axis=-1)
        dice_loss = dice_lesion_loss(y_true, y_pred)

        return 0.5 * wcce_loss + 0.5 * dice_loss
Example #30
    def __call__(self, Y, Yh):
        '''Weighted Mean Square Error (MSE)'''
        if not K.is_tensor(Yh):
            Yh = K.constant(Yh)
        Y = K.cast(Y, Yh.dtype)
        # Keras has no boolean indexing, so build a multiplicative {0, 1} mask
        # instead of indexing with Yh[mask] - Y[mask].
        mask = K.not_equal(Y, self.Mask)
        mask = K.cast(mask, K.dtype(Y))
        return K.sum(K.square(Yh - Y) * mask) / K.sum(mask)