Example #1
    def global_mi(self, y_true, y_pred):
        warnings.warn(
            'This loss will be deprecated. Consider switching to ne.metrics.MutualInformation.'
        )
        if self.crop_background:
            # does not support variable batch size
            thresh = 0.0001
            padding_size = 20
            filt = tf.ones([padding_size, padding_size, padding_size, 1, 1])

            smooth = tf.nn.conv3d(y_true, filt, [1, 1, 1, 1, 1], "SAME")
            mask = smooth > thresh
            # mask = K.any(K.stack([y_true > thresh, y_pred > thresh], axis=0), axis=0)
            y_pred = tf.boolean_mask(y_pred, mask)
            y_true = tf.boolean_mask(y_true, mask)
            y_pred = K.expand_dims(K.expand_dims(y_pred, 0), 2)
            y_true = K.expand_dims(K.expand_dims(y_true, 0), 2)

        else:
            # reshape: flatten images into shape (batch_size, heightxwidthxdepthxchan, 1)
            y_true = K.reshape(y_true, (-1, K.prod(K.shape(y_true)[1:])))
            y_true = K.expand_dims(y_true, 2)
            y_pred = K.reshape(y_pred, (-1, K.prod(K.shape(y_pred)[1:])))
            y_pred = K.expand_dims(y_pred, 2)

        nb_voxels = tf.cast(K.shape(y_pred)[1], tf.float32)

        # reshape bin centers to be (1, 1, B)
        o = [1, 1, np.prod(self.vol_bin_centers.get_shape().as_list())]
        vbc = K.reshape(self.vol_bin_centers, o)

        # compute image terms
        I_a = K.exp(-self.preterm * K.square(y_true - vbc))
        I_a /= K.sum(I_a, -1, keepdims=True)

        I_b = K.exp(-self.preterm * K.square(y_pred - vbc))
        I_b /= K.sum(I_b, -1, keepdims=True)

        # compute probabilities
        I_a_permute = K.permute_dimensions(I_a, (0, 2, 1))
        pab = K.batch_dot(I_a_permute, I_b)  # joint histogram: (batch, nb_bins, nb_bins)
        pab /= nb_voxels
        pa = tf.reduce_mean(I_a, 1, keepdims=True)
        pb = tf.reduce_mean(I_b, 1, keepdims=True)

        papb = K.batch_dot(K.permute_dimensions(pa, (0, 2, 1)), pb) + K.epsilon()
        return K.sum(K.sum(pab * K.log(pab / papb + K.epsilon()), 1), 1)
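The permute/batch_dot sequence above is a Parzen-window (soft-histogram) mutual information estimate: I_a and I_b hold per-voxel soft bin memberships, pab is the joint histogram and pa/pb its marginals. A minimal NumPy sketch of the same computation, assuming hypothetical bin centers and preterm values:

import numpy as np

def soft_mi(y_true, y_pred, bin_centers, preterm, eps=1e-7):
    # y_true, y_pred: (batch, nb_voxels, 1); bin_centers: (1, 1, B)
    I_a = np.exp(-preterm * (y_true - bin_centers) ** 2)
    I_a /= I_a.sum(-1, keepdims=True)                      # soft bin memberships
    I_b = np.exp(-preterm * (y_pred - bin_centers) ** 2)
    I_b /= I_b.sum(-1, keepdims=True)
    pab = np.matmul(I_a.transpose(0, 2, 1), I_b) / y_true.shape[1]  # joint histogram
    pa = I_a.mean(1, keepdims=True)                        # marginal of y_true
    pb = I_b.mean(1, keepdims=True)                        # marginal of y_pred
    papb = np.matmul(pa.transpose(0, 2, 1), pb) + eps
    return (pab * np.log(pab / papb + eps)).sum(axis=(1, 2))

# MI of an image with itself should exceed MI with unrelated noise.
rng = np.random.default_rng(0)
img = rng.random((1, 1000, 1))
centers = np.linspace(0, 1, 16).reshape(1, 1, 16)
print(soft_mi(img, img, centers, preterm=100.0)
      > soft_mi(img, rng.random((1, 1000, 1)), centers, preterm=100.0))  # [ True]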
Example #2
def class_tversky(y_true, y_pred):
    smooth = 1

    y_true = K.permute_dimensions(y_true, (3, 1, 2, 0))
    y_pred = K.permute_dimensions(y_pred, (3, 1, 2, 0))

    y_true_pos = K.batch_flatten(y_true)
    y_pred_pos = K.batch_flatten(y_pred)
    true_pos = K.sum(y_true_pos * y_pred_pos, 1)
    false_neg = K.sum(y_true_pos * (1 - y_pred_pos), 1)
    false_pos = K.sum((1 - y_true_pos) * y_pred_pos, 1)
    alpha = 0.7
    return (true_pos + smooth) / (true_pos + alpha * false_neg +
                                  (1 - alpha) * false_pos + smooth)
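For reference, the return value is the per-class Tversky index (the permute puts the class axis first, so batch_flatten sums over batch and spatial positions for each class):

    TI_c = (TP_c + s) / (TP_c + alpha * FN_c + (1 - alpha) * FP_c + s),  alpha = 0.7

With alpha > 0.5, false negatives are penalized more heavily than false positives, the usual choice for imbalanced segmentation.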
Example #3
    def predict(self, X, y, epoch=None):
        y_probs = self.model.call(X, training=False)

        if self.is_full:
            agnostic_predictions = get_last_channel(
                y_probs[0]) > self.hparams.predict_frame_threshold
            multi_stack_predictions = convert_multi_instrument_probs_to_predictions(
                y_probs[0], self.hparams.predict_frame_threshold,
                self.hparams.multiple_instruments_threshold)[0]
            permuted_stack_predictions = K.permute_dimensions(
                multi_stack_predictions,
                (tf.rank(multi_stack_predictions) - 1,
                 *K.arange(tf.rank(multi_stack_predictions) - 1)))
            permuted_predictions = K.concatenate(
                [permuted_stack_predictions, agnostic_predictions], axis=0)
        else:
            timbre_probs = y_probs[0]
            top_probs = K.cast(
                tf.one_hot(K.argmax(timbre_probs),
                           K.int_shape(timbre_probs)[-1]), 'bool')
            frame_predictions = tf.logical_or(
                timbre_probs > self.hparams.multiple_instruments_threshold,
                tf.logical_and(top_probs, timbre_probs > 0.5))
            permuted_predictions = K.permute_dimensions(
                frame_predictions, (tf.rank(frame_predictions) - 1,
                                    *K.arange(tf.rank(frame_predictions) - 1)))
        y_relevant = y[0][0]
        permuted_true = K.permute_dimensions(
            y_relevant,
            (tf.rank(y_relevant) - 1, *K.arange(tf.rank(y_relevant) - 1)))

        instrument_metrics = dict()
        for i in range(self.num_classes):
            instrument_metric = calculate_frame_metrics(
                permuted_true[i], permuted_predictions[i])
            instrument_metrics[
                constants.FAMILY_IDX_STRINGS[i]] = instrument_metric

        if self.is_full:
            if not os.path.exists(self.save_dir):
                os.makedirs(self.save_dir)
            # Save agnostic midi.
            self.save_midi([get_last_channel(p) for p in y_probs],
                           [get_last_channel(t) for t in y], epoch)
            self.save_stack_midi([get_last_channel(p) for p in y_probs],
                                 [get_last_channel(t) for t in y], epoch)

        del y_probs
        return instrument_metrics
Example #4
 def dropped_inputs():
     outputs = inputs
     if self.data_format == 'channels_first':
         outputs = K.permute_dimensions(outputs, [0, 2, 3, 1])
     shape = K.shape(outputs)
     if self.sync_channels:
         mask = self._compute_drop_mask(
             [shape[0], shape[1], shape[2], 1])
     else:
         mask = self._compute_drop_mask(shape)
     outputs = outputs * mask * (
         K.cast(K.prod(shape), dtype=K.floatx()) / K.sum(mask))
     if self.data_format == 'channels_first':
         outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])
     return outputs
Example #5
    def multilabel_dice_coefficient_fixed(y_true, y_pred):
        y_dims = K.int_shape(y_pred)

        number_of_labels = y_dims[-1]

        if dimensionality == 2:
            # 2-D image
            y_true_permuted = K.permute_dimensions(y_true,
                                                   pattern=(3, 0, 1, 2))
            y_pred_permuted = K.permute_dimensions(y_pred,
                                                   pattern=(3, 0, 1, 2))
        elif dimensionality == 3:
            # 3-D image
            y_true_permuted = K.permute_dimensions(y_true,
                                                   pattern=(4, 0, 1, 2, 3))
            y_pred_permuted = K.permute_dimensions(y_pred,
                                                   pattern=(4, 0, 1, 2, 3))
        else:
            raise ValueError("Specified dimensionality not implemented.")

        y_true_label = K.gather(y_true_permuted, indices=1)
        y_pred_label = K.gather(y_pred_permuted, indices=1)

        y_true_label_f = K.flatten(y_true_label)
        y_pred_label_f = K.flatten(y_pred_label)
        intersection = y_true_label_f * y_pred_label_f
        union = y_true_label_f + y_pred_label_f - intersection

        numerator = K.sum(intersection)
        denominator = K.sum(union)

        if number_of_labels > 2:
            for j in range(2, number_of_labels):
                y_true_label = K.gather(y_true_permuted, indices=j)
                y_pred_label = K.gather(y_pred_permuted, indices=j)
                y_true_label_f = K.flatten(y_true_label)
                y_pred_label_f = K.flatten(y_pred_label)

                intersection = y_true_label_f * y_pred_label_f
                union = y_true_label_f + y_pred_label_f - intersection

                numerator = numerator + K.sum(intersection)
                denominator = denominator + K.sum(union)

        unionOverlap = numerator / denominator

        return (-1.0 * (2.0 * unionOverlap + smoothing_factor) /
                (1.0 + unionOverlap + smoothing_factor))
Example #6
    def call(self, inputs, **kwargs):
        """This is where the layer's logic lives.

        Parameters
        ----------
        inputs: tensor
            Input tensor, or list/tuple of input tensors
        kwargs: dict
            Additional keyword arguments

        Returns
        -------
        tensor
            A tensor or list/tuple of tensors
        """
        input_shape = K.int_shape(inputs)
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank 4; '
                             'received input shape: ' + str(input_shape))

        if self.data_format == 'channels_first':
            batch_size, channels, height, width = input_shape
            if batch_size is None:
                batch_size = -1
            r_height, r_width = self.size
            o_height, o_width = height * r_height, width * r_width
            o_channels = channels // (r_height * r_width)

            out = K.reshape(
                inputs,
                (batch_size, r_height, r_width, o_channels, height, width))
            out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
            out = K.reshape(out, (batch_size, o_channels, o_height, o_width))
        elif self.data_format == 'channels_last':
            batch_size, height, width, channels = input_shape
            if batch_size is None:
                batch_size = -1
            r_height, r_width = self.size
            o_height, o_width = height * r_height, width * r_width
            o_channels = channels // (r_height * r_width)

            out = K.reshape(
                inputs,
                (batch_size, height, width, r_height, r_width, o_channels))
            out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
            out = K.reshape(out, (batch_size, o_height, o_width, o_channels))
        return out
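The reshape → permute → reshape in each branch is the standard sub-pixel (depth-to-space) rearrangement. A quick sanity sketch against TensorFlow's built-in op, assuming TF2 eager mode and equal upscale factors (all shapes here are arbitrary test values):

import numpy as np
import tensorflow as tf

r = 2
b, h, w, oc = 1, 3, 3, 4
x = tf.constant(np.random.rand(b, h, w, oc * r * r), tf.float32)
out = tf.reshape(x, (b, h, w, r, r, oc))
out = tf.transpose(out, (0, 1, 3, 2, 4, 5))  # interleave block rows/cols
out = tf.reshape(out, (b, h * r, w * r, oc))
print(np.allclose(out.numpy(), tf.nn.depth_to_space(x, r).numpy()))  # True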
Example #7
    def call(self, x):
        x_orig = x

        # x reshape
        this_bs_int = K.shape(x)[0]
        this_bs = tf.cast(this_bs_int, 'float32')  # this batch size
        prev_count = self.count
        x = K.batch_flatten(x)  # B x N

        # update mean
        new_mean, new_count = _mean_update(self.mean, self.count, x, self.cap)        

        # new C update. Should be B x N x N
        x = K.expand_dims(x, -1)
        C_delta = K.batch_dot(x, K.permute_dimensions(x, [0, 2, 1]))

        # update cov
        prev_cap = K.minimum(prev_count, self.cap)
        C = self.cov * (prev_cap - 1) + K.sum(C_delta, 0)
        new_cov = C / (prev_cap + this_bs - 1)

        # updates
        updates = [(self.count, new_count), (self.mean, new_mean), (self.cov, new_cov)]
        self.add_update(updates, x_orig)

        # prep for broadcasting :(
        p = tf.concat((K.reshape(this_bs_int, (1,)), K.shape(self.cov)), 0)
        z = K.ones(p)

        return K.minimum(1., new_count/self.cap) * (z * K.expand_dims(new_cov, 0))
Example #8
    def call(self, u_vecs):
        if self.share_weights:
            u_hat_vecs = K.conv1d(u_vecs, self.W)
        else:
            u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

        batch_size = K.shape(u_vecs)[0]
        input_num_capsule = K.shape(u_vecs)[1]
        u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                            self.num_capsule, self.dim_capsule))
        u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
        #final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

        b = K.zeros_like(u_hat_vecs[:,:,:,0]) #shape = [None, num_capsule, input_num_capsule]
        for i in range(self.routings):
            c = softmax(b, 1)
            # o = K.batch_dot(c, u_hat_vecs, [2, 2])
            o = tf.einsum('bin,binj->bij', c, u_hat_vecs)
            if K.backend() == 'theano':
                o = K.sum(o, axis=1)
            if i < self.routings - 1:
                o = K.l2_normalize(o, -1)
                # b = K.batch_dot(o, u_hat_vecs, [2, 3])
                b = tf.einsum('bij,binj->bin', o, u_hat_vecs)
                if K.backend() == 'theano':
                    b = K.sum(b, axis=1)

        return self.activation(o)
Example #9
    def call(self, x):
        def hw_flatten(x):
            return K.reshape(x,
                             shape=[
                                 K.shape(x)[0],
                                 K.shape(x)[1] * K.shape(x)[2],
                                 K.shape(x)[3]
                             ])

        f = K.conv2d(x, kernel=self.kernel_f, strides=(1, 1),
                     padding='same')  # [bs, h, w, c']
        g = K.conv2d(x, kernel=self.kernel_g, strides=(1, 1),
                     padding='same')  # [bs, h, w, c']
        h = K.conv2d(x, kernel=self.kernel_h, strides=(1, 1),
                     padding='same')  # [bs, h, w, c]

        s = K.batch_dot(hw_flatten(g),
                        K.permute_dimensions(hw_flatten(f),
                                             (0, 2, 1)))  # [bs, N, N]

        beta = K.softmax(s, axis=-1)  # attention map

        o = K.batch_dot(beta, hw_flatten(h))  # [bs, N, C]

        o = K.reshape(o, shape=K.shape(x))  # [bs, h, w, C]
        x = self.gamma * o + x

        return x
Example #10
    def _space_to_depth(self, input_tensor):
        """ Space to depth implementation.

        PlaidML does not have a space-to-depth operation, so calculate it manually
        if the backend is amd, otherwise return the :func:`tensorflow.space_to_depth`
        operation.

        Parameters
        ----------
        input_tensor: tensor
            The tensor to be manipulated

        Returns
        -------
        tensor
            The manipulated input tensor
        """
        if get_backend() == "amd":
            batch, height, width, depth = input_tensor.shape.dims
            new_height = height // self.scale
            new_width = width // self.scale
            reshaped = K.reshape(input_tensor,
                                 (batch, new_height, self.scale, new_width, self.scale, depth))
            retval = K.reshape(K.permute_dimensions(reshaped, [0, 1, 3, 2, 4, 5]),
                               (batch, new_height, new_width, -1))
        else:
            retval = tf.space_to_depth(input_tensor, block_size=self.scale, data_format="NHWC")
        logger.debug("Input Tensor: %s, Output Tensor: %s", input_tensor, retval)
        return retval
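A small check, assuming TF2 eager mode and arbitrary test shapes, that the reshape/permute trick in the amd branch matches TensorFlow's own op:

import numpy as np
import tensorflow as tf

scale = 2
b, h, w, d = 1, 4, 4, 3
x = tf.constant(np.random.rand(b, h, w, d), tf.float32)
reshaped = tf.reshape(x, (b, h // scale, scale, w // scale, scale, d))
manual = tf.reshape(tf.transpose(reshaped, [0, 1, 3, 2, 4, 5]),
                    (b, h // scale, w // scale, -1))
print(np.allclose(manual.numpy(),
                  tf.nn.space_to_depth(x, block_size=scale).numpy()))  # True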
Example #11
def gramMatrix(x):
    if K.image_data_format() == "channels_first":
        # flatten per channel to (channels, h*w); K.flatten here would collapse
        # everything to a vector and K.dot would return a scalar, not a Gram matrix
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram
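For a channels_last feature map of shape (h, w, c), the permute/batch_flatten pair builds F of shape (c, h*w), so the result is the c x c Gram matrix G = F·F^T. A NumPy sketch with arbitrary sizes:

import numpy as np

feat = np.random.rand(4, 5, 3)                    # (h, w, c), channels_last
F = feat.transpose(2, 0, 1).reshape(3, -1)        # (c, h*w)
gram = F @ F.T                                    # (c, c)
print(gram.shape, np.allclose(gram, gram.T))      # (3, 3) True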
Example #12
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)

        reduction_axes = list(range(0, len(input_shape)))
        if self.axis is not None:
            del reduction_axes[self.axis]
        del reduction_axes[0]

        # Put axis last
        inputs = K.permute_dimensions(
            inputs, tuple([0] + reduction_axes + [self.axis]))

        # Collapse all other dims into dim 1
        cinp = K.reshape(inputs,
                         (K.shape(inputs)[0], -1, input_shape[self.axis]))
        n_reduced = K.shape(cinp)[1]

        # Calculate dot product
        pure_gram = K.batch_dot(cinp, cinp, 1)
        scaled_gram = pure_gram / K.cast(
            2 * n_reduced * input_shape[self.axis], 'float32')

        return scaled_gram
        # return K.sqrt(scaled_gram)

        # NOTE: the covariance variant below is unreachable (kept for reference)
        means = K.mean(cinp, [1], keepdims=True)
        mean_mat = K.batch_dot(means, means, 1)
        cov = scaled_gram - mean_mat

        return cov
Example #13
    def call(self, inputs):
        """Following the routing algorithm from Hinton's paper,
        but replace b = b + <u,v> with b = <u,v>.

        This change can improve the feature representation of Capsule.

        However, you can replace
            b = K.batch_dot(outputs, hat_inputs, [2, 3])
        with
            b += K.batch_dot(outputs, hat_inputs, [2, 3])
        to realize a standard routing.
        """

        if self.share_weights:
            hat_inputs = K.conv1d(inputs, self.kernel)
        else:
            hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

        batch_size = K.shape(inputs)[0]
        input_num_capsule = K.shape(inputs)[1]
        hat_inputs = K.reshape(hat_inputs,
                               (batch_size, input_num_capsule,
                                self.num_capsule, self.dim_capsule))
        hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

        b = K.zeros_like(hat_inputs[:, :, :, 0])
        for i in range(self.routings):
            c = softmax(b, 1)
            o = self.activation(caps_batch_dot(c, hat_inputs))
            if i < self.routings - 1:
                b = caps_batch_dot(o, hat_inputs)
                if K.backend() == 'theano':
                    o = K.sum(o, axis=1)

        return o
Example #14
    def call(self, x, mask=None):

        assert len(x) == 2

        img = x[0]
        rois = x[1]

        outputs = []

        for roi_idx in range(self.num_rois):
            x = rois[0, roi_idx, 0]
            y = rois[0, roi_idx, 1]
            w = rois[0, roi_idx, 2]
            h = rois[0, roi_idx, 3]

            x = K.cast(x, 'int32')
            y = K.cast(y, 'int32')
            w = K.cast(w, 'int32')
            h = K.cast(h, 'int32')

            rs = tf.image.resize_images(img[:, y:y+h, x:x+w, :], (self.pool_size, self.pool_size))
            outputs.append(rs)

        final_output = K.concatenate(outputs, axis=0)
        final_output = K.reshape(final_output, (1, self.num_rois, self.pool_size, self.pool_size, self.nb_channels))

        # identity permutation (no-op), kept from the original implementation
        final_output = K.permute_dimensions(final_output, (0, 1, 2, 3, 4))

        return final_output
Example #15
def split_heads(r: int, x: KTensor) -> KTensor:
    r"""
    Split sequential data along the last dimension (entries/embeddings)
    into a set of subspaces and flatten the result:
    Given $r \in \mathbb{N}$ and a tensor $X$ of shape $b \times l \times d$,
    where $b$ – batch size, $l$ – the number of entries in a sequences, $d$ is
    entry length (embedding dimensions) such that $d \bmod r = 0$:
    1. Calculate subspace size $d_r = d \bmod r$;
    2. Add a new dimension along the entry (embedding) dimension such that
    each entry (embedding) vector is replaced by an $r \times ${d}_{r}$
    matrix: the resulting tensor will have shape [b, l, r, d_r]. Permute
    dimensions from $[b, l, r, d_r]$ to $[r, b, l, d_r]$. In other
    words, we end up with $r$ consecutive views (entry/embedding splits) of the
    original batch. Each view retains the structure of the original
    batch.
    4. Flatten the output along the 0-axis. The output will be
    $[r \times b, l, d_r]$
    :param r: the number of heads
    :param x: a tensor of shape
    """
    b, l, d = K.int_shape(x)
    d_r = d // r
    # split each entry of shape d into r splits of shape d_r
    splits = K.reshape(x, [-1, l, r, d_r])
    # permute to [r, b, l, d_r]
    head_batches = K.permute_dimensions(splits, [2, 0, 1, 3])
    # drop the r-dimension: [r, b, l, d_r] -> [r*b, l, d_r]
    return K.reshape(head_batches, [-1, l, d_r])
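A shape walk-through, assuming the function above and TF2's Keras backend (sizes are arbitrary: b=2, l=5, d=8, r=4):

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.arange(2 * 5 * 8, dtype='float32').reshape(2, 5, 8))
heads = split_heads(4, x)
print(K.int_shape(heads))  # (8, 5, 2), i.e. (r*b, l, d_r) with d_r = 8 // 4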
Example #16
	def call(self, x):
		assert len(x) == 2
		# feature map
		img = x[0]
		# ROI (anchor box) coordinates
		rois = x[1]

		outputs = []

		for roi_idx in range(self.num_rois):
			x = rois[0, roi_idx, 0]
			y = rois[0, roi_idx, 1]
			w = rois[0, roi_idx, 2]
			h = rois[0, roi_idx, 3]

			x = K.cast(x, 'int32')
			y = K.cast(y, 'int32')
			w = K.cast(w, 'int32')
			h = K.cast(h, 'int32')

			# crop the ROI from the feature map and resize it to pool_size x pool_size
			rs = tf.compat.v1.image.resize_images(img[:,y:y+h,x:x+w,:], (self.pool_size, self.pool_size))
			outputs.append(rs)
		# like tf.concat(): stack the list of ROI crops into a single tensor
		final_output = K.concatenate(outputs, axis=0)
		final_output = K.reshape(final_output, (1, self.num_rois, self.pool_size, self.pool_size, self.nb_channels))

		# identity permutation (no-op), kept from the original implementation
		final_output = K.permute_dimensions(final_output, (0, 1, 2, 3, 4))
		return final_output
Example #17
 def extract_image_patches(self,
                           x,
                           ksizes,
                           ssizes,
                           padding='same',
                           data_format='channels_last'):
     """
     Extract the patches from an image
     # Parameters
         x : The input image
         ksizes : 2-d tuple with the kernel size
         ssizes : 2-d tuple with the strides size
         padding : 'same' or 'valid'
         data_format : 'channels_last' or 'channels_first'
     # Returns
         The (k_w, k_h) patches extracted
         TF ==> (batch_size, w, h, k_w, k_h, c)
         TH ==> (batch_size, w, h, c, k_w, k_h)
     """
     kernel = [1, ksizes[0], ksizes[1], 1]
     strides = [1, ssizes[0], ssizes[1], 1]
     padding = self._preprocess_padding(padding)
     if data_format == 'channels_first':
         x = K.permute_dimensions(x, (0, 2, 3, 1))
     patches = extract_image_patches(x, kernel, strides, [1, 1, 1, 1],
                                     padding)
     return patches
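In plain TF2 the same extraction can be sketched with tf.image.extract_patches (shape values here are arbitrary); note it returns patches flattened into the last dimension rather than the (k_w, k_h) layout described above, which the wrapped helper presumably restores:

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(1, 8, 8, 3), tf.float32)
patches = tf.image.extract_patches(x, sizes=[1, 3, 3, 1],
                                   strides=[1, 2, 2, 1],
                                   rates=[1, 1, 1, 1], padding='SAME')
print(patches.shape)  # (1, 4, 4, 27): each position holds a flattened 3x3x3 patch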
Example #18
    def _multiplicative_similarity(self, source, query):
        qp = K.dot(query, self._weights["w_a"])
        similarity = K.batch_dot(K.permute_dimensions(qp, [0, 2, 1]),
                                 source,
                                 axes=[1, 2])

        return similarity
Example #19
 def _reshape_from_batches(x, head_num):
     input_shape = K.shape(x)
     batch_size, seq_len, feature_dim = input_shape[0], input_shape[1], input_shape[2]
     x = K.reshape(x, (batch_size // head_num,
                       head_num, seq_len, feature_dim))
     x = K.permute_dimensions(x, [0, 2, 1, 3])
     return K.reshape(x, (batch_size // head_num, seq_len, feature_dim * head_num))
Example #20
 def split_heads(x, n_heads: int, k: bool = False):
     x_shape = shape_list(x)
     m = x_shape[-1]
     new_x_shape = x_shape[:-1] + [n_heads, m // n_heads]
     new_x = K.reshape(x, new_x_shape)
     return K.permute_dimensions(new_x,
                                 [0, 2, 3, 1] if k else [0, 2, 1, 3])
Example #21
 def call(self, inputs):
     activation = Kb.dot(inputs, self._weights)  # (batch, in_dim) x (nb_ker, in_dim, ker_dim) = (batch, nb_ker, ker_dim)
     diffs = (Kb.expand_dims(activation, 3)  # (batch, nb_ker, ker_dim, 1)
                 - Kb.expand_dims(Kb.permute_dimensions(activation, [1, 2, 0]), 0))  # (1, nb_ker, ker_dim, batch)
     abs_diffs = Kb.sum(Kb.abs(diffs), axis=2)  # (batch, nb_ker, batch) Sum over rows to get L1
     minibatch_features = Kb.sum(Kb.exp(-abs_diffs), axis=2)  # (batch, nb_ker)
     return minibatch_features  # (batch, nb_ker)
Example #22
    def call(self, input):

        point_cloud = input

        point_cloud_transpose = K.permute_dimensions(point_cloud, [0, 2, 1])

        # Compute distances.
        point_cloud_inner = tf.matmul(point_cloud, point_cloud_transpose)
        point_cloud_inner = -2 * point_cloud_inner
        point_cloud_square = tf.reduce_sum(tf.square(point_cloud),
                                           axis=-1,
                                           keepdims=True)
        point_cloud_square_transpose = tf.transpose(point_cloud_square,
                                                    perm=[0, 2, 1])
        adj_matrix = point_cloud_square + point_cloud_inner + point_cloud_square_transpose

        # Compute indices.
        neg_adj = -adj_matrix
        _, nn_idx = tf.nn.top_k(neg_adj, k=self.k)

        # Compute the neighbors.
        batch_size = tf.shape(point_cloud)[0]  # note: the batch size is dynamic, unlike num_points/num_dims
        num_points = point_cloud.get_shape()[1]
        num_dims = point_cloud.get_shape()[2]
        idx_ = tf.range(batch_size) * num_points
        idx_ = tf.reshape(idx_, [-1, 1, 1])
        point_cloud_flat = tf.reshape(point_cloud, [-1, num_dims])
        point_cloud_neighbors = tf.gather(point_cloud_flat, nn_idx + idx_)

        return point_cloud_neighbors
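The adj_matrix above is the usual batched expansion of squared pairwise distances, ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2. A tiny NumPy check with arbitrary shapes:

import numpy as np

pc = np.random.rand(2, 5, 3)                      # (batch, num_points, dims)
inner = -2 * pc @ pc.transpose(0, 2, 1)
sq = (pc ** 2).sum(-1, keepdims=True)
adj = sq + inner + sq.transpose(0, 2, 1)          # squared pairwise distances
brute = ((pc[:, :, None, :] - pc[:, None, :, :]) ** 2).sum(-1)
print(np.allclose(adj, brute))                    # True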
Example #23
def gconv2d(x, kernel, gconv_indices, gconv_shape_info, strides=(1, 1), padding='valid',
            data_format=None, dilation_rate=(1, 1), transpose=False, output_shape=None):
    """2D group equivariant convolution.

    # Arguments
        x: Tensor or variable.
        kernel: kernel tensor.
        strides: strides tuple.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
            Whether to use Theano or TensorFlow data format
            for inputs/kernels/outputs.
        dilation_rate: tuple of 2 integers.

    # Returns
        A tensor, result of 2D convolution.

    # Raises
        ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
    """
    # Transform the filters
    transformed_filter = transform_filter_2d_nhwc(w=kernel, flat_indices=gconv_indices, shape_info=gconv_shape_info)
    if transpose:
        output_shape = (K.shape(x)[0], output_shape[1], output_shape[2], output_shape[3])
        transformed_filter = K.permute_dimensions(transformed_filter, [0, 1, 3, 2])
        return K.conv2d_transpose(x=x, kernel=transformed_filter, output_shape=output_shape, strides=strides,
                                  padding=padding, data_format=data_format)
    return K.conv2d(x=x, kernel=transformed_filter, strides=strides, padding=padding, data_format=data_format,
                    dilation_rate=dilation_rate)
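A hedged usage sketch: it assumes GrouPy's TensorFlow helper gconv2d_util (from groupy.gconv.tensorflow_gconv.splitgconv2d), which this function is typically paired with; treat the import path, signature and parameter values as assumptions if your fork differs:

import numpy as np
from keras import backend as K
from groupy.gconv.tensorflow_gconv.splitgconv2d import gconv2d_util

# Z2 -> C4 lifting convolution: 3 input channels, 8 output channels, 3x3 kernels.
gconv_indices, gconv_shape_info, w_shape = gconv2d_util(
    h_input='Z2', h_output='C4', in_channels=3, out_channels=8, ksize=3)
w = K.variable(np.random.randn(*w_shape).astype('float32'))
x = K.variable(np.random.rand(1, 32, 32, 3).astype('float32'))
y = gconv2d(x, w, gconv_indices, gconv_shape_info, padding='same')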
Example #24
    def call(self, inputs):
        if self.share_weights:
            hat_inputs = K.conv1d(inputs, self.kernel)
        else:
            hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

        batch_size = K.shape(inputs)[0]
        input_num_capsule = K.shape(inputs)[1]
        hat_inputs = K.reshape(hat_inputs,
                               (batch_size, input_num_capsule,
                                self.num_capsule, self.dim_capsule))
        hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

        b = K.zeros_like(hat_inputs[:, :, :, 0])
        for i in range(self.routings):
            c = softmax(b, 1)
            o = self.activation(keras.backend.batch_dot(c, hat_inputs, [2, 2]))
            if i < self.routings - 1:
                b = keras.backend.batch_dot(o, hat_inputs, [2, 3])
                if K.backend() == 'theano':
                    o = K.sum(o, axis=1)

        return o
Example #25
def boxes_from_deltas(pred_box_delta, config):
    """
    Converts prediction deltas to bounding boxes
    
    Arguments:
        pred_box_delta {[type]} -- tensor of deltas
        config {[type]} -- hyperparameter dict
    
    Returns:
        [type] -- tensor of bounding boxes
    """



    # Keras backend allows no unstacking

    delta_x = pred_box_delta[:, :, 0]
    delta_y = pred_box_delta[:, :, 1]
    delta_w = pred_box_delta[:, :, 2]
    delta_h = pred_box_delta[:, :, 3]

    # get the coordinates and sizes of the anchor boxes from config

    anchor_x = config.ANCHOR_BOX[:, 0]
    anchor_y = config.ANCHOR_BOX[:, 1]
    anchor_w = config.ANCHOR_BOX[:, 2]
    anchor_h = config.ANCHOR_BOX[:, 3]

    # as we only predict the deltas, we need to transform the anchor box values before computing the loss

    box_center_x = tf.identity(
        anchor_x + delta_x * anchor_w)
    box_center_y = tf.identity(
        anchor_y + delta_y * anchor_h)
    box_width = tf.identity(
        anchor_w * safe_exp(delta_w, config.EXP_THRESH))
    box_height = tf.identity(
        anchor_h * safe_exp(delta_h, config.EXP_THRESH))

    # transform into a real box with four coordinates

    xmins, ymins, xmaxs, ymaxs = bbox_transform([box_center_x, box_center_y, box_width, box_height])

    # trim boxes if predicted outside

    xmins = K.minimum(
        K.maximum(0.0, xmins), config.IMAGE_WIDTH - 1.0)
    ymins = K.minimum(
        K.maximum(0.0, ymins), config.IMAGE_HEIGHT - 1.0)
    xmaxs = K.maximum(
        K.minimum(config.IMAGE_WIDTH - 1.0, xmaxs), 0.0)
    ymaxs = K.maximum(
        K.minimum(config.IMAGE_HEIGHT - 1.0, ymaxs), 0.0)

    det_boxes = K.permute_dimensions(
        K.stack(bbox_transform_inv([xmins, ymins, xmaxs, ymaxs])),
        (1, 2, 0)
    )
    
    return det_boxes
Example #26
    def compute_p_c_z(self, z):
        assert z.shape[1:] == (
            self.latent_dim, ), 'z.shape[1:] {} != {}'.format(
                z.shape[1:], (self.latent_dim, ))
        Z = K.permute_dimensions(K.repeat(z, self.n_clusters), [0, 2, 1])
        assert Z.shape[1:] == (self.latent_dim,
                               self.n_clusters), 'Z.shape[1:] {} != {}'.format(
                                   Z.shape[1:],
                                   (self.latent_dim, self.n_clusters))

        u_tensor3 = self.compute_u_tensor3()
        lambda_tensor3 = self.compute_lambda_tensor3()

        assert self.theta_p.shape == (
            self.n_clusters, ), 'self.theta_p.shape {} != {}'.format(
                self.theta_p.shape, (self.n_clusters, ))
        theta_tensor3 = K.expand_dims(
            K.expand_dims(self.theta_p, axis=0), axis=0) * K.ones(
                (self.batch_size, self.latent_dim, self.n_clusters))
        assert theta_tensor3.shape == (
            self.batch_size, self.latent_dim,
            self.n_clusters), 'theta_tensor3.shape {} != {}'.format(
                theta_tensor3.shape,
                (self.batch_size, self.latent_dim, self.n_clusters))

        p_c_z = K.exp(K.sum(K.log(theta_tensor3)
                            - 0.5 * K.log(2 * math.pi * lambda_tensor3)
                            - K.square(Z - u_tensor3) / (2 * lambda_tensor3),
                            axis=1)) + 1e-10
        assert p_c_z.shape[1:] == (
            self.n_clusters, ), 'p_c_z.shape[1:] {} != {}'.format(
                p_c_z.shape[1:], (self.n_clusters, ))
        return p_c_z / K.sum(p_c_z, axis=-1, keepdims=True)
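Written out, the expression inside K.exp is the log-density of a diagonal-covariance Gaussian mixture term (a reconstruction from the code, using its variable names), so the method returns the cluster responsibilities:

    p(c|z) ∝ theta_c * prod_d (2*pi*lambda_cd)^(-1/2) * exp(-(z_d - u_cd)^2 / (2*lambda_cd))

normalized over clusters by the final division.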
Example #27
 def _call_multiplicative_emission(self, inputs):
     # e_{t, t'} = x_t^T W_a x_{t'} + b_a
     e = K.batch_dot(K.dot(inputs, self.Wa),
                     K.permute_dimensions(inputs, (0, 2, 1)))
     if self.use_attention_bias:
         e = e + self.ba
     return e
Example #28
def channel_shuffle(x):
    height, width, channels = x.shape.as_list()[1:]
    channels_per_split = channels // 2
    x = K.reshape(x, [-1, height, width, 2, channels_per_split])
    x = K.permute_dimensions(x, (0,1,2,4,3))
    x = K.reshape(x, [-1, height, width, channels])
    return x
Example #29
    def call(self, x):
        # soft-assignment.
        s = K.conv2d(x, self.kernel, padding='same') + self.bias
        print('s.shape=', s.shape)
        a = K.softmax(s)
        self.amap = K.argmax(a, -1)
        # print 'amap.shape', self.amap.shape

        # Dims used hereafter: batch, H, W, desc_coeff, cluster
        a = K.expand_dims(a, -2)
        # print 'a.shape=',a.shape

        # Core
        v = K.expand_dims(x, -1) + self.C
        # print 'v.shape', v.shape
        v = a * v
        # print 'v.shape', v.shape
        v = K.sum(v, axis=[1, 2])
        # print 'v.shape', v.shape
        v = K.permute_dimensions(v, pattern=[0, 2, 1])
        # print 'v.shape', v.shape
        #v.shape = None x K x D

        # Normalize v (Intra Normalization)
        v = K.l2_normalize(v, axis=-1)
        v = K.batch_flatten(v)
        v = K.l2_normalize(v, axis=-1)

        # return [v, self.amap]
        return v
Example #30
def channel_shuffle(x, groups):
    """
    Parameters
    ----------
    x:
        Input tensor with `channels_last` data format
    groups: int
        number of groups to split the channels into
    Returns
    -------
        channel shuffled output tensor
    Examples
    --------
    Example for a 1D Array with 3 groups
    >>> d = np.array([0,1,2,3,4,5,6,7,8])
    >>> x = np.reshape(d, (3,3))
    >>> x = np.transpose(x, [1,0])
    >>> x = np.reshape(x, (9,))
    '[0 1 2 3 4 5 6 7 8] --> [0 3 6 1 4 7 2 5 8]'
    """
    height, width, in_channels = x.shape.as_list()[1:]
    #handle dynamic image shape case
    if height is None:
        height = K.shape(x)[1]
    if width is None:
        width = K.shape(x)[2]
    channels_per_group = in_channels // groups

    x = K.reshape(x, [-1, height, width, groups, channels_per_group])
    x = K.permute_dimensions(x, (0, 1, 2, 4, 3))  # transpose
    x = K.reshape(x, [-1, height, width, in_channels])

    return x
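A quick usage sketch, assuming TF2 eager mode: it reproduces the [0 3 6 1 4 7 2 5 8] ordering from the docstring on a dummy 9-channel tensor:

import numpy as np
from tensorflow.keras import backend as K

d = K.constant(np.arange(9, dtype='float32').reshape(1, 1, 1, 9))
print(K.eval(channel_shuffle(d, groups=3))[0, 0, 0])
# [0. 3. 6. 1. 4. 7. 2. 5. 8.]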