Example #1
    def call(self, input_tensor):
        """
        target: batch x  ih x iw (queryL=ihxiw) x idf
        source: batch x sourceL(seq_len) x idf
        mask: batch x sourceL 
            -inf or 0 が入っている
        """
        target, source, mask = input_tensor
        idf = self.output_dim
        ih = self.ih
        iw = self.iw
        queryL = self.queryL
        sourceL = self.sourceL

        # --> batch x queryL x idf
        target = K.reshape(target, (-1, queryL, idf))
        # --> batch x idf x sourceL
        sourceT = ktf.transpose(source, perm=[0, 2, 1])
        # Get attention
        # (batch x queryL x idf)(batch x idf x sourceL)
        # -->batch x queryL x sourceL
        attn = ktf.matmul(target, sourceT)
        addmask = K.switch(
            self.use_mask,
            lambda: K.repeat_elements(
                K.reshape(mask, (-1, 1, sourceL)), rep=queryL, axis=1),
            lambda: 0.0)
        attn = attn + addmask
        attn = K.softmax(attn)
        # (batch x queryL x sourceL)(batch x sourceL x idf)
        # --> batch x queryL x idf
        weightedContext = ktf.matmul(attn, source)
        weightedContext = K.reshape(weightedContext, (-1, ih, iw, idf))
        attn = K.reshape(attn, (-1, ih, iw, sourceL))  # not used in later computation
        return [weightedContext, attn]
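
The layer above multiplies grid queries with source-sequence features to get attention scores, optionally adds a 0 / -inf mask, softmaxes over the source positions, and re-weights the source features. A minimal sketch of the same flow in plain TensorFlow, with made-up shapes rather than the layer's actual configuration:

import tensorflow as tf

batch, queryL, sourceL, idf = 2, 6, 4, 8
target = tf.random.normal((batch, queryL, idf))    # queries (flattened ih x iw grid)
source = tf.random.normal((batch, sourceL, idf))   # source sequence features
mask = tf.zeros((batch, sourceL))                  # 0 keeps a position, -inf drops it

scores = tf.matmul(target, source, transpose_b=True)   # batch x queryL x sourceL
scores += tf.reshape(mask, (batch, 1, sourceL))         # broadcast the additive mask
attn = tf.nn.softmax(scores, axis=-1)
context = tf.matmul(attn, source)                       # batch x queryL x idf
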
Example #2
def max_singular_val(w,
                     u,
                     fully_differentiable=False,
                     ip=1,
                     transpose=lambda x: K.transpose(x)):
    if not fully_differentiable:
        w_ = K.stop_gradient(w)
    else:
        w_ = w
    u = K.expand_dims(u, axis=-1)

    u_bar = u
    for _ in range(ip):
        v_bar = ktf.matmul(transpose(w_), u_bar)
        v_bar = K.l2_normalize(v_bar, axis=(-1, -2))

        u_bar_raw = ktf.matmul(w_, v_bar)
        u_bar = K.l2_normalize(u_bar_raw, axis=(-1, -2))
    sigma = ktf.matmul(transpose(u_bar), ktf.matmul(w, v_bar))

    sigma = K.squeeze(sigma, axis=-1)
    sigma = K.squeeze(sigma, axis=-1)

    u_bar = K.squeeze(u_bar, axis=-1)
    return sigma, u_bar
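
max_singular_val runs ip steps of power iteration to estimate the largest singular value (spectral norm) of w, reusing u as the running estimate of the leading left singular vector. A self-contained sketch of the same iteration in plain TensorFlow (shapes and step count are illustrative, not taken from the function's callers):

import tensorflow as tf

w = tf.random.normal((64, 32))
u = tf.math.l2_normalize(tf.random.normal((64, 1)), axis=0)

for _ in range(1):                                        # one power-iteration step (ip=1)
    v = tf.math.l2_normalize(tf.matmul(w, u, transpose_a=True), axis=0)
    u = tf.math.l2_normalize(tf.matmul(w, v), axis=0)

sigma = tf.matmul(u, tf.matmul(w, v), transpose_a=True)   # approx. largest singular value
w_sn = w / tf.squeeze(sigma)                              # e.g. spectral normalisation
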
Example #3
def get_inv_sqrt(ff):
    with ktf.device('/cpu:0'):
        S, U, _ = ktf.svd(ff + ktf.eye(c) * self.epsilon,
                          full_matrices=True)
    D = ktf.diag(ktf.pow(S, -0.5))
    inv_sqrt = ktf.matmul(D, U, transpose_b=True)
    D = ktf.diag(ktf.pow(S, 0.5))
    sqrt = ktf.matmul(D, U, transpose_b=True)
    return sqrt, inv_sqrt
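
For a symmetric positive-definite ff = U S U^T, this PCA-style branch uses S^{-1/2} U^T as the whitening matrix (the 'zca' branch further down keeps an extra U on the left). A small NumPy check of that property on a random SPD matrix; this is illustrative, not the layer's own code:

import numpy as np

a = np.random.randn(4, 4)
ff = a @ a.T + 1e-3 * np.eye(4)          # random symmetric positive-definite matrix

s, u = np.linalg.eigh(ff)
inv_sqrt = np.diag(s ** -0.5) @ u.T       # PCA whitening matrix
assert np.allclose(inv_sqrt @ ff @ inv_sqrt.T, np.eye(4), atol=1e-6)
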
Example #4
def _upsampled_dft(data, upsampled_region_size, upsample_factor, axis_offsets):
    """
    Upsampled DFT by matrix multiplication.
    This code is intended to provide the same result as if the following
    operations were performed:
        - Embed the array "data" in an array that is ``upsample_factor`` times
          larger in each dimension.  ifftshift to bring the center of the
          image to (1,1).
        - Take the FFT of the larger array.
        - Extract an ``[upsampled_region_size]`` region of the result, starting
          with the ``[axis_offsets+1]`` element.
    It achieves this result by computing the DFT in the output array without
    the need to zero-pad. This is much faster and more memory-efficient than
    the zero-padded FFT approach if ``upsampled_region_size`` is much smaller
    than ``data.size * upsample_factor``.
    Parameters
    ----------
    data : 2D ndarray
        The input data array (DFT of original data) to upsample.
    upsampled_region_size : integer or tuple of integers, optional
        The size of the region to be sampled.  If one integer is provided, it
        is duplicated up to the dimensionality of ``data``.
    upsample_factor : integer, optional
        The upsampling factor.  Defaults to 1.
    axis_offsets : tuple of integers, optional
        The offsets of the region to be sampled.  Defaults to None (uses the
        image center).
    Returns
    -------
    output : 2D ndarray
            The upsampled DFT of the specified region.
    """
    data_shape = tf.shape(data)

    col_kernel = _col_kernel(upsampled_region_size, upsample_factor,
                             axis_offsets, data_shape)
    row_kernel = _row_kernel(upsampled_region_size, upsample_factor,
                             axis_offsets, data_shape)

    upsampled_dft = tf.matmul(tf.matmul(row_kernel, data), col_kernel)

    return upsampled_dft
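
The helpers _col_kernel and _row_kernel are not shown in this example; the idea they rely on is that a DFT is a matrix product, so a small window of the upsampled spectrum can be evaluated with two tf.matmul calls instead of zero-padding and running a full FFT. A minimal NumPy check of the underlying "DFT as matmul" identity (no upsampling, purely illustrative):

import numpy as np

n = 8
x = np.random.rand(n) + 1j * np.random.rand(n)
k = np.arange(n)
dft_matrix = np.exp(-2j * np.pi * np.outer(k, k) / n)   # full DFT written as a matrix

assert np.allclose(dft_matrix @ x, np.fft.fft(x))
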
Example #5
File: utils.py Project: liatli/deepposekit
def check_angles(x, rotation_guess):
    x = tf.reshape(x, (-1, 1))
    x = angle_mod(x)
    rA = radians(x)
    rA = tf.concat([tf.cos(rA), tf.sin(rA)], axis=-1)
    rI = tf.reshape(rotation_guess, (-1, 1))
    rI = radians(rI)
    rI = tf.concat([tf.cos(rI), tf.sin(rI)], axis=-1)
    guess_test = tf.matmul(rA, rI, transpose_b=True)
    x = tf.where(guess_test < 0, angle_mod(x - 180), x)
    return x
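
check_angles compares each candidate angle with a rotation guess by taking the dot product of their [cos, sin] unit vectors; a negative product means the candidate points more than 90 degrees away from the guess, so it is flipped by 180. A helper-free sketch of that test in plain TensorFlow (degrees in, degrees out; the original additionally wraps results with angle_mod, which is omitted here):

import numpy as np
import tensorflow as tf

def resolve_180_ambiguity(angles_deg, guess_deg):
    a = tf.reshape(tf.cast(angles_deg, tf.float32), (-1, 1))
    g = tf.reshape(tf.cast(guess_deg, tf.float32), (-1, 1))
    va = tf.concat([tf.cos(a * np.pi / 180), tf.sin(a * np.pi / 180)], axis=-1)
    vg = tf.concat([tf.cos(g * np.pi / 180), tf.sin(g * np.pi / 180)], axis=-1)
    dot = tf.matmul(va, vg, transpose_b=True)   # cosine of the angular difference
    return tf.where(dot < 0, a - 180.0, a)

# resolve_180_ambiguity([10., 170., 200.], 0.)  ->  [[10.], [-10.], [20.]]
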
Example #6
        def train():
            ff_apr = ktf.matmul(f, f, transpose_b=True) / (
                ktf.cast(bs * w * h, ktf.float32) - 1.)
            if self.decomposition in ['pca-cor', 'zca-cor']:
                dinv = ktf.diag(ktf.sqrt(ktf.diag_part(ff_apr)))
                ff_apr = ktf.matmul(ktf.matmul(dinv, ff_apr),
                                    ktf.matrix_inverse(dinv),
                                    transpose_b=True)
            self.add_update([
                K.moving_average_update(self.moving_mean, m, self.momentum),
                K.moving_average_update(self.moving_cov, ff_apr, self.momentum)
            ], inputs)
            ff_apr_shrinked = (
                1 - self.epsilon) * ff_apr + ktf.eye(c) * self.epsilon

            if self.renorm:
                l, l_inv = get_inv_sqrt(ff_apr_shrinked)
                ff_mov = (1 - self.epsilon
                          ) * self.moving_cov + ktf.eye(c) * self.epsilon
                _, l_mov_inverse = get_inv_sqrt(ff_mov)
                l_ndiff = K.stop_gradient(l)
                return ktf.matmul(ktf.matmul(l_mov_inverse, l_ndiff), l_inv)

            return get_inv_sqrt(ff_apr_shrinked)[1]
Example #7
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)
        _, w, h, c = input_shape
        bs = K.shape(inputs)[0]

        #if c < self.group:
        #    raise ValueError('Input channels should be larger than group size' +
        #                     '; Received input channels: ' + str(c) +
        #                     '; Group size: ' + str(self.group)
        #                    )
        #x = K.reshape(inputs, (batch_size, h, w, self.group, c // self.group))

        x_t = ktf.transpose(inputs, (3, 0, 1, 2))
        #x_t = ktf.transpose(x, (3, 4, 0, 1, 2))

        # B x H x W x C  ->  C x (B*H*W)
        x_flat = ktf.reshape(x_t, (c, -1))

        # Covariance
        m = ktf.reduce_mean(x_flat, axis=1, keepdims=True)
        m = K.in_train_phase(m, self.moving_mean)
        f = x_flat - m

        if self.decomposition == 'cholesky':

            def get_inv_sqrt(ff):
                sqrt = ktf.cholesky(ff)
                inv_sqrt = ktf.matrix_triangular_solve(sqrt, ktf.eye(c))
                return sqrt, inv_sqrt
        elif self.decomposition in ['zca', 'zca-cor']:

            def get_inv_sqrt(ff):
                with ktf.device('/cpu:0'):
                    S, U, _ = ktf.svd(ff + ktf.eye(c) * self.epsilon,
                                      full_matrices=True)
                D = ktf.diag(ktf.pow(S, -0.5))
                inv_sqrt = ktf.matmul(ktf.matmul(U, D), U, transpose_b=True)
                D = ktf.diag(ktf.pow(S, 0.5))
                sqrt = ktf.matmul(ktf.matmul(U, D), U, transpose_b=True)
                return sqrt, inv_sqrt
        elif self.decomposition in ['pca', 'pca-cor']:

            def get_inv_sqrt(ff):
                with ktf.device('/cpu:0'):
                    S, U, _ = ktf.svd(ff + ktf.eye(c) * self.epsilon,
                                      full_matrices=True)
                D = ktf.diag(ktf.pow(S, -0.5))
                inv_sqrt = ktf.matmul(D, U, transpose_b=True)
                D = ktf.diag(ktf.pow(S, 0.5))
                sqrt = ktf.matmul(D, U, transpose_b=True)
                return sqrt, inv_sqrt
        else:
            raise ValueError('Unsupported decomposition: ' +
                             str(self.decomposition))

        def train():
            ff_apr = ktf.matmul(f, f, transpose_b=True) / (
                ktf.cast(bs * w * h, ktf.float32) - 1.)
            if self.decomposition in ['pca-cor', 'zca-cor']:
                dinv = ktf.diag(ktf.sqrt(ktf.diag_part(ff_apr)))
                ff_apr = ktf.matmul(ktf.matmul(dinv, ff_apr),
                                    ktf.matrix_inverse(dinv),
                                    transpose_b=True)
            self.add_update([
                K.moving_average_update(self.moving_mean, m, self.momentum),
                K.moving_average_update(self.moving_cov, ff_apr, self.momentum)
            ], inputs)
            ff_apr_shrinked = (
                1 - self.epsilon) * ff_apr + ktf.eye(c) * self.epsilon

            if self.renorm:
                l, l_inv = get_inv_sqrt(ff_apr_shrinked)
                ff_mov = (1 - self.epsilon
                          ) * self.moving_cov + ktf.eye(c) * self.epsilon
                _, l_mov_inverse = get_inv_sqrt(ff_mov)
                l_ndiff = K.stop_gradient(l)
                return ktf.matmul(ktf.matmul(l_mov_inverse, l_ndiff), l_inv)

            return get_inv_sqrt(ff_apr_shrinked)[1]

        def test():
            ff_mov = (
                1 - self.epsilon) * self.moving_cov + ktf.eye(c) * self.epsilon
            return get_inv_sqrt(ff_mov)[1]

        inv_sqrt = K.in_train_phase(train, test)
        f_hat = ktf.matmul(inv_sqrt, f)

        decorrelated = K.reshape(f_hat, [c, bs, w, h])
        decorrelated = ktf.transpose(decorrelated, [1, 2, 3, 0])

        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]
        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            decorrelated = decorrelated * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            decorrelated = decorrelated + broadcast_beta

        return decorrelated
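
Overall the layer performs whitening ("decorrelated") batch normalisation: it estimates the channel covariance from the batch, takes an inverse matrix square root via Cholesky, ZCA, or PCA, and multiplies the centred features by it so the channels become uncorrelated, optionally renormalising against the moving covariance before applying gamma and beta. A compact NumPy illustration of the ZCA branch with made-up shapes, not the layer's code:

import numpy as np

c, n = 8, 1024                                   # channels, batch*height*width samples
x = np.random.randn(c, n) * np.linspace(0.5, 3.0, c)[:, None]

f = x - x.mean(axis=1, keepdims=True)            # centre each channel
cov = f @ f.T / (n - 1)

s, u = np.linalg.eigh(cov + 1e-5 * np.eye(c))
zca = u @ np.diag(s ** -0.5) @ u.T               # ZCA inverse square root
f_hat = zca @ f

print(np.allclose(f_hat @ f_hat.T / (n - 1), np.eye(c), atol=1e-2))   # ~identity covariance
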
Example #8
File: utils.py Project: liatli/deepposekit
def gaussian_kernel_2d(size, sigma):
    kernel = gaussian_kernel_1d(size, sigma)
    kernel = tf.matmul(kernel, kernel, transpose_b=True)
    return kernel
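
The 2-D kernel is the outer product of a 1-D Gaussian with itself, which is what tf.matmul(kernel, kernel, transpose_b=True) computes for a column vector. A self-contained sketch with an illustrative 1-D helper (not the project's gaussian_kernel_1d):

import tensorflow as tf

def gaussian_1d(size, sigma):
    x = tf.cast(tf.range(size), tf.float32) - (size - 1) / 2.0
    g = tf.exp(-(x ** 2) / (2.0 * sigma ** 2))
    return tf.reshape(g / tf.reduce_sum(g), (-1, 1))   # column vector, sums to 1

k1 = gaussian_1d(5, 1.0)
k2 = tf.matmul(k1, k1, transpose_b=True)               # 5 x 5 separable Gaussian kernel
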