Example #1
    def forward(self, input, finished=None):
        gamma_h = torch.tanh(self.linear_2(input))
        weights = torch.sum(self.phi_hidden_state * gamma_h,
                            dim=2,
                            keepdim=True)
        weights = torch.exp(weights -
                            torch.max(weights, dim=0, keepdim=True)[0])
        weights = torch.divide(
            weights, (1e-6 + torch.sum(weights, dim=0, keepdim=True)))
        if self.field_size is not None:
            alpha_h = torch.tanh(self.linear_5(input))
            field_weights = torch.sum(self.phi_field_state * alpha_h,
                                      dim=2,
                                      keepdim=True)
            field_weights = torch.exp(
                field_weights -
                torch.max(field_weights, dim=0, keepdim=True)[0])
            field_weights = torch.divide(
                field_weights,
                (1e-6 + torch.sum(field_weights, dim=0, keepdim=True)))
            weights = torch.divide(
                weights * field_weights,
                (1e-6 +
                 torch.sum(weights * field_weights, dim=0, keepdim=True)))

        context = torch.sum(self.hidden_state * weights, dim=0)
        out = self.linear_3(torch.cat([context, input], -1))

        if finished is not None:
            out = torch.where(finished, torch.zeros_like(out), out)
        return out, weights
Example #2
    def _label_attention(self, x):
        x_hat = torch.divide(x, torch.norm(x, dim=2, keepdim=True) + 1e-3)
        l_hat = torch.divide(self.C,
                             torch.norm(self.C, dim=1, keepdim=True) + 1e-3)

        G_hat = torch.matmul(l_hat.unsqueeze(dim=0), x_hat.permute(0, 2, 1))
        return G_hat
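
For context, here is a minimal standalone sketch of the same computation; the shapes below, x as (batch, seq_len, dim) token embeddings and self.C as a (num_labels, dim) label-embedding matrix, are assumptions rather than something the snippet states.

import torch

batch, seq_len, dim, num_labels = 2, 7, 16, 5
x = torch.randn(batch, seq_len, dim)  # assumed token embeddings
C = torch.randn(num_labels, dim)      # assumed label embeddings (self.C)

x_hat = torch.divide(x, torch.norm(x, dim=2, keepdim=True) + 1e-3)
l_hat = torch.divide(C, torch.norm(C, dim=1, keepdim=True) + 1e-3)
G_hat = torch.matmul(l_hat.unsqueeze(dim=0), x_hat.permute(0, 2, 1))
print(G_hat.shape)  # torch.Size([2, 5, 7]): cosine-style label-to-token scores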
Example #3
def compute_wass_dist(a,
                      b,
                      M,
                      reg,
                      weights=None,
                      numItermax=1000,
                      stopThr=1e-4,
                      verbose=False,
                      log=False):
    A = torch.vstack((a, b)).T
    if weights is None:
        weights = torch.ones(A.shape[1], dtype=torch.float64,
                             device=device) / A.shape[1]
    else:
        assert (len(weights) == 2)

    if log:
        log = {'err': []}

    # M = M/np.median(M) # suggested by G. Peyre
    K = torch.exp(torch.div(-M, reg))

    cpt = 0
    err = 1
    UKv = torch.mm(K, torch.divide(A.T, torch.sum(K, axis=0)).T)
    u = (geometricMean(UKv) / UKv.T).T

    while (err > stopThr and cpt < numItermax):
        cpt = cpt + 1
        UKv = u * torch.mm(K, torch.divide(A, torch.mm(K, u)))
        u = (u.T * geometricBar2(weights, UKv)).T / UKv

        if cpt % 10 == 1:
            err = torch.sum(torch.std(UKv, axis=1))

            # log and verbose print
            if log:
                log['err'].append(err)

            if verbose:
                if cpt % 200 == 0:
                    print('{:5s}|{:12s}'.format('It.', 'Err') + '\n' +
                          '-' * 19)
                print('{:5d}|{:8e}|'.format(cpt, err))

    if log:
        log['niter'] = cpt
        return geometricBar2(weights, UKv), log
    else:
        result = geometricBar2(weights, UKv)
        print(reg * torch.sum(result))
        return torch.sum(result * M)
Example #4
def equi_basic(name, input_depth, output_depth, inputs):
    '''
    :param name: name of layer
    :param input_depth: D
    :param output_depth: S
    :param inputs: N x D x m x m tensor
    :return: output: N x S x m x m tensor
    '''
    basis_dimension = 4
#     with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:

    # initialization values for variables
    # Glorot-style scale; the original torch.matmul with a scalar and .type() on an int would fail
    coeffs_values = torch.randn(input_depth, output_depth, basis_dimension, dtype=torch.float32) * (2.0 / (input_depth + output_depth)) ** 0.5
#     coeffs_values = tf.multiply(tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32), tf.sqrt(2. / tf.to_float(input_depth + output_depth)))
    #coeffs_values = tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32)
    # define variables
    coeffs = coeffs_values.requires_grad_(True)  # torch.autograd.Variable is deprecated; a leaf tensor suffices
#     coeffs = tf.get_variable('coeffs', initializer=coeffs_values)

    m = inputs.shape[3]  # extract dimension (a Python int; .type() is a tensor method)
#     m = tf.to_int32(tf.shape(inputs)[3])  # extract dimension
    float_dim = float(m)
#     float_dim = tf.to_float(m)


    # apply ops
    ops_out = []
    # w1 - identity
    ops_out.append(inputs)
    # w2 - sum cols
    sum_of_cols = torch.divide(torch.sum(inputs, dim=2), float_dim)  # N x D x m
#     sum_of_cols = tf.divide(tf.reduce_sum(inputs, axis=2), float_dim)  # N x D x m
    ops_out.append(torch.unsqueeze(sum_of_cols, dim=2).repeat(1, 1, m, 1))  # N x D x m x m
#     ops_out.append(tf.tile(tf.expand_dims(sum_of_cols, axis=2), [1, 1, m, 1]))  # N x D x m x m
    # w3 - sum rows
    sum_of_rows = torch.divide(torch.sum(inputs, dim=3), float_dim)  # N x D x m
#     sum_of_rows = tf.divide(tf.reduce_sum(inputs, axis=3), float_dim)  # N x D x m
    ops_out.append(torch.unsqueeze(sum_of_rows, dim=3).repeat(1, 1, 1, m))  # N x D x m x m
#     ops_out.append(tf.tile(tf.expand_dims(sum_of_rows, axis=3), [1, 1, 1, m]))  # N x D x m x m
    # w4 - sum all
    sum_all = torch.divide(torch.sum(sum_of_rows, dim=2), torch.square(float_dim))  # N x D
#     sum_all = tf.divide(tf.reduce_sum(sum_of_rows, axis=2), tf.square(float_dim))  # N x D
    ops_out.append(torch.unsqueeze(torch.unsqueeze(sum_all, dim=2), dim=3).repeat(1, 1, m, m))  # N x D x m x m
#     ops_out.append(tf.tile(tf.expand_dims(tf.expand_dims(sum_all, axis=2), axis=3), [1, 1, m, m]))  # N x D x m x m

    ops_out = torch.stack(ops_out, dim=2)
#     ops_out = tf.stack(ops_out, axis=2)
    output = torch.einsum('dsb,ndbij->nsij', coeffs, ops_out)  # N x S x m x m
#     output = tf.einsum('dsb,ndbij->nsij', coeffs, ops_out)  # N x S x m x m

    # bias
    bias = torch.zeros((1, output_depth, 1, 1), dtype=torch.float32, requires_grad=True)
#     bias = tf.get_variable('bias', initializer=tf.zeros([1, output_depth, 1, 1], dtype=tf.float32))
    output = output + bias

    return output
Example #5
def cal_loss(fs_list, ft_list, criterion):
    tot_loss = 0
    for i in range(len(ft_list)):
        fs = fs_list[i]
        ft = ft_list[i]
        _, _, h, w = fs.shape
        fs_norm = torch.divide(fs, torch.norm(fs, p=2, dim=1, keepdim=True))
        ft_norm = torch.divide(ft, torch.norm(ft, p=2, dim=1, keepdim=True))
        f_loss = (0.5 / (w * h)) * criterion(fs_norm, ft_norm)
        tot_loss += f_loss
    return tot_loss
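
A minimal smoke test, assuming fs_list/ft_list hold matching student/teacher feature maps of shape (batch, channels, h, w) and using torch.nn.MSELoss as a stand-in for criterion (the snippet fixes neither):

import torch

criterion = torch.nn.MSELoss()
fs_list = [torch.randn(2, 8, 16, 16), torch.randn(2, 16, 8, 8)]  # student features
ft_list = [torch.randn(2, 8, 16, 16), torch.randn(2, 16, 8, 8)]  # teacher features
print(cal_loss(fs_list, ft_list, criterion))  # scalar: sum of per-layer scaled losses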
Example #6
File: aux.py Project: jan-v97/pytorch
def get_regularized_slice_dist_matrix(x, y, z, reg):
    # Gibbs kernels K = exp(-d^2 / reg) for two 1-D slices of lengths x and y
    points1 = torch.vstack((torch.arange(x, device=device, dtype=torch.float32), torch.zeros(x, device=device))).T
    dist_matrix1 = (torch.cdist(points1, points1) / (x - 1))**2
    K1 = torch.divide(dist_matrix1, -reg)
    torch.exp(K1, out=K1)

    points2 = torch.vstack((torch.arange(y, device=device, dtype=torch.float32), torch.zeros(y, device=device))).T
    dist_matrix2 = (torch.cdist(points2, points2) / (y - 1))**2
    K2 = torch.divide(dist_matrix2, -reg)
    torch.exp(K2, out=K2)
    return K1, K2
Example #7
def var(input: Tensor,
        dim: DimOrDims = None,
        unbiased: Optional[bool] = False,
        *,
        keepdim: Optional[bool] = False,
        dtype: Optional[DType] = None,
        mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

The identity value of sample variance operation is undefined.  The
elements of output tensor with strided layout, that correspond to
fully masked-out elements, have ``nan`` values.

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
        if not (dtype.is_floating_point or dtype.is_complex):
            dtype = torch.float32
    compute_dtype = dtype
    if not (compute_dtype.is_floating_point or compute_dtype.is_complex):
        compute_dtype = torch.float32
    if input.layout == torch.strided:
        inmask = _input_mask(input, mask=mask)
        count = sum(inmask.new_ones(input.shape, dtype=torch.int64),
                    dim,
                    keepdim=True,
                    mask=inmask)
        sample_total = sum(input, dim, keepdim=True, dtype=dtype, mask=inmask)
        # TODO: replace torch.subtract/divide/square/maximum with
        # masked subtract/divide/square/maximum when these will be
        # available.
        sample_mean = torch.divide(sample_total, count)
        x = torch.subtract(input, sample_mean)
        total = sum(x * x.conj(),
                    dim,
                    keepdim=keepdim,
                    dtype=compute_dtype,
                    mask=inmask)
        if not keepdim:
            count = count.reshape(total.shape)
        if unbiased:
            count = torch.subtract(count, 1)
            count = torch.maximum(count, count.new_zeros([]))
        return torch.divide(total, count).to(dtype=dtype)
    else:
        raise ValueError(
            f'masked var expects strided tensor (got {input.layout} tensor)')
Example #8
def cal_anomaly_map(fs_list, ft_list, out_size=256):
    pdist = torch.nn.PairwiseDistance(p=2, keepdim=True)
    anomaly_map = torch.ones([ft_list[0].shape[0], 1, out_size,
                              out_size]).to(device)
    for i in range(len(ft_list)):
        fs = fs_list[i]
        ft = ft_list[i]
        fs_norm = torch.divide(fs, torch.norm(fs, p=2, dim=1, keepdim=True))
        ft_norm = torch.divide(ft, torch.norm(ft, p=2, dim=1, keepdim=True))
        a_map = 0.5 * pdist(fs_norm, ft_norm)**2
        a_map = F.interpolate(a_map, size=out_size, mode='bilinear')
        anomaly_map *= a_map
    return anomaly_map
Example #9
    def update(self, X, y, learning_rate):
        z = torch.mul(self.forward(X), y)
        
        grad_W = torch.divide(torch.mul(X, y), (1 + torch.exp(z)))
        grad_b = torch.divide(y, 1 + torch.exp(z))

        grad_W = -1 * torch.mean(grad_W, dim=0).unsqueeze(dim=1)
        grad_b = -1 * torch.mean(grad_b)

        self.W -= learning_rate * grad_W
        self.b -= learning_rate * grad_b
        
        return grad_W, grad_b
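
For reference, these updates are the standard logistic-regression gradients: with margin z = y * f(X) and loss L = log(1 + exp(-z)), one gets dL/dW = -y * X / (1 + exp(z)) and dL/db = -y / (1 + exp(z)), which the code averages over the batch before the gradient-descent step.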
Example #10
def elbo_grad(Gl, logWl):
    nodes = list({key for G in Gl for key in G})
    g_hat = {}
    L = len(logWl)
    for node in nodes:
        F = []
        for l in range(L):
            if node in list(Gl[l].keys()):
                F.append(torch.multiply(Gl[l][node], logWl[l]))
            else:
                F.append(torch.tensor([0., 0.]))
                Gl[l][node] = torch.tensor([0., 0.])
        b_hat = sum([
            np.cov(F[l].detach(), Gl[l][node].detach())[0, 1] for l in range(L)
        ]) / sum([
            np.var([Gl[l][node][i].detach().numpy() for l in range(L)])
            for i in range(len(Gl[l][node]))
        ])
        # b_hat = 0

        temp = [np.multiply(-1 * b_hat, Gl[l][node]) for l in range(L)]
        Ftemp = [torch.add(F[i], temp[i]) for i in range(len(F))]
        sumFtemp = torch.stack(Ftemp, dim=0).sum(dim=0)
        g_hat[node] = torch.divide(sumFtemp, L)
    return g_hat
Example #11
    def forward(self, x, adj):

        feature_dim = int(adj.shape[-1])
        eye = torch.eye(feature_dim).cuda()

        if x is None:
            AXW = torch.tensordot(
                adj, self.kernel,
                [[-1], [0]])  # batch_size * num_node * feature_dim
        else:
            XW = torch.tensordot(
                x, self.kernel, [[-1], [0]])  # batch *  num_node * feature_dim
            AXW = torch.matmul(adj, XW)  # batch *  num_node * feature_dim

        I_cAXW = eye + self.c * AXW
        y_relu = torch.nn.functional.relu(I_cAXW)
        temp = torch.mean(input=y_relu, dim=-2, keepdim=True) + 1e-6
        col_mean = temp.repeat([1, feature_dim, 1])
        y_norm = torch.divide(y_relu, col_mean)
        output = torch.nn.functional.softplus(y_norm)
        if self.neg_penalty != 0:
            neg_loss = torch.multiply(
                torch.tensor(self.neg_penalty),
                torch.sum(torch.nn.functional.relu(1e-6 - self.kernel)))
            self.losses.append(neg_loss)
        return output
Example #12
    def step(self, reward, act_scalar=1.0, alpha=0.2, epsilon=0.5):
        # Update rewards
        self.rewards[self.offsets + self.indices] += torch.divide(
            reward - self.rewards[self.offsets + self.indices],
            self.divs[self.offsets + self.indices])
        self.divs[self.offsets + self.indices] += alpha

        # Find new indices
        self.indices = (
            torch.argmax(self.rewards.reshape(
                (self.num_params, self.resolution)),
                         dim=1).float() +
            torch.randn(self.num_params, device=self.device) * epsilon +
            0.5).long().clamp(min=0, max=self.resolution - 1)

        # Generate new parameters
        start_index = 0

        for p in self.module.parameters():
            p.data = (
                act_scalar *
                (self.indices[start_index:start_index + p.numel()].float() /
                 self.resolution * 2.0 - 1.0)).reshape(p.shape)

            start_index += p.numel()
Example #13
def masked_mse_loss(y_pred, y_true, null_val):
    mask = torch.ne(y_true, null_val).float()
    mask = torch.divide(mask, torch.mean(mask))
    loss = torch.square(y_pred - y_true)
    loss = torch.mul(loss, mask)
    loss[torch.isnan(loss)] = 0.
    return torch.mean(loss)
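
A quick sketch of the masking behavior; treating 0.0 as the null sentinel is an assumption for illustration:

import torch

y_true = torch.tensor([[1.0, 0.0], [2.0, 3.0]])
y_pred = torch.tensor([[1.5, 9.0], [2.0, 2.0]])
# the 0.0 entry in y_true is treated as missing, so the 9.0 prediction is ignored
print(masked_mse_loss(y_pred, y_true, null_val=0.0))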
Example #14
def create_torch_stft(samples, FFT=False):
    '''
        ARGUMENTS
            > samples - a pytorch tensor of dimensions (# samples, sample length)
            > FFT - if True, FFT is calculated instead of STFT
        RETURNS
            > spec - a tensor of dimension (# samples, frequency bins)
        '''
    if not FFT:
        # Create Spectrogram
        spec = torch.stft(samples,
                          n_fft=2048,
                          hop_length=512,
                          normalized=True,
                          return_complex=True).abs()
        # Collapse spectrogram by summing along the time axis
        spec = torch.sum(spec, dim=2)
    else:
        spec = torch.fft.fft(samples, dim=1).abs()
        spec -= spec.min(1, keepdim=True)[0]
        spec /= spec.max(1, keepdim=True)[0]

    # Normalize
    spec_max = torch.max(spec, axis=1)[0].unsqueeze(1).repeat(1, spec.shape[1])
    spec_min = torch.min(spec, axis=1)[0].unsqueeze(1).repeat(1, spec.shape[1])
    spec = torch.subtract(spec, spec_min)
    spec = torch.divide(spec, torch.subtract(spec_max, spec_min))
    spec = spec[:, :11025]
    return spec.type(torch.float32)
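
A minimal sketch with one second of fake audio; the 22.05 kHz rate is an assumption suggested by the 11025-bin cap:

import torch

samples = torch.randn(4, 22050)                  # (# samples, sample length)
spec = create_torch_stft(samples)                # STFT branch -> (4, 1025)
spec_fft = create_torch_stft(samples, FFT=True)  # FFT branch  -> (4, 11025)
print(spec.shape, spec_fft.shape)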
Example #15
def error_fn_l2_normalized(predictions, dataset):
    actual = dataset.X
    errors = predictions - actual
    error_norms = torch.linalg.norm(torch.tensor(errors), dim=-1, ord=2)
    actual_norms = torch.linalg.norm(torch.tensor(actual), dim=-1, ord=2)
    normalized_errors = torch.divide(error_norms, actual_norms)
    return normalized_errors
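
A hedged sketch: dataset only needs an X attribute holding the ground-truth array, so SimpleNamespace stands in for the real dataset type here:

import torch
from types import SimpleNamespace

dataset = SimpleNamespace(X=torch.randn(10, 3).numpy())
predictions = dataset.X + 0.01                       # nearly perfect predictions
print(error_fn_l2_normalized(predictions, dataset))  # per-row relative L2 errors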
Example #16
def get_group_delay(
    raw_data: torch.Tensor,
    sampling_rate_in_hz: int,
    window_length_in_s: float,
    window_shift_in_s: float,
    num_fft_points: int,
    window_type: str,
):
    X_stft_transform = _get_stft(raw_data,
                                 sampling_rate_in_hz,
                                 window_length_in_s,
                                 window_shift_in_s,
                                 num_fft_points,
                                 window_type=window_type)
    Y_stft_transform = _get_stft(
        raw_data,
        sampling_rate_in_hz,
        window_length_in_s,
        window_shift_in_s,
        num_fft_points,
        window_type=window_type,
        data_transformation="group_delay",
    )
    X_stft_transform_real = torch.real(X_stft_transform)
    X_stft_transform_imag = torch.imag(X_stft_transform)
    Y_stft_transform_real = torch.real(Y_stft_transform)
    Y_stft_transform_imag = torch.imag(Y_stft_transform)
    numerator = torch.multiply(
        X_stft_transform_real, Y_stft_transform_real) + torch.multiply(
            X_stft_transform_imag, Y_stft_transform_imag)
    denominator = torch.square(torch.abs(X_stft_transform))
    group_delay = torch.divide(numerator, denominator + 1e-10)
    assert not torch.isnan(
        group_delay).any(), "There are NaN values in group delay"
    return torch.transpose(group_delay, 0, 1)
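
For reference, this follows the standard spectral formulation of group delay: if X is the STFT of x[n] and Y is the STFT of the time-weighted signal n*x[n], then tau(omega) = (X_R*Y_R + X_I*Y_I) / |X(omega)|^2, with the 1e-10 term only guarding against division by zero. That Y is indeed the time-weighted STFT is an assumption about _get_stft's "group_delay" transformation, which is not shown here.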
Example #17
def topk3d(tensor, k=1):
  n = tensor.size(0)
  c = tensor.size(1)
  d = tensor.size(2)
  # .topk returns (values, indices); only the flat indices are needed here
  idx = tensor.contiguous().view(n, -1).topk(k, dim=1).indices
  # decode each flat index into (channel, row, column) coordinates
  return torch.cat((torch.divide(idx, d**2, rounding_mode='trunc').view(-1, 1),
                    torch.divide(idx % d**2, d, rounding_mode='trunc').view(-1, 1),
                    (idx % d**2 % d).view(-1, 1)), dim=1)
Example #18
def sample2d(tensor, k=1):
  if tensor.dim() == 2:
    n = 1
    d = tensor.size(-1)
  else:
    n = tensor.size(0)
    d = tensor.size(-1)
  idx = torch.multinomial(tensor.reshape(n, -1), k)
  return torch.cat((torch.divide(idx, d, rounding_mode='trunc').view(-1, 1), (idx % d).view(-1, 1)), dim=1)
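
A small usage sketch: draw k coordinate pairs from an unnormalized 2-D weight map (torch.multinomial only requires non-negative weights):

import torch

weights = torch.rand(5, 5)
coords = sample2d(weights, k=3)
print(coords)  # shape (3, 2): (row, col) pairs sampled in proportion to weight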
Example #19
    def batch_top_k(batch, k):
        bsz, dim = batch.shape
        flatten = batch.flatten()
        top_values, top_ids = torch.topk(flatten, k=k)

        batch_ids = torch.divide(top_ids, dim, rounding_mode="trunc")
        dim_ids = torch.remainder(top_ids, dim)

        return top_values, batch_ids, dim_ids
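
A quick check, treating batch_top_k as a standalone helper (it takes no self):

import torch

batch = torch.tensor([[0.1, 0.9, 0.3],
                      [0.8, 0.2, 0.7]])
values, batch_ids, dim_ids = batch_top_k(batch, k=2)
print(values)     # tensor([0.9000, 0.8000])
print(batch_ids)  # tensor([0, 1]): the row each value came from
print(dim_ids)    # tensor([1, 0]): the column within that row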
Example #20
def tanimoto_loss(label, pred):
    square = torch.square(pred)
    sum_square = torch.sum(square)
    product = torch.multiply(pred, label)
    sum_product = torch.sum(product)
    denominator = torch.subtract(torch.add(sum_square, 1), sum_product)
    loss = torch.divide(sum_product, denominator)
    loss = torch.mean(loss)  # torch has no reduce_mean; torch.mean is the equivalent
    return 1.0 - loss
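
A quick smoke test. Note the denominator is sum(pred**2) + 1 - sum(pred*label) as written, not the textbook Tanimoto denominator sum(pred**2) + sum(label**2) - sum(pred*label), so values are only comparable under this snippet's convention:

import torch

label = torch.tensor([1.0, 0.0, 1.0])
pred = torch.tensor([0.9, 0.1, 0.8])
print(tanimoto_loss(label, pred))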
Example #21
    def forward(self, inputs):

        inputs = inputs.permute(0, 2, 3, 4, 1).contiguous()
        input_shape = inputs.shape

        # Flatten input
        flat_inputs = inputs.view(-1, self._embedding_dim)

        dist = (
            torch.sum(flat_inputs.detach()**2, dim=1, keepdims=True) +
            torch.sum(self._embedding.weight**2, dim=1)) - 2 * torch.matmul(
                flat_inputs.detach(), self._embedding.weight.t())

        sij_numer = (1 + dist / self.df).pow(-0.5 * (self.df + 1))
        sij_denom = sij_numer.sum(dim=1, keepdims=True)

        sij = torch.divide(sij_numer, sij_denom)

        sij_Kappa = sij.pow(self.Kappa)

        sumi_sij = sij.sum(dim=0, keepdims=True)

        tij_numer = torch.divide(sij_Kappa, sumi_sij)
        tij_denom = tij_numer.sum(dim=1, keepdims=True)

        tij = torch.divide(tij_numer, tij_denom)

        log_sij = torch.log(sij)
        log_tij = torch.log(tij)

        CAH_loss = torch.sum(torch.multiply(tij, log_tij - log_sij))

        log_sie = torch.matmul(log_sij, self.adjacency)

        sumj_sij_log_sie = torch.sum(torch.multiply(sij, log_sie), dim=1)

        SOM_loss = -sumj_sij_log_sie.mean()
        total_loss = self.gamma * CAH_loss + self.beta * SOM_loss

        return ({
            "CAH_loss": CAH_loss,
            "SOM_loss": SOM_loss,
            "vq_loss": total_loss
        })
Example #22
def std_var(input: Tensor,
            dim: DimOrDims = None,
            unbiased: Optional[bool] = False,
            *,
            keepdim: Optional[bool] = False,
            dtype: Optional[DType] = None,
            mask: Optional[Tensor] = None,
            take_sqrt: Optional[bool] = False) -> Tensor:
    if dtype is None:
        dtype = input.dtype
        if not (dtype.is_floating_point or dtype.is_complex):
            dtype = torch.float32
    compute_dtype = dtype
    if not (compute_dtype.is_floating_point or compute_dtype.is_complex):
        compute_dtype = torch.float32
    if input.layout == torch.strided:
        if mask is None:
            # TODO: compute count analytically
            count = sum(torch.ones(input.shape, dtype=torch.int64, device=input.device), dim, keepdim=True)
            sample_total = sum(input, dim, keepdim=True, dtype=dtype)
        else:
            inmask = _input_mask(input, mask=mask)
            count = sum(inmask.new_ones(input.shape, dtype=torch.int64), dim, keepdim=True, mask=inmask)
            sample_total = sum(input, dim, keepdim=True, dtype=dtype, mask=inmask)
        # TODO: replace torch.subtract/divide/square/maximum with
        # masked subtract/divide/square/maximum when these will be
        # available.
        sample_mean = torch.divide(sample_total, count)
        x = torch.subtract(input, sample_mean)
        if mask is None:
            total = sum(x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype)
        else:
            total = sum(x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype, mask=inmask)
        if not keepdim:
            count = count.reshape(total.shape)
        if unbiased:
            count = torch.subtract(count, 1)
            count = torch.maximum(count, count.new_zeros([]))
        output = torch.divide(total, count).to(dtype=dtype)
        if take_sqrt:
            output = torch.sqrt(output)
        return output
    else:
        raise ValueError(f'masked std/var expects strided tensor (got {input.layout} tensor)')
Example #23
    def forward(self, x, a):
        """Performs spatial dilation of a periodic function by Fourier 
        transform. Uses the observation that the Fourier transform has the 
        following time scaling relation:

        f(x) <-----> F(k)

        f(ax) <----> 1/|a| F(k/a)

        Args:
            x (torch.tensor): Observations to be scaled. A tensor of shape 
            (n_batch, n_channels, n_grid_points). The scaling will be performed
            across the last dimension.
            a (float): Scaling parameter. x -> lambda * x
        """

        a_ = int(a)

        # Take Fourier transform
        x_dft = torch.fft.fft(x)

        # Scale the frequencies k -> k/a
        # Idea: we want a new DFT vector which has shape x_dft.shape[-1] * a
        # In this vector we want zeros for non-integer values of k/a and the
        # elements of x_dft for the integer values of k/a. We will do this via
        # 1d convolution with a kernel of zeros except for the first entry

        convo_kernel = torch.zeros(x_dft.shape[0], 1, a_, dtype=torch.cfloat)
        convo_kernel[:, 0, 0] = torch.ones_like(convo_kernel[:, 0, 0])
        convo_kernel.requires_grad = False

        out_x_dft = torch.zeros(
            (x_dft.shape[0], x_dft.shape[1], a_ * x_dft.shape[-1]),
            dtype=torch.cfloat)
        for i in range(x_dft.shape[-1]):
            lb = a_ * i
            ub = a_ * (i + 1)

            arg_1 = x_dft[:, :, i].view(x_dft.shape[0], x_dft.shape[1], 1)

            # ignoring the batch dimension, convo_kernel is of size (1, a)
            # and arg_1 is of size (n_channels, 1)
            out_x_dft[:, :, lb:ub] = torch.matmul(arg_1, convo_kernel)

        x_dft = out_x_dft

        # Scale the whole thing by 1/|a|
        x = torch.divide(x_dft, np.abs(a))

        x_ifft = torch.fft.ifft(x)

        return x_ifft
Example #24
def argmax4d(tensor):
  n = tensor.size(0)
  c1 = tensor.size(1)
  c2 = tensor.size(2)
  d = tensor.size(3)
  # .topk returns (values, indices); take the flat index of the single maximum
  idx = tensor.contiguous().view(n, -1).topk(1, dim=1).indices
  # decode the flat index into four coordinates via truncating division
  return torch.cat((torch.divide(idx, d**2 * c2, rounding_mode='trunc').view(-1, 1),
                    torch.divide(idx % (d**2 * c2), d**2, rounding_mode='trunc').view(-1, 1),
                    torch.divide((idx % (d**2 * c2)) % d**2, d, rounding_mode='trunc').view(-1, 1),
                    (((idx % (d**2 * c2)) % d**2) % d).view(-1, 1)), dim=1)
Example #25
def average_precision(predictions, ground_truth, iou_threshold):
    # return the average precision of a class
    # predictions [train_idx, pred_class, confidence, x, y, w, h]
    predictions.sort(key=lambda x: x[2], reverse=True)

    TP_ = torch.zeros((len(predictions)))
    FP_ = torch.zeros((len(predictions)))

    gt_used = torch.zeros((len(ground_truth)))
    for pred_id, pred in enumerate(predictions):
        # get the label in same image
        # print("prediction: " + str(pred))
        gt_ = []
        for bid, bbox in enumerate(ground_truth):
            if int(bbox[0]) == int(pred[0]) and int(gt_used[bid]) == 0:
                nbb = [bid] + bbox
                # print(nbb)
                gt_.append(nbb)

        if len(gt_) == 0:
            FP_[pred_id] = 1
            continue

        best_iou = 0
        best_id = 0
        for gt in gt_:
            iou_ = intersection_over_union(torch.tensor(pred[3:]),
                                           torch.tensor(gt[4:]),
                                           box_format="midpoint")
            # print("iou_: " + str(iou_))
            if iou_ > best_iou:
                best_iou = iou_
                best_id = gt[0]

        if best_iou >= iou_threshold and int(
                gt_used[best_id]) == 0:  # ground truth haven't checked
            TP_[pred_id] = 1
            gt_used[best_id] = 1
        else:
            FP_[pred_id] = 1

    TP_cumsum = torch.cumsum(TP_, dim=0)
    FP_cumsum = torch.cumsum(FP_, dim=0)
    rec = TP_cumsum / (len(ground_truth) + 1e-6)
    pre = torch.divide(TP_cumsum, (TP_cumsum + FP_cumsum + 1e-6))
    pre = torch.cat((torch.tensor([1]), pre))
    rec = torch.cat((torch.tensor([0]), rec))
    # print("gt_used: " + str(gt_used))
    # print("pre: " + str(pre))
    # print("rec: " + str(rec))
    auc = torch.trapz(pre, rec, dim=-1).item()
    # print("AP: " + str(auc))

    return auc
Example #26
    def forward(self, x, frequencies, x_grid):
        # Input has size (n_batch, n_channels, n_x_points)
        n_batch, n_channels, n_x_points = x.shape
        x = x.type(torch.cfloat)
        exp_argument = torch.mul(frequencies, 1j * 2 * np.pi)
        # torch.outer(x_grid, exp_argument) has shape (n_x_points, n_frequencies)
        exp_multiplicand = torch.exp(torch.outer(x_grid, exp_argument))

        # x has shape (b, c, s) and exp_multiplicand has shape (s, f); we want
        # output of shape (b, c, f)
        out = torch.einsum('bcs,sf->bcf', x, exp_multiplicand)

        out = torch.divide(out, n_x_points)

        return out
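
In effect the layer evaluates a Fourier-type sum on an arbitrary grid: out[b, c, f] = (1/S) * sum_s x[b, c, s] * exp(i * 2 * pi * frequencies[f] * x_grid[s]), where the division by S = n_x_points supplies the usual 1/N normalization.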
Example #27
    def mean_average_precision(self, y_que, X_que, y_pool, X_pool):
        if self.rank is None:
            raise ValueError("rank function is not provided.")
        n_que = len(y_que)
        n_pool = len(y_pool)
        ap = th.zeros(n_que, device=self.device)
        for i in range(n_que):
            y = y_que[i]
            ranks = self.rank(X_que[i], X_pool)
            rel = y_pool[ranks] == y
            pre_k = th.cumsum(rel, dim=0) / th.arange(
                1, n_pool + 1, device=self.device)
            ap[i] = th.divide(th.sum(pre_k * rel), th.sum(rel))
        return th.nansum(ap) / (n_que - th.sum(th.isnan(ap)))
Example #28
    def forward(ctx,
                input,
                scale,
                scale_grad,
                thd_neg,
                thd_pos,
                act_mode=True):
        # scale_grad, thd_neg and thd_pos are scalars; input and scale are tensors
        x_d_s = t.divide(input, scale)
        x_ = t.round(t.clamp(x_d_s, thd_neg, thd_pos))
        output = x_ * scale

        # save_for_backward only accepts tensors; keep the scalars on ctx instead
        ctx.save_for_backward(x_d_s, x_)
        ctx.quant_params = (scale_grad, thd_neg, thd_pos, act_mode)
        return output
Example #29
def compute_affinity_matrix(batch_patches: torch.Tensor) -> torch.Tensor:
    """
    Computes the affinity matrix for every patch in a batch.
    :param batch_patches: tensor with shape (batch_size, patch_width, patch_height, no_of_channels)
    :return affinity_matrix: torch.Tensor containing the affinity matrix for every patch in the batch
    """
    _, h, w, c = batch_patches.shape
    x1: torch.Tensor = batch_patches.reshape(-1, h * w, c).unsqueeze(1)
    x2: torch.Tensor = batch_patches.reshape(-1, h * w, c).unsqueeze(2)
    diff_image = torch.linalg.norm(x2 - x1, dim=-1)

    kernel = torch.topk(diff_image, h * w).values
    kernel = torch.mean(kernel[:, :, (h * w) // 4], dim=1)
    kernel = torch.reshape(kernel, (-1, 1, 1))
    affinity_matrix = torch.exp(-(torch.divide(diff_image, kernel)**2))
    return affinity_matrix
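
A minimal sketch: pairwise affinities between all pixels inside each patch of a random batch:

import torch

batch_patches = torch.rand(2, 4, 4, 3)  # (batch, height, width, channels)
A = compute_affinity_matrix(batch_patches)
print(A.shape)  # torch.Size([2, 16, 16]); entries in (0, 1], with 1.0 on the diagonal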
Example #30
    def infer_full_image(self, input, C_out, kernel_size=256, stride=128):
        self.generator.eval()
        B, C, W, H = input.shape
        pad_W = kernel_size - W % kernel_size
        pad_H = kernel_size - H % kernel_size

        x, _, _ = compute_pyramid_patch_weight_loss(kernel_size, kernel_size)

        input = F.pad(input, (0, pad_H, 0, pad_W), mode="reflect").squeeze(0)
        _, W_pad, H_pad = input.shape
        patches = input.unfold(1, kernel_size,
                               stride).unfold(2, kernel_size, stride)

        c, n_w, n_h, w, h = patches.shape
        patches = patches.contiguous().view(c, -1, kernel_size, kernel_size)

        dataset = torch.utils.data.TensorDataset(patches.permute(1, 0, 2, 3))
        batch_size = 4
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=batch_size)
        op = []
        for batch_idx, sample1 in enumerate(dataloader):
            patch_op = self.generator(sample1[0])
            op.append(patch_op)
        op = torch.cat(op).permute(1, 0, 2, 3)

        op = op.permute(0, 2, 3, 1).reshape(1, -1, n_w * n_h)
        weights_op = (torch.from_numpy(x).unsqueeze(0).unsqueeze(-1).repeat(
            1, C_out, 1, n_w * n_h).reshape(1, -1, n_w * n_h)).cuda()
        op = torch.mul(weights_op, op)
        op = F.fold(
            op,
            output_size=(W_pad, H_pad),
            kernel_size=(kernel_size, kernel_size),
            stride=(stride, stride),
        )
        weights_op = F.fold(
            weights_op,
            output_size=(W_pad, H_pad),
            kernel_size=(kernel_size, kernel_size),
            stride=(stride, stride),
        )
        op = torch.divide(op, weights_op)

        output = op  #torch.clamp(op, 0.0, 1.0)
        output = output[:, :, :W, :H]
        return output