Code example #1
File: biolayer.py Project: dcasbol/biolearn_torch
def _compute_step_conv(inputs, synapses, stride, padding, dilation, p, delta,
                       k, eps):
    with torch.no_grad():
        prec = no_grad_tensor(1e-30)
        hid = synapses.shape[0]
        N = synapses.shape[1] * synapses.shape[2] * synapses.shape[
            3]  # inputs to neuron
        batch_size = inputs.shape[0]

        sig = synapses.sign()
        tot_input = F.conv2d(inputs,
                             sig * synapses.abs().pow(p - 1),
                             stride=stride,
                             padding=padding,
                             dilation=dilation)
        # tot_input=torch.matmul(sig*synapses.abs().pow(p-1), inputs.t())
        # [H,D] x [D,N] --> [H,N]

        tot_input_flat = tot_input.transpose(0, 1).contiguous().view(hid, -1)
        # tot_input [batch,hid,H2,W2] --> [hid,batch*H2*W2]
        Num = tot_input_flat.shape[1]  # batch*H2*W2
        idx_batch = torch.arange(Num)
        values = tot_input_flat.clone()
        y1 = torch.argmax(values, 0)
        y = y1
        for i in range(k - 1):
            values[y, idx_batch] = -1e10
            y = torch.argmax(values, 0)
        y2 = y
        yl = torch.zeros(hid,
                         Num)  # one row per neuron, one column per evaluation site
        yl[y1, idx_batch] = 1.0  # 1 for the most strongly activated neuron
        yl[y2, idx_batch] = -delta

        xx = (yl * tot_input_flat).sum(1)  # [hid]
        # [hid,Num] x [Num,D] --> [hid,D] (accumulates the reinforcement per synapse)
        # Num ~ batch*H2*W2
        # D ~ C*k1*k2

        kernel_size = (synapses.shape[2], synapses.shape[3])
        blocks = F.unfold(inputs,
                          kernel_size,
                          stride=stride,
                          padding=padding,
                          dilation=dilation)
        blocks = blocks.transpose(2, 1).contiguous().view(-1, N)
        flat_synapses = synapses.view(hid, N)
        ds = torch.matmul(
            yl, blocks) - xx.view(hid, 1).repeat(1, N) * flat_synapses

        nc = ds.abs().max()
        if nc < prec:
            nc = prec
        return (eps * (ds / nc)).view(hid, -1, kernel_size[0], kernel_size[1])
Code example #2
File: squeeze.py Project: ubamba98/SRFlow-pytorch
    def forward(self, x, reverse=False):
        _check_input_dim(x)

        N, C, H, W = x.shape
        if not reverse:
            assert H%2 == 0 and W%2 == 0
            x = F.unfold(x, kernel_size=2, stride=2).view(N, -1, H//2, W//2)
        else:
            x = x.view(N, C, -1)
            x = F.fold(x, output_size=(H*2, W*2), kernel_size=2, stride=2)
        return x
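
A quick check of the pattern above (not from the repository): with kernel_size == stride == 2 the unfold/fold pair is an exact inverse on even-sized inputs, since the blocks do not overlap.

import torch
import torch.nn.functional as F

x = torch.randn(3, 5, 6, 8)                        # N, C, H, W with even H and W
N, C, H, W = x.shape
squeezed = F.unfold(x, kernel_size=2, stride=2).view(N, -1, H // 2, W // 2)
restored = F.fold(squeezed.view(N, 4 * C, -1),
                  output_size=(H, W), kernel_size=2, stride=2)
assert squeezed.shape == (N, 4 * C, H // 2, W // 2)
assert torch.equal(restored, x)                    # non-overlapping blocks, so fold is exact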
Code example #3
File: cfpn_gsf.py Project: yougoforward/localUp
    def forward(self, c1,out):
        n,c,h,w =c1.size()
        key = self.key(c1) # n, 64, h, w
        query = self.query(c1)

        unfold_up_key = F.unfold(key, 3, 1, 1, 1).permute(0,2,1).view(n, h*w, -1, 3*3)
        # torch.nn.functional.unfold(input, kernel_size, dilation=1, padding=0, stride=1)
        energy = torch.matmul(query.view(n, -1, h*w).permute(0,2,1).unsqueeze(2), unfold_up_key).squeeze(2) #n,h*w,3x3
        att = torch.softmax(energy, dim=-1)
        out = F.interpolate(out, (h,w), **self._up_kwargs)
        gate = self.att(out)  # keep the gate separate so it does not clobber the softmax attention
        unfold_out = F.unfold(out, 3, 1, 1, 1).permute(0,2,1).view(n, h*w, -1, 3*3)
        refined = torch.matmul(unfold_out, att.unsqueeze(3)).squeeze(3).permute(0,2,1).view(n,-1,h,w)
        out = gate*refined + (1-gate)*out

        # refine_out = self.val(out)
        # unfold_out = F.unfold(refine_out, 3, 2, 2, 1).permute(0,2,1).view(n, h*w, -1, 3*3)
        # refine_out = torch.matmul(unfold_out, att.unsqueeze(3)).squeeze(3).permute(0,2,1).view(n,-1,h,w)
        # out = self.relu(out + self.project(refine_out))
        return out
Code example #4
File: models.py Project: Omri-L/HQ_SOD_repo
    def forward(self, x):
        image, depth = x

        re_im = resample_data(image, self.factor)
        re_dp = resample_data(depth, self.factor)

        imkernel, imoffset = self.ImageKernel(re_im)
        depthkernel, depthoffset = self.DepthKernel(re_dp)

        weight = imkernel * depthkernel
        offset = imoffset * depthoffset

        ps = nn.PixelShuffle(4)
        weight = ps(weight)
        offset = ps(offset)

        if self.residual:
            weight -= torch.mean(weight, 1).unsqueeze(1).expand_as(weight)
        else:
            weight /= torch.sum(weight, 1).unsqueeze(1).expand_as(weight)

        b, h, w = image.size(0), image.size(2), image.size(3)
        k = self.filter_size
        r = self.kernel_size
        hw = h * w

        # weighted average
        # (b, 2*r**2, h, w) -> (b*hw, r, r, 2)
        offset = offset.permute(0, 2, 3, 1).contiguous().view(b * hw, r, r, 2)
        # (b, r**2, h, w) -> (b*hw, r**2, 1)
        weight = weight.permute(0, 2, 3, 1).contiguous().view(b * hw, r * r, 1)

        # (b*hw, r, r, 2)
        grid = grid_generator(k, r, b * hw)
        grid = grid.to(self.device)
        coord = grid + offset
        coord = (coord / k * 2) - 1

        # (b, k**2, hw) -> (b*hw, 1, k, k)
        depth_col = F.unfold(depth, k, padding=k // 2).permute(
            0, 2, 1).contiguous().view(b * hw, 1, k, k)

        # (b*hw, 1, k, k), (b*hw, r, r, 2) => (b*hw, 1, r^2)
        depth_sampled = F.grid_sample(depth_col, coord).view(b * hw, 1, -1)

        # (b*w*h, 1, r^2) x (b*w*h, r^2, 1) => (b, 1, h, w)
        out = torch.bmm(depth_sampled, weight).view(b, 1, h, w)

        if self.residual:
            out += depth

        out_sig = self.sigmoid(out)

        return out, out_sig
Code example #5
    def forward(self, input, weight):
        n, c, h, w = input.size()
        weight = weight.view(n, c, 9, h * w)
        weight = torch.abs(weight) / torch.sum(torch.abs(weight),
                                               dim=2).unsqueeze(2)

        x = input
        for i in range(min(self.max_step, max(h, w))):
            x = F.unfold(x, kernel_size=3, padding=1).view(n, c, 9, h * w)
            x = (x * weight).sum(2).view(n, c, h, w)
        return x
Code example #6
    def forward(self, c1, c2, out):
        n, c, h, w = c1.size()
        c1 = self.refine(c1)  # n, 64, h, w
        c2 = interpolate(c2, (h, w), **self._up_kwargs)
        c2 = self.refine2(c2)

        unfold_up_c2 = unfold(c2, 3, 1, 1,
                              1).permute(0, 2, 1).view(n, h * w, -1, 3 * 3)
        # torch.nn.functional.unfold(input, kernel_size, dilation=1, padding=0, stride=1)
        energy = torch.matmul(
            c1.view(n, -1, h * w).permute(0, 2, 1).unsqueeze(2),
            unfold_up_c2).squeeze(2)  #n,h*w,3x3
        att = torch.softmax(energy, dim=-1)
        out = interpolate(out, (h, w), **self._up_kwargs)
        unfold_out = unfold(out, 3, 1, 1,
                            1).permute(0, 2, 1).view(n, h * w, -1, 3 * 3)
        out = torch.matmul(unfold_out, att.unsqueeze(3)).squeeze(3).permute(
            0, 2, 1).view(n, -1, h, w)

        return out
Code example #7
File: framework.py Project: Tencent/TFace
    def forward(self, images):
        x_code, x_skip_code = self.encoder(images)

        dep = self.depth(x_skip_code)
        x_sim = self.similarity(x_skip_code)
        w = F.unfold(x_sim, kernel_size=1, stride=1, padding=0).permute(0, 2, 1)
        w_normed = w / (w * w + 1e-7).sum(dim=2, keepdim=True).sqrt()
        B, K = w.shape[:2]
        sim = torch.einsum('bij,bjk->bik', w_normed, w_normed.permute(0, 2, 1))
        
        return dep, sim
Code example #8
File: dropout.py Project: dqawami/mmaction2
def info_dropout(in_features,
                 kernel,
                 out_features,
                 drop_rate,
                 temperature=0.05,
                 eps=1e-12):
    assert isinstance(kernel, int)
    assert kernel % 2 == 1

    in_shape = in_features.size()
    assert len(in_shape) in (4, 5)

    with torch.no_grad():
        if len(in_shape) == 5:
            b, c, t, h, w = in_shape
            out_mask_shape = b, 1, t, h, w

            in_features = in_features.permute(0, 2, 1, 3, 4)
            b *= t
        else:
            b, c, h, w = in_shape
            out_mask_shape = b, 1, h, w
        in_features = in_features.reshape(-1, c, h, w)

        padding = (kernel - 1) // 2
        unfolded_features = F.unfold(in_features, kernel, padding=padding)
        unfolded_features = unfolded_features.view(b, c, kernel * kernel, -1)

        distances = ((unfolded_features -
                      in_features.view(-1, c, 1, h * w))**2).sum(dim=1)
        weights = (0.5 * distances / distances.mean(
            dim=(1, 2), keepdim=True).clamp_min(eps)).neg().exp()

        middle = kernel * kernel // 2
        log_info = (weights[:, :middle].sum(dim=1) +
                    weights[:, (middle + 1):].sum(dim=1) + eps).log()

        prob_weights = (1. / float(temperature) * log_info).exp() + eps
        probs = prob_weights / prob_weights.sum(dim=-1, keepdim=True)

        drop_num_samples = max(1, int(drop_rate * float(probs.size(-1))))
        drop_indices = torch.multinomial(probs,
                                         num_samples=drop_num_samples,
                                         replacement=True)

        out_mask = torch.ones_like(probs)
        out_mask[
            torch.arange(out_mask.size(0), device=out_mask.device).view(-1, 1),
            drop_indices] = 0.0

    out_scale = 1.0 / (1.0 - drop_rate)
    out = out_scale * out_features * out_mask.view(out_mask_shape)

    return out
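
A hypothetical call (not from the repository), assuming 4-D activations and passing the same tensor as in_features and out_features; the output keeps the input shape:

import torch

feats = torch.randn(2, 16, 8, 8)
dropped = info_dropout(feats, kernel=3, out_features=feats, drop_rate=0.2)
assert dropped.shape == feats.shape  # up to 20% of spatial positions zeroed, the rest rescaled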
Code example #9
File: pac.py Project: zebrajack/DAFStereoNets
    def forward(ctx,
                input,
                kernel,
                weight,
                bias=None,
                stride=1,
                padding=0,
                dilation=1,
                shared_filters=False):
        (bs, ch), in_sz = input.shape[:2], input.shape[2:]
        if kernel.size(1) > 1:
            raise ValueError(
                'Non-singleton channel is not allowed for kernel.')
        ctx.input_size = in_sz
        ctx.in_ch = ch
        ctx.kernel_size = tuple(weight.shape[-2:])
        ctx.dilation = _pair(dilation)
        ctx.padding = _pair(padding)
        ctx.stride = _pair(stride)
        ctx.shared_filters = shared_filters
        ctx.save_for_backward(
            input if
            (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]) else None,
            kernel if
            (ctx.needs_input_grad[0] or ctx.needs_input_grad[2]) else None,
            weight if
            (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]) else None)
        ctx._backend = type2backend[input.type()]

        cols = F.unfold(input, ctx.kernel_size, ctx.dilation, ctx.padding,
                        ctx.stride)

        in_mul_k = cols.view(bs, ch, *kernel.shape[2:]) * kernel

        # matrix multiplication, written as an einsum to avoid repeated view() and permute()
        if shared_filters:
            output = torch.einsum('ijklmn,zykl->ijmn', (in_mul_k, weight))
        else:
            output = torch.einsum('ijklmn,ojkl->iomn', (in_mul_k, weight))

        if bias is not None:
            output += bias.view(1, -1, 1, 1)

        #return output.clone()  # TODO understand why a .clone() is needed here
        #NOTE: changed by CCJ:
        #I have to remove the '.clone()' here, otherwise the error will be encountered:
        # * RuntimeError: one of the variables needed for gradient computation has been
        # * modified by an inplace operation: [torch.cuda.FloatTensor [2, 64, 64, 128]],
        # * which is output 0 of SliceBackward, is at version 109; expected version
        # * 108 instead. Hint: the backtrace further above shows the operation that failed
        # * to compute its gradient. The variable in question was changed in there
        # or anywhere later. Good luck!

        return output  # TODO understand why a .clone() is NOT needed here
Code example #10
    def forward(self, x):
        x = nf.unfold(x, kernel_size=self.window_size, stride=self.window_size)
        # x = (x[:, 1] * torch.tensor([2], dtype=torch.float32) + x[:, 0]) * (x[:, 2] * torch.tensor([2], dtype=torch.float32) + x[:, 3])
        x = torch.cat([
            self.evaluate_node(x[:, :, i_node], self.inputs_list[i_node],
                               self.all_controls[i_node],
                               self.control_list[i_node])
            for i_node, controls in enumerate(self.control_list)
        ],
                      dim=1)
        return x.view(-1, self.out_size, self.out_size)
Code example #11
File: network.py Project: xut006/aitom
    def forward(self, *input):
        x = input[0]
        size = x.size()
        kernel = self.renet(x)
        kernel = F.softmax(kernel, 1)
        kernel = kernel.reshape(size[0], 100, -1)  # ([9, 100, 16])
        x = F.unfold(x, [1, 1], padding=[3, 3])  # (x, [10, 10], dilation=[3, 3])
        x = x.reshape(size[0], size[1], 10 * 10)
        x = torch.matmul(x, kernel)
        x = x.reshape(size[0], size[1], size[2], size[3])
        return x
Code example #12
def batch_conv2d_local(input,
                       weight,
                       bias=None,
                       padding=0,
                       stride=1,
                       dilation=1):
    if input.dim() != 4:
        raise NotImplementedError(
            "Input Error: Only 4D input Tensors supported (got {}D)".format(
                input.dim()))
    if weight.dim() != 7:
        # outH x outW x outC x inC x kH x kW
        raise NotImplementedError(
            "Input Error: Only 7D weight Tensors supported (got {}D)".format(
                weight.dim()))

    B, outH, outW, outC, inC, kH, kW = weight.size()
    kernel_size = (kH, kW)

    # N x [inC * kH * kW] x [outH * outW]
    cols = F.unfold(input,
                    kernel_size,
                    dilation=dilation,
                    padding=padding,
                    stride=stride)

    # cols: [B,inC*kH*kW,outH*outW] = [32,1*5*5,480*640]
    cols = cols.view(cols.size(0), cols.size(1), cols.size(2),
                     1).permute(0, 2, 3, 1)
    # cols: [B,outH*outW,1,inC*kH*kW] = [32,307200,1,25]
    # print (cols.shape)

    # weight: [B,outH*outW,outC,inC*kH*kW] = [32,307200,1,25]
    weight = weight.view(B, outH * outW, outC, inC * kH * kW)
    # print (weight.shape)
    weight = weight.permute(0, 1, 3, 2)
    # weight: [B,outH*outW,inC*kH*kW,outC] = [32,307200,25,1]

    #  [32,307200,1,25] * [32,307200,25,1] -> [32,307200,1,1]
    out = torch.matmul(cols, weight)
    # print (out.shape)
    out = out.view(cols.size(0), outH, outW, outC).permute(0, 3, 1, 2)
    #out: [32,1,480,640]
    if bias is not None:
        out = out + bias.expand_as(out)
    return out
Code example #13
    def backward(ctx, grad_output):
        grad_input = grad_kernel = grad_weight = grad_bias = None
        (bs, out_ch), out_sz = grad_output.shape[:2], grad_output.shape[2:]
        in_ch = ctx.in_ch
        pad = [(k - 1) * d - p
               for (k, d, p) in zip(ctx.kernel_size, ctx.dilation, ctx.padding)
               ]
        pad = [(p, p + op) for (p, op) in zip(pad, ctx.output_padding)]

        input, kernel, weight = ctx.saved_tensors
        if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
            if ctx.shared_filters:
                grad_in_mul_k = grad_output.view(bs, out_ch, 1, 1, out_sz[0], out_sz[1]) \
                                * weight.view(ctx.kernel_size[0], ctx.kernel_size[1], 1, 1)
            else:
                grad_in_mul_k = torch.einsum('iomn,jokl->ijklmn',
                                             (grad_output, weight))
        if ctx.needs_input_grad[1] or ctx.needs_input_grad[2]:
            w = input.new_ones((in_ch, 1, 1, 1))
            x = F.conv_transpose2d(input, w, stride=ctx.stride, groups=in_ch)
            x = F.pad(x, (pad[1][0], pad[1][1], pad[0][0], pad[0][1]))
            in_cols = F.unfold(x, ctx.kernel_size, ctx.dilation, _pair(0),
                               _pair(1))
            in_cols = in_cols.view(bs, in_ch, ctx.kernel_size[0],
                                   ctx.kernel_size[1], out_sz[0], out_sz[1])
        if ctx.needs_input_grad[0]:
            grad_im2col_output = grad_in_mul_k * kernel
            grad_im2col_output = grad_im2col_output.view(
                bs, -1, out_sz[0] * out_sz[1])
            im2col_input_sz = [
                o + (k - 1) * d
                for (o, k, d) in zip(out_sz, ctx.kernel_size, ctx.dilation)
            ]

            grad_input = F.fold(grad_im2col_output, im2col_input_sz[:2],
                                ctx.kernel_size, ctx.dilation, 0, 1)
            grad_input = grad_input[:, :, pad[0][0]:-pad[0][1]:ctx.stride[0],
                                    pad[1][0]:-pad[1][1]:ctx.stride[1]]
        if ctx.needs_input_grad[1]:
            grad_kernel = in_cols * grad_in_mul_k
            grad_kernel = grad_kernel.sum(dim=1, keepdim=True)
        if ctx.needs_input_grad[2]:
            in_mul_k = in_cols * kernel
            if ctx.shared_filters:
                grad_weight = torch.einsum('ijmn,ijklmn->kl',
                                           (grad_output, in_mul_k))
                grad_weight = grad_weight.view(
                    1, 1, ctx.kernel_size[0], ctx.kernel_size[1]).contiguous()
            else:
                grad_weight = torch.einsum('iomn,ijklmn->jokl',
                                           (grad_output, in_mul_k))
        if ctx.needs_input_grad[3]:
            grad_bias = torch.einsum('iomn->o', (grad_output, ))
        return grad_input, grad_kernel, grad_weight, grad_bias, None, None, None, None, None
Code example #14
File: layers.py Project: wentaozhu/D-TDNN
    def forward(self, x):
        if self.impl == 'conv':
            return F.conv1d(x, self.weight, self.bias, self.stride,
                            self.padding, self.dilation)
        else:
            x = F.pad(x, self.padding).unsqueeze(1)
            x = F.unfold(x, (self.in_channels, ) + self.kernel_size,
                         dilation=(1, ) + self.dilation,
                         stride=(1, ) + self.stride)
            return F.linear(x.transpose(1, 2), self.weight,
                            self.bias).transpose(1, 2)
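
The else branch above computes a 1-D convolution as unfold over an (in_channels, kernel) window followed by a linear map. A minimal sketch (not from the repository) checking that this matches F.conv1d in the plain case (stride 1, dilation 1, no padding):

import torch
import torch.nn.functional as F

B, C_in, C_out, T, K = 2, 4, 6, 10, 3
x = torch.randn(B, C_in, T)
weight = torch.randn(C_out, C_in, K)
bias = torch.randn(C_out)

ref = F.conv1d(x, weight, bias)                    # (B, C_out, T - K + 1)

cols = F.unfold(x.unsqueeze(1), (C_in, K))         # (B, C_in * K, T - K + 1)
out = F.linear(cols.transpose(1, 2), weight.view(C_out, -1),
               bias).transpose(1, 2)

assert torch.allclose(ref, out, atol=1e-5)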
Code example #15
File: utils.py Project: oleksost/nngeometry
def per_example_grad_conv(mod, x, gy):
    ks = (mod.weight.size(2), mod.weight.size(3))
    gy_s = gy.size()
    bs = gy_s[0]
    x_unfold = F.unfold(x,
                        kernel_size=ks,
                        stride=mod.stride,
                        padding=mod.padding,
                        dilation=mod.dilation)
    x_unfold_s = x_unfold.size()
    return torch.bmm(gy.view(bs, gy_s[1], -1),
                     x_unfold.view(bs, x_unfold_s[1], -1).permute(0, 2, 1))
Code example #16
File: pac.py Project: zebrajack/DAFStereoNets
def nd2col(input_nd,
           kernel_size,
           stride=1,
           padding=0,
           output_padding=0,
           dilation=1,
           transposed=False,
           use_pyinn_if_possible=False):
    """
    Shape:
        - Input: :math:`(N, C, L_{in})`
        - Output: :math:`(N, C, *kernel_size, *L_{out})` where
          :math:`L_{out} = floor((L_{in} + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)` for non-transposed
          :math:`L_{out} = (L_{in} - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1 + output_padding` for transposed
    """
    n_dims = len(input_nd.shape[2:])
    kernel_size = (kernel_size, ) * n_dims if isinstance(
        kernel_size, Number) else kernel_size
    stride = (stride, ) * n_dims if isinstance(stride, Number) else stride
    padding = (padding, ) * n_dims if isinstance(padding, Number) else padding
    output_padding = (output_padding, ) * n_dims if isinstance(
        output_padding, Number) else output_padding
    dilation = (dilation, ) * n_dims if isinstance(dilation,
                                                   Number) else dilation

    if transposed:
        assert n_dims == 2, 'Only 2D is supported for fractional strides.'
        w_one = input_nd.new_ones(1, 1, 1, 1)
        pad = [(k - 1) * d - p
               for (k, d, p) in zip(kernel_size, dilation, padding)]
        input_nd = F.conv_transpose2d(input_nd, w_one, stride=stride)
        input_nd = F.pad(input_nd, (pad[1], pad[1] + output_padding[1], pad[0],
                                    pad[0] + output_padding[0]))
        stride = _pair(1)
        padding = _pair(0)

    (bs, nch), in_sz = input_nd.shape[:2], input_nd.shape[2:]
    out_sz = tuple([
        ((i + 2 * p - d * (k - 1) - 1) // s + 1)
        for (i, k, d, p,
             s) in zip(in_sz, kernel_size, dilation, padding, stride)
    ])
    # Use PyINN if possible (about 15% faster) TODO confirm the speed-up
    if n_dims == 2 and dilation == 1 and has_pyinn and torch.cuda.is_available(
    ) and use_pyinn_if_possible:
        # Added by CCJ:
        # NOTE: require dilation == 1 for pyinn_im2col,
        output = P.im2col(input_nd, kernel_size, stride, padding)
    else:
        output = F.unfold(input_nd, kernel_size, dilation, padding, stride)
        out_shape = (bs, nch) + tuple(kernel_size) + out_sz
        output = output.view(*out_shape).contiguous()
    return output
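
A hypothetical shape check for the non-transposed path (assuming the module-level imports of pac.py, e.g. F and has_pyinn, are in scope); the output layout is (N, C, kH, kW, outH, outW):

import torch

x = torch.randn(2, 3, 8, 8)
cols = nd2col(x, kernel_size=3, stride=1, padding=1)
assert cols.shape == (2, 3, 3, 3, 8, 8)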
Code example #17
    def calculateWindows(self, x):
        windows = F.unfold(x,
                           kernel_size=self.kernel_size,
                           padding=self.padding,
                           dilation=self.dilation,
                           stride=self.stride)

        windows = windows.transpose(1, 2).contiguous().view(
            -1, x.shape[1], self.kernal_size_number)
        windows = windows.transpose(0, 1)

        return windows
Code example #18
def feature_region_reg_loss(gt_f_pairs):
    inp = np.zeros((15, 15), dtype=np.float32)
    inp[7, 7] = 1
    gaussian_kernel = fi.gaussian_filter(inp, 3.5)
    target = torch.cuda.FloatTensor(gaussian_kernel).view(1, 15 * 15, 1)
    target = (1.0 - target / target.max()) * 2.0
    loss = 0.0
    for (f_a, gt_f_wrap_b) in gt_f_pairs:
        N, C, H, W = f_a.shape
        unfold_gt_f_wrap_b = F.unfold(
            gt_f_wrap_b, kernel_size=15, padding=7,
            stride=4).view(N, C, 15 * 15,
                           H * W // 16)  # (N, C, 15*15, num_patches)
        unfold_f_a = F.unfold(f_a, kernel_size=1, padding=0, stride=4).view(
            N, C, 1, H * W // 16)  # (N, C, 1, num_patches)
        e = torch.norm(unfold_f_a - unfold_gt_f_wrap_b, p=2,
                       dim=1)  # (N, 15*15, num_patches)
        meann = torch.mean(e, 1, keepdim=True)
        e = e / meann
        loss += torch.mean((target - e)**2)
    return loss
Code example #19
File: image.py Project: Chesterfun/SiamPose
def unfold_mask(mask, idx):

    # mask = mask.transpose(1, 2, 0)
    mask = np.expand_dims(mask, -1)
    mask = totensor(mask)
    mask = mask.unsqueeze(0)
    # idx = idx[0] * 32 + idx[1]
    mask_uf = F.unfold(mask, (256, 256), padding=0, stride=8)
    mask_uf = torch.transpose(mask_uf, 1, 2).contiguous().view(-1, 256, 256)
    mask_uf = mask_uf[idx]
    mask_uf = mask_uf.numpy()
    return mask_uf  # , mask_uf_temp
Code example #20
    def _make_block_mask(self,
                         data,
                         sparse_block_shape,
                         zeros_per_block,
                         mask=None):
        r"""Creates a block-level mask.

        Block-level mask is described as a mask, where the granularity of sparsification of the
        largest patch is the sparse_block_shape. That means that for a given mask and a
        sparse_block_shape, the sparsity is computed only within a patch of a size sparse_block_shape.

        In this context the `zeros_per_block` describes the number of zeroed-out elements within a patch.
        """
        if mask is None:
            mask = torch.ones(data.shape, device=data.device)
        h, w = data.shape[-2:]
        block_h, block_w = sparse_block_shape
        dh = (block_h - h % block_h) % block_h
        dw = (block_w - w % block_w) % block_w
        values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)

        if values_per_block == zeros_per_block:
            # Everything should be sparsified
            mask.data = torch.zeros_like(mask)
            return mask

        # create a new padded tensor like data (to match the block_shape)
        padded_data = torch.ones(h + dh,
                                 w + dw,
                                 dtype=data.dtype,
                                 device=data.device)
        padded_data.fill_(torch.nan)
        padded_data[:h, :w] = data
        unfolded_data = F.unfold(padded_data[None, None, :],
                                 kernel_size=sparse_block_shape,
                                 stride=sparse_block_shape)

        # Temp reshape for mask
        mask_reshape = mask.reshape(unfolded_data.shape)
        _, sorted_idx = torch.topk(unfolded_data,
                                   k=zeros_per_block,
                                   dim=1,
                                   largest=False)

        self._scatter_fold_block_mask(dim=1,
                                      indices=sorted_idx,
                                      output_shape=padded_data.shape,
                                      block_shape=sparse_block_shape,
                                      mask=mask_reshape)

        mask.data = mask_reshape.squeeze().reshape(
            mask.shape)[:h, :w].contiguous()
        return mask
Code example #21
    def extract_region_vector(self, x):
        """
        Downsamples and extracts square regions from x.
        Returns the flattened vectors of length radius*radius.
        """

        x = self.downsample(x)
        stride = self.stride if self.downsampling_method == 'region-extraction' else 1

        x_regions = F.unfold(x, kernel_size=self.radius, stride=stride)
        x_regions = x_regions.view((*x.shape[:2], self.radius ** 2, -1))
        return x_regions
Code example #22
    def evolve(self, driven_factors, iter_times=None):
        assert (
            np.all(driven_factors.shape[:-1] == self.state_size)
            and driven_factors.shape[-1] == self.num_classes
        )
        self.iter_cnt = 0
        iter_times = self.iter_times if iter_times is None else iter_times

        driven_factors_view = torch.from_numpy(driven_factors).float().view(-1, self.num_classes)

        # while not self.stop_criterion:
        while self.iter_cnt < iter_times:
            cell_state_view = F.unfold(self.cell_state, self.neig_size, padding=self.center).transpose(1, 2)
            cell_state_view = cell_state_view.view(-1, np.prod(self.neig_size))
            # random factor
            factor_eps = torch.rand((cell_state_view.size(0), self.num_classes))
            # factor_eps = 1 / (1 + torch.exp(-3 * torch.rand((cell_state_view.size(0), self.num_classes))))
            factor_eps = factor_eps / factor_eps.sum(dim=1, keepdim=True).clamp_min(0.001)
            # suitable factor
            factor_suit = driven_factors_view
            # adjacent influence factor
            factor_neig = (
                torch.ones_like(driven_factors_view).scatter_add_(
                    1,
                    cell_state_view[:, self.neig_indices].long(),
                    torch.from_numpy(self.local_weight[None])
                    .expand_as(cell_state_view[:, self.neig_indices])
                    .float(),
                )
                / self.local_weight.sum()
            )
            # factor_neig = 1 / (1 + torch.exp(-factor_neig))
            # multiple all factors
            transition = (
                (factor_eps * factor_neig * factor_suit)
                .transpose(0, 1)
                .view(1, self.num_classes, *self.state_size)
            )
            mask = transition.max((1), keepdim=True)[0] > 0.05  # 04222kernel 0.05, 04223kernel 0.0
            # mask = (
            #     factor_suit.transpose(0, 1)
            #     .view(1, self.num_classes, *self.state_size)[:, 1:]
            #     .max((1), keepdim=True)[0]
            #     >= 0.4
            # )
            if self.mask is not None:
                mask = mask & self.mask
            # transist to the new state
            self.cell_state[mask] = torch.argmax(transition, dim=1, keepdim=True)[mask].float()
            # step once
            self.step_one()

        return self.cell_state.char().squeeze().numpy()
Code example #23
File: kfac.py Project: kngwyu/Rainy
    def __xxt(self, group: dict, state: dict) -> None:
        """Computes E[xi xj]^T and memorizes it"""
        mod = group["mod"]
        x = self.state[mod]["x"]
        if group["layer_type"] is Layer.CONV2D:
            x = F.unfold(x, mod.kernel_size, padding=mod.padding, stride=mod.stride)
            x = x.data.transpose(1, 0).reshape(x.size(1), -1)
        else:
            x = x.data.t()
        if mod.bias is not None:
            x = torch.cat([x, torch.ones_like(x[:1])])
        self.__average(state, "xxt", x, float(x.size(1)))
Code example #24
File: DepthPoseNet.py Project: weihaosky/dro-sfm
    def upsample_depth(self, depth, mask, ratio=8):
        """ Upsample depth field [H/ratio, W/ratio, 2] -> [H, W, 2] using convex combination """
        N, _, H, W = depth.shape
        mask = mask.view(N, 1, 9, ratio, ratio, H, W)
        mask = torch.softmax(mask, dim=2)

        up_flow = F.unfold(depth, [3,3], padding=1)
        up_flow = up_flow.view(N, 1, 9, 1, 1, H, W)

        up_flow = torch.sum(mask * up_flow, dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 1, ratio*H, ratio*W)
Code example #25
def embedding_concat(x, y):
    B, C1, H1, W1 = x.size()
    _, C2, H2, W2 = y.size()
    s = int(H1 / H2)
    x = F.unfold(x, kernel_size=s, dilation=1, stride=s)
    x = x.view(B, C1, -1, H2, W2)
    z = torch.zeros(B, C1 + C2, x.size(2), H2, W2)
    for i in range(x.size(2)):
        z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)
    z = z.view(B, -1, H2 * W2)
    z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)
    return z
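
A hypothetical usage with two feature maps from adjacent backbone stages; the result keeps the finer spatial resolution and concatenates the channels:

import torch

x = torch.randn(2, 64, 56, 56)
y = torch.randn(2, 128, 28, 28)
z = embedding_concat(x, y)
assert z.shape == (2, 64 + 128, 56, 56)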
Code example #26
File: model.py Project: OuOnya/EASE
    def forward(self, x):
        '''
        Shape:
        - Input:
            x  :math:`(N, 1, Seq, hidden_size)`

        - Output: :math:`(N, Seq, frame_seq * hidden_size)`
        '''

        seq_len = x.shape[-2]

        return F.unfold(x, kernel_size=(seq_len, 1), padding=self.padding)
Code example #27
    def forward(self, x):
        if self.padding:
            x = self.Padding(x, self.padding_size[0], self.padding_size[1])
        b, c, h, w = x.size()
        feats_in_one_kernel = self.kernel_size[0] * self.kernel_size[1]
        x = F.unfold(x, kernel_size=self.kernel_size, stride=self.stride)
        assert self.__C.PATCH_NUMS == h * w // feats_in_one_kernel
        x = x.view(b, c, feats_in_one_kernel, self.__C.PATCH_NUMS)  # b,c,4,160
        x = self.Patch_Dropout(x, self.__C)
        x = x.view(b, c, -1)
        x = x.transpose(1, 2)
        return x
Code example #28
File: raft.py Project: zhwei1688/FGVC
    def upsample_flow(self, flow, mask):
        """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
        N, _, H, W = flow.shape
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        mask = torch.softmax(mask, dim=2)

        up_flow = F.unfold(8 * flow, [3,3], padding=1)
        up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)

        up_flow = torch.sum(mask * up_flow, dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 2, 8*H, 8*W)
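
Since self is unused in the body, the convex-upsampling steps can be exercised directly on dummy tensors; a hypothetical shape check:

import torch
import torch.nn.functional as F

N, H, W = 1, 6, 9
flow = torch.randn(N, 2, H, W)
mask = torch.randn(N, 9 * 8 * 8, H, W)             # one 3x3 weight set per upsampled pixel
mask = torch.softmax(mask.view(N, 1, 9, 8, 8, H, W), dim=2)

up_flow = F.unfold(8 * flow, [3, 3], padding=1).view(N, 2, 9, 1, 1, H, W)
up_flow = torch.sum(mask * up_flow, dim=2).permute(0, 1, 4, 2, 5, 3)
assert up_flow.reshape(N, 2, 8 * H, 8 * W).shape == (N, 2, 48, 72)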
Code example #29
    def forward_pt_manualhwc(conv, x):
        y_height, y_width = TestConvolution.calc_output_dims(conv, x)
        x = F.unfold(x, conv.kernel_size, conv.dilation, conv.padding,
                     conv.stride)
        x = x.view(x.shape[0], conv.in_channels, *conv.kernel_size,
                   x.shape[2]).permute(0, 2, 3, 1,
                                       4).reshape(x.shape[0], -1, x.shape[2])
        w = conv.weight.permute(0, 2, 3, 1).reshape(1, conv.weight.shape[0],
                                                    -1)
        y = w.bmm(x)
        y = y.view(y.shape[0], y.shape[1], y_height, y_width)
        return y.cpu()
Code example #30
def select_gt_img(mask, weight, g_sz=127):

    weight = weight.view(-1)
    pos = Variable(weight.data.eq(1).nonzero().squeeze())
    if pos.nelement() == 0: return mask.sum() * 0

    mask_uf = F.unfold(mask, (g_sz, g_sz), padding=32, stride=8)
    mask_uf = torch.transpose(mask_uf, 1, 2).contiguous().view(-1, g_sz * g_sz)
    mask_uf = torch.index_select(mask_uf, 0, pos)
    mask_uf = mask_uf.view(-1, 1, g_sz, g_sz)

    return mask_uf
Code example #31
File: convlocal.py Project: chriscremer/Other_Code
def conv2d_local(input, weight, bias=None, padding=0, stride=1, dilation=1):
    if input.dim() != 4:
        raise NotImplementedError("Input Error: Only 4D input Tensors supported (got {}D)".format(input.dim()))
    if weight.dim() != 6:
        # outH x outW x outC x inC x kH x kW
        raise NotImplementedError("Input Error: Only 6D weight Tensors supported (got {}D)".format(weight.dim()))
 
    outH, outW, outC, inC, kH, kW = weight.size()
    kernel_size = (kH, kW)
 
    # N x [inC * kH * kW] x [outH * outW]
    cols = F.unfold(input, kernel_size, dilation=dilation, padding=padding, stride=stride)
    cols = cols.view(cols.size(0), cols.size(1), cols.size(2), 1).permute(0, 2, 3, 1)
 
    out = torch.matmul(cols, weight.view(outH * outW, outC, inC * kH * kW).permute(0, 2, 1))
    out = out.view(cols.size(0), outH, outW, outC).permute(0, 3, 1, 2)
 
    if bias is not None:
        out = out + bias.expand_as(out)
    return out
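
A hypothetical sanity check: when the same kernel is copied to every output location, conv2d_local should reproduce a plain F.conv2d (no bias):

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8)
w = torch.randn(4, 3, 3, 3)                        # outC, inC, kH, kW
outH = outW = 6                                    # (8 - 3) // 1 + 1
w_local = w[None, None].expand(outH, outW, 4, 3, 3, 3).contiguous()

ref = F.conv2d(x, w)
out = conv2d_local(x, w_local)
assert torch.allclose(ref, out, atol=1e-5)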
Code example #32
File: convcrf.py Project: aaasss0636/ConvCRF
    def _compute_gaussian(self, input, gaussian, norm=None):

        if norm is not None:
            input = input * norm

        shape = input.shape
        num_channels = shape[1]
        bs = shape[0]

        if self.blur > 1:
            off_0 = (self.blur - self.npixels[0] % self.blur) % self.blur
            off_1 = (self.blur - self.npixels[1] % self.blur) % self.blur
            pad_0 = int(math.ceil(off_0 / 2))
            pad_1 = int(math.ceil(off_1 / 2))
            input = torch.nn.functional.avg_pool2d(input,
                                                   kernel_size=self.blur,
                                                   padding=(pad_0, pad_1),
                                                   count_include_pad=False)
            npixels = [math.ceil(self.npixels[0] / self.blur),
                       math.ceil(self.npixels[1] / self.blur)]
            assert(npixels[0] == input.shape[2])
            assert(npixels[1] == input.shape[3])
        else:
            npixels = self.npixels

        if self.verbose:
            show_memusage(name="Init")

        if self.pyinn:
            input_col = P.im2col(input, self.filter_size, 1, self.span)
        else:
            # An alternative implementation of im2col.
            #
            # This implementation uses the torch 0.4 im2col operation.
            # It was not available when we ran the experiments published in
            # our paper, so it has seen less testing.
            #
            # It is around 20% slower than the pyinn implementation, but it is
            # easier to use because it removes a dependency.
            input_unfold = F.unfold(input, self.filter_size, 1, self.span)
            input_unfold = input_unfold.view(
                bs, num_channels, self.filter_size, self.filter_size,
                npixels[0], npixels[1])
            input_col = input_unfold

        k_sqr = self.filter_size * self.filter_size

        if self.verbose:
            show_memusage(name="Im2Col")

        product = gaussian * input_col
        if self.verbose:
            show_memusage(name="Product")

        product = product.view([bs, num_channels,
                                k_sqr, npixels[0], npixels[1]])

        message = product.sum(2)

        if self.verbose:
            show_memusage(name="FinalNorm")

        if self.blur > 1:
            in_0 = self.npixels[0]
            in_1 = self.npixels[1]
            message = message.view(bs, num_channels, npixels[0], npixels[1])
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # Suppress warning regarding corner alignment
                message = torch.nn.functional.upsample(message,
                                                       scale_factor=self.blur,
                                                       mode='bilinear')

            message = message[:, :, pad_0:pad_0 + in_0, pad_1:in_1 + pad_1]
            message = message.contiguous()

            message = message.view(shape)
            assert(message.shape == shape)

        if norm is not None:
            message = norm * message

        return message