Code example #1
def ndarray_conv2d(inputs: np.ndarray,
                   weight: np.ndarray,
                   bias=None,
                   padding=None):
    if padding:
        pad_width = ((0, 0), (0, 0), (padding[0], padding[0]), (padding[1],
                                                                padding[1]))
        inputs = np.lib.pad(inputs,
                            pad_width,
                            mode='constant',
                            constant_values=0)

    mini_batch, in_channel_1, iH, iW = inputs.shape
    out_channel, in_channel_2, kH, kW = weight.shape
    need_bias = bias is not None
    assert in_channel_1 == in_channel_2, "in_channel of input and weight must be equal"
    if need_bias:
        assert bias.size == out_channel, "out_channel of bias and weight must be equal"

    # oH = iH - kH + 1
    # oW = iW - kW + 1
    # out_puts = np.empty((mini_batch, out_channel, oH, oW))
    # To improve efficiency, we need to control what ends up in the outermost loop
    # Looping over the 3rd and 4th dimensions is unavoidable; only the 1st and 2nd dimensions can be optimized
    # The outermost loop should have the smallest number of iterations
    if need_bias:
        torch_out_puts = torch.conv2d(input=torch.tensor(inputs),
                                      weight=torch.tensor(weight),
                                      bias=torch.tensor(bias))
    else:
        torch_out_puts = torch.conv2d(input=torch.tensor(inputs),
                                      weight=torch.tensor(weight.copy()))

    return torch_out_puts.numpy()
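The comments above describe a loop-based NumPy implementation, but the body ends up delegating to torch.conv2d. A minimal sketch of the loop strategy those comments describe (only the two spatial output dimensions are looped over; batch and channels are vectorized), assuming the same NCHW/OIHW layouts and no stride, padding, or dilation:

import numpy as np

def naive_conv2d(inputs, weight, bias=None):
    # inputs: (N, C_in, iH, iW), weight: (C_out, C_in, kH, kW)
    N, C_in, iH, iW = inputs.shape
    C_out, _, kH, kW = weight.shape
    oH, oW = iH - kH + 1, iW - kW + 1
    out = np.zeros((N, C_out, oH, oW), dtype=np.result_type(inputs, weight))
    # only the two spatial output dimensions are looped over; the batch and
    # channel dimensions are reduced in one vectorized tensordot per position
    for i in range(oH):
        for j in range(oW):
            patch = inputs[:, :, i:i + kH, j:j + kW]          # (N, C_in, kH, kW)
            out[:, :, i, j] = np.tensordot(patch, weight,
                                           axes=([1, 2, 3], [1, 2, 3]))
    if bias is not None:
        out += bias.reshape(1, C_out, 1, 1)
    return out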
Code example #2
def main_conv(input, weight, bias=None, padding=None):
    start = time.time()
    out = conv2d(input, weight, bias=bias, padding=padding)
    Timer.show_time((time.time() - start), "Numpy conv2d forward")
    g = rand_like(out)
    start = time.time()
    out.backward(g)
    Timer.show_time((time.time() - start), "Numpy conv2d backward")

    if bias is not None:
        t_input, t_weight, t_bias, v = to_torch([input, weight, bias, g])
        start = time.time()
        t_out = torch.conv2d(t_input, t_weight, bias=t_bias, padding=padding)
        t_out.backward(v)
    else:
        t_input, t_weight, v = to_torch([input, weight, g])
        start = time.time()
        t_out = torch.conv2d(t_input, t_weight, padding=padding)
        t_out.backward(v)

    Timer.show_time((time.time() - start), "torch conv2d")
    check(out, t_out, grad=False)

    check(input, t_input)
    check(weight, t_weight)
    if bias is not None:
        check(bias, t_bias)
Code example #3
def main_conv(input, weight, bias=None, stride=None, padding=None):
    start = time.time()
    out = conv2d(input, weight, bias=bias, stride=stride, padding=padding)
    g = rand_like(out)

    out.backward(g)
    Timer.show_time((time.time() - start), "Numpy conv2d")

    if bias is not None:
        t_input, t_weight, t_bias, v = to_torch([input, weight, bias, g])
        start = time.time()
        t_out = torch.conv2d(t_input,
                             t_weight,
                             bias=t_bias,
                             stride=stride,
                             padding=padding)
        t_out.backward(v)
    else:
        t_input, t_weight, v = to_torch([input, weight, g])
        start = time.time()
        t_out = torch.conv2d(t_input, t_weight, stride=stride, padding=padding)
        t_out.backward(v)

    Timer.show_time((time.time() - start), "torch conv2d")

    check(out, t_out, grad=False, prefix="out", print_max=True)

    check(input, t_input, prefix="input grad", print_max=True)
    check(weight, t_weight, prefix="weight grad", print_max=True)
    if bias is not None:
        check(bias, t_bias, prefix="bias grad", print_max=True)
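conv2d, rand_like, to_torch, Timer, and check are project helpers that are not shown. A minimal sketch of what check might look like, assuming the NumPy-side Tensor exposes .data and .grad arrays (the signature below is hypothetical):

import numpy as np

def check(a, t, grad=True, prefix="", print_max=False, atol=1e-5):
    # hypothetical helper: compare a NumPy-autograd Tensor `a` against a torch tensor `t`
    lhs = a.grad if grad else a.data
    rhs = (t.grad if grad else t).detach().numpy()
    diff = np.abs(lhs - rhs)
    if print_max:
        print(f"{prefix} max abs diff: {diff.max():.3e}")
    assert np.allclose(lhs, rhs, atol=atol), f"{prefix} mismatch"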
Code example #4
    def relprop(self, R, alpha=1):
        if self.padd_output.shape[1] == 3:
            pw = torch.clamp(self.weight, min=0)
            nw = torch.clamp(self.weight, max=0)
            X = self.padd_output
            L = self.padd_output * 0 + \
                torch.min(torch.min(torch.min(self.padd_output, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3,
                          keepdim=True)[0]
            H = self.padd_output * 0 + \
                torch.max(torch.max(torch.max(self.padd_output, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3,
                          keepdim=True)[0]
            Za = torch.conv2d(X, self.weight, bias=None, stride=self.stride, padding=self.padding) - \
                 torch.conv2d(L, pw, bias=None, stride=self.stride, padding=self.padding) - \
                 torch.conv2d(H, nw, bias=None, stride=self.stride, padding=self.padding) + 1e-9

            S = R / Za
            C = X * self.gradprop2(S, self.weight) - L * self.gradprop2(
                S, pw) - H * self.gradprop2(S, nw)
            R = C
        else:
            beta = alpha - 1
            pw = torch.clamp(self.weight, min=0)
            nw = torch.clamp(self.weight, max=0)
            px = torch.clamp(self.padd_output, min=0)
            nx = torch.clamp(self.padd_output, max=0)

            # def f(w1, w2, x1, x2):
            #     Z1 = F.conv2d(x1, w1, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups)
            #     Z2 = F.conv2d(x2, w2, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups)
            #     S1 = safe_divide(R, Z1)
            #     S2 = safe_divide(R, Z2)
            #     C1 = x1 * self.gradprop(Z1, x1, S1)[0]
            #     C2 = x2 * self.gradprop(Z2, x2, S2)[0]
            #      return C1 + C2

            def f(w1, w2, x1, x2):
                Z1 = F.conv2d(x1,
                              w1,
                              bias=self.bias,
                              stride=self.stride,
                              padding=self.padding,
                              groups=self.groups)
                Z2 = F.conv2d(x2,
                              w2,
                              bias=self.bias,
                              stride=self.stride,
                              padding=self.padding,
                              groups=self.groups)
                Z = Z1 + Z2
                S = safe_divide(R, Z)
                C1 = x1 * self.gradprop(Z1, x1, S)[0]
                C2 = x2 * self.gradprop(Z2, x2, S)[0]
                return C1 + C2

            activator_relevances = f(pw, nw, px, nx)
            inhibitor_relevances = f(nw, pw, px, nx)

            R = alpha * activator_relevances - beta * inhibitor_relevances
        R = self.static_padding.relprop(R)
        return R
Code example #5
File: test_precision.py Project: vineeth14/PySyft
def test_torch_conv2d(workers):
    bob, alice, james = (workers["bob"], workers["alice"], workers["james"])
    im = torch.Tensor(
        [
            [
                [[0.5, 1.0, 2.0], [3.5, 4.0, 5.0], [6.0, 7.5, 8.0]],
                [[10.0, 11.0, 12.0], [13.0, 14.5, 15.0], [16.0, 17.5, 18.0]],
            ]
        ]
    )
    w = torch.Tensor(
        [
            [[[0.0, 3.0], [1.5, 1.0]], [[2.0, 2.0], [2.5, 2.0]]],
            [[[-0.5, -1.0], [-2.0, -1.5]], [[0.0, 0.0], [0.0, 0.5]]],
        ]
    )
    bias = torch.Tensor([-1.3, 15.0])

    im_fp = im.fix_precision()
    w_fp = w.fix_precision()
    bias_fp = bias.fix_precision()

    res0 = torch.conv2d(im_fp, w_fp, bias=bias_fp, stride=1).float_precision()
    res1 = torch.conv2d(
        im_fp, w_fp[:, 0:1].contiguous(), bias=bias_fp, stride=2, padding=3, dilation=2, groups=2
    ).float_precision()

    expected0 = torch.conv2d(im, w, bias=bias, stride=1)
    expected1 = torch.conv2d(
        im, w[:, 0:1].contiguous(), bias=bias, stride=2, padding=3, dilation=2, groups=2
    )

    assert (res0 == expected0).all()
    assert (res1 == expected1).all()
Code example #6
def sobel(img):
    # print(img.shape)
    gray = torch.sum(img, keepdim=True, dim=1)
    # print(gray.shape)
    edge_x = torch.conv2d(gray, Sx, padding=1)
    # print(edge_x.shape)
    edge_y = torch.conv2d(gray, Sy, padding=1)
    # input()
    return edge_x**2 + edge_y**2
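Sx and Sy are module-level globals that are not shown here. For the single-channel gray produced above, they would plausibly be the standard 3x3 Sobel kernels reshaped to conv2d's (out_channels, in_channels, kH, kW) layout; this is an assumed definition, not part of the original source:

import torch

# assumed definitions of the global Sobel kernels used by sobel()
Sx = torch.tensor([[-1., 0., 1.],
                   [-2., 0., 2.],
                   [-1., 0., 1.]]).view(1, 1, 3, 3)
Sy = Sx.transpose(-1, -2).contiguous()  # vertical-gradient kernel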
Code example #7
def region_cohesion(classes):
    """Computes a measure of region cohesion, that is, the ratio between
  class edges and surface.
  """
    # conv2d needs a 4-D float kernel of shape (out_ch, in_ch, kH, kW); classes is
    # assumed to be N x 1 x H x W, and padding keeps dx and dy the same size
    dx = torch.conv2d(classes, torch.tensor([[[[1., -2., 1.]]]]), padding=(0, 1))
    dy = torch.conv2d(classes, torch.tensor([[[[1.], [-2.], [1.]]]]), padding=(1, 0))
    mag = torch.sqrt(dx**2 + dy**2)
    total_mag = mag.sum(dim=-2)
    total_class = classes.sum(dim=-2)
    return total_mag / total_class
Code example #8
File: mnist.py Project: bohrium/learn-pytorch
 def loss_at(self, x, y):
     x = relu(1.0 + conv2d(x, self.get_subweight(0), bias=None, stride=2))
     x = relu(1.0 + conv2d(x, self.get_subweight(1), bias=None, stride=2))
     x = x.view(-1, 4*4*16, 1)
     x = relu(1.0 + matmul(self.get_subweight(2), x))
     x = matmul(self.get_subweight(3), x)
     x = x.view(-1, NB_C)
     logits = log_softmax(x, dim=1)
     loss = nll_loss(logits, y)
     return loss
Code example #9
 def policy(self, x):
     x = t.tanh(t.conv2d(x, self.params["cnn.1"]))
     x = t.max_pool2d(x, (2, 2))
     x = t.tanh(t.conv2d(x, self.params["cnn.2"], stride=2))
     x = t.max_pool2d(x, (2, 2))
     x = t.flatten(x, 0)
     x = self.heb1.forward(x)
     x = self.heb2.forward(x)
     x = self.heb3.forward(x)
     return last_act_fn(x)
Code example #10
File: m18.py Project: sliscak/random-ideas
    def forward(self, image):
        image = (2 * image) - 1
        image = image.permute(2, 1, 0)
        image = torch.unsqueeze(image, 0)
        # st.write(f'IMAGE SHAPE: {image.shape}')
        # key = 1*1*30*30 == 900 after flatten
        key = self.conv(image).flatten()
        # st.write(f'KEY SHAPE: {key.shape}')
        # st.write(f'KERNEL SHAPE: {self.kernel.shape}')
        attention = torch.matmul(self.keys, key)
        attention = torch.softmax(attention, 0)
        attention = torch.reshape(attention, (-1, 1))
        # st.write(f'ATTENTION SHAPE: {attention.shape}')
        # st.write(f'VALUES SHAPE: {self.values.shape}')

        kernel = self.values * attention
        kernel = torch.sum(kernel, 0)
        kernel = torch.reshape(kernel, (1, 4, 5, 5))
        # pkernel = torch.sigmoid(torch.squeeze(kernel, 0).permute(2,1,0))
        # pkernel = pkernel.detach().cpu().numpy()
        # self.kernel_loc.image(pkernel, width=200)
        # TODO: add activation function here (to kernel)

        out = torch.conv2d(image, weight=kernel, stride=2)
        # pkernel = torch.sigmoid(torch.squeeze(out, 0).permute(2, 1, 0))
        # pkernel = pkernel.detach().cpu().numpy()
        # self.kernel_loc.image(pkernel, width=200)
        out = torch.flatten(out)

        attention = torch.matmul(self.keys2, out)
        attention = torch.softmax(attention, 0)
        attention = torch.reshape(attention, (-1, 1))
        kernel = self.values2 * attention
        kernel = torch.sum(kernel, 0)
        kernel = torch.reshape(kernel, (1, 1, 5, 5))

        # pkernel = torch.sigmoid(torch.squeeze(kernel, 0).permute(2, 1, 0))
        # pkernel = pkernel.detach().cpu().numpy()
        # self.kernel_loc.image(pkernel, width=200)
        # convolve image again or the output from previous conv
        out = torch.reshape(out, (1, 1, 30, 30))
        out = torch.conv2d(out, weight=kernel, stride=2)
        pkernel = torch.sigmoid(torch.squeeze(out, 0).permute(2, 1, 0))
        pkernel = pkernel.detach().cpu().numpy()
        self.kernel_loc.image(pkernel, width=200)
        out = torch.flatten(out)
        # out = torch.relu(out)
        out = self.seq(out)
        # Out Shape: torch.Size([1, 1, 30, 30])
        # st.write(f'key2 Shape: {key.shape}')
        # TODO: don't use softmax on the output when using a cross-entropy loss function
        # out = self.seq(out)

        return out
Code example #11
File: router.py Project: jokingbear/moco
    def forward(self, x):
        if self.iters == 1:
            con = torch.conv2d(x, self.weight, self.bias)
            return con
        else:
            weight = self.weight.view(-1, self.groups, self.in_channels, 1, 1)
            weight = weight.transpose(0, 1).reshape(-1, self.in_channels, 1, 1)
            con = torch.conv2d(x, weight, groups=self.groups)

            return dynamic_routing(con, self.capsules, self.groups, self.iters,
                                   self.bias)
Code example #12
    def policy(self, x):
        x = t.tanh(t.conv2d(x, self.params["cnn.1"]))
        x = t.max_pool2d(x, (2, 2))
        x = t.tanh(t.conv2d(x, self.params["cnn.2"], stride=2))
        x = t.max_pool2d(x, (2, 2))
        x = t.flatten(x, 0)

        x = t.tanh(f.linear(x, self.params["linear.1"].t()))
        x = t.tanh(f.linear(x, self.params["linear.2"].t()))
        x = last_act_fn(f.linear(x, self.params["linear.3"].t()))

        return x
Code example #13
def torch_localmean(im, ksize=None):
    imsize = tuple(im.shape)
    im = im.type(torch.float32).reshape([1, 1] + list(imsize))
    kernel = torch.cuda.FloatTensor(np.ones(ksize)).reshape([1, 1] +
                                                            list(ksize))
    kh, kw = tuple(kernel.shape[-2:])
    pdh, pdw = (int(kh / 2), int(kw / 2))
    ones = torch.cuda.FloatTensor(np.ones(im.shape))
    summing = torch.conv2d(im, kernel, padding=(pdh, pdw))
    frac = torch.conv2d(ones, kernel, padding=(pdh, pdw))
    blurred = summing / frac
    blurred = blurred.reshape(imsize)
    return blurred
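A usage sketch (it requires a CUDA device, since the helper builds torch.cuda.FloatTensor internally); the 5x5 ksize here is an arbitrary choice:

import torch

im = torch.rand(64, 64).cuda()
blurred = torch_localmean(im, ksize=(5, 5))
print(blurred.shape)   # torch.Size([64, 64]); border values are renormalized by `frac`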
Code example #14
def perceive(state_grid):
    global sobel_x, sobel_y
    # Convolve sobel filters with states
    # in x, y and channel dimension.
    compute_grid = state_grid[None, :, :, :]
    grad_x = torch.conv2d(compute_grid, sobel_x, padding=1, groups=1)[0]
    grad_y = torch.conv2d(compute_grid, sobel_y, padding=1, groups=1)[0]
    # Concatenate the cell’s state channels,
    # the gradients of channels in x and
    # the gradient of channels in y.
    #print(compute_grid.shape, grad_x.shape, grad_y.shape)
    perception_grid = torch.cat((state_grid, grad_x, grad_y), dim=0)
    #print(perception_grid.shape)
    return perception_grid
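sobel_x and sobel_y are globals defined elsewhere. Because the call uses groups=1 and grad_x must keep the same channel count as state_grid, one way to build them (an assumption, not shown in the source) is a per-channel block kernel that is zero across channels:

import torch

def make_sobel_filters(n_channels):
    # hypothetical construction: one Sobel-x / Sobel-y kernel per channel,
    # zero between channels, so groups=1 still yields per-channel gradients
    kx = torch.tensor([[-1., 0., 1.],
                       [-2., 0., 2.],
                       [-1., 0., 1.]])
    sobel_x = torch.zeros(n_channels, n_channels, 3, 3)
    sobel_y = torch.zeros(n_channels, n_channels, 3, 3)
    for c in range(n_channels):
        sobel_x[c, c] = kx
        sobel_y[c, c] = kx.t()
    return sobel_x, sobel_y

sobel_x, sobel_y = make_sobel_filters(16)   # e.g. 16 state channels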
Code example #15
def torch_bwopen(bw, stel):
    if not isinstance(stel, torch.Tensor):
        stel = torch.Tensor(stel).cuda()
    if len(stel.shape) != 4:
        stel = torch.reshape(stel, [1, 1] + list(stel.shape[-2:]))
    sumstel = torch.sum(stel)
    kh, kw = tuple(stel.shape[-2:])
    pdh, pdw = (int(kh / 2), int(kw / 2))
    bw = bw.reshape([1, 1] + list(bw.shape[-2:])).type(torch.float32)
    bw = (torch.conv2d(bw, stel,
                       padding=(pdh, pdw)) == sumstel).type(torch.float32)
    bw = (torch.conv2d(bw, stel, padding=(pdh, pdw)) > 0).type(torch.float32)
    bw = bw.reshape(list(bw.shape[-2:])).type(torch.uint8)
    return bw
Code example #16
    def relprop(self, R, alpha):
        if self.X.shape[1] == 3:
            pw = torch.clamp(self.weight, min=0)
            nw = torch.clamp(self.weight, max=0)
            X = self.X
            L = self.X * 0 + \
                torch.min(torch.min(torch.min(self.X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3,
                          keepdim=True)[0]
            H = self.X * 0 + \
                torch.max(torch.max(torch.max(self.X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3,
                          keepdim=True)[0]
            Za = torch.conv2d(X, self.weight, bias=None, stride=self.stride, padding=self.padding) - \
                 torch.conv2d(L, pw, bias=None, stride=self.stride, padding=self.padding) - \
                 torch.conv2d(H, nw, bias=None, stride=self.stride, padding=self.padding) + 1e-9

            S = R / Za
            C = X * self.gradprop2(S, self.weight) - L * self.gradprop2(
                S, pw) - H * self.gradprop2(S, nw)
            R = C
        else:
            beta = alpha - 1
            pw = torch.clamp(self.weight, min=0)
            nw = torch.clamp(self.weight, max=0)
            px = torch.clamp(self.X, min=0)
            nx = torch.clamp(self.X, max=0)

            def f(w1, w2, x1, x2):
                Z1 = F.conv2d(x1,
                              w1,
                              bias=None,
                              stride=self.stride,
                              padding=self.padding)
                Z2 = F.conv2d(x2,
                              w2,
                              bias=None,
                              stride=self.stride,
                              padding=self.padding)
                S1 = safe_divide(R, Z1)
                S2 = safe_divide(R, Z2)
                C1 = x1 * self.gradprop(Z1, x1, S1)[0]
                C2 = x2 * self.gradprop(Z2, x2, S2)[0]
                return C1 + C2

            activator_relevances = f(pw, nw, px, nx)
            inhibitor_relevances = f(nw, pw, px, nx)

            R = alpha * activator_relevances - beta * inhibitor_relevances
        return R
Code example #17
    def forward(self, image):
        image = (2 * image) - 1
        image = image.permute(2, 1, 0)
        image = torch.unsqueeze(image, 0)
        # st.write(f'IMAGE SHAPE: {image.shape}')
        # key = 1*1*30*30 == 900 after flatten
        key = self.conv(image).flatten()
        # st.write(f'KEY SHAPE: {key.shape}')
        # st.write(f'KERNEL SHAPE: {self.kernel.shape}')
        attention = torch.matmul(self.keys, key)
        attention = torch.softmax(attention, 0)
        attention = torch.reshape(attention, (-1, 1))
        # st.write(f'ATTENTION SHAPE: {attention.shape}')
        # st.write(f'VALUES SHAPE: {self.values.shape}')

        kernel = self.values * attention
        kernel = torch.sum(kernel, 0)
        kernel = torch.reshape(kernel, (1, 4, 5, 5))
        # TODO: add activation function here (to kernel)

        out = torch.conv2d(image, weight=kernel, stride=2)
        out = torch.flatten(out)
        # Out Shape: torch.Size([1, 1, 30, 30])
        # st.write(f'key2 Shape: {key.shape}')
        out = self.seq(out)

        return out
Code example #18
 def calc_coarse(self):
     sample = self.circle(self.coarse_size, (0, 0), (self.max, self.max), 1)
     kernel = torch.tensor([[-1, -1, -1], [-1, 8., -1], [-1, -1, -1]]).view(
         (1, 1, 3, 3))
     sample = torch.conv2d(sample, kernel, stride=1)
     self.coarse = torch.abs(sample)
     return self.coarse
Code example #19
File: utils.py Project: elvirast/ms_thesis
def im2patch(input, patchSize, stride=1):
    r""" im2patch extracts all the valid patches from the input which is a 3D 
    or 4D tensor of size B x C x H x W. The extracted patches are of size 
    patchSize and they are extracted with an overlap equal to stride. The 
    output is of size B x C*P x PH x PW where P is the total number of elements
    in the patch, while PH and PW is the number of patches in the horizontal and
    vertical axes, respectively.
    """
    assert (input.dim() >= 3
            and input.dim() < 5), "A 3D or 4D tensor is expected."
    assert (isinstance(patchSize,
                       tuple)), "patchSize is expected to be a tuple."

    if len(patchSize) < 2:
        patchSize *= 2

    if input.dim() == 3:
        input = input.unsqueeze(0)

    Pn = reduce(lambda x, y: x * y, patchSize[0:2])
    h = th.eye(Pn).type(input.type())
    h = h.view(Pn, 1, patchSize[0], patchSize[1])

    batch, Nc = input.shape[0:2]

    if Nc != 1:
        input = input.view(batch * Nc, 1, input.shape[2], input.shape[3])

    P = th.conv2d(input, h, stride=stride)

    if Nc != 1:
        P = P.view(batch, Nc * Pn, P.shape[2], P.shape[3])

    return P
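A quick check of the shape contract described in the docstring (th is torch and reduce comes from functools in the original module):

import torch as th

x = th.arange(16.).view(1, 1, 4, 4)     # B=1, C=1, H=W=4
patches = im2patch(x, (2, 2))           # Pn = 4 elements per patch
print(patches.shape)                    # torch.Size([1, 4, 3, 3]): B x C*P x PH x PW
print(patches[0, :, 0, 0])              # tensor([0., 1., 4., 5.]) -- the top-left 2x2 patch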
Code example #20
def function_hook(input, weight, bias, *args, **kwargs):

    base = nn.Conv2d if input.dim() == 4 else nn.Conv3d

    class Convolution(base):
        def __init__(self, weight, bias, stride, padding, dilation, groups):
            super().__init__(in_channels=input.shape[1],
                             out_channels=weight.shape[0],
                             kernel_size=weight.shape[2:],
                             stride=stride,
                             padding=padding,
                             dilation=dilation,
                             groups=groups,
                             bias=bias is not None)
            params = {'weight': weight}
            if bias is not None:
                params['bias'] = bias
            self.load_state_dict(params)

    if input.dim() == 4:
        output = torch.conv2d(input.tensor(), weight, bias, *args, **kwargs)
    elif input.dim() == 5:
        output = torch.conv3d(input.tensor(), weight, bias, *args, **kwargs)
    return forward_hook(Convolution(weight, bias, *args, **kwargs), (input, ),
                        output)
Code example #21
File: predictor.py Project: MM21-1946/mm21_no1946
def mask2weight(mask2d, mask_kernel, padding=0):
    # mask2d comes from feat2d.py; it is 2-D here and is lifted to 4-D for conv2d
    weight = torch.conv2d(mask2d[None, None, :, :].float(),
                          mask_kernel,
                          padding=padding)[0, 0]
    weight[weight > 0] = 1 / weight[weight > 0]
    return weight
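A small self-contained illustration; the all-ones mask_kernel here is only an assumption about how the caller builds it:

import torch

mask2d = torch.tensor([[0, 1, 1],
                       [0, 1, 1],
                       [0, 0, 1]])
mask_kernel = torch.ones(1, 1, 2, 2)   # counts mask cells under each 2x2 window
w = mask2weight(mask2d, mask_kernel)
# every positive entry of w is 1 / (number of mask cells covered by that window)
print(w)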
Code example #22
    def construct_noisy_observation_matrix(self,
                                           sigma=1.,
                                           filter_half_width=2):
        n_fields = self.size
        n_states = self.n_states

        # create a map where only one field is 1 in each map
        mp_grid = torch.eye(n_fields).reshape(n_fields, self.walls.shape[0],
                                              self.walls.shape[1])

        # convolve observation patterns with the map
        filter = self.generate_Normal_2d(2 * filter_half_width + 1, sigma)
        om = torch.conv2d(input=mp_grid[:, None],
                          weight=filter[None, None],
                          padding=[filter_half_width, filter_half_width])
        assert (om.shape[-2:] == mp_grid.shape[-2:])

        # flatten map again and extract valid fields
        om_flat = torch.flatten(om, 2, 3).permute(2, 0, 1)  # o s a
        valid_fields = torch.flatten(~self.walls)
        om_flat_valid = om_flat[valid_fields][:, valid_fields]
        assert (om_flat_valid.shape[0] == om_flat_valid.shape[1]
                and om_flat_valid.shape[0] == n_states)

        # normalize
        om_norm = om_flat_valid[:, :, 0]
        om_norm /= torch.sum(om_norm, axis=0,
                             keepdim=True)  # renormalize to transition rate

        return om_norm
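generate_Normal_2d is not shown. A minimal sketch, assuming it returns a normalized 2-D Gaussian kernel of the given side length (hypothetical implementation):

import torch

def generate_Normal_2d(size, sigma):
    # hypothetical helper: (size x size) Gaussian, normalized to sum to 1
    coords = torch.arange(size, dtype=torch.float32) - (size - 1) / 2
    g = torch.exp(-coords ** 2 / (2 * sigma ** 2))
    kernel = g[:, None] * g[None, :]
    return kernel / kernel.sum()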
Code example #23
def Laplacian_and_Hessian(img):
    if img.is_cuda:
        laplacian_filter = torch.tensor([[1., 1., 1.], [1., -8., 1.],
                                         [1., 1., 1.]]).cuda()
    else:
        laplacian_filter = torch.tensor([[1., 1., 1.], [1., -8., 1.],
                                         [1., 1., 1.]])
    L = torch.conv2d(
        img.reshape(1, 1, img.shape[0], img.shape[1]),
        laplacian_filter.reshape(1, 1, laplacian_filter.shape[0],
                                 laplacian_filter.shape[1]))
    H = torch.conv2d(
        L,
        laplacian_filter.reshape(1, 1, laplacian_filter.shape[0],
                                 laplacian_filter.shape[1]))
    return L, H
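A quick shape check: each 3x3 'valid' convolution trims one pixel from every border, so:

import torch

img = torch.randn(32, 32)
L, H = Laplacian_and_Hessian(img)
print(L.shape, H.shape)   # torch.Size([1, 1, 30, 30]) torch.Size([1, 1, 28, 28])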
Code example #24
File: operation.py Project: Hasan4825/RankingT
def conv1x1(input: torch.Tensor, weight: torch.Tensor):
    """Do a convolution with a 1x1 kernel weights. Implemented with matmul, which can be faster than using conv."""

    if weight is None:
        return input

    return torch.conv2d(input, weight)
Code example #25
    def conv_Rn_G(self, input):
        # Generate the full stack of convolution kernels (all transformed copies)
        kernel_stack = torch.cat([self.kernel(self.h_grid.grid[i]) for i in range(self.N_h)], dim=0) # [N_out x N_h, N_in, X, Y]
        # And apply them all at once
        output = torch.conv2d(
            input=input,
            weight=kernel_stack,
            bias=None,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.conv_groups)
        # Reshape the last channel to create a vector valued RnxH feature map
        output = torch.stack(torch.split(output, self.N_out, 1), 2)

        #kernel_stack = torch.stack([self.kernel(self.h_grid.grid[i]) for i in range(self.N_h)], dim=1)
        # ks = kernel_stack.shape
        # kernel_stack = torch.reshape(kernel_stack, [ks[0] * ks[1], ks[2], ks[-2], ks[-1]])
        # output_2 = torch.conv2d(
        #     input=input,
        #     weight=kernel_stack,
        #     bias=None,
        #     stride=self.stride,
        #     padding=self.padding,
        #     dilation=self.dilation,
        #     groups=self.conv_groups)
        # output_2=output_2.reshape(output_2.shape[0], self.N_out, self.N_h, output_2.shape[-2], output_2.shape[-1])
        # Return the output
        return output
Code example #26
File: baseline.py Project: Xiaohui9607/mmvp
    def cdna_transformation(self, image, cdna_input):
        batch_size, height, width = image.shape[0], image.shape[
            2], image.shape[3]

        cdna_kerns = self.fc(cdna_input)
        cdna_kerns = cdna_kerns.view(batch_size, self.num_masks, 1,
                                     DNA_KERN_SIZE, DNA_KERN_SIZE)
        cdna_kerns = torch.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
        norm_factor = torch.sum(cdna_kerns, dim=[2, 3, 4], keepdim=True)
        cdna_kerns /= norm_factor

        cdna_kerns = cdna_kerns.view(batch_size * self.num_masks, 1,
                                     DNA_KERN_SIZE, DNA_KERN_SIZE)
        image = image.permute([1, 0, 2, 3])

        transformed = torch.conv2d(image,
                                   cdna_kerns,
                                   stride=1,
                                   padding=[2, 2],
                                   groups=batch_size)

        transformed = transformed.view(self.channels, batch_size,
                                       self.num_masks, height, width)
        transformed = transformed.permute([1, 0, 3, 4, 2])
        transformed = torch.unbind(transformed, dim=-1)

        return transformed
Code example #27
File: layers.py Project: shirgur/AGFVisualization
        def final_backward(R_p, pw, nw, X1):
            X = X1
            L = X * 0 + \
                torch.min(torch.min(torch.min(X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3,
                          keepdim=True)[0]
            H = X * 0 + \
                torch.max(torch.max(torch.max(X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3,
                          keepdim=True)[0]
            Za = torch.conv2d(X, self.weight, bias=None, stride=self.stride, padding=self.padding) - \
                 torch.conv2d(L, pw, bias=None, stride=self.stride, padding=self.padding) - \
                 torch.conv2d(H, nw, bias=None, stride=self.stride, padding=self.padding)

            Sp = safe_divide(R_p, Za)

            Rp = X * self.gradprop2(Sp, self.weight) - L * self.gradprop2(Sp, pw) - H * self.gradprop2(Sp, nw)
            return Rp
Code example #28
File: Vanilla.py Project: wjmolina/Thesis-Ideas
def convolution(image, kernel):
    '''
     input : [a, b, c] tensor, [d, e] tensor with b >= d and c >= e
    output : [a, b - d + 1, c - e + 1] tensor
    '''
    return torch.conv2d(image.view(-1, 1, *image[0].shape),
                        kernel.view(1, 1, *kernel.shape))[:, 0, :, :]
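A short check of the shape contract given in the docstring:

import torch

image = torch.randn(8, 28, 28)    # [a, b, c]
kernel = torch.randn(3, 3)        # [d, e]
out = convolution(image, kernel)
print(out.shape)                  # torch.Size([8, 26, 26]) == [a, b - d + 1, c - e + 1]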
Code example #29
def conv_transpose2d(input: Tensor, weight: Tensor, stride=1, padding=0, output_padding=0):
    """
    :param input: minibatch , in_channels , iH , iW
    :param weight: in_channels,out_channels,kH , kW
    :param stride:
    :param padding:
    :param output_padding:
    :return:
    """

    stride = (1, 1) if stride is None else to_pair(stride)
    padding = (0, 0) if padding is None else to_pair(padding)
    output_padding = (0, 0) if output_padding is None else to_pair(output_padding)
    _, _, kH, kW = weight.shape
    assert output_padding[0] < stride[0] and output_padding[1] < stride[1], \
        "output padding must be smaller than stride,but got output_padding=%s and stride=%s" % (output_padding, stride)
    assert kH > padding[0] and kW > padding[1], \
        "padding must be smaller than kernel size, but got kernel_size=%s and padding=%s" % ((kH, kW), padding)

    N, C, H, W = input.shape
    if stride[0] > 1 or stride[1] > 1:
        inf_input = np.zeros((N, C, (H - 1) * stride[0] + 1, (W - 1) * stride[1] + 1))
        index_i = np.repeat(np.arange(0, H) * stride[0], W)
        index_j = np.tile(np.arange(0, W) * stride[1], H)
        inf_input[:, :, index_i, index_j] = input.data.reshape(N, C, -1)
    else:
        inf_input = input.data

    _, _, kH, kW = weight.shape

    pad00 = kH - 1 - padding[0]
    pad01 = pad00 + output_padding[0]
    pad10 = kW - 1 - padding[1]
    pad11 = pad10 + output_padding[1]

    if pad00 or pad01 or pad10 or pad11:
        x_padded = np.pad(inf_input, ((0, 0), (0, 0), (pad00, pad01), (pad10, pad11)),
                          mode='constant')
    else:
        x_padded = inf_input

    _weight = np.swapaxes(weight.data, axis1=0, axis2=1)
    _weight = np.flip(_weight, (2, 3)).copy()

    out = torch.conv2d(torch.tensor(x_padded),
                       torch.tensor(_weight),
                       bias=None)

    data = out.numpy()
    requires_grad = input.requires_grad or weight.requires_grad
    grad_fn = Conv2dTransposedBackward
    depends_on = []

    if input.requires_grad:
        depends_on.append(Edge(input, ['input', weight.data, stride, padding]))

    if weight.requires_grad:
        depends_on.append(Edge(weight, ['weight', x_padded, stride, padding]))

    return Tensor(data, requires_grad, depends_on, grad_fn)
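The zero-insertion-plus-flipped-kernel construction above can be checked against PyTorch's own conv_transpose2d. A minimal torch-only sketch of the same technique (integer stride/padding, groups=1, no autograd bookkeeping), for illustration only:

import torch
import torch.nn.functional as F

def conv_transpose2d_via_conv(x, w, stride=1, padding=0, output_padding=0):
    # x: (N, C_in, H, W); w: (C_in, C_out, kH, kW); integer hyper-parameters only
    N, C, H, W = x.shape
    kH, kW = w.shape[-2:]
    # insert (stride - 1) zeros between neighbouring input elements
    up = x.new_zeros(N, C, (H - 1) * stride + 1, (W - 1) * stride + 1)
    up[:, :, ::stride, ::stride] = x
    # pad so that an ordinary convolution with the flipped kernel reproduces the
    # transposed convolution; output_padding goes on the bottom/right edge only
    ph, pw = kH - 1 - padding, kW - 1 - padding
    up = F.pad(up, (pw, pw + output_padding, ph, ph + output_padding))
    # swap the in/out channel axes and flip the kernel spatially
    w_conv = w.transpose(0, 1).flip((-2, -1)).contiguous()
    return F.conv2d(up, w_conv)

x = torch.randn(2, 3, 5, 5)
w = torch.randn(3, 4, 3, 3)
ref = F.conv_transpose2d(x, w, stride=2, padding=1, output_padding=1)
out = conv_transpose2d_via_conv(x, w, stride=2, padding=1, output_padding=1)
assert torch.allclose(ref, out, atol=1e-4)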
Code example #30
def grad_2D(x):
    weight = x.new_zeros(2, 1, 3, 3)
    weight[0, 0] = torch.tensor([[0, 0, 0], [-1, 1, 0], [0, 0, 0]])
    weight[1, 0] = torch.tensor([[0, -1, 0], [0, 1, 0], [0, 0, 0]])
    x = x[:, None]  # Add channel dimension
    out = torch.conv2d(x, weight, padding=1)
    return out[:, :, :, :]
Code example #31
File: grad.py Project: RichieMay/pytorch
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):
    r"""
    Computes the gradient of conv2d with respect to the weight of the convolution.

    Args:
        input: input tensor of shape (minibatch x in_channels x iH x iW)
        weight_size : Shape of the weight gradient tensor
        grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias: optional bias tensor (out_channels). Default: None

    Examples::

        >>> input = torch.randn(1,1,3,3, requires_grad=True)
        >>> weight = torch.randn(1,1,1,2, requires_grad=True)
        >>> output = F.conv2d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
        >>> F.grad.conv2d_weight(input, weight.shape, grad_output)

    """
    stride = _pair(stride)
    padding = _pair(padding)
    dilation = _pair(dilation)
    in_channels = input.shape[1]
    out_channels = grad_output.shape[1]
    min_batch = input.shape[0]

    grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1,
                                                  1)
    grad_output = grad_output.contiguous().view(
        grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
        grad_output.shape[3])

    input = input.contiguous().view(1, input.shape[0] * input.shape[1],
                                    input.shape[2], input.shape[3])

    grad_weight = torch.conv2d(input, grad_output, bias, dilation, padding,
                               stride, in_channels * min_batch)

    grad_weight = grad_weight.contiguous().view(
        min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
        grad_weight.shape[3])

    return grad_weight.sum(dim=0).view(
        in_channels // groups, out_channels,
        grad_weight.shape[2], grad_weight.shape[3]).transpose(0, 1).narrow(
            2, 0, weight_size[2]).narrow(3, 0, weight_size[3])