def forward(self, x):
        for t in range(4):
            if t < 1:
                Z_1 = self.W_b_1(x)
                Z_2_mpool, indices_hid = self.MPool(Z_1)
                Z_2 = self.W_b_2(self.LRN(F.relu(Z_2_mpool)))
                read_out, indices_max = F.max_pool2d_with_indices(
                    self.LRN(F.relu(Z_2)),
                    kernel_size=Z_2.size()[2:],
                    return_indices=True)
                final_z = self.Lin(read_out.view(-1, 32))
            if t >= 1:
                Z_1 = self.W_b_1(x) + self.W_l_1(self.LRN(
                    F.relu(Z_1))) + self.W_t_1(self.LRN(F.relu(Z_2)))
                Z_2_mpool, indices_hid = self.MPool(Z_1)
                Z_2 = self.W_b_2(self.LRN(F.relu(Z_2_mpool))) + self.W_l_2(
                    self.LRN(F.relu(Z_2)))
                read_out, indices_max = F.max_pool2d_with_indices(
                    self.LRN(F.relu(Z_2)),
                    kernel_size=Z_2.size()[2:],
                    return_indices=True)
                final_z = self.Lin(read_out.view(-1, 32))

        #print(torch.sum(torch.isnan(final_z)))
        #print(F.sigmoid(final_z[0,:]))
        #print(final_z.size())
        return final_z
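
The read-out step above passes kernel_size=Z_2.size()[2:], i.e. a single window covering the whole feature map, which turns max_pool2d_with_indices into a global max pool. A minimal standalone sketch of that trick (tensor shapes here are made up, not taken from the model):

import torch
import torch.nn.functional as F

feats = torch.randn(2, 32, 7, 7)                   # (batch, channels, H, W)
pooled, idx = F.max_pool2d_with_indices(
    feats, kernel_size=feats.size()[2:])           # one window over the full H x W extent
print(pooled.shape)                                # torch.Size([2, 32, 1, 1])
readout = pooled.view(-1, 32)                      # flat read-out, as fed to self.Lin above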
Example #2
 def infer(self, data_dir, out_dir, shape, stride, device='cpu', epsilon=-1):
     #Assume that the data_dir will contain pickled files which are 1 indexed
     #Shape will be desired shape of filters
     #Each file will be of shape [channels, x, y]
     #Perform convolution then pooling
     filters = self.f1.weight.data
     
     #Prune weights
     if epsilon != -1:
         filters = prune_weights(filters, epsilon)
         
     filters = torch.reshape(filters, shape).to(device)
     files = [x for x in os.listdir(data_dir) if str.isdigit(x)]
     for file in files:
         X = torch.load(os.path.join(data_dir, file), map_location=device)
         X_shape = X.shape
         #Normalize input
         X = s.normalize_single_sample(X.view(-1).unsqueeze(0))
         X = torch.reshape(X, [1, X_shape[0], X_shape[1], X_shape[2]]).float()
         #Convolve
         out_map = F.conv2d(X, filters, stride=stride)
         #Pooling
         out_map, _ = F.max_pool2d_with_indices(out_map, 2, stride=1)
         
         #Reshape to remove batch dimension
         out_map = out_map.squeeze(0)
         torch.save(out_map, os.path.join(out_dir, file))
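
The convolve-then-pool body of infer can be exercised on its own; the sketch below uses placeholder shapes and random filters instead of self.f1.weight.data, and skips the normalization helper:

import torch
import torch.nn.functional as F

X = torch.randn(1, 1, 64, 64)                                 # one single-channel input
filters = torch.randn(8, 1, 5, 5)                             # 8 placeholder 5x5 filters
out_map = F.conv2d(X, filters, stride=2)                      # -> (1, 8, 30, 30)
out_map, _ = F.max_pool2d_with_indices(out_map, 2, stride=1)  # overlapping 2x2 pool -> (1, 8, 29, 29)
out_map = out_map.squeeze(0)                                  # drop the batch dimension before saving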
Example #3
def abs_maxpool_2d(x):
    x_pool, index = F.max_pool2d_with_indices(torch.abs(x), (2, 2), 2)
    x_flat = x.reshape([x.shape[0], x.shape[1], -1])
    index_flat = index.reshape([index.shape[0], index.shape[1], -1])
    x_selected = torch.gather(x_flat, 2, index_flat).reshape(x_pool.shape)
    x_pool = x_pool * torch.sign(x_selected)
    return x_pool
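
A quick check of abs_maxpool_2d (assuming the function above and its torch.nn.functional import are in scope): within each 2x2 window it keeps the element with the largest magnitude, sign included, rather than the largest raw value.

import torch

x = torch.tensor([[[[1., -5.],
                    [2.,  3.]]]])    # one 1-channel 2x2 map
print(abs_maxpool_2d(x))             # tensor([[[[-5.]]]]) -- -5 wins on magnitude and keeps its sign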
Example #4
 def compute_indices(self, x):
     b, c, h, w = x.size()
     oh, ow = 2 * h, 2 * w
     x = -torch.arange(oh * ow, dtype=x.dtype, device=x.device).view(
         1, 1, oh, ow)
     indices = F.max_pool2d_with_indices(x, (2, 2),
                                         (2, 2))[1].expand(b, c, h, w)
     return indices
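
compute_indices never pools real data: the negated arange makes the top-left cell of every 2x2 block the window maximum, so the returned indices point at those top-left positions and can drive F.max_unpool2d as a fixed 2x upsampling. A standalone sketch of the same idea (the free-function name below is illustrative):

import torch
import torch.nn.functional as F

def top_left_indices(x):
    b, c, h, w = x.size()
    oh, ow = 2 * h, 2 * w
    ramp = -torch.arange(oh * ow, dtype=x.dtype, device=x.device).view(1, 1, oh, ow)
    # the maximum of the negated ramp in each 2x2 window is its top-left element
    return F.max_pool2d_with_indices(ramp, (2, 2), (2, 2))[1].expand(b, c, h, w)

x = torch.arange(4.0).view(1, 1, 2, 2)
up = F.max_unpool2d(x, top_left_indices(x), kernel_size=2)   # (1, 1, 4, 4), values at even rows/columns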
Example #5
 def forward(self, u, v):
     B, C, _, _ = u.shape
     x = vector2scalar(u, v)
     _, idx = F.max_pool2d_with_indices(x,
                                        kernel_size=self.kernel_size,
                                        stride=self.stride,
                                        padding=self.padding)
     u = torch.gather(u.view(B, C, -1), 2, idx.view(B, C, -1)).view_as(idx)
     v = torch.gather(v.view(B, C, -1), 2, idx.view(B, C, -1)).view_as(idx)
     return u, v
Example #6
 def forward(self, input):
     pooling_indices = []
     x = input
     for layer in self.encoder.children():
         x = layer(x)
         x, indices = F.max_pool2d_with_indices(x, kernel_size=2)
         pooling_indices = [indices, *pooling_indices]
     for layer in self.decoder.children():
         indices, *pooling_indices = pooling_indices
         x = F.max_unpool2d(x, indices, kernel_size=2)
         x = layer(x)
     return x
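
This is the usual SegNet-style pairing: each F.max_pool2d_with_indices on the way down stores its indices, and the matching F.max_unpool2d on the way up puts values back at exactly those positions. The pairing also works outside a Module; a minimal round trip:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)
pooled, indices = F.max_pool2d_with_indices(x, kernel_size=2)   # (1, 3, 4, 4)
restored = F.max_unpool2d(pooled, indices, kernel_size=2)       # (1, 3, 8, 8), zero except at the argmax positions
assert restored.shape == x.shape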
Example #7
 def find_local_max(self, box_prob):
     B, C, H, W = box_prob.shape
     max_prob, idx = F.max_pool2d_with_indices(box_prob,
                                               3,
                                               1,
                                               1,
                                               return_indices=True)
     max_prob = max_prob[0, 0]
     box_prob = box_prob[0, 0]
     is_local_max = torch.nonzero(box_prob == max_prob)
     y, x = is_local_max[:, 0], is_local_max[:, 1]
     idx = torch.argsort(-box_prob[y, x])
     k = self.scatter_topk
     y = y[idx[:k]]
     x = x[idx[:k]]
     return y.cpu().numpy(), x.cpu().numpy(), box_prob[y, x]
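
find_local_max relies on a standard trick: with kernel 3, stride 1, padding 1 the pooled map keeps the input's spatial size, so a pixel equals its pooled value exactly when it is the maximum of its own 3x3 neighbourhood. Stripped down:

import torch
import torch.nn.functional as F

prob = torch.rand(1, 1, 32, 32)
pooled, _ = F.max_pool2d_with_indices(prob, 3, 1, 1)    # kernel 3, stride 1, padding 1 -> same spatial size
local_max = torch.nonzero(prob[0, 0] == pooled[0, 0])   # (y, x) coordinates of 3x3 local maxima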
Example #8
 def infer_from_spectro(self, data_dir, out_dir, shape, stride, device='cpu', epsilon=-1):
     filters = self.f1.weight.data
     
     #Prune weights
     if epsilon != -1:
         filters = prune_weights(filters, epsilon)
     
     filters = torch.reshape(filters, [-1, 1, shape[2], shape[3]]).to(device)
     
     from skimage.io import imread
     
     #Extract folder names with assumption that all folder names are integers
     dirs = [x for x in os.listdir(data_dir) if str.isdigit(x)]
     
     for i in range(len(dirs)):
         folder_name = dirs[i]
         #Read in image
         img = torch.load(os.path.join(data_dir, folder_name, 'spectro'))
         if img.shape[0] > img.shape[1]:
             continue
         img = torch.as_tensor(img)
         img_shape = img.shape
         img = s.normalize_single_sample(img.view(-1).unsqueeze(0))
         img = torch.reshape(img, [1, 1, img_shape[0], img_shape[1]]).float()
         img = img.to(device)
         
         #Convolve
         out_map = F.conv2d(img, filters, stride=stride)
         #Pooling
         out_map, _ = F.max_pool2d_with_indices(out_map, 2, stride=1)
         
         #Reshape to remove batch dimension
         out_map = out_map.squeeze(0)
         
         #Save
         torch.save(out_map, os.path.join(out_dir, folder_name))  
Example #9
def mask_global_avg_pooling_2d(features, mask):
    # NOTE: the original snippet is truncated above this point; the signature is
    # inferred from the commented-out call in __main__ below.
    '''
    features: B * C * H * W
    mask: B * H * W
    '''
    assert features.shape[2:4] == mask.shape[
        1:3], 'shape mismatch between features and mask'
    b, c, h, w = features.shape
    mask = mask.unsqueeze(1).repeat(1, c, 1, 1)  # B*H*W -> B*1*H*W
    features = features * mask
    pooled_features = F.avg_pool2d(features, [h, w])
    scale = (h * w) / mask.sum(-1).sum(-1).clamp(min=1.0)
    scale = scale.unsqueeze(-1).unsqueeze(-1)
    return pooled_features * scale


if __name__ == '__main__':

    b, c, h, w = 1, 3, 4, 4
    features = torch.rand(b, c, h, w)
    mask = torch.zeros(b, h, w)
    mask[:, 1:3, 1:3] = 1
    print('features', features)
    # print ('mask', mask)

    a, b = F.max_pool2d_with_indices(features, [h, w])
    print(a)
    print(b)

    # max = mask_global_max_pooling_2d(features, mask)
    # avg = mask_global_avg_pooling_2d(features, mask)
    # print(max)
    # print(avg)
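
The masked average pool divides by h * w first and then rescales by (h * w) / mask.sum(), which is the same as averaging only over the unmasked positions. A small check of that equivalence, calling the function by the name used in the commented-out line above:

import torch

features = torch.rand(1, 3, 4, 4)
mask = torch.zeros(1, 4, 4)
mask[:, 1:3, 1:3] = 1                                  # keep only the central 2x2 region
pooled = mask_global_avg_pooling_2d(features, mask)    # (1, 3, 1, 1)
manual = features[:, :, 1:3, 1:3].mean(dim=(-1, -2))   # plain mean over the kept region
assert torch.allclose(pooled.view(1, 3), manual)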
Example #10
    def forward(self, input):

        x11 = F.relu(self.bn11(self.conv11(input)))
        x12 = F.relu(self.bn12(self.conv12(x11)))
        x1p, id1 = F.max_pool2d_with_indices(x12,
                                             kernel_size=2,
                                             stride=2,
                                             return_indices=True)

        x21 = F.relu(self.bn21(self.conv21(x1p)))
        x22 = F.relu(self.bn22(self.conv22(x21)))
        x2p, id2 = F.max_pool2d_with_indices(x22,
                                             kernel_size=2,
                                             stride=2,
                                             return_indices=True)

        x31 = F.relu(self.bn31(self.conv31(x2p)))
        x32 = F.relu(self.bn32(self.conv32(x31)))
        x33 = F.relu(self.bn33(self.conv33(x32)))
        x3p, id3 = F.max_pool2d_with_indices(x33,
                                             kernel_size=2,
                                             stride=2,
                                             return_indices=True)

        x41 = F.relu(self.bn41(self.conv41(x3p)))
        x42 = F.relu(self.bn42(self.conv42(x41)))
        x43 = F.relu(self.bn43(self.conv43(x42)))
        x4p, id4 = F.max_pool2d_with_indices(x43,
                                             kernel_size=2,
                                             stride=2,
                                             return_indices=True)

        x51 = F.relu(self.bn51(self.conv51(x4p)))
        x52 = F.relu(self.bn52(self.conv52(x51)))
        x53 = F.relu(self.bn53(self.conv53(x52)))
        x5p, id5 = F.max_pool2d(x53,
                                kernel_size=2,
                                stride=2,
                                return_indices=True)
        print(x5p.size(), id5.size())
        #  unpooling - conv - bn - activation
        #            - conv - bn - activation
        #            - conv - bn - activation
        #            -

        x5d = F.max_unpool2d(x5p, id5, kernel_size=2, stride=2)
        x53d = F.relu(self.bn53d(self.conv53d(x5d)))
        x52d = F.relu(self.bn52d(self.conv52d(x53d)))
        x51d = F.relu(self.bn51d(self.conv51d(x52d)))

        x4d = F.max_unpool2d(x51d, id4, kernel_size=2, stride=2)
        x43d = F.relu(self.bn43d(self.conv43d(x4d)))
        x42d = F.relu(self.bn42d(self.conv42d(x43d)))
        x41d = F.relu(self.bn41d(self.conv41d(x42d)))

        x3d = F.max_unpool2d(x41d, id3, kernel_size=2, stride=2)
        x33d = F.relu(self.bn33d(self.conv33d(x3d)))
        x32d = F.relu(self.bn32d(self.conv32d(x33d)))
        x31d = F.relu(self.bn31d(self.conv31d(x32d)))

        x2d = F.max_unpool2d(x31d, id2, kernel_size=2, stride=2)
        x22d = F.relu(self.bn22d(self.conv22d(x2d)))
        x21d = F.relu(self.bn21d(self.conv21d(x22d)))

        x1d = F.max_unpool2d(x21d, id1, kernel_size=2, stride=2)
        x12d = F.relu(self.bn12d(self.conv12d(x1d)))
        x11d = self.conv11d(x12d)

        return x11d
Example #11
        mask = torch.where(d >= bins[-1],
                           torch.tensor([1.]).to(device),
                           torch.tensor([0.]).to(device))
        masked_s = torch.where(d >= bins[-1], g, torch.tensor([0.]).to(device))

        semivariance1[:, :, -1] = masked_s.sum(dim=2) / (mask.sum(dim=2) + 1e-7)

        semivariance1[:, :, -1][mask.sum(dim=2) == 0] = 0

        loss = ((semivariance1 - semivariance0)**2).mean()

        return loss

    def retrieve_elements_from_indices(self, tensor, indices):
        flattened_tensor = tensor.flatten(start_dim=2)
        output = flattened_tensor.gather(
            dim=2, index=indices.flatten(start_dim=2)).view_as(indices)
        return output


if __name__ == "__main__":
    import torch  # not shown in the truncated snippet above, needed for torch.randn below
    import torch.nn.functional as F

    x1 = torch.randn(6, 64, 256, 256)
    x0, ind = F.max_pool2d_with_indices(x1, 2)

    kl = KrigingLoss()

    loss = kl(x0, ind, x1)
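
retrieve_elements_from_indices is the general form of the gather trick used in several examples above: the indices returned by F.max_pool2d_with_indices on one tensor select the elements at the same argmax positions in another tensor of the same shape. A self-contained sketch, rewritten as a free function for brevity (the name is illustrative):

import torch
import torch.nn.functional as F

def gather_at_indices(tensor, indices):
    # same logic as retrieve_elements_from_indices above, written as a free function
    flat = tensor.flatten(start_dim=2)
    return flat.gather(dim=2, index=indices.flatten(start_dim=2)).view_as(indices)

x1 = torch.randn(2, 4, 8, 8)
x0, ind = F.max_pool2d_with_indices(x1, 2)     # pooled values and their flat positions within x1
other = torch.randn_like(x1)
picked = gather_at_indices(other, ind)         # elements of `other` at x1's per-window argmax positions
assert picked.shape == x0.shape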