Example #1
def spherical_mapped_conv(features: torch.Tensor, cache: Cache, out_size: int,
                          src_order: int, tgt_order: int, kernel_size: int):
    # One learned weight per kernel sample (kernel_size x kernel_size)
    _, c, __, ___ = features.size()
    dev = features.get_device()
    conv = MappedConvolution(c, out_size, kernel_size * kernel_size)
    conv = conv.to(dev) if dev >= 0 else conv

    # Look up the precomputed sample map and interpolation weights for the
    # source/target icosphere orders and move them to the feature device
    m, w = cache.get_kernels(src_order, tgt_order, kernel_size)
    m = m.to(dev) if dev >= 0 else m
    w = w.to(dev) if dev >= 0 else w
    return conv(features, m, w)
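A minimal invocation sketch (purely illustrative: the cache object, the icosphere order, and the (batch, channels, 1, num_vertices) feature layout are assumptions, not taken from the source):

# cache: a preconstructed Cache holding sample maps and interpolation weights (assumption)
feats = torch.randn(1, 16, 1, 2562)  # an order-4 icosphere has 2562 vertices
out = spherical_mapped_conv(feats, cache, out_size=32,
                            src_order=4, tgt_order=4, kernel_size=3)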
Example #2
def test_bilinear_interpolation_cpu():

    # Basic MappedConvolution layer
    layer = MappedConvolution(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size).double()

    # Run a forward and backward pass
    output, forward_time, backward_time, gradcheck_res = utils.mapped_conv_test(
        layer,
        weight=params.weights_unit(in_channels, out_channels),
        input=params.input_4x5().repeat(bs, in_channels, 1, 1),
        sample_map=params.sample_map1(),
        cuda=False)

    # Manually computed correct result
    correct_output = 2 + params.in_channels * torch.tensor(
        [[35.75, 16.00, 28.00, 39.25, 45.00],
         [32.25, 32.00, 23.00, 36.75, 29.00],
         [34.50, 47.00, 31.00, 34.25, 33.75],
         [39.00, 27.00, 35.25, 40.75, 39.50]]).double()

    # Assert gradient check has passed
    assert gradcheck_res

    # Assert outputs match
    testing.assert_allclose(output, correct_output)
Example #3
def test_out_of_bounds_sampling_cuda():

    # Basic MappedConvolution layer
    layer = MappedConvolution(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size).double().cuda()

    # Run a forward and backward pass
    output, forward_time, backward_time, gradcheck_res = utils.mapped_conv_test(
        layer,
        weight=params.weights_unit(in_channels, out_channels),
        input=params.input_4x5().repeat(bs, in_channels, 1, 1),
        sample_map=params.sample_map4(),
        cuda=True)

    # Manually computed correct result
    correct_output = 2 + params.in_channels * torch.tensor(
        [[29, 23, 26, 10, 13], [18, 45, 23, 20, 34], [18, 0, 22, 13, 17],
         [15, 14, 17, 15, 25]]).double().cuda()

    # Assert gradient check has passed
    assert gradcheck_res

    # Assert outputs match
    testing.assert_allclose(output, correct_output)
Example #4
def test_integer_sampling_cuda():

    # Basic MappedConvolution layer
    layer = MappedConvolution(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size).double().cuda()

    # Run a forward and backward pass
    output, forward_time, backward_time, gradcheck_res = utils.mapped_conv_test(
        layer,
        weight=params.weights_unit(in_channels, out_channels),
        input=params.input_4x5().repeat(bs, in_channels, 1, 1),
        sample_map=params.sample_map0(),
        cuda=True)

    # Manually computed correct result
    correct_output = 2 + in_channels * torch.tensor(
        [[30, 25, 31, 39, 33], [49, 40, 40, 54, 43], [46, 35, 47, 26, 33],
         [50, 36, 27, 40, 45]]).double().cuda()

    # Assert gradient check has passed
    assert gradcheck_res

    # Assert outputs match
    testing.assert_allclose(output, correct_output)
Example #5
def test_integer_sampling_cpu():

    # Basic MappedConvolution layer
    layer = MappedConvolution(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size).double()
    w = params.weights_unit(in_channels, out_channels)
    in_4x5 = params.input_4x5()
    m = params.sample_map0()
    '''
        m (the sample map) samples from in_4x5 (the input) as:
            in_4x5[0, 0, m[0, 0, 0, 1].long(), m[0, 0, 0, 0].long()]
        i.e. input[batch, channel, y, x], where y and x come from the
        sample map's last dimension, and the sample map's 3rd dimension
        indexes the neighboring samples for each kernel center point.

        >>> in_4x5[0, 0, m[0, 0, 0, 1].long(), m[0, 0, 0, 0].long()]
        tensor(11., dtype=torch.float64)
        >>> in_4x5[0, 0, m[0, 0, 1, 1].long(), m[0, 0, 1, 0].long()]
        tensor(8., dtype=torch.float64)
        >>> in_4x5[0, 0, m[0, 0, 2, 1].long(), m[0, 0, 2, 0].long()]
        tensor(2., dtype=torch.float64)
        >>> in_4x5[0, 0, m[0, 1, 2, 1].long(), m[0, 1, 2, 0].long()]
        tensor(0., dtype=torch.float64)
    '''
    # Run a forward and backward pass
    output, forward_time, backward_time, gradcheck_res = utils.mapped_conv_test(
        layer,
        weight=w,
        input=in_4x5.repeat(bs, in_channels, 1, 1),
        sample_map=m,
        cuda=False)

    # Manually computed correct result
    correct_output = 2 + in_channels * torch.tensor(
        [[30, 25, 31, 39, 33], [49, 40, 40, 54, 43], [46, 35, 47, 26, 33],
         [50, 36, 27, 40, 45]]).double()

    # Assert gradient check has passed
    assert gradcheck_res

    # Assert outputs match
    testing.assert_allclose(output, correct_output)
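The sample-map convention documented in the docstring above can be reproduced with plain tensor indexing. The sketch below assumes integer (nearest-neighbor) sampling and the (out_h, out_w, kernel_elements, 2) map shape implied by the docstring; it illustrates the indexing only and is not the library's implementation:

def gather_with_sample_map(inp, sample_map):
    # inp:        (B, C, H, W) input features
    # sample_map: (OH, OW, K, 2) storing (x, y) per kernel element (assumed shape)
    x = sample_map[..., 0].long()
    y = sample_map[..., 1].long()
    # Advanced indexing broadcasts to (B, C, OH, OW, K)
    return inp[:, :, y, x]

With all-ones weights and a bias of 2, as in the tests above, the mapped convolution output reduces to 2 plus the sum of the gathered values over the channel and kernel dimensions.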
Example #6
 def __init__(
     self,
     in_features: int,
     out_features: int,
     kernel_size: int,
     interpolation: str = "bispherical",  # or 'nearest', 'bilinear'
     bias: bool = True,
     source_order_offset: int = 0,
 ):
     super(Conv2dISEA, self).__init__()
     self.conv = MappedConvolution(
         in_channels=in_features,
         out_channels=out_features,
         kernel_size=kernel_size * kernel_size,
         interpolation=interpolation,
         bias=bias,
     )
     self.kernel_size = kernel_size
     self.source_order_offset = source_order_offset
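Example #6 shows only the constructor; a plausible forward pass, mirroring how MappedConvolution is invoked in Example #1, might look like the sketch below. The argument names, and the assumption that the sample map and interpolation weights are precomputed elsewhere, are not taken from the source:

 def forward(self, x, sample_map, interp_weights=None):
     # x: input features; sample_map / interp_weights are assumed to be
     # precomputed for the icosphere orders this layer operates on
     return self.conv(x, sample_map, interp_weights)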
Example #7
def test_downsampling_with_integer_sampling_cpu():

    # Basic MappedConvolution layer
    layer = MappedConvolution(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size).double()

    # Run a forward and backward pass
    output, forward_time, backward_time, gradcheck_res = utils.mapped_conv_test(
        layer,
        weight=params.weights_unit(in_channels, out_channels),
        input=params.input_4x5().repeat(bs, in_channels, 1, 1),
        sample_map=params.sample_map2(),
        cuda=False)

    # Manually computed correct result
    correct_output = 2 + params.in_channels * torch.tensor(
        [[48, 50], [28, 57]]).double()

    # Assert gradient check has passed
    assert gradcheck_res

    # Assert outputs match
    testing.assert_allclose(output, correct_output)
Example #8
def test_layer_weights_cpu():

    # Basic MappedConvolution layer
    layer = MappedConvolution(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size).double()

    # Run a forward and backward pass
    output, forward_time, backward_time, gradcheck_res = utils.mapped_conv_test(
        layer,
        weight=params.weights_0_25(in_channels, out_channels),
        input=params.input_ones().repeat(bs, in_channels, 1, 1),
        sample_map=params.sample_map0(),
        cuda=False)

    # Manually computed correct result
    correct_output = 2 + params.in_channels * 2.5 * torch.ones(1, 1, 4, 5).double()

    # Assert gradient check has passed
    assert gradcheck_res

    # Assert outputs match
    testing.assert_allclose(output, correct_output)
Example #9
 # NOTE: res_desc must already be defined before this loop; it is re-assigned
 # each iteration and used as the description of the inner loop below.
 for low_res_w, high_res_w in tqdm.tqdm(zip(low_res_ws, high_res_ws),
                                        desc=res_desc):
     res_desc = "Res = {}->{}".format(low_res_w, high_res_w)
     for kernel_size in tqdm.tqdm(kernel_sizes, desc=res_desc):
         ks_desc = "Kernel Size = {}".format(kernel_size)
         for std in tqdm.tqdm(stds, desc=ks_desc):
             kernel = gkern(kernlen=int(kernel_size), std=std)
             verts = compute_num_vertices(in_order)
             rm, rw = sphere_to_image_resample_map(
                 in_order, [low_res_w // 2, low_res_w])
             ico = generate_icosphere(out_order)
             vm, vw = vertex_to_vertex_kernel_map(
                 ico, int(kernel_size), int(kernel_size), in_order)
             conv = MappedConvolution(1, 1,
                                      kernel_size=int(kernel_size * kernel_size),
                                      interpolation='bispherical',
                                      bias=False)
             # conv.weight = torch.nn.Parameter(torch.ones_like(conv.weight) / (kernel_size * kernel_size))
             conv.weight = torch.nn.Parameter(
                 torch.from_numpy(kernel.reshape(1, 1, -1)).float())
             um, uw = sphere_to_image_resample_map(
                 out_order, [high_res_w // 2, high_res_w])
             # https://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n
             # Bresenham's line algorithm.
             bresenham_line = lambda m, n: [
                 i * n // m + n // (2 * m) for i in range(m)
             ]
             corner_selection = bresenham_line(corners, low_res_w)
             # height_selection = bresenham_line(2, low_res_w // 2)
             height_selection = [2, 5]
Example #10
    # Normalize color
    sum_weights = torch.zeros(rgb_vertices.shape[-1]).cuda()
    sum_weights.index_add_(0, resample_map[..., 0].long().view(-1),
                           resample_weights.view(-1))
    rgb_vertices /= (sum_weights + 1e-12)

    # earth4k = numpy.load("earth4k.npz")
    # rgb_vertices = torch.from_numpy(earth4k["earth"]).cuda()
    elapsed = timeit.default_timer() - start_time
    print("resample: " + str(elapsed))

    kernel_size = [3, 3]
    kernel_total = kernel_size[0] * kernel_size[1]
    out_channels = 1
    in_channels = 1
    conv = MappedConvolution(in_channels, out_channels, kernel_total).cuda()
    conv.weight = torch.nn.Parameter(
        (torch.ones(kernel_total) / float(kernel_total)).repeat(
            out_channels, in_channels, 1).cuda())
    conv.bias = torch.nn.Parameter(torch.zeros(out_channels).cuda())

    # start_time = timeit.default_timer()
    # sample_map, sample_weights = vertex_to_vertex_kernel_map(icosphere,
    #     kernel_size[0], kernel_size[1], order, nearest=False)
    # elapsed = timeit.default_timer() - start_time
    # print("calc: " + str(elapsed))

    # numpy.savez_compressed("7.npz", map=sample_map.numpy(), weights=sample_weights.numpy())
    start_time = timeit.default_timer()
    order7 = numpy.load("7.npz")
    sample_map = torch.from_numpy(order7['map'])


def time_layer(layer, data, mapping=None):
    # Hypothetical name and signature: only the if/else branches calling
    # time_cuda survive from the original snippet.
    if mapping is not None:
        return time_cuda(layer, [data, mapping])
    else:
        return time_cuda(layer, [data])


bs = 1
in_channels = 10
out_channels = 1
num_trials = 100
interpolation = 'bilinear'
sizes = [(10, 10), (20, 25), (20, 50), (50, 100), (100, 100), (200, 250),
         (200, 500), (500, 1000), (1000, 1000), (2000, 2500)]

# Initialize the layers
mapped_conv = MappedConvolution(in_channels=in_channels,
                                out_channels=out_channels,
                                kernel_size=9,
                                interpolation=interpolation).double().cuda()
conv = Convolution(in_channels, out_channels, (3, 3),
                   padding=1).double().cuda()

store_map_forward = torch.zeros(len(sizes))
store_map_backward = torch.zeros(len(sizes))
store_reg_forward = torch.zeros(len(sizes))
store_reg_backward = torch.zeros(len(sizes))
for i in range(len(sizes)):
    print(sizes[i])
    h, w = sizes[i]

    # Define inputs and gradient
    data = torch.arange(h * w).repeat(bs, in_channels, 1,
                                      1).view(bs, in_channels, h, w).double()