Example No. 1
def dense_corr_check():
    # gradcheck takes a tuple of tensors as input, checks whether the
    # analytical gradients evaluated at these tensors are close enough to
    # numerical approximations, and returns True if they all satisfy this
    # condition.
    dense_corr = DenseCorr.apply
    dve_dim = 4
    stride = 2
    B, C, H, W = 4, dve_dim, 4, 4

    common = {"dtype": torch.double, "requires_grad": True}
    feats1 = torch.randn(B, C, H, W, **common)
    feats2 = torch.randn(B, C, H, W, **common)

    batch_grid_u = torch.randn(B,
                               H,
                               W,
                               2,
                               dtype=torch.double,
                               requires_grad=False)

    H_input = H * stride
    W_input = W * stride
    xxyy = tps.spatial_grid_unnormalized(H_input, W_input).double()
    xxyy.requires_grad = False
    # .cuda() is not in-place, so the tensors must be reassigned; without the
    # reassignment the transfer is a no-op and gradcheck runs on the CPU tensors.
    if torch.cuda.is_available():
        feats1 = feats1.cuda()
        feats2 = feats2.cuda()
        xxyy = xxyy.cuda()
        batch_grid_u = batch_grid_u.cuda()

    args = (feats1, feats2, xxyy, batch_grid_u, stride)
    test = gradcheck(dense_corr, args, eps=1e-6, atol=1e-4)
    print("passed test: {}".format(test))
Example No. 2
def dense_corr_loss(feat, input_size, opt, feat_spectral, pow=0.5, normalize_vectors=True):
    # feat_spectral lists the channel dimension of the features taken from each layer
    
    B, C, H, W = input_size
    b, c, h, w = feat.size()
    device = feat.device
    stride = H // h
    
    with torch.no_grad():
        yyxx = tps.spatial_grid_unnormalized(H, W).to(device)
        diff = yyxx[::stride, ::stride, None, None, :] - yyxx[None, None, ::stride, ::stride, :]
        diff = (diff * diff).sum(4).sqrt()
        diff = diff.pow(pow)
    
    loss = 0.
    for bb in range(b):
        f1 = feat[bb].reshape(c, h*w)
        if normalize_vectors: 
            f1 = layer_wise_normalize(f1, feat_spectral)
        corr = torch.matmul(f1.t(), f1)
        corr = corr.reshape(h, w, h, w)         

        smcorr = F.softmax(corr.reshape(h, w, -1) * opt.temperature, dim=2).reshape(corr.shape)
        L = diff * smcorr
        loss += L.sum()
    
    return loss / (h * w * b)
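A hypothetical call sketch for dense_corr_loss; the shapes, the opt namespace and the temperature value are assumptions inferred from how the function indexes its arguments, and the repo's tps module and layer_wise_normalize helper are assumed to be importable.

from types import SimpleNamespace
import torch

opt = SimpleNamespace(temperature=10.0)   # hypothetical temperature value
feat = torch.randn(2, 64, 16, 16)         # network features (b, c, h, w)
input_size = (2, 3, 64, 64)               # original image batch (B, C, H, W)
feat_spectral = [32, 32]                  # per-layer channel counts, summing to c
loss = dense_corr_loss(feat, input_size, opt, feat_spectral)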
Example No. 3
def find_descriptor(x, y, source_descs, target_descs, stride):
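    # input and output are unnormalized pixel coordinates at the input-image
    # resolution (cf. the normalized-coordinate variant in the next example)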
    C, H, W = source_descs.shape
    x = int(np.round(x / stride))
    y = int(np.round(y / stride))
    x = min(W - 1, max(x, 0))
    y = min(H - 1, max(y, 0))
    query_desc = source_descs[:, y, x]
    corr = torch.matmul(query_desc.reshape(-1, C),
                        target_descs.reshape(C, H * W))
    maxidx = corr.argmax()
    grid = spatial_grid_unnormalized(H, W).reshape(-1, 2) * stride
    x, y = grid[maxidx]
    return x.item(), y.item()
Example No. 4
def find_descriptor(x, y, source_descs, target_descs):
    # input and output of this function are both normalized coords in [-1, 1]
    C, H, W = source_descs.shape
    x = int(np.round((x + 1.) / 2. * (W - 1)))
    y = int(np.round((y + 1.) / 2. * (H - 1)))
    x = min(W - 1, max(x, 0))
    y = min(H - 1, max(y, 0))
    query_desc = source_descs[:, y, x]
    corr = torch.matmul(query_desc.reshape(-1, C), target_descs.reshape(C, H * W))
    maxidx = corr.argmax()
    grid = tps.spatial_grid_unnormalized(H, W).reshape(-1, 2)
    y, x = grid[maxidx]
    x_norm = 2. * x.item() / (W - 1) - 1 # normalize to [-1, 1]
    y_norm = 2. * y.item() / (H - 1) - 1
    return x_norm, y_norm
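The two find_descriptor variants differ only in their coordinate convention. A small sketch of the conversion they rely on (helper names are illustrative, not part of the original code): pixel coordinates in [0, W-1] x [0, H-1] map to and from normalized coordinates in [-1, 1].

def normalize_coord(x, y, H, W):
    # pixel coords -> normalized coords in [-1, 1]
    return 2. * x / (W - 1) - 1., 2. * y / (H - 1) - 1.

def unnormalize_coord(x_norm, y_norm, H, W):
    # normalized coords in [-1, 1] -> pixel coords
    return (x_norm + 1.) / 2. * (W - 1), (y_norm + 1.) / 2. * (H - 1)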
Example No. 5
def dense_correlation_loss(feats,
                           meta,
                           pow=0.5,
                           fold_corr=False,
                           normalize_vectors=True):
    feats = feats[0]
    device = feats.device
    grid = meta['grid']

    # Grid (B,H,W,2): For each pixel in im1, where did it come from in im2
    grid = grid.to(device)

    H_input = grid.shape[1]
    W_input = grid.shape[2]

    feats1 = feats[0::2]
    feats2 = feats[1::2]

    B, C, H, W = feats1.shape
    h, w = H, W

    stride = H_input // H

    batch_grid_u = tps.grid_unnormalize(grid, H_input, W_input)
    batch_grid_u = batch_grid_u[:, ::stride, ::stride, :]
    xxyy = tps.spatial_grid_unnormalized(H_input, W_input).to(device)

    if fold_corr:
        from model.folded_correlation import DenseCorr
        """This function computes the gradient explicitly to avoid the memory
        issues with using autorgrad in a for loop."""
        assert not normalize_vectors
        dense_corr = DenseCorr.apply
        return dense_corr(feats1, feats2, xxyy, batch_grid_u, stride, pow)

    loss = 0.
    for b in range(B):
        f1 = feats1[b].reshape(C, H * W)  # source
        f2 = feats2[b].reshape(C, h * w)  # target

        if normalize_vectors:
            f1 = F.normalize(f1, p=2, dim=0) * 20
            f2 = F.normalize(f2, p=2, dim=0) * 20

        corr = torch.matmul(f1.t(), f2)
        corr = corr.reshape(H, W, h, w)

        with torch.no_grad():
            diff = batch_grid_u[b, :, :, None, None, :] - \
                    xxyy[None, None, ::stride, ::stride, :]
            diff = (diff * diff).sum(4).sqrt()
            diff = diff.pow(pow)

        # grid_u = tps.grid_unnormalize(grid[b], H_input, W_input)
        # diff = grid_u[:, :, None, None, :] - xxyy[None, None, :, :, :]

        # Equivalent to this
        #
        # diff = torch.zeros(H_input, W_input, H_input, W_input, 2)
        # for I in range(H_input):
        #     for J in range(W_input):
        #         for i in range(H_input):
        #             for j in range(W_input):
        #                 diff[I, J, i, j, 0] = J + flow[b, I, J, 0] - j
        #                 diff[I, J, i, j, 1] = I + flow[b, I, J, 1] - i

        # diff = diff[::stride, ::stride, ::stride, ::stride]
        # diff = (diff * diff).sum(4).sqrt()
        # diff = diff.pow(pow)

        smcorr = F.softmax(corr.reshape(H, W, -1), dim=2).reshape(corr.shape)

        L = diff * smcorr

        loss += L.sum()

    return loss / (H * W * B)
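A hypothetical call sketch for dense_correlation_loss, based on how the function indexes its arguments: feats[0] holds im1/im2 features interleaved along the batch dimension and meta['grid'] is the warp grid at input resolution. Shapes and values are assumptions; the repo's tps utilities are assumed to be importable.

import torch

feats = [torch.randn(8, 64, 32, 32)]                  # 4 image pairs, interleaved along batch
meta = {'grid': torch.rand(4, 128, 128, 2) * 2 - 1}   # normalized warp grid (B, H_in, W_in, 2)
loss = dense_correlation_loss(feats, meta, pow=0.5, normalize_vectors=True)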
Example No. 6
def dense_correlation_loss_dve(feats,
                               meta,
                               pow=0.5,
                               fold_corr=False,
                               normalize_vectors=True):
    feats = feats[0]
    device = feats.device

    # Grid (B,H,W,2): For each pixel in im1, where did it come from in im2
    grid = meta['grid'].to(device)

    H_input = grid.shape[1]
    W_input = grid.shape[2]

    feats1 = feats[0::2]
    feats2 = feats[1::2]

    B, C, H, W = feats1.shape
    h, w = H, W

    stride = H_input // H

    xxyy = tps.spatial_grid_unnormalized(H_input, W_input).to(device)
    batch_grid_u = tps.grid_unnormalize(grid, H_input, W_input)
    batch_grid_u = batch_grid_u[:, ::stride, ::stride, :]

    if False:
        import matplotlib.pyplot as plt

        vis1 = meta['im1'][0].clone()
        vis2 = meta['im2'][0].clone()
        visgrid = tps.grid_unnormalize(grid, H_input, W_input)[0]

        fig = plt.figure()  # a new figure window
        ax1 = fig.add_subplot(1, 3, 1)
        ax2 = fig.add_subplot(1, 3, 2)
        ax3 = fig.add_subplot(1, 3, 3)

        ax1.imshow(vis1.permute(1, 2, 0) + 0.5)
        ax2.imshow(vis2.permute(1, 2, 0) + 0.5)

        for i in range(H_input):
            for j in range(W_input):
                if torch.rand([]) < 0.01:
                    ax1.scatter(j, i)
                    jj, ii = visgrid[i, j]
                    ax2.scatter(jj, ii)

        dists = (batch_grid_u[0] -
                 xxyy[::stride, ::stride]).pow(2).sum(2).sqrt()
        ax3.imshow(dists / dists.max())
        fig.savefig('/tmp/lossvis.pdf')
        fig.clf()

    if fold_corr:
        """This function computes the gradient explicitly to avoid the memory
        issues with using autorgrad in a for loop."""
        from model.folded_correlation_dve import DenseCorrDve
        dense_corr = DenseCorrDve.apply
        return dense_corr(feats1, feats2, xxyy, batch_grid_u, stride,
                          normalize_vectors, pow)

    loss = 0.
    for b in range(B):
        f1 = feats1[b].reshape(C, H * W)  # source
        f2 = feats2[b].reshape(C, h * w)  # target
        fa = feats1[(b + 1) % B].reshape(C, h * w)  # auxiliary

        if normalize_vectors:
            f1 = F.normalize(f1, p=2, dim=0) * 20
            f2 = F.normalize(f2, p=2, dim=0) * 20
            fa = F.normalize(fa, p=2, dim=0) * 20

        corr = torch.matmul(f1.t(), fa)
        corr = corr.reshape(H, W, h, w)
        smcorr = F.softmax(corr.reshape(H, W, -1), dim=2).reshape(corr.shape)
        smcorr_fa = smcorr[None, ...] * fa.reshape(-1, 1, 1, h, w)
        del smcorr

        f1_via_fa = smcorr_fa.sum((3, 4)).reshape(C, H * W)
        del smcorr_fa

        corr2 = torch.matmul(f1_via_fa.t(), f2).reshape(corr.shape)
        smcorr2 = F.softmax(corr2.reshape(H, W, -1), dim=2).reshape(corr.shape)
        del corr2

        with torch.no_grad():
            diff = batch_grid_u[b, :, :, None, None, :] - \
                    xxyy[None, None, ::stride, ::stride, :]
            diff = (diff * diff).sum(4).sqrt()
            diff = diff.pow(pow)

        L = diff * smcorr2

        loss += L.float().sum()

    return loss / (H * W * B)
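The broadcast-and-sum used above to build f1_via_fa materialises a (C, H, W, h, w) intermediate. A small sketch showing that the same quantity can be obtained with a single matmul (an equivalent reformulation on toy shapes, not the original code):

import torch

C, H, W, h, w = 4, 3, 3, 3, 3
fa = torch.randn(C, h * w)
smcorr = torch.softmax(torch.randn(H, W, h * w), dim=2).reshape(H, W, h, w)

via_broadcast = (smcorr[None] * fa.reshape(C, 1, 1, h, w)).sum((3, 4)).reshape(C, H * W)
via_matmul = torch.matmul(fa, smcorr.reshape(H * W, h * w).t())

assert torch.allclose(via_broadcast, via_matmul, atol=1e-6)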