def computeLossMatchability(network, I, indexRoll, grid, maskMargin, args, ssim, LrLoss):
    """Compute the training losses when a matchability branch is used.

    The batch I is matched against a rolled copy of itself (indexRoll pairs
    each image with its counterpart). Returns a 5-tuple:
    (lossLr, lossCycle, lossMatch, lossGrad) as Python floats, plus the
    weighted total loss tensor used for back-propagation.
    """
    eps = 0.001  # guards the normalisations when a mask sums to zero

    # L2-normalised coarse features and their correlation volume
    feat = F.normalize(network['netFeatCoarse'](I), p=2, dim=1)
    corrVolume = network['netCorr'](feat[indexRoll], feat)

    # Coarse flow prediction together with its spatial-gradient term
    flowGrad, flow = model.predFlowCoarse(corrVolume, network['netFlowCoarse'], grid)

    # Matchability map restricted to the valid margin, and its cycle-consistent
    # version (warped matchability of the paired image times its own)
    match = model.predMatchability(corrVolume, network['netMatch']) * maskMargin
    matchCycle = F.grid_sample(match[indexRoll], flow) * match

    # Forward-backward (cycle) consistency of the flow, weighted by matchability
    flowCycle = F.grid_sample(flow[indexRoll].permute(0, 3, 1, 2), flow).permute(0, 2, 3, 1)
    cycleMap = torch.mean(torch.abs(flowCycle - grid), dim=3).unsqueeze(1)  # N x 1 x W x H
    lossCycle = torch.sum(cycleMap * matchCycle) / (torch.sum(matchCycle) + eps)

    # Photometric reconstruction loss on the warped image (3 channels)
    IWarp = F.grid_sample(I, flow)
    lossLr = LrLoss(IWarp, I[indexRoll], matchCycle, args.margin, maskMargin, ssim)

    # Push the matchability map towards 1 inside the margin
    lossMatch = torch.sum(torch.abs(1 - matchCycle) * maskMargin) / (torch.sum(maskMargin) + eps)

    # Penalise flow gradients only where matchability is low (cropped by one
    # pixel because the gradient term loses the last row/column)
    lowMatch = 1 - matchCycle[:, :, :-1, :-1]
    marginCrop = maskMargin[:, :, :-1, :-1]
    lossGrad = torch.sum(flowGrad * lowMatch * marginCrop) / (torch.sum(lowMatch * marginCrop) + eps)

    loss = lossLr + args.theta * lossCycle + args.eta * lossMatch + args.grad * lossGrad
    return lossLr.item(), lossCycle.item(), lossMatch.item(), lossGrad.item(), loss
def computeLossNoMatchability(network, I, indexRoll, grid, maskMargin, args, ssim, LrLoss):
    """Compute the training losses without a matchability branch.

    Only the photometric reconstruction and flow cycle-consistency terms are
    used; the margin mask alone weights both. Returns the same 5-tuple shape
    as the matchability variant, with 0 for the matchability/gradient slots.
    """
    eps = 0.001  # guards the normalisation when the mask sums to zero

    # L2-normalised coarse features, correlation volume and coarse flow
    feat = F.normalize(network['netFeatCoarse'](I), p=2, dim=1)
    corrVolume = network['netCorr'](feat[indexRoll], feat)
    _, flow = model.predFlowCoarse(corrVolume, network['netFlowCoarse'], grid)

    # Forward-backward (cycle) consistency of the flow inside the margin
    flowCycle = F.grid_sample(flow[indexRoll].permute(0, 3, 1, 2), flow).permute(0, 2, 3, 1)
    cycleMap = torch.mean(torch.abs(flowCycle - grid), dim=3).unsqueeze(1)  # N x 1 x W x H
    lossCycle = torch.sum(cycleMap * maskMargin) / (torch.sum(maskMargin) + eps)

    # Photometric reconstruction loss on the warped image (3 channels)
    IWarp = F.grid_sample(I, flow)
    lossLr = LrLoss(IWarp, I[indexRoll], maskMargin, args.margin, maskMargin, ssim)

    loss = lossLr + args.mu_cycle * lossCycle
    return lossLr.item(), lossCycle.item(), 0, 0, loss
def validation(df, valDir, inPklCoarse, network, trainMode):
    """Evaluate coarse alignment accuracy over the validation dataframe.

    For each image pair, applies the precomputed affine initialisation
    (inPklCoarse) and the network's residual coarse flow, then measures
    keypoint alignment error at 8 pixel thresholds (log-spaced 1..36).
    Returns per-threshold precision averaged over all correspondences.
    Note: trainMode is accepted for interface compatibility but unused here.
    """
    strideNet = 16
    minSize = 480
    nbThreshold = 8
    precision = np.zeros(nbThreshold)
    nbCorrespondence = 0
    pixelGrid = np.around(np.logspace(0, np.log10(36), nbThreshold).reshape(-1, nbThreshold))

    # Switch every sub-network to evaluation mode
    for net in network.values():
        net.eval()

    with torch.no_grad():
        for i in tqdm(range(len(df))):
            sceneDir = os.path.join(valDir, df['scene'][i])

            # --- Source image and its annotated keypoints
            Is = Image.open(os.path.join(sceneDir, df['source_image'][i])).convert('RGB')
            Is, Xs, Ys = ResizeMinResolution(minSize, Is, df['XA'][i], df['YA'][i], strideNet)
            Isw, Ish = Is.size
            IsTensor = transforms.ToTensor()(Is).unsqueeze(0).cuda()

            # --- Target image and its annotated keypoints
            It = Image.open(os.path.join(sceneDir, df['target_image'][i])).convert('RGB')
            It, Xt, Yt = ResizeMinResolution(minSize, It, df['XB'][i], df['YB'][i], strideNet)
            Itw, Ith = It.size
            ItTensor = transforms.ToTensor()(It).unsqueeze(0).cuda()

            # --- Identity sampling grid in [-1, 1] at the target resolution
            gridY = torch.linspace(-1, 1, steps=ItTensor.size(2)).view(
                1, -1, 1, 1).expand(1, ItTensor.size(2), ItTensor.size(3), 1)
            gridX = torch.linspace(-1, 1, steps=ItTensor.size(3)).view(
                1, 1, -1, 1).expand(1, ItTensor.size(2), ItTensor.size(3), 1)
            grid = torch.cat((gridX, gridY), dim=3).cuda()

            # Precomputed affine initialisation; theta must be N x 2 x 3
            bestParam = inPklCoarse[i]
            flowGlobal = F.affine_grid(
                torch.from_numpy(bestParam).unsqueeze(0).cuda(), ItTensor.size())
            IsSample = F.grid_sample(IsTensor, flowGlobal)

            # Residual coarse flow between the pre-warped source and the target
            featsSample = F.normalize(network['netFeatCoarse'](IsSample))
            featt = F.normalize(network['netFeatCoarse'](ItTensor))
            corr21 = network['netCorr'](featt, featsSample)
            _, flowCoarse = model.predFlowCoarse(corr21, network['netFlowCoarse'], grid)

            # Compose the affine initialisation with the residual flow
            flowFinal = F.grid_sample(
                flowGlobal.permute(0, 3, 1, 2), flowCoarse).permute(0, 2, 3, 1).contiguous()

            pixelDiff, nbAlign = alignmentError(
                Itw, Ith, Isw, Ish, Xs, Ys, Xt, Yt, flowFinal, pixelGrid)
            precision += pixelDiff
            nbCorrespondence += nbAlign

    return precision / nbCorrespondence