def forward(self, image, mask, vertex, vertex_weights):
    """Run the network and return predictions plus per-sample losses.

    Returns seg/vertex predictions, per-sample segmentation loss,
    per-element vertex loss, and segmentation precision/recall.
    """
    seg_pred, vertex_pred = self.net(image)
    # Per-pixel CE loss (criterion built with reduce=False), averaged per sample.
    pixel_seg_loss = self.criterion(seg_pred, mask)
    loss_seg = pixel_seg_loss.view(pixel_seg_loss.shape[0], -1).mean(dim=1)
    # Weighted smooth-L1 on the vertex field, left unreduced.
    loss_vertex = smooth_l1_loss(vertex_pred, vertex, vertex_weights, reduce=False)
    precision, recall = compute_precision_recall(seg_pred, mask)
    return seg_pred, vertex_pred, loss_seg, loss_vertex, precision, recall
def validate(network, valLoader):
    """Evaluate `network` on `valLoader` without gradients.

    Returns the running means over all validation batches of
    (total loss, vertex loss, segmentation loss).
    """
    lossSegTotal = 0
    lossVertexTotal = 0
    lossTotal = 0
    network.eval()

    # Hoisted out of the batch loop: constructing a new criterion per batch
    # was wasteful. reduction='none' is the modern, non-deprecated equivalent
    # of reduce=False (per-pixel losses, no reduction).
    criterion = CrossEntropyLoss(reduction='none')  # Imported from torch.nn

    for idx, data in enumerate(valLoader):

        with torch.no_grad():

            # Extract data and forward propagate
            image, maskGT, vertexGT, vertexWeightsGT = [d.cuda() for d in data]
            segPred, vertexPred = network(image)

            # Per-sample mean of the per-pixel segmentation loss
            lossSeg = criterion(segPred, maskGT)
            lossSeg = torch.mean(lossSeg.view(lossSeg.shape[0], -1), 1)
            lossVertex = smooth_l1_loss(vertexPred, vertexGT, vertexWeightsGT, reduce=False)
            precision, recall = compute_precision_recall(segPred, maskGT)
            lossSeg = torch.mean(lossSeg)        # Mean over batch
            lossVertex = torch.mean(lossVertex)  # Mean over batch
            # NOTE(review): lossRatio is read from an enclosing/global scope
            # not visible here — confirm it is defined before validate() runs.
            loss = (1 - lossRatio) * lossSeg + lossRatio * lossVertex

            # Update running (cumulative) mean losses over batches seen so far
            lossSegTotal = (lossSegTotal * idx + lossSeg.item()) / (idx + 1)
            lossVertexTotal = (lossVertexTotal * idx + lossVertex.item()) / (idx + 1)
            lossTotal = (lossTotal * idx + loss.item()) / (idx + 1)

    return lossTotal, lossVertexTotal, lossSegTotal
# ---- Example #3 ----
    def forward(self, image, mask, vertex, vertex_weights, hcoords=None):
        """Forward pass that also computes center-voting losses.

        Returns predictions, per-sample seg loss, vertex loss, the two
        voting losses (p2l, voting), and seg precision/recall.
        """
        seg_pred, vertex_pred = self.net(image)

        # Per-sample mean of the per-pixel segmentation loss.
        pixel_loss = self.criterion(seg_pred, mask)
        loss_seg = pixel_loss.view(pixel_loss.shape[0], -1).mean(dim=1)

        # Hard mask from the predicted logits; detached so the voting losses
        # do not backpropagate through the segmentation branch.
        mask_pred = torch.argmax(seg_pred, dim=1, keepdim=True).float().cuda().detach()
        loss_vertex = smooth_l1_loss(vertex_pred, vertex, vertex_weights, reduce=False)
        loss_p2l, loss_voting = center_voting_loss_v1(vertex_pred, vertex_weights, mask_pred, hcoords)

        precision, recall = compute_precision_recall(seg_pred, mask)
        return seg_pred, vertex_pred, loss_seg, loss_vertex, loss_p2l, loss_voting, precision, recall
# ---- Example #4 ----
    def forward(self, image, mask, vertex, vertex_weights):
        """Feed an RGB image batch through the net and compute training losses."""
        # The net outputs segmentation logits and a per-keypoint (kp x 2) vertex field.
        seg_pred, vertex_pred = self.net(image)

        # Segmentation loss over foreground labels and background, per pixel,
        # then averaged per sample.
        seg_loss_map = self.criterion(seg_pred, mask)
        loss_seg = torch.mean(seg_loss_map.view(seg_loss_map.shape[0], -1), 1)

        # All inputs are batches of n images; weighted smooth-L1, unreduced.
        loss_vertex = smooth_l1_loss(
            vertex_pred, vertex, vertex_weights, reduce=False)

        precision, recall = compute_precision_recall(seg_pred, mask)

        return seg_pred, vertex_pred, loss_seg, loss_vertex, precision, recall
# ---- Example #5 ----
 def forward(self, image, mask, image_render, vertex, vertex_weights):
     """Two-path forward: 'direct' on the rendered image, 'mapped' on the real one.

     Also penalizes the discrepancy between the pre-mapping render features
     and the mapping result (loss3).
     """
     ratio = train_cfg['vertex_loss_ratio']

     # Direct path on the rendered image.
     seg_direct, vertex_direct = self.net(image_render, 'direct')
     loss1, _, _ = self.compute_loss(seg_direct, vertex_direct, mask, vertex,
                                     vertex_weights, ratio)

     # Mapped path on the real image.
     seg_pred_mapped, vertex_pred_mapped = self.net(image, 'mapped')
     loss2, loss_seg, loss_vertex = self.compute_loss(
         seg_pred_mapped, vertex_pred_mapped, mask, vertex, vertex_weights,
         ratio)

     # Consistency term between the two paths' intermediate outputs.
     no_mapping_render = self.net(image_render, 'before_mapping')
     mapping_result = self.net(image, 'mapping_result')
     loss3 = nn.MSELoss()(no_mapping_render, mapping_result)

     precision, recall = compute_precision_recall(seg_pred_mapped, mask)
     loss1 = torch.mean(loss1)
     loss2 = torch.mean(loss2)
     return seg_pred_mapped, vertex_pred_mapped, loss_seg, loss_vertex, precision, recall, loss1, loss2, loss3
# ---- Example #6 ----
    def forward(self, image, mask, vertex, vertex_weights, vertex_init_pert,
                vertex_init):
        """Refinement forward: estimate the vertex field from a perturbed init.

        The estimator network consumes the (masked) perturbed initialization
        and exposes intermediate feature maps that the image network reuses.
        Returns predictions, the combined loss, and seg precision/recall.
        """
        vertex_pred, x2s, x4s, x8s, xfc = self.estNet(
            vertex_weights * vertex_init_pert)
        seg_pred, q_pred = self.imNet(image, x2s, x4s, x8s, xfc)

        # q targets the residual between the initialization and ground truth.
        loss_q = smooth_l1_loss(q_pred, vertex_init - vertex, vertex_weights,
                                reduce=False)
        loss_vertex = smooth_l1_loss(vertex_pred, vertex_init, vertex_weights,
                                     reduce=False)
        # Vertex term dominates the combined loss by a factor of 10.
        loss = 10 * loss_vertex + loss_q

        precision, recall = compute_precision_recall(seg_pred, mask)
        return seg_pred, vertex_pred, q_pred, loss, precision, recall
# ---- Example #7 ----
class NetWrapper(nn.Module):
    """Wraps the pose network with its training losses.

    Runs the wrapped net in 'mapped' mode and computes the per-sample
    segmentation loss and the unreduced weighted vertex loss.
    """

    def __init__(self, net):
        super(NetWrapper, self).__init__()
        self.net = net
        # Per-pixel CE; reduction='none' is the modern, non-deprecated
        # equivalent of reduce=False.
        self.criterion = nn.CrossEntropyLoss(reduction='none')

    def forward(self, image, mask, vertex, vertex_weights):
        # Resolved a leftover merge conflict here: both branches requested the
        # same 'mapped' mode; the positional form used elsewhere in this file
        # is kept.
        seg_pred, vertex_pred = self.net(image, 'mapped')
        loss_seg = self.criterion(seg_pred, mask)
        # Per-sample mean of the per-pixel segmentation loss.
        loss_seg = torch.mean(loss_seg.view(loss_seg.shape[0], -1), 1)
        loss_vertex = smooth_l1_loss(vertex_pred, vertex, vertex_weights, reduce=False)
        precision, recall = compute_precision_recall(seg_pred, mask)
        return seg_pred, vertex_pred, loss_seg, loss_vertex, precision, recall


class EvalWrapper(nn.Module):
    """Turns raw network outputs into keypoint hypotheses via RANSAC voting."""

    def forward(self, seg_pred, vertex_pred, use_argmax=True):
        # (b, vn*2, h, w) -> (b, h, w, vn, 2): one 2-D voting vector per
        # keypoint at every pixel.
        vertex_pred = vertex_pred.permute(0, 2, 3, 1)
        b, h, w, vn_2 = vertex_pred.shape
        vertex_pred = vertex_pred.view(b, h, w, vn_2 // 2, 2)
        # Either derive a hard mask from the logits or accept a precomputed one.
        mask = torch.argmax(seg_pred, 1) if use_argmax else seg_pred
        return ransac_voting_layer_v3(mask, vertex_pred, 512, inlier_thresh=0.99)