Example #1
    def get_points(self, point_coords_list, idx, flip, im_size, warped_im_size):
        X = np.fromstring(point_coords_list.iloc[idx, 0], sep=';')
        Y = np.fromstring(point_coords_list.iloc[idx, 1], sep=';')
        if flip:
            X = im_size[1] - X
        Xpad = -np.ones(20)
        Xpad[:len(X)] = X
        Ypad = -np.ones(20)
        Ypad[:len(Y)] = Y
        point_coords = np.concatenate((Xpad.reshape(1, 20), Ypad.reshape(1, 20)), axis=0)

        h, w, c = im_size
        im_size = torch.FloatTensor([[h, w, c]])
        
        coordinate = torch.FloatTensor(point_coords).view(1, 2, 20)
        target_points_norm = PointsToUnitCoords(coordinate, im_size)

        h, w, c = warped_im_size
        warped_im_size = torch.FloatTensor([[h, w, c]])
        
        warped_points_aff_norm = self.pointTnf.affPointTnf(self.theta_identity, target_points_norm)
        warped_points_aff = PointsToPixelCoords(warped_points_aff_norm, warped_im_size)
        
        # make arrays float tensor for subsequent processing
        point_coords = torch.Tensor(point_coords.astype(np.float32))
        return point_coords, warped_points_aff
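Every example on this page converts between pixel and normalized coordinates with PointsToUnitCoords and PointsToPixelCoords, which are imported from elsewhere. As a reading aid, here is a minimal sketch of what they plausibly compute, assuming the usual [-1, 1] grid convention and the (B, 2, N) point layout seen above; this is an illustration, not the library source:

import torch

def PointsToUnitCoords(P, im_size):
    # P: (B, 2, N) pixel coordinates, row 0 = X, row 1 = Y
    # im_size: (B, 3) rows of [h, w, c], as built in the examples
    h = im_size[:, 0].unsqueeze(1)
    w = im_size[:, 1].unsqueeze(1)
    P_norm = P.clone()
    P_norm[:, 0, :] = 2 * P[:, 0, :] / w - 1  # X: [0, w] -> [-1, 1]
    P_norm[:, 1, :] = 2 * P[:, 1, :] / h - 1  # Y: [0, h] -> [-1, 1]
    return P_norm

def PointsToPixelCoords(P_norm, im_size):
    # inverse of PointsToUnitCoords: [-1, 1] back to pixels
    h = im_size[:, 0].unsqueeze(1)
    w = im_size[:, 1].unsqueeze(1)
    P = P_norm.clone()
    P[:, 0, :] = (P_norm[:, 0, :] + 1) * w / 2
    P[:, 1, :] = (P_norm[:, 1, :] + 1) * h / 2
    return P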
Example #2
def pck_metric(batch, batch_start_idx, theta_aff, theta_tps, theta_aff_tps,
               model_tps, stats, args, use_cuda=True):
    alpha = args.pck_alpha
    do_aff = theta_aff is not None
    do_tps = theta_tps is not None
    do_aff_tps = theta_aff_tps is not None
    
    source_im_size = batch['source_im_size']
    target_im_size = batch['target_im_size']

    source_points = batch['source_points']
    target_points = batch['target_points']
    
    # Instantiate point transformer
    pt = PointTnf(use_cuda=use_cuda,
                  tps_reg_factor=args.tps_reg_factor)

    # warp points with estimated transformations
    target_points_norm = PointsToUnitCoords(target_points, target_im_size)

    if do_aff:
        # affine only
        warped_points_aff_norm = pt.affPointTnf(theta_aff, target_points_norm)
        warped_points_aff = PointsToPixelCoords(warped_points_aff_norm, source_im_size)

    if do_tps:
        # TPS only
        warped_points_tps_norm = pt.defPointTnf(theta_tps, target_points_norm, model_tps)
        warped_points_tps = PointsToPixelCoords(warped_points_tps_norm, source_im_size)

    if do_aff_tps:
        # two-stage: apply the TPS estimate, then the affine, in point space
        warped_points_aff_tps_norm = pt.defPointTnf(theta_aff_tps, target_points_norm, model_tps)
        warped_points_aff_tps_norm = pt.affPointTnf(theta_aff, warped_points_aff_tps_norm)
        warped_points_aff_tps = PointsToPixelCoords(warped_points_aff_tps_norm, source_im_size)

    L_pck = batch['L_pck'].data

    current_batch_size = batch['source_im_size'].size(0)
    indices = range(batch_start_idx, batch_start_idx + current_batch_size)

    if do_aff:
        pck_aff = pck(source_points.data, warped_points_aff.data, L_pck, alpha)
        
    if do_tps:
        pck_tps = pck(source_points.data, warped_points_tps.data, L_pck, alpha)
        
    if do_aff_tps:
        pck_aff_tps = pck(source_points.data, warped_points_aff_tps.data, L_pck, alpha)
        
    if do_aff:
        stats['aff']['pck'][indices] = pck_aff.unsqueeze(1).cpu().numpy()
    if do_tps:
        stats['tps']['pck'][indices] = pck_tps.unsqueeze(1).cpu().numpy()
    if do_aff_tps:
        stats['aff_tps']['pck'][indices] = pck_aff_tps.unsqueeze(1).cpu().numpy() 
        
    return stats
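The pck() call is also not defined on this page. A hedged sketch of the metric it presumably implements (percentage of correct keypoints: a warped point counts as correct when it lands within alpha * L_pck of its source point; the -1 padding convention matches the get_points examples):

import torch

def pck(source_points, warped_points, L_pck, alpha=0.1):
    # source_points, warped_points: (B, 2, N); L_pck: (B,) reference lengths
    batch_size = source_points.size(0)
    scores = torch.zeros(batch_size)
    for i in range(batch_size):
        valid = source_points[i, 0, :] != -1  # skip -1 padded slots
        dist = torch.norm(source_points[i, :, valid]
                          - warped_points[i, :, valid], dim=0)
        scores[i] = (dist <= alpha * L_pck[i]).float().mean()
    return scores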
Example #3
def pck_metric(batch, batch_start_idx, theta_1, theta_2, geometric_model_1,
               geometric_model_2, stats, args, use_cuda=True):

    two_stage = (geometric_model_2 is not None)

    alpha = args.pck_alpha
       
    source_im_size = batch['source_im_size']
    target_im_size = batch['target_im_size']

    source_points = batch['source_points']
    target_points = batch['target_points']
    
    # Instantiate point transformer
    pt = PointTnf(use_cuda=use_cuda,
                  tps_reg_factor=args.tps_reg_factor)

    if geometric_model_1 == 'affine':
        tnf_1 = pt.affPointTnf
    elif geometric_model_1 == 'hom':
        tnf_1 = pt.homPointTnf
    elif geometric_model_1 == 'tps':
        tnf_1 = pt.tpsPointTnf

    if two_stage:
        if geometric_model_2 == 'affine':
            tnf_2 = pt.affPointTnf
        elif geometric_model_2 == 'hom':
            tnf_2 = pt.homPointTnf
        elif geometric_model_2 == 'tps':
            tnf_2 = pt.tpsPointTnf

    # warp points with estimated transformations
    target_points_norm = PointsToUnitCoords(target_points, target_im_size)

    # stage 1 only
    warped_points_1_norm = tnf_1(theta_1, target_points_norm)
    warped_points_1 = PointsToPixelCoords(warped_points_1_norm, source_im_size)

    if two_stage:
        # compose both stages: apply the stage-2 transform, then stage 1
        warped_points_1_2_norm = tnf_2(theta_2, target_points_norm)
        warped_points_1_2_norm = tnf_1(theta_1, warped_points_1_2_norm)
        warped_points_1_2 = PointsToPixelCoords(warped_points_1_2_norm, source_im_size)

    L_pck = batch['L_pck'].data

    current_batch_size = batch['source_im_size'].size(0)
    indices = range(batch_start_idx, batch_start_idx + current_batch_size)

    # compute PCK
    pck_1 = pck(source_points.data, warped_points_1.data, L_pck, alpha)
    stats[geometric_model_1]['pck'][indices] = pck_1.unsqueeze(1).cpu().numpy()
    if two_stage:
        pck_1_2 = pck(source_points.data, warped_points_1_2.data, L_pck, alpha)
        stats[geometric_model_1+'_'+geometric_model_2]['pck'][indices] = pck_1_2.unsqueeze(1).cpu().numpy() 
        
    return stats
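The stats dictionary that both pck_metric variants fill is allocated outside these snippets: a dict mapping each model name to a {'pck': array} entry with one row per image pair. A plausible initialization, assuming num_pairs is the dataset size and using the keys Example #3 would produce for an affine+tps pipeline (hypothetical setup, not library code):

import numpy as np

num_pairs = 1000  # hypothetical dataset size
stats = {name: {'pck': np.zeros((num_pairs, 1))}
         for name in ['affine', 'tps', 'affine_tps']}

Each batch then writes its per-pair scores into the rows selected by indices = range(batch_start_idx, batch_start_idx + current_batch_size).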
Example #4
    def forward(self, theta_forward, theta_backward):
        batch = theta_forward.size()[0]
        b, h, w = self.coord.size()
        coord = Variable(self.coord.expand(batch, h, w))

        img_size = Variable(torch.FloatTensor([[240, 240, 1]])).cuda()

        forward_norm = PointsToUnitCoords(coord, img_size)
        forward_norm = self.pointTnf.affPointTnf(theta_forward, forward_norm)
        forward_coord = PointsToPixelCoords(forward_norm, img_size)
        
        backward_norm = PointsToUnitCoords(forward_coord, img_size)
        backward_norm = self.pointTnf.affPointTnf(theta_backward, backward_norm)
        backward_coord = PointsToPixelCoords(backward_norm, img_size)

        loss = (torch.dist(coord, backward_coord, p=2) ** 2) / (config.NUM_OF_COORD * config.NUM_OF_COORD) / batch

        return loss
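Examples #4 and #5 expand a pre-built self.coord tensor but never show its construction. Given the (1, 2, N) layout expected by PointsToUnitCoords and the NUM_OF_COORD * NUM_OF_COORD normalizer in the loss, it is presumably a flattened regular grid of pixel coordinates; a sketch under that assumption (build_coord_grid is a hypothetical helper, not part of the codebase):

import torch

def build_coord_grid(num_of_coord, im_side=240.0):
    # regular num_of_coord x num_of_coord grid of pixel cell centers,
    # flattened to the (1, 2, N) layout used by PointsToUnitCoords
    step = im_side / num_of_coord
    centers = (torch.arange(num_of_coord, dtype=torch.float32) + 0.5) * step
    grid_y, grid_x = torch.meshgrid(centers, centers, indexing='ij')
    coord = torch.stack([grid_x.reshape(-1), grid_y.reshape(-1)], dim=0)
    return coord.unsqueeze(0)  # (1, 2, num_of_coord ** 2)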
Example #5
    def forward(self, theta_A, theta_B, theta_C):
        batch = theta_A.size()[0]
        b, h, w = self.coord.size()
        # keep the expanded grid in a local variable so self.coord is not
        # overwritten with a batch-expanded copy on every call
        coord = Variable(self.coord.expand(batch, h, w))

        img_size = Variable(torch.FloatTensor([[240, 240, 1]])).cuda()

        A_norm = PointsToUnitCoords(coord, img_size)
        A_norm = self.pointTnf.affPointTnf(theta_A, A_norm)
        A_coord = PointsToPixelCoords(A_norm, img_size)
        
        B_norm = PointsToUnitCoords(A_coord, img_size)
        B_norm = self.pointTnf.affPointTnf(theta_B, B_norm)
        B_coord = PointsToPixelCoords(B_norm, img_size)

        C_norm = PointsToUnitCoords(B_coord, img_size)
        C_norm = self.pointTnf.affPointTnf(theta_C, C_norm)
        C_coord = PointsToPixelCoords(C_norm, img_size)

        loss = (torch.dist(coord, C_coord, p=2) ** 2) / (config.NUM_OF_COORD * config.NUM_OF_COORD) / batch

        return loss
Example #6
    def get_points(self, point_coords_list, idx, flip, im_size, warped_im_size,
                   boundary):
        X = np.fromstring(point_coords_list.iloc[idx, 0], sep=';')
        Y = np.fromstring(point_coords_list.iloc[idx, 1], sep=';')

        top, bottom, left, right = boundary
        if self.random_crop:
            X = X - left
            Y = Y - top

        # collect indices of points that fall outside the cropped region
        ind = []
        for i in range(len(X)):
            if X[i] < 0 or X[i] >= (right - left) or Y[i] < 0 or Y[i] >= (
                    bottom - top):
                ind.append(i)

        if flip:
            X = im_size[1] - X
        Xpad = -np.ones(20)
        Xpad[:len(X)] = X
        Ypad = -np.ones(20)
        Ypad[:len(Y)] = Y

        if len(ind) != 0:
            # mark out-of-crop points as padding so they are ignored later
            for i in ind:
                Xpad[i] = -1
                Ypad[i] = -1
        point_coords = np.concatenate(
            (Xpad.reshape(1, 20), Ypad.reshape(1, 20)), axis=0)

        h, w, c = im_size
        im_size = torch.FloatTensor([[h, w, c]])

        coordinate = torch.FloatTensor(point_coords).view(1, 2, 20)
        target_points_norm = PointsToUnitCoords(coordinate, im_size)

        h, w, c = warped_im_size
        warped_im_size = torch.FloatTensor([[h, w, c]])

        warped_points_aff_norm = self.pointTnf.affPointTnf(
            self.theta_identity, target_points_norm)
        warped_points_aff = PointsToPixelCoords(warped_points_aff_norm,
                                                warped_im_size)

        # make arrays float tensor for subsequent processing
        point_coords = torch.Tensor(point_coords.astype(np.float32))
        return point_coords, warped_points_aff
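A hedged usage sketch for the get_points variants above. The DataFrame layout (column 0 holding ';'-joined X values, column 1 the Y values) is inferred from the iloc calls; the file name, dataset object, and sizes are hypothetical:

import pandas as pd

# one row per image; each cell is a ';'-joined string such as "12.0;34.5"
point_coords_list = pd.read_csv('annotations.csv')

point_coords, warped_points = dataset.get_points(
    point_coords_list, idx=0, flip=False,
    im_size=(240, 240, 3), warped_im_size=(240, 240, 3),
    boundary=(0, 240, 0, 240))  # (top, bottom, left, right)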
Example #7
    def forward(self, coord, theta_forward, theta_backward):
        batch = theta_forward.size()[0]
        b, h, w = coord.size()
        coord = Variable(coord.expand(batch, h, w))

        # move everything to the GPU once instead of scattering .cuda() calls
        im_size = Variable(torch.FloatTensor([[15, 15, 1]])).cuda()
        coord = coord.cuda()
        target_norm = PointsToUnitCoords(coord, im_size)

        if self.transform == 'affine':
            forward_norm = self.pointTnf.affPointTnf(theta_forward, target_norm)
            forward_coord = PointsToPixelCoords(forward_norm, im_size)

            backward_norm = self.pointTnf.affPointTnf(theta_backward, forward_norm)
            backward_coord = PointsToPixelCoords(backward_norm, im_size)

        if self.dist_metric == 'L2':
            loss = torch.dist(coord, backward_coord, p=2)
        elif self.dist_metric == 'L1':
            loss = torch.dist(coord, backward_coord, p=1)

        return loss
Example #8
def pck_metric(batch,
               batch_start_idx,
               theta_aff,
               theta_aff_tps,
               stats,
               args,
               use_cuda=True):
    alpha = args.pck_alpha

    source_im_size = batch['source_im_size']
    target_im_size = batch['target_im_size']

    source_points = batch['source_points']
    target_points = batch['target_points']

    # Instantiate point transformer
    pt = PointTnf(use_cuda=use_cuda, tps_reg_factor=args.tps_reg_factor)

    # warp points with estimated transformations
    target_points_norm = PointsToUnitCoords(target_points, target_im_size)

    # compose the two-stage estimate in point space: TPS first, then affine
    warped_points_aff_tps_norm = pt.tpsPointTnf(theta_aff_tps,
                                                target_points_norm)
    warped_points_aff_tps_norm = pt.affPointTnf(theta_aff,
                                                warped_points_aff_tps_norm)
    warped_points_aff_tps = PointsToPixelCoords(warped_points_aff_tps_norm,
                                                source_im_size)

    L_pck = batch['L_pck'].data

    current_batch_size = batch['source_im_size'].size(0)
    indices = range(batch_start_idx, batch_start_idx + current_batch_size)

    pck_aff_tps = pck(source_points.data, warped_points_aff_tps.data, L_pck,
                      alpha)

    stats['aff_tps']['pck'][indices] = pck_aff_tps.unsqueeze(1).cpu().numpy()

    return stats
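For reference, affPointTnf as used throughout these examples applies a per-sample 2x3 affine matrix to a batch of normalized points. A minimal sketch of that operation, assuming theta is the flattened 6-parameter affine (an illustration of the math, not the PointTnf source):

import torch

def aff_point_tnf(theta, points):
    # theta: (B, 6) affine parameters; points: (B, 2, N) normalized coords
    theta_mat = theta.view(-1, 2, 3)
    warped = torch.bmm(theta_mat[:, :, :2], points)    # linear 2x2 part
    warped = warped + theta_mat[:, :, 2].unsqueeze(2)  # translation column
    return warped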