Example #1
    def Indexes_of_BackgroundPoints(self, Keypoints, Descriptors,
                                    keypoint_indexes):
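        # Split descriptors into background points (label -1 in the third keypoint
        # column) and points inside the bounding box (label 1), cluster each set
        # separately, and then mark as foreground only those in-box points that are
        # closer to an inside-box centroid than to any background centroid.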
        backgroundPointsIndex = Keypoints[:, 2] == -1
        insideboxPointsIndex = Keypoints[:, 2] == 1

        backgroundDescriptors = clustering.preprocess_features(
            Descriptors[:500000][backgroundPointsIndex[:500000]])

        insideboxDescriptors = clustering.preprocess_features(
            Descriptors[:500000][insideboxPointsIndex[:500000]])

        number_of_insideClusters = 100
        number_of_outsideClusters = 250
        backgroundclustering = clustering.Kmeans(number_of_outsideClusters,
                                                 centroids=None)
        insideboxclustering = clustering.Kmeans(number_of_insideClusters,
                                                centroids=None)

        backgroundclustering.cluster(backgroundDescriptors, verbose=False)
        insideboxclustering.cluster(insideboxDescriptors, verbose=False)

        foregroundpointindex = np.zeros(len(Keypoints), dtype=bool)
        for imagename in keypoint_indexes:
            start, end = keypoint_indexes[imagename]
            keypoints = Keypoints[start:end, :]
            descriptors = Descriptors[start:end, :]

            distanceinside, Iinside = insideboxclustering.index.search(
                clustering.preprocess_features(descriptors), 1)
            distanceoutside, Ioutside = backgroundclustering.index.search(
                clustering.preprocess_features(descriptors), 1)

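            # a point counts as foreground when its nearest inside-box centroid is closer
            # than its nearest background centroid (and it was labelled as inside the box)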
            points_to_keep = (distanceinside < distanceoutside).reshape(-1)
            points_to_keep = np.logical_and(points_to_keep,
                                            keypoints[:, 2] == 1)
            foregroundpointindex[start:end] = points_to_keep

        return foregroundpointindex
    def load_trained_fiststep_model(self, checkpoint_filename):
        LogText(
            f"Pretrained First Step model loaded from  : {checkpoint_filename}",
            self.experiment_name, self.log_path)
        checkpoint = torch.load(checkpoint_filename, map_location='cpu')
        self.iterations = checkpoint['iteration']
        self.centroid = checkpoint['centroid']

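        # restoring the saved centroids lets the next clustering round warm-start
        # k-means from the previous pseudo-label update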
        if (self.centroid is not None):
            self.KmeansClustering = clustering.Kmeans(self.number_of_clusters,
                                                      self.centroid)

        self.model.load_state_dict(checkpoint['state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.schedualer = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            self.optimizer, T_0=5, T_mult=1, eta_min=5e-6)
    def init_firststep(self, lr, weight_decay, number_of_clusters,
                       training_iterations_before_first_clustering):

        LogText(f"Training model initiated", self.experiment_name,
                self.log_path)
        self.weight_decay = weight_decay
        self.lr = lr
        self.training_iterations_before_first_clustering = training_iterations_before_first_clustering
        self.number_of_clusters = number_of_clusters

        self.optimizer = torch.optim.RMSprop(self.model.parameters(),
                                             lr=self.lr,
                                             weight_decay=self.weight_decay)
        self.schedualer = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            self.optimizer, T_0=5, T_mult=1, eta_min=5e-6)

        self.centroid = None
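        # margin and eps are presumably hyperparameters of the training loss used
        # elsewhere in this class; they are not consumed in this snippet (assumption)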
        self.margin = 0.8
        self.eps = 1e-9
        self.KmeansClustering = clustering.Kmeans(self.number_of_clusters)
        self.iterations = 0
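# A minimal, self-contained sketch of the optimizer/scheduler pairing used in
# init_firststep above; the model and hyperparameter values are placeholders,
# not the project's actual configuration.
import torch

model = torch.nn.Linear(256, 256)  # stand-in for the landmark detection network
optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-4, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
    optimizer, T_0=5, T_mult=1, eta_min=5e-6)

for iteration in range(100):
    # ...forward pass and loss.backward() would go here...
    optimizer.step()
    scheduler.step()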
Example #4
    def __init__(self, namesurf, surf):
        """Classe per il campo"""
        self.surf = spritesheet.SpriteSheet(surf, 'file')

        # Get the image dimensions
        size = surf.get_size()

        self.square_size = 30

        # Compute the number of rows and columns
        self.raws = range(int(size[1] / self.square_size))
        self.columns = range(int(size[0] / self.square_size))

        # Compute the dominant colours with an algorithm
        # called "k-means clustering"
        self.color = clustering.Kmeans(5).run(namesurf)

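        # sort the dominant colours from brightest to darkest (by summed RGB)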
        self.color.sort(key=lambda x: sum(x))
        self.color = self.color[::-1]

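        # map the five colours to board elements; the roles (background, flags,
        # grid lines, mines, numbers) are inferred from the attribute names below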
        self.back, self.cflags, self.lines, self.cmine, self.numbers = self.color
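# A minimal, self-contained sketch of dominant-colour extraction with k-means,
# which is the role clustering.Kmeans(5).run(namesurf) plays above (numpy-only,
# illustrative implementation, not the repository's).
import numpy as np

def dominant_colors(pixels, k=5, iters=20, seed=0):
    """pixels: (N, 3) float array of RGB values; returns k RGB tuples."""
    rng = np.random.default_rng(seed)
    centers = pixels[rng.choice(len(pixels), k, replace=False)].astype(float)
    for _ in range(iters):
        # assign every pixel to its nearest centre
        distances = ((pixels[:, None, :] - centers[None, :, :]) ** 2).sum(-1)
        labels = distances.argmin(1)
        # move each centre to the mean of its assigned pixels
        for j in range(k):
            if (labels == j).any():
                centers[j] = pixels[labels == j].mean(0)
    return [tuple(c) for c in centers.round().astype(int)]

# usage (assuming a pygame Surface `surf`):
#   dominant_colors(pygame.surfarray.array3d(surf).reshape(-1, 3).astype(float))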
    def Update_pseudoLabels(self, dataloader, oldkeypoints=None):

        LogText(f"Clustering stage for iteration {self.iterations}",
                self.experiment_name, self.log_path)
        self.model.eval()

        imagesize = 256
        heatmapsize = 64
        numberoffeatures = 256
        buffersize = 500000
        # allocation of 2 buffers for temporary storage of keypoints and descriptors.
        Keypoint_buffer = torch.zeros(buffersize, 3)
        Descriptor__buffer = torch.zeros(buffersize, numberoffeatures)

        # arrays to which the buffer content is periodically saved. The corresponding files are
        # temporary and will be deleted once the process completes
        CreateFileArray(
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'keypoints'), 3)
        CreateFileArray(
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'descriptors'), numberoffeatures)

        # intermediate variables
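        # first_index/last_index address positions in the full on-disk arrays, while
        # buffer_first_index/buffer_last_index address the in-memory buffer, which is
        # flushed to disk and reset whenever it is ~80% full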
        first_index = 0
        last_index = 0
        buffer_first_index = 0
        buffer_last_index = 0
        keypoint_indexes = {}

        pointsperimage = 0
        LogText(f"Inference of keypoints and descriptors begins",
                self.experiment_name, self.log_path)
        for i_batch, sample in enumerate(dataloader):
            input = Cuda(sample['image'])
            names = sample['filename']

            with torch.no_grad():
                output = self.model.forward(input)
            outputHeatmap = output[0]
            descriptors_volume = output[1]

            batch_keypoints = GetBatchMultipleHeatmap(
                outputHeatmap, self.confidence_thres_FAN)
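            # batch_keypoints rows appear to hold (batch index, x, y, confidence);
            # below they are split per image using the batch index (assumption based
            # on how they are sliced)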

            for i in range(input.size(0)):

                indexes = batch_keypoints[:, 0] == i
                sample_keypoints = batch_keypoints[indexes, 1:][:, :3]

                pointsperimage += len(sample_keypoints)
                if (oldkeypoints is not None):
                    if (names[i] in oldkeypoints):
                        keypoints_previous_round = Cuda(
                            torch.from_numpy(
                                oldkeypoints[names[i]].copy())).float()
                        sample_keypoints = MergePoints(
                            sample_keypoints, keypoints_previous_round)

                descriptors = GetDescriptors(descriptors_volume[i],
                                             sample_keypoints[:, :2],
                                             heatmapsize, heatmapsize)

                numofpoints = sample_keypoints.shape[0]
                last_index += numofpoints
                buffer_last_index += numofpoints

                Keypoint_buffer[buffer_first_index:buffer_last_index, :2] = (
                    sample_keypoints.cpu()[:, :2])
                Descriptor__buffer[
                    buffer_first_index:buffer_last_index, :] = descriptors

                keypoint_indexes[names[i]] = [first_index, last_index]
                first_index += numofpoints
                buffer_first_index += numofpoints

            # periodically flush the buffer to file
            if buffer_last_index > int(buffersize * 0.8):
                AppendFileArray(
                    np.array(Keypoint_buffer[:buffer_last_index]),
                    str(
                        GetCheckPointsPath(self.experiment_name, self.log_path)
                        / 'keypoints'))
                AppendFileArray(
                    np.array(Descriptor__buffer[:buffer_last_index]),
                    str(
                        GetCheckPointsPath(self.experiment_name, self.log_path)
                        / 'descriptors'))

                Keypoint_buffer = torch.zeros(buffersize, 3)
                Descriptor__buffer = torch.zeros(buffersize, numberoffeatures)
                buffer_first_index = 0
                buffer_last_index = 0

        # store any keypoints left on the buffers
        AppendFileArray(
            np.array(Keypoint_buffer[:buffer_last_index]),
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'keypoints'))
        AppendFileArray(
            np.array(Descriptor__buffer[:buffer_last_index]),
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'descriptors'))

        # load handlers to the Keypoints and Descriptor files
        Descriptors, fileHandler1 = OpenreadFileArray(
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'descriptors'))
        Keypoints, fileHandler2 = OpenreadFileArray(
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'keypoints'))
        Keypoints = Keypoints[:, :]
        LogText(
            f"Keypoints Detected per image Only detector {pointsperimage / len(keypoint_indexes)}",
            self.experiment_name, self.log_path)
        LogText(f"Inference of keypoints and descriptors completed",
                self.experiment_name, self.log_path)
        LogText(
            f"Keypoints Detected per image {len(Keypoints)/len(keypoint_indexes)}",
            self.experiment_name, self.log_path)

        # we use a subset of all the descriptors for clustering, following the recommendation of the Faiss repository
        numberOfPointsForClustering = 500000

        descriptors = clustering.preprocess_features(
            Descriptors[:numberOfPointsForClustering])
        _, self.centroid = self.KmeansClustering.cluster(descriptors,
                                                         verbose=False)

        self.KmeansClustering.clus.nredo = 1
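        # nredo controls how many k-means restarts Faiss performs; with centroids
        # carried over between rounds a single run is enough (assumption about intent)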

        thresholds = self.GetThresholdsPerCluster(Descriptors)

        Image_Keypoints = {}

        averagepointsperimage = 0

        for image in keypoint_indexes:
            start, end = keypoint_indexes[image]
            keypoints = Keypoints[start:end, :]

            image_descriptors = clustering.preprocess_features(
                Descriptors[start:end])

            # calculate the distance of each keypoint to each centroid
            distanceMatrix, clustering_assignments = self.KmeansClustering.index.search(
                image_descriptors, self.number_of_clusters)

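            # faiss returns, for each keypoint, the distances to the k nearest centroids
            # sorted by distance; reordering the columns with argsort of the centroid ids
            # makes column j hold the distance to centroid j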
            distanceMatrix = np.take_along_axis(
                distanceMatrix, np.argsort(clustering_assignments), axis=-1)

            # assign keypoints to centroids using the Hungarian algorithm. This ensures that each
            # image has at most one instance of each cluster
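            # scipy's linear_sum_assignment handles the rectangular cost matrix: with fewer
            # keypoints than clusters, every keypoint is matched to a distinct cluster so
            # that the total distance is minimised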
            keypointIndex, clusterAssignment = linear_sum_assignment(
                distanceMatrix)

            tempKeypoints = keypoints[keypointIndex]

            clusterAssignmentDistance = distanceMatrix[keypointIndex,
                                                       clusterAssignment]

            clusterstokeep = np.zeros(len(clusterAssignmentDistance),
                                      dtype=bool)

            # keep only points whose distance lies below their cluster-specific threshold
            clusterstokeep[clusterAssignmentDistance <
                           thresholds[clusterAssignment]] = True

            tempKeypoints[:, 2] = clusterAssignment

            Image_Keypoints[image] = tempKeypoints[clusterstokeep]

            averagepointsperimage += sum(clusterstokeep)

        #initialise centroids for next clustering round
        self.KmeansClustering = clustering.Kmeans(self.number_of_clusters,
                                                  self.centroid)
        LogText(
            f"Keypoints Detected per image {averagepointsperimage/len(Image_Keypoints)}",
            self.experiment_name, self.log_path)

        self.save_keypoints(Image_Keypoints,
                            f'UpdatedKeypoints{self.iterations}.pickle')
        ClosereadFileArray(
            fileHandler1,
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'keypoints'))
        ClosereadFileArray(
            fileHandler2,
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'descriptors'))
        LogText(f"Clustering stage completed", self.experiment_name,
                self.log_path)
        return Image_Keypoints
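# A minimal, self-contained sketch of the per-image cluster-assignment step used in
# Update_pseudoLabels above, with faiss replaced by a plain distance computation and
# made-up sizes (illustrative only).
import numpy as np
from scipy.optimize import linear_sum_assignment

descriptors = np.random.rand(4, 8).astype('float32')  # 4 keypoints in one image
centroids = np.random.rand(6, 8).astype('float32')    # 6 cluster centroids

# squared L2 distance of every keypoint to every centroid (column j == centroid j)
distanceMatrix = ((descriptors[:, None, :] - centroids[None, :, :]) ** 2).sum(-1)

# at most one keypoint per cluster, minimising the total distance
keypointIndex, clusterAssignment = linear_sum_assignment(distanceMatrix)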
Example #6
    def CreateInitialPseudoGroundtruth(self, dataloader):

        LogText(f"Extraction of initial Superpoint pseudo groundtruth",
                self.experiment_name, self.log_path)

        imagesize = 256
        heatmapsize = 64
        numberoffeatures = 256
        buffersize = 500000

        #allocation of 2 buffers for temporary storage of keypoints and descriptors.
        Keypoint_buffer = torch.zeros(buffersize, 3)
        Descriptor__buffer = torch.zeros(buffersize, numberoffeatures)

        #arrays to which the buffer content is periodically saved. The corresponding files are
        #temporary and will be deleted once the process completes
        CreateFileArray(
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'keypoints'), 3)
        CreateFileArray(
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'descriptors'), numberoffeatures)

        #intermediate variables
        first_index = 0
        last_index = 0
        buffer_first_index = 0
        buffer_last_index = 0
        keypoint_indexes = {}

        LogText(f"Inference of Keypoints begins", self.experiment_name,
                self.log_path)
        for i_batch, sample in enumerate(dataloader):
            input = Cuda(sample['image_gray'])
            names = sample['filename']
            bsize = input.size(0)

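            # when multiple scales are used, they are folded into the batch dimension for
            # a single forward pass and unfolded again right after (see the reshapes below)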
            if (self.UseScales):
                input = input.view(-1, 1, input.shape[2], input.shape[3])

            with torch.no_grad():
                detectorOutput, descriptorOutput = self.GetSuperpointOutput(
                    input)

            if (self.UseScales):
                detectorOutput = detectorOutput.view(bsize, -1,
                                                     detectorOutput.shape[2],
                                                     detectorOutput.shape[3])
                input = input.view(bsize, -1, input.shape[2], input.shape[3])
                descriptorOutput = descriptorOutput.view(
                    bsize, -1, descriptorOutput.size(1),
                    descriptorOutput.size(2), descriptorOutput.size(3))[:, 0]
            for i in range(0, bsize):

                keypoints = self.GetPoints(detectorOutput[i].unsqueeze(0),
                                           self.confidence_thres_superpoint,
                                           self.nms_thres_superpoint)

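                # RemoveBackgroundClusters keeps every point but labels those outside the
                # bounding box with -1 (stored later in the third keypoint column), while
                # use_box simply discards the points outside the box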
                if (self.RemoveBackgroundClusters):
                    bounding_box = sample['bounding_box'][i]
                    pointsinbox = torch.ones(len(keypoints))
                    pointsinbox[(keypoints[:, 0] < int(bounding_box[0]))] = -1
                    pointsinbox[(keypoints[:, 1] < int(bounding_box[1]))] = -1
                    pointsinbox[(keypoints[:, 0] > int(bounding_box[2]))] = -1
                    pointsinbox[(keypoints[:, 1] > int(bounding_box[3]))] = -1

                elif (self.use_box):
                    bounding_box = sample['bounding_box'][i]
                    pointsinbox = torch.ones(len(keypoints))
                    pointsinbox[(keypoints[:, 0] < int(bounding_box[0]))] = -1
                    pointsinbox[(keypoints[:, 1] < int(bounding_box[1]))] = -1
                    pointsinbox[(keypoints[:, 0] > int(bounding_box[2]))] = -1
                    pointsinbox[(keypoints[:, 1] > int(bounding_box[3]))] = -1
                    keypoints = keypoints[pointsinbox == 1]

                descriptors = GetDescriptors(descriptorOutput[i], keypoints,
                                             input.shape[3], input.shape[2])

                #scale image keypoints to FAN resolution
                keypoints = dataloader.dataset.keypointsToFANResolution(
                    dataloader.dataset, names[i], keypoints)

                keypoints = ((heatmapsize / imagesize) * keypoints).round()

                last_index += len(keypoints)
                buffer_last_index += len(keypoints)

                Keypoint_buffer[
                    buffer_first_index:buffer_last_index, :2] = keypoints
                Descriptor__buffer[
                    buffer_first_index:buffer_last_index] = descriptors

                if (self.RemoveBackgroundClusters):
                    Keypoint_buffer[buffer_first_index:buffer_last_index,
                                    2] = pointsinbox

                keypoint_indexes[names[i]] = [first_index, last_index]
                first_index += len(keypoints)
                buffer_first_index += len(keypoints)

            #periodically flush the buffer to file
            if buffer_last_index > int(buffersize * 0.8):
                AppendFileArray(
                    np.array(Keypoint_buffer[:buffer_last_index]),
                    str(
                        GetCheckPointsPath(self.experiment_name, self.log_path)
                        / 'keypoints'))
                AppendFileArray(
                    np.array(Descriptor__buffer[:buffer_last_index]),
                    str(
                        GetCheckPointsPath(self.experiment_name, self.log_path)
                        / 'descriptors'))

                Keypoint_buffer = torch.zeros(buffersize, 3)
                Descriptor__buffer = torch.zeros(buffersize, numberoffeatures)
                buffer_first_index = 0
                buffer_last_index = 0

        LogText(f"Inference of Keypoints completed", self.experiment_name,
                self.log_path)
        #store any keypoints left on the buffers
        AppendFileArray(
            np.array(Keypoint_buffer[:buffer_last_index]),
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'keypoints'))
        AppendFileArray(
            np.array(Descriptor__buffer[:buffer_last_index]),
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'descriptors'))

        #load handlers to the Keypoints and Descriptor files
        Descriptors, fileHandler1 = OpenreadFileArray(
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'descriptors'))
        Keypoints, fileHandler2 = OpenreadFileArray(
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'keypoints'))
        Keypoints = Keypoints[:, :]
        LogText(
            f"Keypoints Detected per image {len(Keypoints)/len(keypoint_indexes)}",
            self.experiment_name, self.log_path)

        #perform outlier detection
        inliersindexes = np.ones(len(Keypoints)) == 1
        if (self.remove_superpoint_outliers_percentage > 0):
            inliersindexes = self.Indexes_of_inliers(Keypoints, Descriptors,
                                                     buffersize)

        #extend the outlier set with background points for datasets with a constant background
        if (self.RemoveBackgroundClusters):
            foregroundpointindex = self.Indexes_of_BackgroundPoints(
                Keypoints, Descriptors, keypoint_indexes)
            inliersindexes = np.logical_and(inliersindexes,
                                            foregroundpointindex)

        LogText(
            f"Keypoints Detected per image(filtering) {sum(inliersindexes) / len(keypoint_indexes)}",
            self.experiment_name, self.log_path)
        #we use a subset of all the descriptors for clustering, following the recommendation of the Faiss repository
        numberOfPointsForClustering = 500000

        LogText(f"Clustering of keypoints", self.experiment_name,
                self.log_path)
        #clustering of superpoint features
        KmeansClustering = clustering.Kmeans(self.number_of_clusters,
                                             centroids=None)
        descriptors = clustering.preprocess_features(
            Descriptors[:numberOfPointsForClustering][
                inliersindexes[:numberOfPointsForClustering]])
        KmeansClustering.cluster(descriptors, verbose=False)

        thresholds = self.GetThresholdsPerCluster(inliersindexes, Descriptors,
                                                  KmeansClustering)

        Image_Keypoints = {}
        averagepointsperimage = 0
        for image in keypoint_indexes:
            start, end = keypoint_indexes[image]
            inliersinimage = inliersindexes[start:end]
            keypoints = Keypoints[start:end, :]

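            #discard keypoints that fall outside the 64x64 heatmap after rescaling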
            inliersinimage[np.sum(keypoints[:, :2] < 0, 1) > 0] = False
            inliersinimage[np.sum(keypoints[:, :2] > 64, 1) > 0] = False

            keypoints = keypoints[inliersinimage]

            image_descriptors = clustering.preprocess_features(
                Descriptors[start:end])
            image_descriptors = image_descriptors[inliersinimage]

            #calculate the distance of each keypoint to each centroid
            distanceMatrix, clustering_assignments = KmeansClustering.index.search(
                image_descriptors, self.number_of_clusters)

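            #reorder the columns so that column j holds the distance to centroid j
            #(same reordering as in Update_pseudoLabels above)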
            distanceMatrix = np.take_along_axis(
                distanceMatrix, np.argsort(clustering_assignments), axis=-1)

            #assign keypoints to centroids using the Hungarian algorithm. This ensures that each
            #image has at most one instance of each cluster
            keypointIndex, clusterAssignment = linear_sum_assignment(
                distanceMatrix)

            tempKeypoints = keypoints[keypointIndex]

            clusterAssignmentDistance = distanceMatrix[keypointIndex,
                                                       clusterAssignment]

            clusterstokeep = np.zeros(len(clusterAssignmentDistance),
                                      dtype=bool)

            # keep only points whose distance lies below their cluster-specific threshold
            clusterstokeep[clusterAssignmentDistance <
                           thresholds[clusterAssignment]] = True

            tempKeypoints[:, 2] = clusterAssignment

            Image_Keypoints[image] = tempKeypoints[clusterstokeep]
            averagepointsperimage += sum(clusterstokeep)

        LogText(
            f"Keypoints Detected per image(clusteringAssignment) {averagepointsperimage / len(Image_Keypoints)}",
            self.experiment_name, self.log_path)
        ClosereadFileArray(
            fileHandler1,
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'keypoints'))
        ClosereadFileArray(
            fileHandler2,
            str(
                GetCheckPointsPath(self.experiment_name, self.log_path) /
                'descriptors'))
        self.save_keypoints(Image_Keypoints, "SuperPointKeypoints.pickle")
        LogText(f"Extraction of Initial pseudoGroundtruth completed",
                self.experiment_name, self.log_path)
        return Image_Keypoints