Example no. 1
0
    def __get_images_labels_bboxes(self, data_sources, num_samples,
                                   is_training_data):
        """Build the input pipeline and compute anchor/ground-truth overlap.

        Reads samples from the given tfrecords, preprocesses them, and
        stores the results on ``self`` (``image``, ``glabels``, ``gbboxes``,
        ``filename``, ``jaccard``).

        :param data_sources: .tfrecord files to read from
        :param num_samples: total number of samples across the files
        :param is_training_data: True to shuffle and use multiple readers
        :return: None (results are stored as attributes on self)
        """
        self.dataset = pascalvoc_datasets.get_dataset_info(
            data_sources, num_samples)
        self.is_training_data = is_training_data
        if self.is_training_data:
            shuffle = True
            # make sure most samples can be fetched in one epoch
            self.num_readers = 2
        else:
            # make sure data is fetched in sequence
            shuffle = False
            self.num_readers = 1

        provider = slim.dataset_data_provider.DatasetDataProvider(
            self.dataset,
            shuffle=shuffle,
            num_readers=self.num_readers,
            common_queue_capacity=30 * self.batch_size,
            common_queue_min=10 * self.batch_size)

        # Get for SSD network: image, labels, bboxes.
        # `image_format` avoids shadowing the builtin `format`; `shape`,
        # `image_format` and `gdifficults` are fetched but unused here.
        [image, shape, image_format, self.filename, glabels, gbboxes,
         gdifficults] = provider.get([
             'image', 'shape', 'format', 'filename', 'object/label',
             'object/bbox', 'object/difficult'
         ])

        # Pre-processing image, labels and bboxes.
        self.image, self.glabels, self.gbboxes = self.__preprocess_data(
            image, glabels, gbboxes)

        anchors = g_ssd_model.get_allanchors(minmaxformat=True)
        print(anchors[-1][-4:])  # debug: inspect the last few anchors
        # Flatten the per-layer anchor tensors into one (num_anchors, 4) tensor.
        anchors = tf.concat(
            [tf.reshape(layer_anchors, [-1, 4]) for layer_anchors in anchors],
            axis=0)

        # Overlap between every ground-truth box and every anchor box.
        self.jaccard = g_ssd_model.compute_jaccard(self.gbboxes, anchors)

        return
Example no. 2
0
    def __get_images_labels_bboxes(self, data_sources, num_samples,
                                   is_training_data):
        """Read samples from tfrecords, preprocess them, match ground-truth
        boxes against the model's anchors, and batch the results.

        :param data_sources: .tfrecord files to read from
        :param num_samples: number of samples in all the files
        :param is_training_data: True to shuffle and use multiple readers
        :return: batched tensors as produced by ``self.__batching_data``
        """
        self.dataset = pascalvoc_datasets.get_dataset_info(
            data_sources, num_samples)
        self.is_training_data = is_training_data
        if self.is_training_data:
            shuffle = True
            # make sure most samples can be fetched in one epoch
            self.num_readers = 2
        else:
            # make sure data is fetched in sequence
            shuffle = False
            self.num_readers = 1

        provider = slim.dataset_data_provider.DatasetDataProvider(
            self.dataset,
            shuffle=shuffle,
            num_readers=self.num_readers,
            common_queue_capacity=20 * self.batch_size,
            common_queue_min=10 * self.batch_size)

        # Get for SSD network: image, labels, bboxes.
        # `image_format` avoids shadowing the builtin `format`.
        [image, shape, image_format, filename, glabels, gbboxes,
         gdifficults] = provider.get([
             'image', 'shape', 'format', 'filename', 'object/label',
             'object/bbox', 'object/difficult'
         ])
        # Subtract the configured label offset before any further processing.
        glabels -= self.labels_offset

        # Pre-processing image, labels and bboxes.
        image, glabels, gbboxes = self.__preprocess_data(
            image, glabels, gbboxes, shape)

        # Assign ground-truth classes/locations/scores to each anchor box.
        gclasses_list, glocalisations_list, gscores_list = g_ssd_model.match_achors(
            glabels, gbboxes, matching_threshold=self.matched_thresholds)

        return self.__batching_data(image, glabels, image_format, filename,
                                    gbboxes, gdifficults, gclasses_list,
                                    glocalisations_list, gscores_list)