Example #1
    def run(self):

        with tf.Graph().as_default():
            self.get_voc_2007_test_data()
            with tf.Session('') as sess:
                init = tf.global_variables_initializer()
                sess.run(init)
                with slim.queues.QueueRunners(sess):
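                    # Pull one decoded sample (image, labels, boxes, filename, jaccard overlap) from the input queue.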

                    image, glabels, gbboxes, filename, jaccard = sess.run([
                        self.image, self.glabels, self.gbboxes, self.filename,
                        self.jaccard
                    ])

                    print(filename)
                    print(glabels)
                    print(gbboxes)
                    print(jaccard)

                    # select the first image in the batch

                    image_data = np_image_unwhitened(image)
                    self.__disp_image(image_data, glabels, gbboxes)
                    # found_matched = self.__disp_matched_anchors(image_data, target_labels_data, target_localizations_data, target_scores_data)
                    plt.show()

        return
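
For context, here is a minimal sketch of the un-whitening step used above. The per-channel means are an assumption (the usual VGG means); the real np_image_unwhitened may differ in details.

import numpy as np

# Hypothetical stand-in for np_image_unwhitened: add back the per-channel
# means that were subtracted during preprocessing so matplotlib can display
# the image. The mean values are assumed VGG means, not taken from the example.
def unwhiten_sketch(image, means=(123.0, 117.0, 104.0)):
    img = image.astype(np.float32) + np.array(means, dtype=np.float32)
    return np.clip(img, 0.0, 255.0).astype(np.uint8)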
Example #2
# NOTE: the opening of this call was truncated in the excerpt; these are the
# trailing keyword arguments of the box-selection step (presumably
# np_methods.ssd_bboxes_select), kept as-is.
                                                          num_classes=9,
                                                          decode=True)
rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)
rclasses, rscores, rbboxes = np_methods.bboxes_sort(rclasses,
                                                    rscores,
                                                    rbboxes,
                                                    top_k=400)
rclasses, rscores, rbboxes = np_methods.bboxes_nms(rclasses,
                                                   rscores,
                                                   rbboxes,
                                                   nms_threshold=0.3)

# In[31]:

# Draw bboxes
img_bboxes = np.copy(ssd_vgg_preprocessing.np_image_unwhitened(rimg))
bboxes_draw_on_img(img_bboxes,
                   rclasses,
                   rscores,
                   rbboxes,
                   colors_tableau,
                   thickness=1)
# bboxes_draw_on_img(img_bboxes, test_labels, test_scores, test_bboxes, colors_tableau, thickness=1)

print('Labels / scores:', list(zip(rclasses, rscores)))
print('Groundtruth labels:', list(glabels))
print(gbboxes)

fig = plt.figure(figsize=(20, 20))
plt.imshow(img_bboxes)
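
For reference, a minimal sketch of the greedy non-maximum-suppression step that np_methods.bboxes_nms performs above. It assumes the boxes arrive sorted by descending score (which bboxes_sort provides) and are stored as [ymin, xmin, ymax, xmax]; both are assumptions about this pipeline, not facts taken from the excerpt.

import numpy as np

def nms_sketch(classes, scores, bboxes, nms_threshold=0.3):
    # Boolean mask of boxes to keep; boxes are assumed sorted by descending score.
    keep = np.ones(scores.shape[0], dtype=bool)
    for i in range(scores.shape[0] - 1):
        if not keep[i]:
            continue
        # Intersection-over-union of box i with every later (lower-scored) box.
        ymin = np.maximum(bboxes[i, 0], bboxes[i + 1:, 0])
        xmin = np.maximum(bboxes[i, 1], bboxes[i + 1:, 1])
        ymax = np.minimum(bboxes[i, 2], bboxes[i + 1:, 2])
        xmax = np.minimum(bboxes[i, 3], bboxes[i + 1:, 3])
        inter = np.maximum(ymax - ymin, 0.0) * np.maximum(xmax - xmin, 0.0)
        area_i = (bboxes[i, 2] - bboxes[i, 0]) * (bboxes[i, 3] - bboxes[i, 1])
        areas = (bboxes[i + 1:, 2] - bboxes[i + 1:, 0]) * (bboxes[i + 1:, 3] - bboxes[i + 1:, 1])
        iou = inter / (area_i + areas - inter)
        # Suppress lower-scored boxes of the same class that overlap too much.
        keep[i + 1:] &= ~((iou > nms_threshold) & (classes[i + 1:] == classes[i]))
    return classes[keep], scores[keep], bboxes[keep]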
Example #3
    def run(self):

        with tf.Graph().as_default():
            # Alternative data sources (left commented out in the original):
            # batch_data = self.get_voc_2007_train_data(is_training_data=True)
            # batch_data = self.get_voc_2007_test_data()
            # batch_data = self.get_voc_2012_train_data()
            # batch_data = self.get_voc_2007_2012_train_data(is_training_data=True)
            batch_data = self.get_gtsdb_train_data(is_training_data=True)

            # NOTE: this early return only exercises iterate_file_name(); the
            # session-based inspection below is unreachable unless it is removed.
            return self.iterate_file_name(batch_data)

            with tf.Session('') as sess:
                init = tf.global_variables_initializer()
                sess.run(init)
                with slim.queues.QueueRunners(sess):
                    while True:
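                        # Evaluate one batch: raw inputs plus the anchor-matching targets
                        # (gclasses, glocalisations, gscores appear to be per-feature-layer lists).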

                        image, filename, glabels, gbboxes, gdifficults, gclasses, glocalisations, gscores = sess.run(
                            list(batch_data))

                        # print("min: {}, max: {}".format(gbboxes.min(), gbboxes.max()))
                        # return

                        # print(glabels)
                        # print("number of zero label patch {}".format((glabels.sum(axis=1) == 0).sum()))
                        # return

                        print(filename)
                        selected_file = b'000050'
                        picked_inds = None
                        # select the first image in the batch
                        if selected_file is None:
                            picked_inds = 0
                        else:
                            picked_inds = (selected_file == filename).nonzero()
                            if len(picked_inds[0]) == 0:
                                picked_inds = None
                            else:
                                picked_inds = picked_inds[0][0]

                        if picked_inds is None:
                            continue

                        self.check_match_statistics(filename, gclasses,
                                                    gscores)
                        target_labels_data = [
                            item[picked_inds] for item in gclasses
                        ]
                        target_localizations_data = [
                            item[picked_inds] for item in glocalisations
                        ]
                        target_scores_data = [
                            item[picked_inds] for item in gscores
                        ]
                        image_data = image[picked_inds]
                        print("picked file {}".format(filename[picked_inds]))

                        image_data = np_image_unwhitened(image_data)
                        self.__disp_image(image_data, glabels[picked_inds],
                                          gbboxes[picked_inds])
                        found_matched = self.__disp_matched_anchors(
                            image_data, target_labels_data,
                            target_localizations_data, target_scores_data)
                        plt.show()
                        # exit the batch-data testing right after a successful match has been found
                        break


#                         if found_matched:
# NOTE: this could be a potential issue to solve, since sometimes not all ground-truth bboxes are encoded.

        return
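
As a small aside, the filename-based selection used above can be reproduced in isolation. The array contents below are made-up placeholders; only the indexing pattern matches the example.

import numpy as np

# Placeholder batch of file names as returned by sess.run (byte strings).
filenames = np.array([b'000012', b'000050', b'000104'])
selected_file = b'000050'

matches = (selected_file == filenames).nonzero()
picked_ind = matches[0][0] if len(matches[0]) > 0 else None
print(picked_ind)  # prints 1 for this placeholder batch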