Code Example #1
    def evaluate_generator(self,
                           annotations,
                           comet_experiment=None,
                           iou_threshold=0.5,
                           max_detections=200):
        """ Evaluate prediction model using a csv fit_generator

        Args:
            annotations (str): Path to csv label file, labels are in the format -> path/to/image.png,x1,y1,x2,y2,class_name
            iou_threshold(float): IoU Threshold to count for a positive detection (defaults to 0.5)
            max_detections (int): Maximum number of bounding box predictions
            comet_experiment(object): A comet experiment class objects to track

        Return:
            mAP: Mean average precision of the evaluated data
        """
        #Format args for CSV generator
        classes_file = utilities.create_classes(annotations)
        arg_list = utilities.format_args(annotations, classes_file, self.config)
        args = parse_args(arg_list)

        #create generator
        validation_generator = CSVGenerator(
            args.annotations,
            args.classes,
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config,
            shuffle_groups=False,
        )

        average_precisions = evaluate(validation_generator,
                                      self.prediction_model,
                                      iou_threshold=iou_threshold,
                                      score_threshold=args.score_threshold,
                                      max_detections=max_detections,
                                      save_path=args.save_path,
                                      comet_experiment=comet_experiment)

        # print evaluation
        total_instances = []
        precisions = []
        for label, (average_precision, num_annotations) in average_precisions.items():
            print('{:.0f} instances of class'.format(num_annotations),
                  validation_generator.label_to_name(label),
                  'with average precision: {:.4f}'.format(average_precision))
            total_instances.append(num_annotations)
            precisions.append(average_precision)

        if sum(total_instances) == 0:
            print('No test instances found.')
            return

        print('mAP using the weighted average of precisions among classes: {:.4f}'.format(
            sum([a * b for a, b in zip(total_instances, precisions)]) /
            sum(total_instances)))

        mAP = sum(precisions) / sum(x > 0 for x in total_instances)
        print('mAP: {:.4f}'.format(mAP))
        return mAP
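
The aggregation at the end of this method recurs throughout the examples below: `evaluate` returns a dict mapping each class label to `(average_precision, num_annotations)`, and mAP is reported either as an instance-weighted average or as a plain mean over classes that have annotations. A standalone sketch of just that step, using made-up values:

# Standalone sketch of the mAP aggregation above; the input dict is illustrative.
average_precisions = {0: (0.71, 120), 1: (0.55, 30), 2: (0.0, 0)}

precisions = [ap for ap, _ in average_precisions.values()]
total_instances = [n for _, n in average_precisions.values()]

# Mean AP weighted by the number of ground-truth instances per class
weighted_map = sum(ap * n for ap, n in zip(precisions, total_instances)) / sum(total_instances)

# Plain mean over classes that actually have ground-truth instances
mean_ap = sum(precisions) / sum(n > 0 for n in total_instances)

print('weighted mAP: {:.4f}  mAP: {:.4f}'.format(weighted_map, mean_ap))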
Code Example #2
    def on_epoch_end(self, epoch, logs=None):
        # run evaluation
        average_precisions = evaluate(self.generator,
                                      self.model,
                                      iou_threshold=self.iou_threshold,
                                      score_threshold=self.score_threshold,
                                      max_detections=self.max_detections,
                                      save_path=self.save_path)

        self.mean_ap = sum(
            average_precisions.values()) / len(average_precisions)

        if self.tensorboard is not None and self.tensorboard.writer is not None:
            import tensorflow as tf
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = self.mean_ap
            summary_value.tag = "mAP_train" if self.train else "mAP_test"
            self.tensorboard.writer.add_summary(summary, epoch)

        if self.verbose == 1:
            for label, average_precision in average_precisions.items():
                print(self.generator.label_to_name(label),
                      '{:.4f}'.format(average_precision))
            print('mAP: {:.4f}'.format(self.mean_ap))
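
Note that `tf.Summary` and the `tensorboard.writer` attribute used above are TensorFlow 1.x / standalone-Keras APIs. A rough TensorFlow 2.x equivalent for logging the same scalar (the log directory and values below are placeholders, not from the original callback) might look like this:

import tensorflow as tf

# Sketch of TF2-style scalar logging; log_dir, epoch and mean_ap stand in for the callback's values.
log_dir = "./logs"
epoch, mean_ap = 0, 0.5

writer = tf.summary.create_file_writer(log_dir)
with writer.as_default():
    tf.summary.scalar("mAP_test", mean_ap, step=epoch)
writer.flush()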
Code Example #3
def main(args=None):
    from keras import backend as K

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    print('Arguments: {}'.format(args))

    # create object that stores backbone information
    backbone = models.backbone(args.backbone)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    validation_generator = create_generators(args, backbone.preprocess_image)

    # create the model
    weights = args.weights
    # default to imagenet if nothing else is specified
    if weights is None and args.imagenet_weights:
        weights = backbone.download_imagenet()

    print('Creating model, this may take a second...')
    model, prediction_model = create_models(
        backbone_retinanet=backbone.retinanet,
        num_classes=validation_generator.num_classes(),
        weights=weights,
        multi_gpu=args.multi_gpu,
        freeze_backbone=args.freeze_backbone)

    # evaluate model
    print('Evaluating model from {}, this may take a while...'.format(
        args.weights))
    average_precisions = evaluate(
        validation_generator,
        prediction_model,
        iou_threshold=0.5,
        score_threshold=0.05,
        max_detections=300,
    )

    # print evaluation
    present_classes = 0
    precision = 0
    for label, (average_precision,
                num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations),
              validation_generator.label_to_name(label),
              'with average precision: {:.4f}'.format(average_precision))
        if num_annotations > 0:
            present_classes += 1
            precision += average_precision
    print('mAP: {:.4f}'.format(precision / present_classes))
Code Example #4
def main1():
    # for file in glob.glob("./snapshots/*_05.h5"):
    file = './snapshots/resnet50_csv_05.h5'
    # file = 'C:/Projects/OLD-keras-retinanet-master/snapshots/resnet50_csv_01.h5'
    map_total = 0

    for i in range(50, 100, 5):
        i = i / 100

        keras.backend.tensorflow_backend.set_session(get_session())
        model = keras.models.load_model(file, custom_objects=custom_objects)

        val_generator = CSVGenerator(
            csv_data_file='c:/MTSD/Updated/test - copy.csv',
            csv_class_file='c:/MTSD/Updated/classes.csv',
            base_dir='c:/MTSD/Updated/detection/',
            image_min_side=1440,
            image_max_side=2560,
            min_size=25)
        # analyse_images(val_generator)

        my_eval = eval.evaluate(val_generator,
                                model,
                                score_threshold=0.5,
                                iou_threshold=0.5,
                                save_path='C:/video-out/',
                                ground_truth=False)

        print(my_eval)

        print(sum(my_eval.values()) / 39)
        keras.backend.clear_session()
        break  # note: exits after the first iteration, so the 0.50-0.95 sweep runs only once
Code Example #5
def test_model(model, generator, score_threshold):
    result = evaluate(generator,
                      models.convert_model(model, None),
                      score_threshold=score_threshold,
                      iou_threshold=0.5,
                      max_detections=100,
                      save_path=None)
    return result
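
A hypothetical call site for `test_model` (the snapshot path, CSV files and generator below are assumptions, not part of the original snippet):

from keras_retinanet import models
from keras_retinanet.preprocessing.csv_generator import CSVGenerator

# Hypothetical usage; both file paths are placeholders.
generator = CSVGenerator('val_annotations.csv', 'classes.csv', shuffle_groups=False)
model = models.load_model('./snapshots/resnet50_csv_50.h5', backbone_name='resnet50')
print(test_model(model, generator, score_threshold=0.05))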
Code Example #6
def main(config, args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # create the generator
    generator = create_generator(args, config)

    # load the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.model,
                              backbone_name=args.backbone,
                              convert=args.convert_model)

    # print model summary
    # print(model.summary())

    # start evaluation
    if args.dataset_type == 'coco':
        from keras_retinanet.utils.coco_eval import evaluate_coco
        evaluate_coco(generator, model, args.score_threshold)
    else:
        average_precisions = evaluate(generator,
                                      model,
                                      iou_threshold=args.iou_threshold,
                                      score_threshold=args.score_threshold,
                                      max_detections=args.max_detections,
                                      save_path=args.save_path)

        # print evaluation
        present_classes = 0
        precision = 0
        for label, (average_precision,
                    num_annotations) in average_precisions.items():
            print('{:.0f} instances of class'.format(num_annotations),
                  generator.label_to_name(label),
                  'with average precision: {:.4f}'.format(average_precision))
            if num_annotations > 0:
                present_classes += 1
                precision += average_precision
        print('mAP: {:.4f}'.format(precision / present_classes))
Code Example #7
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        # run evaluation
        average_precisions = evaluate(
            self.generator,
            self.model,
            iou_threshold=self.iou_threshold,
            score_threshold=self.score_threshold,
            max_detections=self.max_detections,
            save_path=self.save_path
        )

        # compute per class average precision
        present_classes = 0
        precision = 0
        for label, (average_precision, num_annotations) in average_precisions.items():
            if self.verbose == 1:
                print('{:.0f} instances of class'.format(num_annotations),
                      self.generator.label_to_name(label),
                      'with average precision: {:.4f}'.format(average_precision))
            if self.save_map_path is not None:
                with open(self.save_map_path, 'a') as out:
                    out.write('{:.0f} instances of class {} with average precision: {:.4f}\n'.format(
                        num_annotations, self.generator.label_to_name(label), average_precision))
            if num_annotations > 0:
                present_classes += 1
                precision       += average_precision
        self.mean_ap = precision / present_classes

        if self.tensorboard is not None and self.tensorboard.writer is not None:
            import tensorflow as tf
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = self.mean_ap
            summary_value.tag = "mAP"
            self.tensorboard.writer.add_summary(summary, epoch)

        logs['mAP'] = self.mean_ap

        if self.save_map_path is not None:
            with open(self.save_map_path, 'a') as out:
                out.write('Ep {}: mAP: {:.4f}\n'.format(epoch + 1, self.mean_ap))

        if self.verbose == 1:
            print('mAP: {:.4f}'.format(self.mean_ap))
Code Example #8
File: callbacks.py  Project: pySirin/DeepForest
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        # run evaluation
        average_precisions = evaluate(self.generator,
                                      self.model,
                                      iou_threshold=self.iou_threshold,
                                      score_threshold=self.score_threshold,
                                      max_detections=self.max_detections,
                                      save_path=self.save_path,
                                      experiment=self.experiment)

        # compute per class average precision
        total_instances = []
        precisions = []
        for label, (average_precision,
                    num_annotations) in average_precisions.items():
            if self.verbose == 1:
                print(
                    '{:.0f} instances of class'.format(num_annotations),
                    self.generator.label_to_name(label),
                    'with average precision: {:.4f}'.format(average_precision))
            total_instances.append(num_annotations)
            precisions.append(average_precision)
        if self.weighted_average:
            self.mean_neon_ap = sum([
                a * b for a, b in zip(total_instances, precisions)
            ]) / sum(total_instances)
        else:
            self.mean_neon_ap = sum(precisions) / sum(x > 0
                                                      for x in total_instances)

        if self.verbose == 1:
            print('NEON mAP: {:.4f}'.format(self.mean_neon_ap))

        self.experiment.log_metric("Neon mAP", self.mean_neon_ap)
Code Example #9
test_csv_file = 'test_hpc_RMmin2_corrected.csv'
class_csv_file = 'classes.csv'

train_ds = pd.read_csv(test_csv_file)


test_generator = csv_generator.CSVGenerator(test_csv_file,
    csv_class_file=class_csv_file)


# In[ ]:


model_path = os.path.join('training-files/retinanet_100/models_for_testing/resnet50_csv_99_1.h5')

# load retinanet model
model = models.load_model(model_path, backbone_name='resnet50')
model = models.convert_model(model)
average_precisions,recall,precision,true_positives,false_positives = evaluate(
            test_generator,
            model,
            save_path='./save/test/')

print(average_precisions)
print(true_positives)
print(false_positives)


myData = [precision, recall]
myFile = open('pr-model_all_unfiltered.csv', 'w')
Code Example #10
def main(data, DeepForest_config, experiment, args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # Add separate dir
    # Save start time for logging
    dirname = datetime.now().strftime("%Y%m%d_%H%M%S")
    experiment.log_parameter("Start Time", dirname)

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path +
                                                         dirname):
        os.makedirs(args.save_path + dirname)

    # create the testing generators
    generator = create_generator(args, data, DeepForest_config)

    #create the NEON mAP generator
    NEON_generator = create_NEON_generator(args, site, DeepForest_config)

    # load the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.model,
                              backbone_name=args.backbone,
                              convert=args.convert_model,
                              nms_threshold=DeepForest_config["nms_threshold"])

    #print(model.summary())

    average_precisions = evaluate(generator,
                                  model,
                                  iou_threshold=args.iou_threshold,
                                  score_threshold=args.score_threshold,
                                  max_detections=args.max_detections,
                                  save_path=args.save_path + dirname)

    # print evaluation
    present_classes = 0
    precision = 0
    for label, (average_precision,
                num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations),
              generator.label_to_name(label),
              'with average precision: {:.4f}'.format(average_precision))
        if num_annotations > 0:
            present_classes += 1
            precision += average_precision
    print('mAP: {:.4f}'.format(precision / present_classes))
    experiment.log_metric("mAP", precision / present_classes)

    #Use field collected polygons only for Florida site
    #if site == "OSBS":

    ##Ground truth scores
    #jaccard=Jaccard(
    #generator=generator,
    #model=model,
    #score_threshold=args.score_threshold,
    #save_path=args.save_path,
    #experiment=experiment,
    #DeepForest_config=DeepForest_config
    #)
    #print(f" Mean IoU: {jaccard:.2f}")

    #experiment.log_metric("Mean IoU", jaccard)

    #Neon plot recall rate
    recall = neonRecall(site,
                        generator,
                        model,
                        score_threshold=args.score_threshold,
                        save_path=args.save_path,
                        experiment=experiment,
                        DeepForest_config=DeepForest_config)

    experiment.log_metric("Recall", recall)

    print(f" Recall: {recall:.2f}")

    #Logs the number of train and eval "trees"
    ntrees = sum([len(x) for x in generator.annotation_dict.values()])
    experiment.log_parameter("Number of Trees", ntrees)

    #NEON plot mAP
    average_precisions = evaluate(
        NEON_generator,
        model,
        iou_threshold=args.iou_threshold,
        score_threshold=args.score_threshold,
        max_detections=args.max_detections,
        save_path=args.save_path + dirname,
        experiment=experiment,
    )

    # print evaluation
    present_classes = 0
    precision = 0
    for label, (average_precision,
                num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations),
              generator.label_to_name(label),
              'with average precision: {:.4f}'.format(average_precision))
        if num_annotations > 0:
            present_classes += 1
            precision += average_precision
    print('NEON mAP: {:.4f}'.format(precision / present_classes))
    experiment.log_metric("NEON_mAP", precision / present_classes)
Code Example #11
def main(args=None, model_filename=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras and tensorflow are the minimum required version
    check_keras_version()
    check_tf_version()

    # optionally choose specific GPU
    if args.gpu:
        setup_gpu(args.gpu)

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generator
    backbone = models.backbone(args.backbone)
    generator = create_generator(args, backbone.preprocess_image)

    # optionally load anchor parameters
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)

    # load the model
    print('Loading model, this may take a second...')
    if args.continual_learning_model == 'dual_memory':  # Continual learning dual-memory modelling treatment
        base_models = LoadModels(args.historical_snapshots_folder,
                                 args.backbone, args.day_number)
        all_models = []
        for model in base_models:
            generator.compute_shapes = make_shapes_callback(model)
            if args.convert_model:
                model = models.convert_model(model,
                                             anchor_params=anchor_params)
            all_models.append(model)

        (average_precisions, inference_time, detections_per_model,
         final_detections) = evaluate_dual_memory_model(
             generator,
             all_models,
             iou_threshold=args.iou_threshold,
             score_threshold=args.score_threshold,
             max_detections=args.max_detections,
             save_path=args.save_path)

        # If bbox_savepath is given, save bounding box coordinates from the dual-memory model predictions:

        if args.bbox_savepath:
            detections_per_model = [[
                [class_predictions.tolist() for class_predictions in image]
                for image in model_predictions
            ] for model_predictions in detections_per_model]
            detections_with_filenames = {
                'final_detections': final_detections,
                'annotations': args.annotations,
                'detections_per_model': detections_per_model
            }
            with open(args.bbox_savepath, 'wt') as outf:
                json.dump(detections_with_filenames, outf)

            print("Finished dual memory model")
            print(average_precisions, inference_time)

    else:
        if model_filename is None:
            model_filename = args.model
        model = models.load_model(model_filename, backbone_name=args.backbone)

        generator.compute_shapes = make_shapes_callback(model)

        # optionally convert the model
        if args.convert_model:
            model = models.convert_model(model, anchor_params=anchor_params)

        # print model summary
        # print(model.summary())

        # start evaluation
        if args.dataset_type == 'coco':
            from ..utils.coco_eval import evaluate_coco
            evaluate_coco(generator, model, args.score_threshold)
        else:
            average_precisions, inference_time = evaluate(
                generator,
                model,
                iou_threshold=args.iou_threshold,
                score_threshold=args.score_threshold,
                max_detections=args.max_detections,
                save_path=args.save_path)

    # print evaluation
    total_instances = []
    precisions = []
    #labels = []
    for label, (average_precision,
                num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations),
              generator.label_to_name(label),
              'with average precision: {:.4f}'.format(average_precision))
        #labels.append(label)
        total_instances.append(num_annotations)
        precisions.append(average_precision)

    if sum(total_instances) == 0:
        print('No test instances found.')
        return

    print('Inference time for {:.0f} images: {:.4f}'.format(
        generator.size(), inference_time))

    print('mAP using the weighted average of precisions among classes: {:.4f}'.
          format(
              sum([a * b for a, b in zip(total_instances, precisions)]) /
              sum(total_instances)))
    print('mAP: {:.4f}'.format(
        sum(precisions) / sum(x > 0 for x in total_instances)))

    #print(labels)
    print(precisions)
    print(total_instances)

    # Save mAP and other accuracy statistics to mAP_savepath:

    mAP = sum(precisions) / sum(x > 0 for x in total_instances)
    date = datetime.now().strftime("%Y%m%d%H%M")
    with open(args.mAP_savepath, 'a') as outf:
        outf.write(
            f"{date}, {mAP}, {precisions}, {total_instances}, {model_filename}, {args.continual_learning_model}"
            + "\n")
    return mAP
Code Example #12
def main():
    ax = plt.gca()
    #ax.set_aspect(20)

    score_threshold = [0.3]  #, 0.4, 0.5, 0.6, 0.7, 0.8]
    setup_gpu(0)
    classes = Path(
        '/data/students_home/fschipani/thesis/MSc-Thesis-PJ/Dataset/KAIST_MPD/class_name_to_ID_CARS.csv'
    )
    annotations = Path(
        '/data/students_home/fschipani/thesis/MSc-Thesis-PJ/Dataset/KAIST_MPD/fine_tuning_kaist_cars/ds/test_w_people.csv'
    )
    save_path = Path(
        '/data/students_home/fschipani/thesis/MSc-Thesis-PJ/Dataset/tests_manual_annotations_08_cars/'
    )
    generator = create_generator(annotations, classes)
    #weight = '/data/students_home/fschipani/thesis/MSc-Thesis-PJ/Dataset/weights/manual_annotation_08.h5'
    weight_path_ra = '/data/students_home/fschipani/rand_augment/snapshots_ra_scratch'
    weight_path = '/data/students_home/fschipani/thesis/MSc-Thesis-PJ/Dataset/snapshots/'

    # optionally load anchor parameters
    anchor_params = None
    dataframe = pd.DataFrame(columns=[
        'epoch', 'score_threshold', 'person_instances', 'cyclist_instances',
        'cars_instances', 'map_person', 'map_cyclist', 'map_cars',
        'weighted_map', 'map', 'false_positives', 'true_positives', 'recall',
        'precision'
    ])
    for weight in glob.iglob(weight_path + '/*.h5'):
        print(weight)
        for threshold in [0.3]:
            os.makedirs(save_path.joinpath(str(threshold * 100)),
                        exist_ok=True)
            name_folder = weight.replace('/weights', '').replace('.h5', '')
            print(weight)
            model = models.load_model(weight, backbone_name='resnet50')
            model = models.convert_model(model, anchor_params=anchor_params)
            os.makedirs(save_path.joinpath(str(threshold *
                                               100)).joinpath(name_folder),
                        exist_ok=True)
            average_precisions, other_metrics = evaluate(
                generator,
                model,
                iou_threshold=0.5,
                score_threshold=threshold,
                max_detections=100,
                save_path=save_path.joinpath(str(threshold *
                                                 100)).joinpath(name_folder))
            total_instances = []
            precisions = []
            for label, (average_precision,
                        num_annotations) in average_precisions.items():
                print(
                    '{:.0f} instances of class'.format(num_annotations),
                    generator.label_to_name(label),
                    'with average precision: {:.4f}'.format(average_precision))
                total_instances.append(num_annotations)
                precisions.append(average_precision)
            if sum(total_instances) == 0:
                print('No test instances found.')
                return
            values = {
                'epoch': int(weight.replace('resnet50_csv_', '')
                             .replace('.h5', '')
                             .replace('/data/students_home/fschipani/thesis/MSc-Thesis-PJ/Dataset/snapshots/', '')),
                'score_threshold': threshold,
                'person_instances': int(average_precisions[0][1]),
                'cyclist_instances': int(average_precisions[1][1]),
                'cars_instances': int(average_precisions[2][1]),
                'map_person': average_precisions[0][0],
                'map_cyclist': average_precisions[1][0],
                'map_cars': average_precisions[2][0],
                'weighted_map': (sum([a * b for a, b in zip(total_instances, precisions)]) /
                                 sum(total_instances)),
                'map': (sum(precisions) / sum(x > 0 for x in total_instances)),
                'false_positives': pd.Series(other_metrics[0]),
                'true_positives': pd.Series(other_metrics[1]),
                'recall': pd.Series(other_metrics[2]),
                'precision': pd.Series(other_metrics[3])
            }
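            # Note: DataFrame.append was removed in pandas 2.0; pd.concat is its replacement in newer versions.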
            dataframe = dataframe.append(values, ignore_index=True)
            K.clear_session()
            #plt.plot(other_metrics[2][2], other_metrics[3][2], label = str(threshold)) #[recall, precision][class] usually class: 0->person 1->cyclist 2->cars
    dataframe.to_csv('./spero_sia_l_ultimo.csv')