Example #1
0
    def __new__(cls, root, subset='validation', num_cores=28, transform=None, filter=None):
        """Build and return a tf.data pipeline over ImageNet TFRecord shards.

        Note: returns a tf.data.Dataset, not an instance of this class.
        Reads the '<subset>-*-of-*' shards under ``root``, decodes each
        record, applies ``transform`` (with decoding prepended), and
        prefetches.  ``filter`` is accepted for interface compatibility but
        unused here.
        """
        assert subset in ('validation', 'train'), \
            'only support subset (validation, train)'
        logger.warning('This api is going to be deprecated, '
                       'please use ImageRecord instead')

        from tensorflow.python.platform import gfile
        glob_pattern = os.path.join(root, '%s-*-of-*' % subset)
        file_names = gfile.Glob(glob_pattern)
        if not file_names:
            raise ValueError('Found no files in --root matching: {}'.format(glob_pattern))

        from tensorflow.python.data.experimental import parallel_interleave
        from lpot.experimental.data.transforms.imagenet_transform import ParseDecodeImagenet
        dataset = tf.data.TFRecordDataset.list_files(file_names, shuffle=False)
        dataset = dataset.apply(
            parallel_interleave(tf.data.TFRecordDataset, cycle_length=num_cores))

        # Records must be parsed/decoded before any user-supplied transform runs.
        if transform is None:
            transform = ParseDecodeImagenet()
        else:
            transform.transform_list.insert(0, ParseDecodeImagenet())
        dataset = dataset.map(transform, num_parallel_calls=None)

        # Let TF tune the prefetch buffer size automatically.
        return dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
Example #2
0
File: metric.py  Project: vuiseng9/lpot
 def result(self):
     """Return the top-k accuracy, or 0 when no samples have been seen."""
     if self.num_sample:
         return self.num_correct / self.num_sample
     logger.warning("sample num is 0 can't calculate topk")
     return 0
Example #3
0
def iterator_sess_run(sess, iter_op, feed_dict, output_tensor, iteration=-1):
    """Run a graph that has an iterator integrated in it.

    Args:
        sess (tf.compat.v1.Session): the model session used to run the graph
        iter_op (Operator): the MakeIterator op
        feed_dict (dict): the feeds used to initialize a new iterator
        output_tensor (list): the output tensors to fetch each step
        iteration (int): number of iterations to run; when -1, run to the
            end of the iterator

    Returns:
        list: the step results transposed, so preds[i] collects output i
            across all steps
    """
    sess.run(iter_op, feed_dict)
    preds = []
    idx = 0
    while idx < iteration or iteration == -1:
        try:
            prediction = sess.run(output_tensor)
            preds.append(prediction)
            idx += 1
        except tf.errors.OutOfRangeError:
            # Iterator exhausted before reaching `iteration` steps: normal stop.
            break
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt and
            # SystemExit; narrowed while keeping the best-effort behavior.
            logger.warning('not run out of the preds...')
            break
    preds = list(zip(*preds))
    return preds
Example #4
0
    def result(self):
        """Compute DetectionBoxes mAP over the accumulated ground truths
        and detections, or 0 when nothing has been accumulated."""
        from .coco_tools import COCOWrapper, COCOEvalWrapper
        if not self.ground_truth_list:
            logger.warning("sample num is 0 can't calculate mAP")
            return 0
        # Assemble a COCO-format ground-truth dict from the accumulated state.
        groundtruth = {
            'annotations': self.ground_truth_list,
            'images': [{'id': image_id} for image_id in self.image_ids],
            'categories': [{'id': cat_id, 'name': cat_name}
                           for cat_id, cat_name in self.category_map.items()],
        }
        gt_wrapper = COCOWrapper(groundtruth)
        det_wrapper = gt_wrapper.LoadAnnotations(self.detection_list)
        evaluator = COCOEvalWrapper(gt_wrapper,
                                    det_wrapper,
                                    agnostic_mode=False)
        metrics, per_category_ap = evaluator.ComputeMetrics(
            include_metrics_per_category=False,
            all_metrics_per_category=False)
        metrics.update(per_category_ap)
        metrics = {'DetectionBoxes_' + name: value
                   for name, value in metrics.items()}

        return metrics['DetectionBoxes_Precision/mAP']
Example #5
0
    def __init__(self, root, subset='val', num_cores=28, transform=None, filter=None):
        """Index a <root>/<subset> image folder as (path, label) pairs.

        The label of each image is the index of its (sorted) per-class
        subfolder.  ``num_cores`` and ``filter`` are accepted for interface
        compatibility but unused here.
        """
        self.val_dir = os.path.join(root, subset)
        assert os.path.exists(self.val_dir), "find no val dir in {}".format(root) + \
            "please make sure there are train/val subfolders"
        import glob
        logger.warning('This api is going to be deprecated, ' + \
                       'please use ImageRecord instead')

        self.transform = transform
        class_dirs = sorted(glob.glob(os.path.join(self.val_dir, '*')))
        self.image_list = [
            (image_path, label)
            for label, class_dir in enumerate(class_dirs)
            for image_path in glob.glob(os.path.join(class_dir, '*'))
        ]
Example #6
0
def evaluate(predictions, dataset):
    """Compute the average SQuAD-style F1 score, scaled to 0-100.

    Args:
        predictions (dict): maps question id -> predicted answer text.
        dataset (list): articles, each with 'paragraphs' containing 'qas';
            every qa has an 'id' and a list of 'answers' with 'text' fields.

    Returns:
        float: 100 * (sum of per-question max-over-ground-truths F1) / #questions,
            or 0.0 when the dataset contains no questions.
    """
    # Dropped the unused `exact_match` accumulator (it was never updated
    # or returned).
    f1 = total = 0
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                total += 1
                if qa['id'] not in predictions:
                    message = 'Unanswered question ' + qa['id'] + \
                              ' will receive score 0.'
                    logger.warning(message)
                    continue

                ground_truths = list(map(lambda x: x['text'], qa['answers']))
                prediction = predictions[qa['id']]

                f1 += metric_max_over_ground_truths(
                    f1_score, prediction, ground_truths)

    # Guard against an empty dataset (previously raised ZeroDivisionError).
    if total == 0:
        return 0.0
    f1 = 100.0 * f1 / total
    return f1
Example #7
0
 def __call__(self, sample):
     """Identity transform: log a deprecation warning and return the
     sample unchanged (decoding now happens automatically)."""
     message = ('This transform is going to be deprecated, '
                'imagenet decoding will be performed automatically')
     logger.warning(message)
     return sample