Example #1
    def produce(self, ip):
        """
        Compute mAP (mean average precision) by comparing ground truth to prediction
        :param ip: ground truth list, prediction list
            ground truth is a list of lists where each element is (frame, label, [xmin, ymin, xmax, ymax], [attributes])
            prediction   is a list of lists where each element is (frame, label, [xmin, ymin, xmax, ymax], [attributes], confidence)
            Note: Even if there are no attributes, an empty list will be received from the previous chain object
        :return: mAP
        """
        actualRaw, predictedRaw = ip

        # keep only the rows whose labels pass the validity checks
        predicted = []
        for frame in predictedRaw:
            inList = []
            for row in frame:
                if self._is_valid(row, 'object') and self._is_valid(row, 'occlusion') and self._is_valid(row, 'activity'):
                    inList.append((row[1], row[2], row[-1]))  # required output format: (label, [xmin, ymin, xmax, ymax], conf)
            predicted.append(inList)

        actual = []
        for frame in actualRaw:
            inList = []
            for row in frame:
                if self._is_valid(row, 'object') and self._is_valid(row, 'occlusion') and self._is_valid(row, 'activity'):
                    inList.append((row[1], row[2]))  # required output format: (label, [xmin, ymin, xmax, ymax])
            actual.append(inList)

        assert len(actual) == len(predicted)

        return mAP.evaluate_mAP(actual, predicted)
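The three _is_valid calls decide which rows survive into the comparison; the helper itself is defined elsewhere on the chain object and is not shown in this example. As a rough, hypothetical sketch of the kind of per-category check it might perform (the validLabels whitelist below is invented for illustration, not part of the original class):

def _is_valid(row, category, validLabels):
    # Hypothetical stand-in for the chain object's _is_valid check: a row
    # passes when its label or any of its attributes appears on the
    # whitelist configured for the given category
    # ('object', 'occlusion', or 'activity').
    allowed = validLabels.get(category, [])
    return any(word in allowed for word in [row[1]] + row[3])

row = (0, 'person', [0, 0, 10, 10], ['walking'], 0.9)
print(_is_valid(row, 'object', {'object': ['person', 'dog']}))  # True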
Example #2
    def setUp(self):
        self.groundTruth = \
            {0: {'person': [0, 0, 10, 10]},
             1: {'person': [0, 0, 11, 11], 'dog': [20, 20, 30, 30]},
             2: {'person': [0, 0, 12, 12], 'dog': [0, 0, 13, 13]},
             3: {'dog': [0, 0, 12, 12], 'person': [0, 0, 13, 13]}}

        self.predicted = \
            {0: {'person': ([0, 0, 10, 10], 0.70), 'dog': ([6, 6, 11, 11], 0.75), 'cat': ([0, 0, 10, 10], 0.65)},
             1: {'person': ([0, 0, 11, 11], 0.71)},
             2: {'person': ([0, 0, 12, 12], 0.72)},
             3: {'person': ([0, 0, 13, 13], 0.71), 'dog': ([0, 0, 12, 12], 0.72)}}

        self.expectedIntuitiveOutDict = \
            {0: {'person': 'person'},
             1: {'person': 'person', 'dog': ''},
             2: {'person': 'person', 'dog': ''},
             3: {'person': 'person', 'dog': 'dog'}}

        self.expectedIntuitiveMetric = 0.714285714286

        self.actualIntuitiveMetric, self.actualIntuitiveOutDict = evaluate_intuitive(
            self.groundTruth, self.predicted, outputMatching=True)

        # hand calculation of mAP@2
        # AP = sum(P@i * (R@i - R@(i-1))), computed separately for each set of overlapping boxes
        # frame 0: (1*1) + (1/2 * 0) = 1
        #        : 0
        # frame 1: 1
        #        : 0
        # frame 2: 0.5
        # frame 3: 1
        # mAP = 3.5/6 = 0.5833
        self.expectedMAP = 0.5833

        self.actualMAP = evaluate_mAP(self.groundTruth, self.predicted, k=2, overlapThreshold=0.5)
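The hand calculation above hinges on box matching: a prediction only counts as a hit when its intersection over union (IoU) with a same-class ground-truth box reaches overlapThreshold. The helper below is a minimal self-contained illustration written for this note, not the library's own implementation; it reproduces two of the comparisons from the test data:

def iou(a, b):
    # Intersection over union of two [xmin, ymin, xmax, ymax] boxes.
    ix = max(0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union if union else 0.0

# Frame 3: the predicted dog box exactly matches the ground truth -> hit
print(iou([0, 0, 12, 12], [0, 0, 12, 12]))  # 1.0
# Frame 0: the spurious dog prediction barely overlaps the person box -> miss
print(iou([6, 6, 11, 11], [0, 0, 10, 10]))  # ~0.147, below the 0.5 threshold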
Example #3
    def produce(self, ip):
        """
        Compute mAP (mean average precision) by comparing ground truth to prediction
        :param ip: ground truth list, prediction list
            ground truth is a list of lists where each element is (frame, label, [xmin, ymin, xmax, ymax], [attributes])
            prediction   is a list of lists where each element is (frame, label, [xmin, ymin, xmax, ymax], [attributes], confidence)
            Note: Even if there are no attributes, an empty list will be received from the previous chain object
            Note: input of the occluded, generated, and lost flags is not currently accepted
        :return: mAP
        """
        actual, predicted = ip
        assert len(actual) == len(predicted)

        # form the ground truth list in the desired format -- if the label or attributes satisfy the
        # configured logic, create a summary output label; otherwise mark the label as absent
        gt = []
        for frame in actual:
            inList = []
            for row in frame:
                words2Check = [row[1]] + row[3]
                nLabel = ''
                for word in self.checkWords:
                    if word in words2Check:
                        nLabel += word
                    if self.checkLogic == 'and':
                        if word not in words2Check:
                            nLabel = 'absent'
                            break
                if not nLabel:
                    nLabel = 'absent'
                inList.append(tuple([nLabel] + [row[2]]))
            gt.append(inList)

        # same process for the predictions, but include the confidence score
        pd = []
        for frame in predicted:
            inList = []
            for row in frame:
                words2Check = [row[1]] + row[3]
                nLabel = ''
                for word in self.checkWords:
                    if word in words2Check:
                        nLabel += word
                    if self.checkLogic == 'and':
                        if word not in words2Check:
                            nLabel = 'absent'
                            break
                if not nLabel:
                    nLabel = 'absent'
                inList.append(tuple([nLabel] + [row[2]] + [row[-1]]))
            pd.append(inList)

        mAP = evaluate_mAP(gt, pd)
        yield mAP
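The two relabeling loops collapse each row's label and attributes into a single summary label driven by self.checkWords and self.checkLogic. A standalone sketch of that inner logic (summarize is a name introduced here for illustration; it mirrors the loop body above, including the concatenation of every matched word and the all-or-nothing behavior of 'and' logic):

def summarize(label, attributes, checkWords, checkLogic):
    # Concatenate every check word found among the label and attributes;
    # under 'and' logic, a single missing word forces 'absent'.
    nLabel = ''
    for word in checkWords:
        if word in [label] + attributes:
            nLabel += word
        elif checkLogic == 'and':
            return 'absent'
    return nLabel or 'absent'

print(summarize('person', ['walking'], ['person', 'walking'], 'and'))  # 'personwalking'
print(summarize('person', [], ['person', 'walking'], 'and'))           # 'absent'
print(summarize('person', [], ['person', 'walking'], 'or'))            # 'person'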