Пример #1
0
	def add_detection( self, detection ):
		"""Add *detection* to this path and fold its candidate ids into the
		running id statistics.

		Each timestamp may carry at most one detection: a duplicate timestamp
		is rejected with a warning and leaves all state untouched.
		"""
		if detection.timestamp not in self.detections:
			self.detections[ detection.timestamp ] = detection

			# Remember whether this is the very first id observation BEFORE the
			# loop below increments the counter. (Previously the counter was
			# tested after the loop, so the first-observation branch was only
			# reachable with an empty candidate list — where candidates[0]
			# then raised IndexError.)
			first_observation = self.ids_count == 0

			# use really all candidate_ids of a detection
			candidates = [ entry[0] for entry in detection.candidate_ids ]

			# or use only the set of unique candidates per detection
			#candidates = list( set( candidates ) )

			for candidate in candidates:

				self.ids_count += 1

				self.ids_sum += aux.int_id_to_binary( candidate )
				self.ids_sum_weighted_neighbourhood += aux.weighted_neighbourhood_id( candidate )
				# NOTE(review): ids_dist and ids_dists are distinct attributes —
				# confirm the near-identical naming is intentional
				self.ids_dist = self.ids_dist + aux.get_distribution_from_id( candidate )

			# for the add_observation method we only use the first candidate,
			# because the observations need to be independent
			if candidates:
				first_dist = aux.get_distribution_from_id( candidates[ 0 ] )
				if first_observation:
					self.ids_dists = first_dist
				else:
					self.ids_dists = aux.add_observation( self.ids_dists, first_dist, 0.1 )

		else:
			# print() form parses identically under Python 2 and 3
			print( 'Warning: detection not added, path already has detection for this timestamp' )
Пример #2
0
    def add_match(self, match):
        """Register *match* under its detection's timestamp and accumulate
        the detection's candidate-id evidence into the running sums."""
        self.matches[match.detection.timestamp] = match

        if match.detection.is_empty():
            return

        # Every candidate tuple contributes its id (index 0). Duplicates are
        # kept deliberately — restricting to the unique set was tried and is,
        # counterintuitively, not necessarily better.
        first_choices = [candidate[0] for candidate in match.detection.candidate_ids]

        for candidate in first_choices:
            self.match_ids_sum += aux.weighted_neighbourhood_id(candidate)
            self.match_ids_count += 1
Пример #3
0
	def add_match( self, match ):
		"""Store *match*, keyed by its detection's timestamp, and update the
		weighted-neighbourhood id accumulators from the detection."""
		detection = match.detection
		self.matches[ detection.timestamp ] = match

		if not detection.is_empty():
			# Each candidate tuple's id lives at index 0. All candidates are
			# used, duplicates included — the unique-set variant turned out
			# not to be clearly better.
			for entry in detection.candidate_ids:
				self.match_ids_sum += aux.weighted_neighbourhood_id( entry[0] )
				self.match_ids_count += 1
Пример #4
0
    def get_mean_id(self):
        """Return the cached mean id vector of this path, computing it lazily.

        The mean is the average of aux.weighted_neighbourhood_id over the
        first-choice candidate id of every non-empty detection in
        self.future_path. Returns None when no non-empty detection
        contributes any candidate (previously 0/0 produced — and permanently
        cached — a NaN vector via numpy's silent division by zero).
        """
        if self.mean_id is None:

            ids_sum = np.zeros(12)
            ids_count = 0

            for d in self.future_path:
                if not d.is_empty():
                    for c in (entry[0] for entry in d.candidate_ids):
                        ids_sum += aux.weighted_neighbourhood_id(c)
                        ids_count += 1

            # only cache a mean when there is actual evidence; otherwise
            # leave mean_id unset so a later call can retry
            if ids_count > 0:
                self.mean_id = ids_sum * 1.0 / ids_count

        return self.mean_id
Пример #5
0
	def get_mean_id( self ):
		"""Lazily compute and cache the path's mean id vector.

		Averages aux.weighted_neighbourhood_id over the first-choice candidate
		id of every non-empty detection in self.future_path. Returns None
		when nothing contributes (the 0/0 numpy division previously produced
		and cached a NaN vector).
		"""
		if self.mean_id is None:

			ids_sum = np.zeros( 12 )
			ids_count = 0

			for d in self.future_path:
				if not d.is_empty():
					for c in ( entry[0] for entry in d.candidate_ids ):
						ids_sum += aux.weighted_neighbourhood_id( c )
						ids_count += 1

			# guard the division: without evidence, leave mean_id unset
			if ids_count > 0:
				self.mean_id = ids_sum*1.0 / ids_count

		return self.mean_id
Пример #6
0
    def add_detection(self, detection):
        """Store *detection* under its timestamp, fold its candidate ids into
        the running accumulators, and mark all derived caches stale."""
        self.detections[detection.timestamp] = detection

        if not detection.is_empty():
            # Every candidate's first-choice id contributes; duplicates are
            # counted on purpose — the unique-set variant was,
            # counterintuitively, not necessarily better.
            for candidate in (entry[0] for entry in detection.candidate_ids):
                self.detections_ids_sum += aux.weighted_neighbourhood_id(candidate)
                self.detections_ids_count += 1

        # cached derived data is now out of date
        self.mean_id_needs_update = True
        self.sorted_detections_need_update = True
        self.sorted_unempty_detections_need_update = True
Пример #7
0
	def add_detection( self, detection ):
		"""Add *detection* to the path and update every running id statistic
		(plain, binarised-mean, neighbourhood-weighted, saliency- and
		confidence-weighted sums).

		Each timestamp may carry at most one detection: a duplicate timestamp
		is rejected with a warning and leaves all state untouched.
		"""
		if detection.timestamp not in self.detections:
			self.detections[ detection.timestamp ] = detection

			self.ids_count += 1

			self.ids_sum += detection.decoded_id
			self.ids_sum_mean += aux.int_id_to_binary( detection.decoded_mean )
			self.ids_sum_weighted_neighbourhood += aux.weighted_neighbourhood_id( detection.decoded_mean )

			# hoisted: the same conversion was done twice below
			decoded = np.array( detection.decoded_id )

			self.saliency_count += detection.localizer_saliency
			self.ids_sum_saliency += decoded * detection.localizer_saliency

			# confidence = distance of the least certain bit from 0.5, scaled to [0, 1]
			confidence = np.min( np.abs( 0.5 - detection.decoded_id ) * 2 )
			self.confidence_count += confidence
			self.ids_sum_confidence += decoded * confidence

		else:
			# print() form parses identically under Python 2 and 3 (the old
			# print statement was a syntax error on Python 3)
			print( 'Warning: detection not added, path already has detection for this timestamp' )
Пример #8
0
	def add_detection( self, detection ):
		"""Register *detection* by timestamp, accumulate its candidate-id
		evidence, and invalidate the cached derived data."""
		self.detections[ detection.timestamp ] = detection

		if not detection.is_empty():
			# first-choice id of every candidate tuple; duplicates are kept
			# deliberately (the unique-set variant was not clearly better)
			first_choice_ids = [ entry[0] for entry in detection.candidate_ids ]
			for candidate in first_choice_ids:
				self.detections_ids_sum += aux.weighted_neighbourhood_id( candidate )
			self.detections_ids_count += len( first_choice_ids )

		# everything derived from the detections is now stale
		self.mean_id_needs_update = True
		self.sorted_detections_need_update = True
		self.sorted_unempty_detections_need_update = True
Пример #9
0
	def determine_average_id_by_weighted_neighbourhood_2iter( self ):
		"""Determine the path's id in two passes.

		Pass 1: the running weighted-neighbourhood mean over all detections.
		Pass 2: re-average using only candidates whose binary id lies within
		Hamming distance 4 of that mean, then round and convert to an int id.
		Falls back to rounding the first-pass mean when no candidate survives
		the distance filter.
		"""
		mean_id = self.ids_sum_weighted_neighbourhood*1.0 / self.ids_count

		refined_sum = np.zeros( 12 )
		refined_count = 0

		for detection in self.detections.values():
			for candidate in ( entry[0] for entry in detection.candidate_ids ):
				candidate_bits = aux.int_id_to_binary( candidate )
				hamming = float( np.sum( np.abs( candidate_bits - mean_id ) ) )
				# keep only candidates close to the first-pass mean
				if hamming < 4:
					refined_sum += aux.weighted_neighbourhood_id( candidate )
					refined_count += 1

		if refined_count > 0:
			rounded = np.round( refined_sum*1.0 / refined_count )
		else:
			rounded = np.round( mean_id )

		return aux.binary_id_to_int( rounded )