Example #1
	def build_observations(self, target_position):

		if "malfunctioning PEs" not in self._simulation_parameters:
			super().build_observations(target_position)

			return

		# for convenience (notation)
		sensor_to_proc_elem = sensors_PEs_connector.sensors_PEs_mapping(self._PEsSensorsConnections)

		# ...idem
		failing_proc_elem = self._simulation_parameters["malfunctioning PEs"]

		self._observations = []

		for t, s in enumerate(target_position.T):

			# a list with the observations for all the sensors at the current time instant
			current_obs = []

			for i_sens, sens in enumerate(self._sensors):

				# the PE associated with this sensor
				i_pe = str(sensor_to_proc_elem[i_sens])

				# if the PE is in the list *and* marked to fail at this specific time instant...
				if (i_pe in failing_proc_elem) and (t in failing_proc_elem[i_pe]):

					if self._simulation_parameters["malfunctioning PEs deed"] == "pure noise":

						# ...the method yielding just noise is called
						current_obs.append(sens.measurement_noise())

					elif re.match(r'additive noise with variance (\d+)',
					              self._simulation_parameters["malfunctioning PEs deed"]):

						m = re.match(r'additive noise with variance (\d+)',
						             self._simulation_parameters["malfunctioning PEs deed"])

						# ...zero-mean Gaussian noise with the requested variance is added
						current_obs.append(
							sens.detect(state.to_position(s.reshape(-1, 1))) +
							self._PRNGs["Sensors and Monte Carlo pseudo random numbers generator"].randn() * np.sqrt(
								float(m.group(1))
							)
						)

					else:

						raise Exception("unknown deed for malfunctioning PEs")

				else:

					current_obs.append(sens.detect(state.to_position(s.reshape(-1, 1))))

			# all the observations for the current time instant are added to the final result
			self._observations.append(np.array(current_obs))
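
The structure of the simulation parameters this method relies on can be inferred from the code above; the following is a minimal sketch of a hypothetical configuration (the keys are the ones read by the method, while the PE indices, time instants and variance are made up):

# hypothetical values; only the keys read by build_observations are shown
simulation_parameters = {
	# PE "3" fails during time instants 10-19, PE "7" at instants 50 and 51
	"malfunctioning PEs": {"3": set(range(10, 20)), "7": {50, 51}},
	# the alternative "deed" would be "pure noise"
	"malfunctioning PEs deed": "additive noise with variance 4",
}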
Example #2
	def actual_sampling_step(self, observations):

		ro = np.exp(self.gamma_postgossip)

		# the significant samples according to the consensus algorithm...
		significant_samples = self._state[:, self.i_significant]

		# ...and their corresponding weights, which are at the same time updated using the first-stage weights
		updated_significant_weights = self.weights[self.i_significant]*ro

		# in order to avoid numerical problems...
		updated_significant_weights += 1e-200

		# ...when normalizing
		updated_significant_weights /= updated_significant_weights.sum()

		# resampling of the particles from the previous step according to the first-stage weights
		i_resampled = self._resampling_algorithm.get_indexes(updated_significant_weights, self.n_particles)
		resampled = significant_samples[:, i_resampled]
		self.ro = ro[i_resampled]

		self._state = self._state_transition_kernel.next_state(resampled)

		# for each sensor, we compute the likelihood of EVERY particle (position)
		likelihoods = np.array(
			[sensor.likelihood(obs, state.to_position(self._state)) for sensor, obs in zip(self._sensors, observations)])

		# careful with floating point arithmetic issues
		likelihoods += 1e-200

		loglikelihoods = np.log(likelihoods)

		# for each particle, we store the sum of the log-likelihoods over all the sensors scaled by the number of PEs
		# (i.e., the log of the product of the likelihoods raised to the number of PEs)
		self.gamma = self._n_PEs*loglikelihoods.sum(axis=0)
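
The last line works entirely in the log domain: scaling the summed log-likelihoods by the number of PEs amounts to raising the product of the per-sensor likelihoods to that power. A standalone check with made-up numbers (not part of the original class):

import numpy as np

# made-up likelihoods: 3 sensors x 5 particles
likelihoods = np.random.default_rng(0).uniform(0.1, 1.0, size=(3, 5))
n_PEs = 4

gamma = n_PEs * np.log(likelihoods).sum(axis=0)

# the same quantity computed in the linear domain (prone to underflow for many sensors)
assert np.allclose(gamma, np.log(likelihoods.prod(axis=0) ** n_PEs))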
Example #3
	def step(self, observations):

		assert len(observations) == len(self._sensors)

		# every particle is updated (previous state is not stored...)
		self._state = self._state_transition_kernel.next_state(self._state)

		# FIXME: this code is needed if the sensor is not RSS
		# # for each sensor, we compute the likelihood of EVERY particle (position)
		# likelihoods = np.array(
		# 	[sensor.likelihood(obs, state.to_position(self._state)) for sensor, obs in
		# 	 zip(self._sensors, observations)])

		# for EVERY sensor, we compute the likelihood of EVERY particle (position)
		likelihoods = self._sensors_array.likelihood(observations, state.to_position(self._state))

		# in order to avoid floating point arithmetic issues
		likelihoods += 1e-200

		loglikelihoods = np.log(likelihoods)

		# for each particle, we compute the sum of the log-likelihoods over all the sensors (the log of their product)
		self._loglikelihoods_product = loglikelihoods.sum(axis=0)

		# the weights are updated
		self._log_weights += self._loglikelihoods_product

		# the aggregated weight is kept up to date at all times
		self.update_aggregated_weight()

		# whatever is required (it depends on the algorithm) to avoid weights degeneracy...
		self.avoid_weight_degeneracy()
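
Adding the summed log-likelihoods to the log-weights is the usual importance-weight update carried out in the log domain; a minimal standalone check of the equivalence, with made-up weights and likelihoods:

import numpy as np

rng = np.random.default_rng(1)

# made-up normalized weights for 5 particles and likelihoods for 3 sensors
weights = rng.uniform(size=5)
weights /= weights.sum()
likelihoods = rng.uniform(0.1, 1.0, size=(3, 5))

# log-domain update, as in the method above
log_weights = np.log(weights) + np.log(likelihoods).sum(axis=0)

# same (unnormalized) result as multiplying the weights by the product of the likelihoods
assert np.allclose(np.exp(log_weights), weights * likelihoods.prod(axis=0))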
Example #4
	def step(self, observations):

		# oversampling (of the previous probability measure)
		i_oversampled_particles = self._resampling_algorithm.get_indexes(self.weights, self.n_particles*self._L)

		# oversampled particles are updated
		oversampled_particles = self._state_transition_kernel.next_state(self._state[:, i_oversampled_particles])

		# each sensor evaluates the likelihood of EVERY particle (position), and the product across sensors is taken
		likelihoods = np.prod([sensor.likelihood(obs, state.to_position(oversampled_particles))
		                       for sensor, obs in zip(self._sensors, observations)], axis=0)

		# in order to avoid dividing by zero
		likelihoods += 1e-200

		# normalization
		weights = likelihoods/likelihoods.sum()

		# indexes of the particles...
		i_sampled_particles = self._resampling_algorithm.get_indexes(weights, self.n_particles)

		# ...that determine the bounding box
		particles_bounding_box = oversampled_particles[:, i_sampled_particles]

		self.bounding_box_min = particles_bounding_box.min(axis=1)
		self.bounding_box_max = particles_bounding_box.max(axis=1)
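
A minimal sketch of how such a bounding box might be used afterwards, e.g. to draw uniformly distributed positions inside it; this is only an illustration with assumed values, not part of the class above:

import numpy as np

rng = np.random.default_rng()

# made-up 2-D bounding box, as produced by the min/max over the particles
bounding_box_min = np.array([0.0, -1.0])
bounding_box_max = np.array([3.0, 2.0])

# 100 positions drawn uniformly inside the box, one column per sample
samples = rng.uniform(bounding_box_min, bounding_box_max, size=(100, 2)).T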
Example #5
	def step(self, observations):

		# the exponents of the monomials and their associated coefficients are extracted from the betas obtained through consensus
		exponents = np.array(list(self.betaConsensus.keys()))
		betas = np.array(list(self.betaConsensus.values()))

		# for the sake of convenience
		x = state.to_position(self._state)

		# a matrix containing the monomials evaluated for all the x's
		phi = (x[:, :, np.newaxis]**exponents.T[:, np.newaxis, :]).prod(axis=0)

		# the exponent of the Joint Likelihood Function (JLF) for every particle (x), as computed in this PE
		S = (phi * betas[np.newaxis, :]).sum(axis=1)

		# S contains exponents... hence, subtracting a constant from S is tantamount to scaling e^S (the JLF)
		shifted_S = S - max(S)

		# the weights should be multiplied by e^shifted_S and divided by the sum thereof, and when taking
		# the logarithm this yields
		self._log_weights += shifted_S - np.log(np.exp(shifted_S).sum())

		# the aggregated weight is kept up to date at all times
		self.update_aggregated_weight()

		# whatever is required (it depends on the algorithm) to avoid weights degeneracy...
		self.avoid_weight_degeneracy()
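
Subtracting max(S) before exponentiating is the standard log-sum-exp trick: it leaves the normalized weights unchanged while preventing np.exp from overflowing. A standalone sketch with made-up exponents:

import numpy as np

# made-up exponents of the JLF for 4 particles; exponentiating them directly would overflow
S = np.array([1000.0, 998.0, 995.0, 990.0])

shifted_S = S - S.max()

# the same update as above, starting from uniform (zero) log-weights
log_weights = shifted_S - np.log(np.exp(shifted_S).sum())

# the resulting weights are properly normalized despite the huge exponents
assert np.isclose(np.exp(log_weights).sum(), 1.0)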
Example #6
    def simulate_trajectory(self, nTimeInstants):

        # initial state is obtained by means of the prior...
        self._state = self._prior.sample(
            PRNG=self._pseudo_random_numbers_generator)

        # a trajectory with the requested number of time instants (the first column holds the initial state)
        computedTrajectory = np.empty((state.n_elements, nTimeInstants))

        # initial state is set
        computedTrajectory[:, 0:1] = self._state

        # the rest of the trajectory is simulated (notice that observations are not built here, and that there is
        # no observation for the initial position)
        for iTime in range(1, nTimeInstants):

            # a new state is obtained as the target moves...
            self._state = self._transition_kernel.next_state(
                self._state, PRNG=self._pseudo_random_numbers_generator)

            # ...and it is stored in the corresponding column (index iTime)
            computedTrajectory[:, iTime:iTime + 1] = self._state

        return (state.to_position(computedTrajectory),
                state.to_velocity(computedTrajectory))
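
The [:, iTime:iTime + 1] slices keep each column as a 2-D array, so the column-vector state returned by the transition kernel can be assigned without reshaping; a quick illustration of the difference:

import numpy as np

trajectory = np.empty((4, 10))
column_vector_state = np.ones((4, 1))

# a 2-D slice (shape (4, 1)) accepts the column vector directly...
trajectory[:, 0:1] = column_vector_state

# ...whereas a 1-D slice (shape (4,)) would require flattening it first
trajectory[:, 1] = column_vector_state.ravel()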
Example #7
	def polynomial_approximation(self, observations):

		x = state.to_position(self._state)

		# in the first array, the samples matrix (<component within sample>, <sample index>) is replicated along the
		# third dimension; in the second one, the third dimension indexes the monomials
		phi = (x[:, :, np.newaxis]**self._r_a.T[:, np.newaxis, :]).prod(axis=0)

		# the true values for the function to be approximated
		A = self.likelihood_mean(x).T

		# the coefficients of the approximation (each row corresponds to one coefficient)
		Y = np.linalg.pinv(phi).dot(A)

		# the solution is stored in a dictionary for easy access
		alpha = dict(zip(self._r_a_tuples, Y))

		# a dictionary (indexed by the elements in r_d_tuples) with the computed coefficients gamma
		gamma = {}

		for r, possible_combinations in zip(self._r_d, self._rs_gamma):

			accum = 0

			# we sum over all the possible combinations (each one gives rise to one term)
			for t in possible_combinations:

				# alpha * covariance * alpha^T (alpha has been stored as a row vector)
				accum += alpha[t[:self._M]].dot(self._noiseCovariance).dot(alpha[t[self._M:]][:, np.newaxis])

			accum /= 2

			# the computed value is added to the dictionary
			gamma[tuple(r)] = accum.item(0)

		# this term is independent of the indices
		b = self._noiseCovariance.dot(observations)

		# a dictionary to store the beta component associated with every vector of indices
		self.beta = {}

		for r in self._r_d_tuples:

			deg = sum(r)

			if deg <= self._R_p:

				self.beta[r] = alpha[r].dot(b) - gamma[r]

			elif deg <= (2*self._R_p):

				self.beta[r] = - gamma[r]

			else:

				raise Exception('coefficient for this combination of exponents not found!!')
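
At its core, the method performs an ordinary least-squares fit onto a monomial basis, solved with the pseudo-inverse. A simplified, self-contained sketch of the same pattern (made-up 2-D particles and target function; the exponent bookkeeping of the original method is omitted):

import numpy as np

rng = np.random.default_rng(0)

# 200 two-dimensional "particles" (one column per particle)
x = rng.uniform(-1.0, 1.0, size=(2, 200))

# exponents of the monomials 1, x0, x1, x0*x1 (one row per monomial)
exponents = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])

# design matrix: every monomial evaluated at every particle (same broadcasting trick as above)
phi = (x[:, :, np.newaxis] ** exponents.T[:, np.newaxis, :]).prod(axis=0)

# made-up target function evaluated at the particles
target = 1.0 + 2.0 * x[0] - 3.0 * x[1] + 0.5 * x[0] * x[1]

# least-squares coefficients via the pseudo-inverse, as in polynomial_approximation
coefficients = np.linalg.pinv(phi).dot(target)

assert np.allclose(coefficients, [1.0, 2.0, -3.0, 0.5])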
Example #8
    def build_observations(self, target_position):

        # a list with, for every time instant, an array containing the observations of all the sensors
        # REMARK: conversion to float is done so that binary observations (1 or 0) can be used in later computations
        self._observations = [
            np.array([
                sens.detect(state.to_position(s[:, np.newaxis]))
                for sens in self._sensors
            ],
                     dtype=float) for s in target_position.T
        ]
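
The comprehension yields one float array per time instant, with one entry per sensor. A minimal sketch with a hypothetical stub sensor, only meant to show the resulting structure (state.to_position is omitted since the made-up trajectory below already contains positions):

import numpy as np


class StubSensor:

    # hypothetical sensor that "detects" the target whenever it lies in the upper half-plane
    def detect(self, position):
        return position[1, 0] > 0


sensors = [StubSensor(), StubSensor()]

# 3 time instants, one 2-D position per column
target_position = np.array([[0.0, 1.0, 2.0], [1.0, -1.0, 3.0]])

observations = [
    np.array([sens.detect(s[:, np.newaxis]) for sens in sensors], dtype=float)
    for s in target_position.T
]

# a list with 3 arrays of shape (2,): one observation per sensor and time instant
print(observations)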
Example #9
	def step(self, observations):

		# every particle is propagated for computing the first-stage weights
		auxiliar_state = self._state_transition_kernel.next_state(self._state)

		# for each sensor, we compute the likelihood of EVERY particle (position)
		likelihoods = np.array(
			[sensor.likelihood(obs, state.to_position(auxiliar_state)) for sensor, obs in zip(self._sensors, observations)])

		# careful with floating point arithmetic issues
		likelihoods += 1e-200

		loglikelihoods = np.log(likelihoods)

		# for each particle, we store the sum of the log-likelihoods over all the sensors scaled by the number of PEs
		# (i.e., the log of the product of the likelihoods raised to the number of PEs)
		self.gamma = self._n_PEs*loglikelihoods.sum(axis=0)
Example #10
	def actual_sampling_step(self, observations):

		# resampling of the particles from the previous time instant (with a RandomState object that should be
		# synchronized across the different PEs)
		i_sampled_particles = self._resampling_algorithm.get_indexes(self.weights)

		# ...the resulting particles
		self._state, self.norm_constants = self.rejection_sampling(self._state[:, i_sampled_particles])

		likelihoods = np.prod([sensor.likelihood(obs, state.to_position(self._state))
		                       for sensor, obs in zip(self._sensors, observations)], axis=0)

		# in order to avoid numerical precision problems
		likelihoods += 1e-200

		self.loglikelihoods = np.log(likelihoods)
Example #11
	def process_frame(self, target_position, target_velocity):

		# let the super class do its thing...
		super().process_frame(target_position, target_velocity)

		# this array will store the results before they are saved
		estimated_pos = np.full((state.n_elements_position, self._n_time_instants, len(self._estimators)), np.nan)

		# for every PF (not to be confused with the estimators)...
		for pf in self._PFs:
			# ...initialization
			pf.initialize()

		for iTime in range(self._n_time_instants):

			print(colorama.Fore.LIGHTWHITE_EX + '---------- iFrame = {}, iTime = {}'.format(self._i_current_frame,
			                                                                                iTime) + colorama.Style.RESET_ALL)

			print(colorama.Fore.CYAN + 'position:\n' + colorama.Style.RESET_ALL, target_position[:, iTime:iTime + 1])
			print(colorama.Fore.YELLOW + 'velocity:\n' + colorama.Style.RESET_ALL, target_velocity[:, iTime:iTime + 1])

			# for every PF (not to be confused with the estimators)...
			for pf in self._PFs:
				# ...a step is taken
				pf.step(self._observations[iTime])

			# for every estimator, along with its corresponding label,...
			for iEstimator, (estimator, label) in enumerate(zip(self._estimators, self._estimators_labels)):
				# for the sake of efficiency
				current_estimated_pos = state.to_position(estimator.estimate())

				self._estimated_pos[:, iTime:iTime + 1, self._i_current_frame, iEstimator] = current_estimated_pos

				# the position given by this estimator at the current time instant is written to the HDF5 file
				estimated_pos[:, iTime:iTime + 1, iEstimator] = current_estimated_pos

				print('position estimated by {}\n'.format(label),
				      self._estimated_pos[:, iTime:iTime + 1, self._i_current_frame, iEstimator])

		# the results (estimated positions) are saved
		self._h5_current_frame.create_dataset(
			'estimated position', shape=estimated_pos.shape, dtype=float, data=estimated_pos)

		# in order to make sure the HDF5 file is valid...
		self._f.flush()
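
The create_dataset/flush calls suggest the results are written with h5py; assuming so, the stored estimates could be read back along these lines (the file name and the path of the frame group are placeholders, not taken from the original code):

import h5py

# placeholder file name and frame group; adapt them to the actual layout of the output file
with h5py.File('simulation.h5', 'r') as f:
	estimated_pos = f['frames/0/estimated position'][...]

# axes: (position component, time instant, estimator)
print(estimated_pos.shape)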
Example #12
	def step(self, observations):

		assert len(observations) == len(self._sensors)

		# the particles are propagated with a *fake* RandomState that always returns 0's
		predictions = self._state_transition_kernel.next_state(self._state, self._fake_random_state)

		# for each sensor, we compute the likelihood of EVERY predicted particle (position)
		predictions_likelihoods = np.array(
			[sensor.likelihood(obs, state.to_position(predictions)) for sensor, obs in
			 zip(self._sensors, observations)])

		# + 1e-200 in order to avoid division by zero
		predictions_likelihoods_product = predictions_likelihoods.prod(axis=0) + 1e-200

		# first-stage weights in Auxiliary Particle Filter
		sampling_weights = predictions_likelihoods_product / predictions_likelihoods_product.sum()

		i_particles_resampled = self._resampling_algorithm.get_indexes(sampling_weights)

		# every particle is updated (previous state is not stored...)
		self._state = self._state_transition_kernel.next_state(self._state[:, i_particles_resampled])

		# for each sensor, we compute the likelihood of EVERY particle (position)
		likelihoods = np.array([
				sensor.likelihood(obs, state.to_position(self._state))
				for sensor, obs in zip(self._sensors, observations)
				])

		# careful with floating point arithmetic issues
		likelihoods += 1e-200

		loglikelihoods = np.log(likelihoods)

		# for each particle, we compute the product of the likelihoods for all the sensors
		loglikelihoods_product = loglikelihoods.sum(axis=0)

		# every likelihood is raised to the power of the estimated number of PEs
		# REMARK: exponentiating the argument of the logarithm is tantamount to multiplying the logarithm
		self._log_weights = loglikelihoods_product*self.estimated_n_PEs - np.log(
			predictions_likelihoods_product[i_particles_resampled])

		# normalization
		self.normalize_weights_and_update_aggregated()

		# -------------------------

		# we compute the mean...
		self._mean = (self.weights[np.newaxis, :] * self.samples).sum(axis=1)

		# ...and (weighted) covariance
		self._covariance = np.cov(self.samples, ddof=0, aweights=self.weights)

		# if the matrix is singular (this happens when a single particle accumulates most of the weight)
		cond = np.linalg.cond(self._covariance)
		if np.isnan(cond) or cond > (1 / sys.float_info.epsilon):

			# the (weighted) covariance matrix being zero does NOT mean there is no uncertainty about the random vector.
			# On the contrary, it *most likely* means that ALL the likelihoods are really small, which results in a
			# single (arbitrary, due to numerical precision) weight concentrating all the mass (corresponding to the
			# biggest likelihood, which may be 10^-12...)
			self._covariance += np.identity(state.n_elements)*self._default_variance

		# we try...
		try:

			# ...to invert the covariance matrix
			inv_covariance = np.linalg.inv(self._covariance)

		# if it's not possible (singular matrix)...
		except np.linalg.LinAlgError:

			# an epsilon is added to the diagonal of the covariance matrix
			inv_covariance = np.linalg.inv(self._covariance + np.identity(self._covariance.shape[0])*1e-9)

		# Q and nu are updated
		self._Q, self._nu = inv_covariance, inv_covariance @ self._mean
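
The Q and nu computed at the end are the inverse covariance and the inverse covariance times the mean, i.e. the canonical (information-form) parameters of the Gaussian summarized by the particles. A standalone sketch, with made-up moment-form parameters, of how one maps back and forth:

import numpy as np

# made-up moment-form parameters
mean = np.array([1.0, -2.0])
covariance = np.array([[2.0, 0.3], [0.3, 1.0]])

# information-form parameters, computed as at the end of the method above
inv_covariance = np.linalg.inv(covariance)
Q, nu = inv_covariance, inv_covariance @ mean

# going back: the mean solves Q x = nu, and the covariance is the inverse of Q
assert np.allclose(np.linalg.solve(Q, nu), mean)
assert np.allclose(np.linalg.inv(Q), covariance)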
Example #13
    def pos(self):

        return state.to_position(self._state)
Example #14
    def process_frame(self, target_position, target_velocity):

        # let the super class do its thing...
        super().process_frame(target_position, target_velocity)

        for iTopology, (pf, distributed_pf) in enumerate(
                zip(self._PFsForTopologies,
                    self._distributedPFsForTopologies)):

            n_PEs = self._settings_topologies[iTopology]['number of PEs']

            # the last dimension is for the number of algorithms (centralized and distributed)
            estimated_pos = np.full(
                (state.n_elements_position, self._n_time_instants, 2), np.nan)

            aggregated_weights = np.full((self._n_time_instants, n_PEs),
                                         np.nan)

            # initialization of the particle filters
            pf.initialize()
            distributed_pf.initialize()

            for iTime in range(self._n_time_instants):

                print('---------- iFrame = {}, iTopology = {}, iTime = {}'.
                      format(self._i_current_frame, iTopology, iTime))

                print('position:\n', target_position[:, iTime:iTime + 1])
                print('velocity:\n', target_velocity[:, iTime:iTime + 1])

                # particle filters are updated
                pf.step(self._observations[iTime])
                distributed_pf.step(self._observations[iTime])

                # the mean computed by the centralized and distributed PFs
                centralizedPF_mean = pf.compute_mean()
                distributedPF_mean = distributed_pf.compute_mean()

                estimated_pos[:, iTime:iTime + 1,
                              0] = state.to_position(centralizedPF_mean)
                estimated_pos[:, iTime:iTime + 1,
                              1] = state.to_position(distributedPF_mean)

                self._centralizedPF_pos[:, iTime:iTime + 1,
                                        self._i_current_frame,
                                        iTopology] = state.to_position(
                                            centralizedPF_mean)
                self._distributedPF_pos[:, iTime:iTime + 1,
                                        self._i_current_frame,
                                        iTopology] = state.to_position(
                                            distributedPF_mean)

                # the aggregated weights of the different PEs in the distributed PF are stored
                self._distributedPFaggregatedWeights[iTopology][
                    iTime, :,
                    self._i_current_frame] = distributed_pf.aggregated_weights
                aggregated_weights[
                    iTime, :] = distributed_pf.aggregated_weights

                print('centralized PF\n', centralizedPF_mean)
                print('distributed PF\n', distributedPF_mean)

            # data is saved
            h5_estimated_pos = self._h5_current_frame.create_dataset(
                'topology/{}/estimated position'.format(iTopology),
                shape=estimated_pos.shape,
                dtype=float,
                data=estimated_pos)

            h5_estimated_pos.attrs['M'] = n_PEs

            self._h5_current_frame.create_dataset(
                'topology/{}/DPF aggregated weights'.format(iTopology),
                aggregated_weights.shape,
                dtype=float,
                data=aggregated_weights)