def ensemble_forward(self, x, noise_set_count):
    # Sample an ensemble of noise vectors and encode them; the encoder output
    # parameterises the weights of a small two-layer main network per member.
    noise_set = self.sample_noise_set(noise_set_count)
    codes = self.encoder(noise_set)
    l1_b1 = self.W1(codes)  # codes[0]: encoded info for layer 1; l1: main net layer-1 weights
    l1, b1 = l1_b1[0], l1_b1[1]
    l2_b2 = self.W2(codes)  # codes[1]: encoded info for layer 2; l2: main net layer-2 weights
    l2, b2 = l2_b2[0], l2_b2[1]

    # Concatenate the per-member weights so the whole ensemble is evaluated
    # with a single pair of batched linear layers.
    l1_concat = l1.reshape(-1, l1.shape[2])
    b1_concat = b1.flatten()
    b2_concat = b2.flatten()
    l2_concat = l2.reshape(b2_concat.shape[0], -1)

    x = F.linear(x, l1_concat) + b1_concat
    x = self.relu(x)
    x = x.reshape(-1, l1.shape[1])
    x = F.linear(x, l2_concat) + b2_concat
    x = self.tanh(x)

    # Block-diagonal mask zeroes the cross-member terms before the outputs
    # are summed back into per-member action values.
    mask = utils.block_diag(torch.ones(x.shape[0], 1, self.n_actions)).to(self.gpu)
    x = x * mask
    x = torch.sum(x, dim=0)
    x = x.reshape(-1, self.n_actions)
    return x
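# A minimal sketch of the behaviour assumed of `utils.block_diag` above: it
# takes a batch of (r, c) blocks and lays them along the diagonal of a single
# 2-D tensor, which is what lets one F.linear call evaluate every ensemble
# member at once. This is an assumption about that helper, not its source.
import torch

def block_diag_sketch(blocks: torch.Tensor) -> torch.Tensor:
    """blocks: (n, r, c) -> (n * r, n * c) block-diagonal tensor."""
    return torch.block_diag(*blocks.unbind(0))

# E.g. three ensemble members with 4 actions each:
mask = block_diag_sketch(torch.ones(3, 1, 4))
assert mask.shape == (3, 12)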
def tip_prior_full(prior):
    # This is yet to be properly defined. For now it creates the TIP prior, and
    # `prior` just contains the size of the array. This function will be
    # replaced with the real code once we know what the priors look like.
    x_prior, c_prior, c_inv_prior = tip_prior()
    n_pixels = prior['n_pixels']
    mean = np.array([x_prior for i in range(n_pixels)]).flatten()
    c_inv_prior_mat = [c_inv_prior for n in range(n_pixels)]
    prior_cov_inverse = block_diag(c_inv_prior_mat, dtype=np.float32)
    return mean, prior_cov_inverse
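# A self-contained sketch of what `tip_prior_full` assembles, with hypothetical
# stand-ins for the still-undefined `tip_prior()` values: a 7-parameter prior
# replicated over pixels, with the inverse covariance stored as a sparse
# block-diagonal matrix via scipy.
import numpy as np
from scipy.sparse import block_diag

n_params, n_pixels = 7, 4
x_prior = np.full(n_params, 0.5)       # hypothetical per-pixel prior mean
c_inv_prior = np.eye(n_params)         # hypothetical per-pixel inverse covariance

mean = np.tile(x_prior, n_pixels)      # shape (n_pixels * 7,)
prior_cov_inverse = block_diag([c_inv_prior] * n_pixels, dtype=np.float32)
assert prior_cov_inverse.shape == (n_pixels * 7, n_pixels * 7)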
def integrate_distribution(self, v_dist, x=None):
    r"""
    Integrates the GMM velocity distribution to a distribution over position.
    The Kalman Equations are used.

    .. math:: \mu_{t+1} = \textbf{F} \mu_{t}

    .. math:: \mathbf{\Sigma}_{t+1} = \textbf{F} \mathbf{\Sigma}_{t} \textbf{F}^{T}

    where the full state covariance stacks the position and velocity blocks:

    .. math::
        \mathbf{\Sigma}_{t} = \left[
        \begin{array}{cccc}
            \sigma_x^2 & \rho_p \sigma_x \sigma_y & 0 & 0 \\
            \rho_p \sigma_x \sigma_y & \sigma_y^2 & 0 & 0 \\
            0 & 0 & \sigma_{v_x}^2 & \rho_v \sigma_{v_x} \sigma_{v_y} \\
            0 & 0 & \rho_v \sigma_{v_x} \sigma_{v_y} & \sigma_{v_y}^2
        \end{array}
        \right]

    :param v_dist: Joint GMM Distribution over velocity in x and y direction.
    :param x: Not used for SI.
    :return: Joint GMM Distribution over position in x and y direction.
    """
    p_0 = self.initial_conditions['pos'].unsqueeze(1)
    ph = v_dist.mus.shape[-3]  # prediction horizon
    sample_batch_dim = list(v_dist.mus.shape[0:2])
    pos_dist_sigma_matrix_list = []

    # Mean position is the initial position plus the time-integrated velocity mean.
    pos_mus = p_0[:, None] + torch.cumsum(v_dist.mus, dim=2) * self.dt

    vel_dist_sigma_matrix = v_dist.get_covariance_matrix()
    pos_dist_sigma_matrix_t = torch.zeros(sample_batch_dim + [v_dist.components, 2, 2],
                                          device=self.device)
    for t in range(ph):
        vel_sigma_matrix_t = vel_dist_sigma_matrix[:, :, t]
        # Stack position and velocity covariances into the full 4x4 state covariance.
        full_sigma_matrix_t = block_diag([pos_dist_sigma_matrix_t, vel_sigma_matrix_t])
        # Propagate through F and keep only the position block.
        pos_dist_sigma_matrix_t = self.F[..., :2, :].matmul(
            full_sigma_matrix_t.matmul(self.F_t)[..., :2])
        pos_dist_sigma_matrix_list.append(pos_dist_sigma_matrix_t)

    pos_dist_sigma_matrix = torch.stack(pos_dist_sigma_matrix_list, dim=2)
    return GMM2D.from_log_pis_mus_cov_mats(v_dist.log_pis, pos_mus, pos_dist_sigma_matrix)
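# A worked numpy sketch of the covariance recursion above, assuming the
# standard single-integrator transition matrix (positions integrate
# velocities); `self.F` is not shown in the source, so this F is an assumption.
import numpy as np

dt = 0.4
F = np.array([[1., 0., dt, 0.],
              [0., 1., 0., dt],
              [0., 0., 1., 0.],
              [0., 0., 0., 1.]])

pos_sigma = np.zeros((2, 2))                     # running position covariance
vel_sigma = np.array([[0.3, 0.1], [0.1, 0.2]])   # example velocity covariance

for _ in range(3):                               # three prediction steps
    full_sigma = np.block([[pos_sigma, np.zeros((2, 2))],
                           [np.zeros((2, 2)), vel_sigma]])
    # Sigma_{t+1} = F Sigma_t F^T, keeping only the position block.
    pos_sigma = (F @ full_sigma @ F.T)[:2, :2]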
def no_propagation(x_analysis, P_analysis, P_analysis_inverse,
                   M_matrix, Q_matrix,
                   prior=None, state_propagator=None, date=None):
    """No propagation: we simply return the original prior. As the
    information filter behaviour is the standard behaviour in KaFKA, we
    only return the inverse covariance matrix.

    **NOTE** the input parameters are there to comply with the API, but
    are **UNUSED**.

    Parameters
    ----------
    x_analysis : array
        The analysis state vector. This comes either from the assimilation
        or directly from a previously propagated state.
    P_analysis : 2D sparse array
        The analysis covariance matrix (typically a sparse matrix). As this
        is an information filter update, you will typically pass `None`, as
        it is unused.
    P_analysis_inverse : 2D sparse array
        The INVERSE analysis covariance matrix (typically a sparse matrix).
    M_matrix : 2D array
        The linear state propagation model.
    Q_matrix : 2D array (sparse)
        The state uncertainty inflation matrix that is added to the
        covariance matrix.

    Returns
    -------
    x_forecast (forecast state vector), `None` and P_forecast_inverse
    (forecast inverse covariance matrix)
    """
    x_prior, c_prior, c_inv_prior = tip_prior()
    n_pixels = len(x_analysis) // 7  # 7 state parameters per pixel
    x_forecast = np.array([x_prior for i in range(n_pixels)]).flatten()
    c_inv_prior_mat = [c_inv_prior for n in range(n_pixels)]
    P_forecast_inverse = block_diag(c_inv_prior_mat, dtype=np.float32)
    return x_forecast, None, P_forecast_inverse
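# Why returning the inverse covariance is cheap here: for a block-diagonal
# matrix, the inverse is the block-diagonal of the per-block inverses, so the
# information filter never has to invert the full (n_pixels * 7)^2 matrix.
# A small numpy/scipy check of that identity:
import numpy as np
from scipy.sparse import block_diag

c = np.diag([0.1, 0.5, 2.0])                       # example per-pixel covariance
full_cov = block_diag([c, c], dtype=np.float32).toarray()
full_inv = block_diag([np.linalg.inv(c)] * 2, dtype=np.float32).toarray()
assert np.allclose(full_cov @ full_inv, np.eye(6), atol=1e-6)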
def advance(self, x_analysis, P_analysis, P_analysis_inverse):
    """Advance the state"""
    LOG.info("Pinty-fying the prior")
    # Defining the prior
    n_pixels = len(x_analysis) // 7  # NOTE: assumed derivation (7 state
                                     # parameters per pixel); `n_pixels` was
                                     # undefined in the original.
    sigma = np.array([0.12, 0.7, 0.0959, 0.15, 1.5, 0.2, 0.5])  # broadly TLAI 0->7 for 1 sigma
    x0 = np.array([0.17, 1.0, 0.1, 0.7, 2.0, 0.18, np.exp(-0.5 * 1.5)])
    x0 = np.array([x0 for i in range(n_pixels)]).flatten()
    # The individual (per-pixel) covariance matrix
    little_p = np.diag(sigma ** 2).astype(np.float32)
    little_p[5, 2] = 0.8862 * 0.0959 * 0.2
    little_p[2, 5] = 0.8862 * 0.0959 * 0.2
    inv_p = np.linalg.inv(little_p)
    xlist = [inv_p for m in range(n_pixels)]
    P_forecast_inverse = block_diag(xlist, dtype=np.float32)
    P_forecast = None
    x_forecast = x_analysis
    LOG.info("Pinty-fied!")
    return x_forecast, P_forecast, P_forecast_inverse
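# Sanity check on the off-diagonal term above: 0.8862 is the implied
# correlation coefficient between parameters 2 and 5, since the entry is
# rho * sigma_2 * sigma_5 with sigma_2 = 0.0959 and sigma_5 = 0.2.
import numpy as np

sigma = np.array([0.12, 0.7, 0.0959, 0.15, 1.5, 0.2, 0.5])
little_p = np.diag(sigma ** 2)
little_p[2, 5] = little_p[5, 2] = 0.8862 * 0.0959 * 0.2
rho = little_p[2, 5] / (sigma[2] * sigma[5])
assert np.isclose(rho, 0.8862)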
def hessian_correction(gp, x0, R_mat, innovation, mask, state_mask, band, nparams):
    """Calculates the higher-order Hessian correction for the likelihood
    term. Needs the GP, the observational uncertainty, the mask, ..."""
    if not hasattr(gp, "hessian"):
        # The observation operator does not provide a Hessian method, so
        # return 0, meaning no Hessian correction.
        return 0.
    C_obs_inv = R_mat.diagonal()[state_mask.flatten()]
    mask = mask[state_mask].flatten()
    little_hess = []
    for i, (innov, C, m) in enumerate(zip(innovation, C_obs_inv, mask)):
        if not m:
            # Pixel is masked: contribute a zero block.
            hessian_corr = np.zeros((nparams, nparams))
        else:
            # Get state for current pixel
            x0_pixel = x0.squeeze()[(nparams * i):(nparams * (i + 1))]
            # Calculate the Hessian correction for this pixel
            hessian_corr = m * hessian_correction_pixel(gp, x0_pixel, C,
                                                        innov, band, nparams)
        little_hess.append(hessian_corr)
    hessian_corr = block_diag(little_hess)
    return hessian_corr
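# A toy illustration of how the per-pixel Hessian blocks are stitched
# together: masked pixels contribute zero blocks, and a sparse block_diag
# keeps the (n_pixels * nparams)^2 result cheap to store.
import numpy as np
from scipy.sparse import block_diag

nparams = 2
blocks = [np.eye(nparams),                  # unmasked pixel
          np.zeros((nparams, nparams)),     # masked pixel
          2.0 * np.eye(nparams)]            # unmasked pixel
H = block_diag(blocks)
assert H.shape == (3 * nparams, 3 * nparams)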
def propagate_information_filter_LAI(x_analysis, P_analysis, P_analysis_inverse,
                                     M_matrix, Q_matrix,
                                     prior=None, state_propagator=None, date=None):
    x_forecast = M_matrix.dot(x_analysis)
    x_prior, c_prior, c_inv_prior = tip_prior()
    n_pixels = len(x_analysis) // 7
    x0 = np.array([x_prior for i in range(n_pixels)]).flatten()
    x0[6::7] = x_forecast[6::7]  # Update LAI
    print("LAI:", -2 * np.log(x_forecast[6::7]))
    # Per-pixel posterior inverse variance of the (transformed) LAI term; the
    # [6::7] stride selects the LAI entry of each pixel's 7-parameter block.
    lai_post_cov = P_analysis_inverse.diagonal()[6::7]
    c_inv_prior_mat = []
    for n in range(n_pixels):
        c_inv_prior[6, 6] = lai_post_cov[n]
        # Append a copy so each block keeps its own LAI entry; appending the
        # mutated array itself would make every block identical to the last.
        c_inv_prior_mat.append(c_inv_prior.copy())
    P_forecast_inverse = block_diag(c_inv_prior_mat, dtype=np.float32)
    return x0, None, P_forecast_inverse
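# The .copy() above matters: appending the same array object and mutating it
# in place makes every block see the final iteration's value. A minimal
# demonstration of the pitfall:
import numpy as np

block = np.zeros((2, 2))
aliased = []
for v in (1.0, 2.0, 3.0):
    block[0, 0] = v
    aliased.append(block)                        # same object every time
assert all(b[0, 0] == 3.0 for b in aliased)      # all blocks hold the last value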
                     n_params=7)
# Defining the prior
sigma = np.array([0.12, 0.7, 0.0959, 0.15, 1.5, 0.2, 0.5])  # broadly TLAI 0->7 for 1 sigma
x0 = np.array([0.17, 1.0, 0.1, 0.7, 2.0, 0.18, np.exp(-0.5 * 1.5)])
x0 = np.array([x0 for i in range(n_pixels)]).flatten()
# The individual (per-pixel) covariance matrix
little_p = np.diag(sigma ** 2).astype(np.float32)
little_p[5, 2] = 0.8862 * 0.0959 * 0.2
little_p[2, 5] = 0.8862 * 0.0959 * 0.2
inv_p = np.linalg.inv(little_p)
xlist = [inv_p for m in range(n_pixels)]
P_forecast_inv = block_diag(xlist, dtype=np.float32)
# State uncertainty inflation, with the TLAI entry (every 7th element) set explicitly.
Q = np.ones(n_pixels * 7) * 0.0001
Q[6::7] = 0.0001  # TLAI
# Need to set the trajectory model and uncertainty inflation.
kalman.set_trajectory_model(tilewidth, tilewidth)           # e.g. (512, 512)
kalman.set_trajectory_uncertainty(Q, tilewidth, tilewidth)  # e.g. (512, 512)
# Prior needs to be reorganised to be block diagonal.
kalman.run(x0, None, P_forecast_inv,
           diag_str="half_pinty",
           approx_diagonal=True, refine_diag=False,