def simulate_network_dynamics(self, ntrials, params, connectivity_relaxation=1 / 0.15, AR_coefficient=0.6, verbose=True):
    """Generate simulated network time series and the ground-truth connectivity kernels.

    Parameters
    ----------
    ntrials : int
        Number of sample trials to draw via self.run_sampler().
    params : dict
        Simulation settings with keys:
        'network' (p x p adjacency matrix), 'time_step', 'time_period',
        and 'connection_strength' (scalar scale applied to the adjacency).
    connectivity_relaxation : float, optional
        Relaxation constant applied to every off-diagonal connection.
    AR_coefficient : float, optional
        AR(1) coefficient; mapped to a per-source relaxation rate
        -(AR_coefficient - 1) / time_step.
    verbose : bool, optional
        If True, print progress and timing (uses the module-level `utility`).

    Returns
    -------
    (trials, ground_truth) : tuple
        trials -- whatever self.run_sampler() stored in self.samples;
        ground_truth -- array of shape (time_period / time_step, p, p) whose
        [:, i, j] slice is the true kernel self.conn_function(t, ...) for
        every off-diagonal (i, j) pair (diagonal stays zero).
    """
    adj_mat = params['network']
    time_step = params['time_step']
    time_period = params['time_period']
    connectivity_strength = params['connection_strength']
    p = adj_mat.shape[0]
    # Map the AR(1) coefficient to a continuous-time relaxation rate.
    relaxation_coef = -(AR_coefficient - 1) / time_step
    # BUGFIX: scale a *copy* of the adjacency matrix. The original used
    # `adj_mat *= connectivity_strength`, which silently mutates the
    # caller's params['network'] array in place.
    adj_mat = adj_mat * connectivity_strength
    connectivity_relaxation_mat = connectivity_relaxation * np.ones((p, p))
    # Zero the diagonal: no relaxation term for self-connections.
    connectivity_relaxation_mat[np.eye(p, dtype=bool)] = 0
    source_relaxation = relaxation_coef * np.ones((p, ))
    # Effectively infinite moving-average time constants.
    MA_constants = 10**15 * np.ones((p, ))
    self.dynamic_parameters = {
        "number_sources": p,
        "connectivity_weights": adj_mat,  # connectivity matrix of n x n
        "connectivity_relaxations_constants": connectivity_relaxation_mat,
        "moving_average_time_constants": MA_constants,
        "relaxation_constants": source_relaxation
    }
    if verbose:
        print('Generating {:d} samples...'.format(ntrials), end='', flush=True)
        utility.tic()
    self.run_sampler(number_samples=ntrials)
    if verbose:
        print('done ({:0.2f} seconds).'.format(utility.toc()))
    t = self.time_meshgrid["time_range"]
    trials = self.samples
    ground_truth = np.zeros((int(time_period / time_step), p, p))
    for i in range(0, p):
        for j in range(0, p):
            if i != j:
                ground_truth[:, i, j] = self.conn_function(
                    t, connectivity_relaxation_mat, adj_mat, i, j)
    return (trials, ground_truth)
'network': adj_mat,
    'connection_strength': connection_strength,
    'time_step': time_step,
    'time_period': time_period
}
""" Simulation settings. We generate <ntrials_train> trials to train the dynamic parameters on, and <ntrials_test> to learn the GP posterior. """
# Trials used to fit the dynamic parameters.
ntrials_train = 5
# Trials used to learn the GP posterior.
ntrials_test = 5
simulation = sim.integroDifferential_simulator()
print('Generating {:d} simulation samples'.format(ntrials_train + ntrials_test))
utility.tic()
# NOTE(review): this call passes (ntrials_train, ntrials_test, params) and
# unpacks three return values, but the simulate_network_dynamics defined
# earlier in this file takes (ntrials, params, ...) and returns a 2-tuple —
# confirm which simulator version this script targets.
(training_samples, testing_samples, ground_truth) = simulation.simulate_network_dynamics(ntrials_train, ntrials_test, simulation_params)
utility.toc()
""" Plot a few samples to see the generated time series. """
diagnostics.plot_samples(training_samples[0:3])
""" Simulation is done. Time to bake some cake! """
cake = gpcake.gpcake()
def run_analysis(self, data, onlyTrials=False, show_diagnostics=False):
    """Estimate posterior connectivity kernels for every trial in `data`.

    Parameters
    ----------
    data : array-like
        Stack of trials; np.shape(np.asarray(data)) must unpack as
        (nsamples, nsources, nfrequencies).
    onlyTrials : bool, optional
        With self.parallelthreads > 1, parallelise over whole trials (True)
        instead of the flattened (trial, source, target) loop (False).
    show_diagnostics : bool, optional
        If True, print the GP CaKe parameter matrices and timing info.

    Returns
    -------
    numpy.ndarray
        Connectivity of shape (nsamples, nsources, nsources, nfrequencies);
        entry [s, i, j, :] is the estimated kernel from source i to target j
        for trial s (zero wherever structural_constraint[i, j] is falsy).
    """
    ## private helpers -- these close over dynamic_polynomials,
    ## moving_average_kernels, covariance_matrices, nsamples, nsources and
    ## nfrequencies, all bound in the function body further down.
    def run_analysis_body(sample):
        # Compute the full nsources x nsources kernel matrix for one trial.
        sample_connectivity = np.zeros((nsources, nsources, nfrequencies))
        x = self.__get_fourier_time_series(sample)
        Px = self.__get_modified_processes(x, dynamic_polynomials)
        observation_models = self.__get_observation_models(x, moving_average_kernels)
        for j in range(0, nsources):  # target
            total_covariance_matrix = self.__get_total_covariance_matrix(covariance_matrices,
                                                                         observation_models,
                                                                         j)
            for i in range(0, nsources):  # source
                if self.structural_constraint[i,j]:
                    # NOTE(review): the original author flagged this index
                    # order ("check this! [i,j] or [j,i]??") — verify against
                    # __get_covariance_matrices before trusting results.
                    connectivity_kernel = self.__posterior_kernel_cij_temporal(observation_models[i],
                                                                               covariance_matrices[i][j],
                                                                               total_covariance_matrix,
                                                                               Px[j])
                    sample_connectivity[i, j, :] = connectivity_kernel
        return sample_connectivity
    #
    def run_analysis_serial(data):
        # Plain loop over trials; the reference code path.
        connectivity = np.zeros((nsamples, nsources, nsources, nfrequencies))
        for s_ix, sample in enumerate(data):
            connectivity[s_ix, :, :, :] = run_analysis_body(sample)
        return connectivity
    #
    def run_analysis_parallel(data):
        # Thread-pool parallelisation over whole trials.
        # NOTE(review): this path looks broken as written:
        #   * run_analysis_parallel_wrapper takes (self, parallel_args_struct)
        #     but pool.map supplies a single argument per item;
        #   * it calls self.__run_analysis_body, yet the helper in scope is
        #     the plain local run_analysis_body, whose signature accepts only
        #     `sample` (none of the extra keyword arguments passed here).
        # Confirm and repair before relying on onlyTrials=True with
        # parallelthreads > 1.
        def run_analysis_parallel_wrapper(self, parallel_args_struct):
            return self.__run_analysis_body(sample=parallel_args_struct['sample'],
                                            moving_average_kernels=parallel_args_struct['MA'],
                                            covariance_matrix=parallel_args_struct['cov'],
                                            dynamic_polynomials=parallel_args_struct['dypoly'])
        # function body
        connectivity = np.zeros((nsamples, nsources, nsources, nfrequencies))
        # multiprocessing.dummy provides a *thread* pool behind the Pool API.
        from multiprocessing.dummy import Pool as ThreadPool
        pool = ThreadPool(processes=self.parallelthreads)
        parallel_args = []
        for sample in data:
            # One argument bundle per trial.
            parallel_args_struct = {}
            parallel_args_struct['sample'] = sample
            parallel_args_struct['MA'] = moving_average_kernels
            parallel_args_struct['cov'] = covariance_matrices
            parallel_args_struct['dypoly'] = dynamic_polynomials
            parallel_args += [parallel_args_struct]
        # Execute parallel computation
        parallel_results_list = pool.map(run_analysis_parallel_wrapper, parallel_args)
        # Collect results
        for i in range(0, nsamples):
            connectivity[i,:,:,:] = parallel_results_list[i]
        pool.close()
        pool.join()
        return connectivity
    #
    def run_analysis_parallel_flatloop(data):
        # Thread-pool parallelisation over the flattened
        # (trial, source, target) index set.
        def posterior_kernel_cij_temporal_parwrap(parallel_args_struct):
            """ The bread-and-butter of the method. """
            # unpack
            y = np.matrix(parallel_args_struct['Px_j']).T
            # NOTE(review): 'cov' carries covariance_matrices[i] (see the loop
            # below), so this local name shadows the outer list-of-lists.
            # The serial path indexes covariance_matrices[i][j] — confirm the
            # two paths compute the same quantity.
            covariance_matrices = parallel_args_struct['cov']
            observation_models = parallel_args_struct['obs_models']
            i = parallel_args_struct['i']
            j = parallel_args_struct['j']
            # NOTE(review): four arguments here vs. three in the serial path's
            # call to __get_total_covariance_matrix — verify the intended
            # signature.
            total_cov_matrix = self.__get_total_covariance_matrix(covariance_matrices,
                                                                  observation_models,
                                                                  j,
                                                                  self.noise_level)
            # Posterior mean of the kernel in the frequency domain, then
            # back to the time domain via inverse FFT.
            c_ij = covariance_matrices * observation_models[i].H \
                   * matrix_division(divider = total_cov_matrix,
                                     divided = y,
                                     side = "left",
                                     cholesky = "no")
            return np.real(np.fft.fftshift(np.fft.ifft(np.fft.ifftshift(np.array(c_ij).flatten()), axis = 0)))
        # body
        connectivity = np.zeros((nsamples, nsources, nsources, nfrequencies))
        # Initialize parallelization
        from multiprocessing.dummy import Pool as ThreadPool
        pool = ThreadPool(processes=self.parallelthreads)
        parallel_iterable = []
        for k, sample in enumerate(data):
            # Per-trial preprocessing happens serially; only the kernel
            # computation itself is farmed out to the pool.
            x = self.__get_fourier_time_series(sample)
            Px = self.__get_modified_processes(x, dynamic_polynomials)
            observation_models = self.__get_observation_models(x, moving_average_kernels)
            for j in range(0, nsources):
                for i in range(0, nsources):
                    if self.structural_constraint[i,j]:
                        parallel_args_struct = {}
                        parallel_args_struct['cov'] = covariance_matrices[i]
                        parallel_args_struct['k'] = k
                        parallel_args_struct['obs_models'] = observation_models
                        parallel_args_struct['i'] = i
                        parallel_args_struct['j'] = j
                        parallel_args_struct['Px_j'] = Px[j]
                        parallel_iterable += [parallel_args_struct]
        # Execute parallel computation
        parallel_results_list = pool.map(posterior_kernel_cij_temporal_parwrap, parallel_iterable)
        # todo: properly unwrap this, instead of this ugly hack:
        for m, result in enumerate(parallel_results_list):
            k = parallel_iterable[m]['k']
            i = parallel_iterable[m]['i']
            j = parallel_iterable[m]['j']
            connectivity[k,i,j,:] = result
        return connectivity
    ## function body of run_analysis()
    # NOTE(review): `!= None` should be `is not None`; kept unchanged here.
    assert self.parameter_matrices != None, "Please specify the parameters of the causal response function first."
    dynamic_polynomials = self.__get_dynamic_polynomials()
    moving_average_kernels = self.__get_moving_average_kernels()
    covariance_matrices = self.__get_covariance_matrices()
    # Dimensions used by all helpers above via closure.
    (nsamples, nsources, nfrequencies) = np.shape(np.asarray(data))
    self.__get_frequency_range()
    # Default constraint: every connection allowed except self-connections.
    if self.structural_constraint is None:
        self.structural_constraint = np.ones(shape=(nsources, nsources)) - np.eye(nsources)
    if show_diagnostics:
        print('GP CaKe parameters:')
        print('\nTime scales (nu_ij):')
        print(np.array(utility.fill_diagonal(utility.nested_map(lambda x: x[0], self.parameter_matrices), 0)))
        print('\nTime shifts (t_ij):')
        print(np.array(utility.fill_diagonal(utility.nested_map(lambda x: x[1], self.parameter_matrices), 0)))
        print('\nSpectral smoothing: (theta_ij)')
        print(np.array(utility.fill_diagonal(utility.nested_map(lambda x: x[2], self.parameter_matrices), 0)))
        print('\nNoise levels (sigma_i):')
        print(np.array(self.noise_vector))
        print('\nConnectivity constraint (G_ij):')
        print(self.structural_constraint)
    utility.tic()
    connectivity = None
    # Dispatch on the requested parallelisation strategy.
    if self.parallelthreads > 1 and not onlyTrials:
        print('Parallel implementation (p = {:d}).'.format(self.parallelthreads))
        connectivity = run_analysis_parallel_flatloop(data)
    elif self.parallelthreads > 1 and onlyTrials:
        print('Parallel implementation (p = {:d}, over trials only).'.format(self.parallelthreads))
        connectivity = run_analysis_parallel(data)
    else:
        #print('Serial implementation.')
        connectivity = run_analysis_serial(data)
    if show_diagnostics:
        utility.toc()
    return connectivity