def update_lfp(self, new_lfp, t, x=None):
    """Replace the stored LFP (and optionally its spatial/temporal grids)."""
    if x is not None:
        self.x = x
        self.spatial_cov.reset_x(x)
    self.t = t
    for tcov in self.temporal_cov_list:
        tcov.t = t
    self.lfp = np.atleast_3d(new_lfp)
def hess_func(*args):
    result = anp.atleast_3d(
        elementwise_hess(argwrapper)(anp.array(
            anp.broadcast_arrays(*args))))
    # Put 'hessian' axes at end
    axes = list(range(len(result.shape)))
    result = result.transpose(*chain(axes[2:], axes[0:2]))
    return result
def hess_func(*args):
    # Note we're mixing anp with np calls here, on purpose
    result = anp.atleast_3d(
        elementwise_hess(argwrapper)(np.array(
            np.broadcast_arrays(*args),
            dtype=float)))  # np.float is deprecated/removed; use builtin float
    # Put 'hessian' axes at end
    axes = list(range(len(result.shape)))
    result = result.transpose(*chain(axes[2:], axes[0:2]))
    return result
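# A minimal, self-contained sketch of the pattern the two hess_func variants
# above implement, assuming only the autograd package: differentiate a scalar
# function twice with respect to a stacked argument array. The helpers
# `elementwise_hess` and `argwrapper` are not shown in this section, so the
# function `f` below is a hypothetical stand-in purely for illustration.
import autograd.numpy as anp
from autograd import hessian

def f(args):
    x, y = args  # unpack the stacked arguments, as argwrapper does
    return anp.sin(x) * y + x * y**2

H = hessian(f)(anp.array([0.5, 2.0]))  # shape (2, 2): d2f / dargs_i dargs_j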
def evaluate_with_gradients(self, x: np.ndarray) -> Tuple:
    """
    Computes the Expected Improvement and its derivative. We use
    autograd [TODO cite] to find the gradient.

    :param x: points where the acquisition is evaluated.
    """
    if not AUTOGRAD:
        raise NotImplementedError()
    elif self.grad_fun is None:
        self.grad_fun = egrad(self._l2_ei)
    y_min = ((self.model.Y - self.target)**2).sum(axis=1).min()
    k = self.target.shape[-1]

    # Values and derivatives for GP mean and variance w.r.t. input
    means, variances = self.model.predict(x)
    if variances.shape[-1] < k:
        variances = np.repeat(variances, k, axis=1)
    dmean_dx, dvariance_dx = self.model.model.predictive_gradients(x)
    if dvariance_dx.ndim == 2:
        dvariance_dx = np.atleast_3d(dvariance_dx)
        dvariance_dx = np.repeat(dvariance_dx, k, axis=2)

    # Values and derivatives for Expected Improvement w.r.t. mean and
    # variances
    ei = self._l2_ei((means, variances), y_min)
    dei_dmean, dei_dvariance = self.grad_fun((means, variances), y_min)

    # Derivatives for Expected Improvement w.r.t. input
    dei_dx = np.zeros_like(x)
    for i in range(len(dei_dx)):
        dei_dmean_dx = np.dot(dmean_dx[i], dei_dmean[i])
        dei_dvariance_dx = np.dot(dvariance_dx[i], dei_dvariance[i])
        dei_dx[i] = dei_dmean_dx + dei_dvariance_dx
    return np.atleast_2d(ei).T, dei_dx
def evaluate_with_gradients(self, x: np.ndarray) -> Tuple:
    """
    Computes the negative lower confidence bound and its derivative. We
    use autograd [TODO cite] to find the gradient.

    :param x: points where the acquisition is evaluated.
    """
    if not AUTOGRAD:
        raise NotImplementedError()
    elif self.grad_fun is None:
        self.grad_fun = egrad(self._l2_bound)

    # Values and derivatives for GP mean and variance w.r.t. input
    means, variances = self.model.predict(x)
    k = means.shape[-1]
    if variances.shape[-1] < k:
        variances = np.repeat(variances, k, axis=1)
    dmean_dx, dvariance_dx = self.model.model.predictive_gradients(x)
    if dvariance_dx.ndim == 2:
        dvariance_dx = np.atleast_3d(dvariance_dx)
        dvariance_dx = np.repeat(dvariance_dx, k, axis=2)

    # Values and derivatives for confidence bound w.r.t. mean and
    # variances
    bound = self._l2_bound((means, variances))
    dbound_dmean, dbound_dvariance = self.grad_fun((means, variances))

    # Derivatives for confidence bound w.r.t. input
    dbound_dx = np.zeros_like(x)
    for i in range(len(dbound_dx)):
        dbound_dmean_dx = np.dot(dmean_dx[i], dbound_dmean[i])
        dbound_dvariance_dx = np.dot(dvariance_dx[i], dbound_dvariance[i])
        dbound_dx[i] = dbound_dmean_dx + dbound_dvariance_dx
    return np.atleast_2d(bound).T, dbound_dx
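# A sketch of how one might sanity-check the analytic gradients returned by
# either evaluate_with_gradients method against central finite differences.
# This helper is hypothetical (not part of the original code); `acquisition`
# is assumed to expose the evaluate_with_gradients(x) interface above, where
# the returned values have shape (n_points, 1) and gradients match x's shape.
import numpy as np

def check_acquisition_gradients(acquisition, x, h=1e-6):
    values, grads = acquisition.evaluate_with_gradients(x)
    fd = np.zeros_like(x)
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            xp, xm = x.copy(), x.copy()
            xp[i, j] += h
            xm[i, j] -= h
            vp, _ = acquisition.evaluate_with_gradients(xp)
            vm, _ = acquisition.evaluate_with_gradients(xm)
            fd[i, j] = (vp[i, 0] - vm[i, 0]) / (2.0 * h)
    # Maximum absolute discrepancy; should be small (roughly O(h**2))
    return np.max(np.abs(fd - grads))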
def __init__(self, lfp, x, t, a1=None, b1=None, a2=None, b2=None,
             ngl1=20, ngl2=60, spatial_cov=None, temporal_cov_list=None,
             R_prior=None, sig2n_prior=None, eps=None):
    """
    :param lfp: LFP array, shape (n_spatial_lfp, n_time, n_trials);
        recommend rescaling to approximately std dev = 1
    :param x: LFP observed spatial locations, shape (n_spatial_lfp, 2),
        in microns
    :param t: LFP observed time points, shape (n_time, 1), in milliseconds
    :param a1: Edge of range for integration in first spatial direction
        (defaults to np.min(x[:, 0]))
    :param b1: Edge of range for integration in first spatial direction
        (defaults to np.max(x[:, 0]))
    :param a2: Edge of range for integration in second spatial direction
        (defaults to np.min(x[:, 1]))
    :param b2: Edge of range for integration in second spatial direction
        (defaults to np.max(x[:, 1]))
    :param ngl1: order of Gauss-Legendre integration in first spatial
        direction (defaults to 20)
    :param ngl2: order of Gauss-Legendre integration in second spatial
        direction (defaults to 60)
    :param spatial_cov: Instance of GPCSD2DSpatialCovSE
    :param temporal_cov_list: list of instances of temporal covariance
        objects (GPCSDTemporalCovSE or GPCSDTemporalCovMatern)
    :param R_prior: Instance of a prior for R (defaults to
        GPCSDInvGammaPrior)
    :param sig2n_prior: Instance of a prior for noise variance (defaults
        to GPCSDHalfNormalPrior)
    :param eps: forward-model regularization parameter (defaults to
        5 times the minimum spatial grid spacing)
    """
    lfp = np.atleast_3d(lfp)
    self.lfp = lfp
    self.x = x
    self.t = t
    if a1 is None:
        a1 = np.min(x[:, 0])
    if b1 is None:
        b1 = np.max(x[:, 0])
    self.a1 = a1
    self.b1 = b1
    if a2 is None:
        a2 = np.min(x[:, 1])
    if b2 is None:
        b2 = np.max(x[:, 1])
    self.a2 = a2
    self.b2 = b2
    self.ngl1 = ngl1
    self.ngl2 = ngl2
    if spatial_cov is None:
        spatial_cov = GPCSD2DSpatialCovSE(self.x, a1=a1, b1=b1, a2=a2,
                                          b2=b2, ngl1=ngl1, ngl2=ngl2)
    self.spatial_cov = spatial_cov
    if temporal_cov_list is None:
        temporal_cov_list = [GPCSDTemporalCovSE(t),
                             GPCSDTemporalCovMatern(t)]
    self.temporal_cov_list = temporal_cov_list
    x1, x2 = reduce_grid(x)
    min_delta_x = np.min([np.min(np.diff(x1.squeeze())),
                          np.min(np.diff(x2.squeeze()))])
    max_delta_x = np.max([b1 - a1, b2 - a2])
    if R_prior is None:
        R_prior = GPCSDInvGammaPrior()
        R_prior.set_params(min_delta_x, 0.5 * max_delta_x)
    self.R = {'value': R_prior.sample(), 'prior': R_prior,
              'min': 0.5 * min_delta_x, 'max': 0.8 * max_delta_x}
    if eps is None:
        eps = 5 * min_delta_x
    self.eps = eps
    if sig2n_prior is None:
        sig2n_prior = GPCSDHalfNormalPrior(1.0)
        self.sig2n = {'value': sig2n_prior.sample(), 'prior': sig2n_prior,
                      'min': 1e-8, 'max': 10.0}
    elif isinstance(sig2n_prior, list):
        self.sig2n = {'value': np.array([sp.sample() for sp in sig2n_prior]),
                      'prior': sig2n_prior,
                      'min': [1e-8] * len(sig2n_prior),
                      'max': [10.0] * len(sig2n_prior)}
    else:
        self.sig2n = {'value': sig2n_prior.sample(), 'prior': sig2n_prior,
                      'min': 1e-8, 'max': 10.0}
def __init__(self, lfp, x, t, a=None, b=None, ngl=100, spatial_cov=None,
             temporal_cov_list=None, R_prior=None, sig2n_prior=None):
    """
    :param lfp: LFP array, shape (n_spatial, n_time, n_trials); recommend
        rescaling to approximately std dev = 1
    :param x: LFP observed spatial locations, shape (n_spatial, 1), in
        microns
    :param t: LFP observed time points, shape (n_time, 1), in milliseconds
    :param a: Edge of range for integration (defaults to np.min(x))
    :param b: Edge of range for integration (defaults to np.max(x))
    :param ngl: order of Gauss-Legendre integration (defaults to 100)
    :param spatial_cov: Instance of GPCSD1DSpatialCovSE
    :param temporal_cov_list: list of instances of temporal covariance
        objects (GPCSDTemporalCovSE or GPCSDTemporalCovMatern)
    :param R_prior: Instance of a prior for R (defaults to
        GPCSDInvGammaPrior)
    :param sig2n_prior: Instance of a prior for noise variance (defaults
        to GPCSDHalfNormalPrior)
    """
    self.lfp = np.atleast_3d(lfp)
    self.x = x
    self.t = t
    if a is None:
        a = np.min(x)
    if b is None:
        b = np.max(x)
    self.a = a
    self.b = b
    self.ngl = ngl
    if spatial_cov is None:
        spatial_cov = GPCSD1DSpatialCovSE(x, a=a, b=b, ngl=ngl)
    self.spatial_cov = spatial_cov
    if temporal_cov_list is None:
        temporal_cov_list = [GPCSDTemporalCovSE(t),
                             GPCSDTemporalCovMatern(t)]
    self.temporal_cov_list = temporal_cov_list
    if R_prior is None:
        R_prior = GPCSDInvGammaPrior()
        R_prior.set_params(
            np.min(np.diff(self.x.squeeze())),
            0.5 * (np.max(self.x.squeeze()) - np.min(self.x.squeeze())))
    self.R = {'value': R_prior.sample(), 'prior': R_prior,
              'min': 0.5 * np.min(np.diff(self.x.squeeze())),
              'max': 0.8 * (np.max(self.x) - np.min(self.x))}
    if sig2n_prior is None:
        sig2n_prior = GPCSDHalfNormalPrior(0.1)
        self.sig2n = {'value': sig2n_prior.sample(), 'prior': sig2n_prior,
                      'min': 1e-8, 'max': 0.5}
    elif isinstance(sig2n_prior, list):
        self.sig2n = {'value': np.array([sp.sample() for sp in sig2n_prior]),
                      'prior': sig2n_prior,
                      'min': [1e-8] * len(sig2n_prior),
                      'max': [0.5] * len(sig2n_prior)}
    else:
        self.sig2n = {'value': sig2n_prior.sample(), 'prior': sig2n_prior,
                      'min': 1e-8, 'max': 0.5}
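# Hedged usage sketch for the 1D constructor above. The enclosing class name
# (`GPCSD1D` here) and the synthetic shapes are assumptions for illustration;
# only the argument conventions come from the docstring.
import numpy as np

nx, nt, ntrials = 24, 100, 5
x = np.linspace(0.0, 2300.0, nx)[:, None]   # electrode depths, microns
t = np.arange(nt, dtype=float)[:, None]     # time points, milliseconds
lfp = np.random.randn(nx, nt, ntrials)      # standardized LFP, std dev ~ 1
model = GPCSD1D(lfp, x, t)                  # a, b default to min(x), max(x)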
                     eps=eps)
gpcsd_gen.R['value'] = R_true
gpcsd_gen.sig2n['value'] = sig2n_true
gpcsd_gen.spatial_cov.params['ell1']['value'] = ellSE1_true
gpcsd_gen.spatial_cov.params['ell2']['value'] = ellSE2_true
gpcsd_gen.temporal_cov_list[0].params['ell']['value'] = elltSE_true
gpcsd_gen.temporal_cov_list[0].params['sigma2']['value'] = sig2tSE_true
gpcsd_gen.temporal_cov_list[1].params['ell']['value'] = elltM_true
gpcsd_gen.temporal_cov_list[1].params['sigma2']['value'] = sig2tM_true

# %% Generate CSD on dense spatial grid
csd_dense, _ = gpcsd_gen.sample_prior(1, type="csd")
csd_dense_rect = csd_dense.reshape((nz1, nz2, nt, -1))

# %% Pass through forward model to get LFP at sparse spatial grid
lfp_sparse = np.atleast_3d(
    fwd_model_2d(csd_dense_rect, z1, z2, x_grid, R_true, gpcsd_gen.eps))
lfp_sparse += np.random.normal(0, np.sqrt(sig2n_true), lfp_sparse.shape)
lfp_sparse_rect = lfp_sparse.reshape((nx1, nx2, nt, -1))

# %% Visualize
plt.figure(figsize=(14, 10))
for ti in [0, 1, 2, 3]:
    plt.subplot(2, 4, ti + 1)
    plt.imshow(lfp_sparse_rect[:, :, ti, 0].T, aspect='auto', cmap='bwr',
               vmin=-np.nanmax(np.abs(lfp_sparse_rect[:, :, ti, 0])),
               vmax=np.nanmax(np.abs(lfp_sparse_rect[:, :, ti, 0])))
    plt.xlabel('x1')
    plt.ylabel('x2')
    # index the scalar time value; t has shape (n_time, 1)
    plt.title('LFP (t = %0.2f)' % t[ti, 0])
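# %% Optional helper (not in the original script) factoring out the symmetric
# color limits computed inline above, so each panel's diverging colormap is
# centered at zero. Usage: vmin, vmax = symmetric_clim(lfp_sparse_rect[:, :, ti, 0])
def symmetric_clim(img):
    m = np.nanmax(np.abs(img))
    return -m, m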