def get_scene_sed(sky_coord, observations):
    """Get the SED at `sky_coord` in the scene.

    Function made for combined-resolution images.

    Parameters
    ----------
    sky_coord: tuple
        Center of the source
    observations: list of `~scarlet.observation.Observation`
        Observations to extract the SED from.

    Returns
    -------
    SED: `~numpy.array`
        SED for a single source
    """
    sed = []
    for obs in observations:
        pixel = obs.get_pixel(sky_coord)
        sed.append(obs.images[:, int(pixel[0]), int(pixel[1])])
    sed = np.array(sed)

    if np.all(sed <= 0):
        # If the flux in all bands is <= 0, the new SED would be filled with
        # NaN values, which would cause the code to crash later.
        msg = "Zero or negative flux at y={0}, x={1}"
        raise SourceInitError(msg.format(*sky_coord))
    return sed.reshape(-1)
def _fit_pixel_center(morph, center, window=None):
    # locate the peak pixel of `morph` inside a 5x5 window around `center`
    cy, cx = int(center[0]), int(center[1])
    if window is None:
        window = slice(cy - 2, cy + 3), slice(cx - 2, cx + 3)
    _morph = morph[window]
    yx0 = np.array([window[0].start, window[1].start])
    return tuple(np.unravel_index(np.argmax(_morph), _morph.shape) + yx0)
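# A minimal sketch of how `_fit_pixel_center` can be exercised, assuming
# numpy is imported as `np` and the function above is in scope. The peak of
# this toy morphology sits at (3, 2), one pixel off the supplied center.
morph_demo = np.zeros((6, 6))
morph_demo[3, 2] = 1.0
print(_fit_pixel_center(morph_demo, center=(2, 2)))  # -> (3, 2)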
def init_combined_extended_source(sky_coord, frame, observations, bg_rms, obs_idx=0,
                                  thresh=1., symmetric=True, monotonic=True):
    """Initialize a source that is symmetric and monotonic.

    See `ExtendedSource` for a description of the parameters.
    """
    try:
        iter(observations)
    except TypeError:
        observations = [observations]

    # determine initial SED from peak position
    # SED in the frame for source detection
    seds = []
    for obs in observations:
        _sed = get_psf_sed(sky_coord, obs, frame)
        seds.append(_sed)
    sed = np.concatenate(seds).flatten()

    if np.any(sed <= 0):
        # An SED that is nonpositive in all channels would later be filled
        # with NaN values and crash the code, so warn in that case;
        # a single nonpositive channel is only logged.
        msg = "Zero or negative SED {} at y={}, x={}".format(sed, *sky_coord)
        if np.all(sed <= 0):
            logger.warning(msg)
        else:
            logger.info(msg)

    morph, bg_cutoff = build_detection_coadd(seds[obs_idx], bg_rms[obs_idx],
                                             observations[obs_idx],
                                             thresh)  # amplitude is in sed
    center = frame.get_pixel(sky_coord)

    # apply the necessary constraints
    if symmetric:
        morph = operator.prox_uncentered_symmetry(morph, 0, center=center,
                                                  algorithm="sdss")
    if monotonic:
        # use finite thresh to remove flat bridges
        prox_monotonic = operator.prox_strict_monotonic(morph.shape, use_nearest=False,
                                                        center=center, thresh=.1)
        morph = prox_monotonic(morph, 0).reshape(morph.shape)

    # trim morph to pixels above threshold
    mask = morph > bg_cutoff
    if mask.sum() == 0:
        msg = "No flux above threshold={2} for source at y={0} x={1}"
        raise SourceInitError(msg.format(*center, bg_cutoff))
    morph[~mask] = 0

    # normalize to unity at peak pixel
    cy, cx = center
    center_morph = morph[int(cy), int(cx)]
    morph /= center_morph
    return sed, morph
def init_combined_extended_source(sky_coord, scene, observations, bg_rms, obs_idx=0,
                                  thresh=1., symmetric=True, monotonic=True):
    """Initialize a source that is symmetric and monotonic.

    See `ExtendedSource` for a description of the parameters.
    """
    try:
        iter(observations)
    except TypeError:
        observations = [observations]

    # determine initial SED from peak position
    # SED in the scene for source detection
    sed = get_pixel_sed(sky_coord, observations)

    morph, bg_cutoff = build_detection_coadd(sed, bg_rms, observations[obs_idx],
                                             scene, thresh)  # amplitude is in sed
    center = scene.get_pixel(sky_coord)

    # apply the necessary constraints
    if symmetric:
        morph = operator.prox_uncentered_symmetry(morph, 0, center=center,
                                                  use_soft=False)
    if monotonic:
        # use finite thresh to remove flat bridges
        prox_monotonic = operator.prox_strict_monotonic(morph.shape, use_nearest=False,
                                                        center=center, thresh=.1)
        morph = prox_monotonic(morph, 0).reshape(morph.shape)

    # trim morph to pixels above threshold
    mask = morph > bg_cutoff
    if mask.sum() == 0:
        msg = "No flux above threshold={2} for source at y={0} x={1}"
        raise SourceInitError(msg.format(*center, bg_cutoff))
    morph[~mask] = 0

    # normalize to unity at peak pixel
    cy, cx = center
    center_morph = morph[int(cy), int(cx)]
    morph /= center_morph
    return sed, morph
def match_psfs(psf_hr, psf_lr, wcs_hr, wcs_lr):
    '''PSF matching between different datasets

    Matches PSFs at different resolutions by interpolating psf_lr
    on the same grid as psf_hr.

    Parameters
    ----------
    psf_hr: array
        centered psf of the high resolution scene
    psf_lr: array
        centered psf of the low resolution scene
    wcs_hr: WCS object
        wcs of the high resolution scene
    wcs_lr: WCS object
        wcs of the low resolution scene

    Returns
    -------
    psf_match_hr: array
        high resolution psf at matching size
    psf_match_lr: array
        low resolution psf at matching size and resolution
    '''
    ny_hr, nx_hr = psf_hr.shape
    ny_lr, nx_lr = psf_lr.shape

    # center the reference of each WCS on its PSF stamp
    if np.size(wcs_hr.array_shape) == 2:
        wcs_hr.wcs.crval = 0., 0.
        wcs_hr.wcs.crpix = ny_hr / 2., nx_hr / 2.
    elif np.size(wcs_hr.array_shape) == 3:
        wcs_hr.wcs.crval = 0., 0., 0.
        wcs_hr.wcs.crpix = ny_hr / 2., nx_hr / 2., 0.
    if np.size(wcs_lr.array_shape) == 2:
        wcs_lr.wcs.crval = 0., 0.
        wcs_lr.wcs.crpix = ny_lr / 2., nx_lr / 2.
    elif np.size(wcs_lr.array_shape) == 3:
        wcs_lr.wcs.crval = 0., 0., 0.
        wcs_lr.wcs.crpix = ny_lr / 2., nx_lr / 2., 0.

    mask, p_lr, p_hr = match_patches(psf_hr.shape, psf_lr.data.shape, wcs_hr, wcs_lr)
    cmask = np.where(mask == 1)

    n_p = int(np.size(cmask[0]) ** 0.5)
    psf_match_lr = interpolation.sinc_interp(cmask, p_hr[::-1],
                                             psf_lr.flatten()).reshape(n_p, n_p)
    psf_match_hr = psf_hr[int((ny_hr - n_p) / 2):int((ny_hr + n_p) / 2),
                          int((nx_hr - n_p) / 2):int((nx_hr + n_p) / 2)]

    psf_match_hr /= np.max(psf_match_hr)
    psf_match_lr /= np.max(psf_match_lr)
    return psf_match_hr, psf_match_lr
def __init__(self, dimension, inputs, obs, mini_batch=False):
    """These functions implement a standard multi-layer perceptron,
    vectorized over both training examples and weight samples."""
    self.dimension = dimension
    self.prior = FiniteDimensionalPrior(self.dimension)

    self.inputs = inputs
    self.inputs_size = len(inputs)
    self.obs = obs
    self.mini_batch = mini_batch
    if self.mini_batch:
        self.it = 0
        self.mini_batch_size = 32
        self.number_batchs = int(np.ceil(self.inputs_size / self.mini_batch_size))
        # keep the full data set; self.inputs/self.obs hold the current batch
        self.inputs_all = np.copy(inputs)
        self.obs_all = np.copy(obs)
        self.inputs = inputs[:self.mini_batch_size]
        self.obs = obs[:self.mini_batch_size]

    self.gx = grad(self.cost)
    self.J = jacobian(self.forward)
    self.hx = hessian_vector_product(self.cost)
    self.hvp = hvp(self.hx)
def coupling_layer_specifications(num_hidden_units, num_hidden_layers, z_len):
    """
    We specify the FNN-based networks over here. A single network produces
    both the s and t parts. The coupling layer currently comprises 2 transforms.
    """
    d_1 = z_len // 2
    d_2 = int(z_len - d_1)
    coupling_layer_sizes = []
    # each network maps its half of z to twice the size of the complementary
    # half (s and t concatenated)
    coupling_layer_sizes.append([d_1] + num_hidden_layers * [num_hidden_units] + [2 * d_2])
    coupling_layer_sizes.append([d_2] + num_hidden_layers * [num_hidden_units] + [2 * d_1])
    return coupling_layer_sizes
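# Hedged usage sketch: for z_len = 5 the latent vector splits into halves of
# size 2 and 3, so the two networks have the layer sizes printed below.
sizes = coupling_layer_specifications(num_hidden_units=16, num_hidden_layers=2, z_len=5)
print(sizes)  # -> [[2, 16, 16, 6], [3, 16, 16, 4]]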
def get_starlet_shape(shape, lvl=None):
    """Get the number of decomposition levels for a starlet transform
    of an image with the given (padded) shape.
    """
    # maximum number of levels for the starlet decomposition
    lvl_max = int(np.log2(np.min(shape[-2:])))
    if (lvl is None) or lvl > lvl_max:
        lvl = lvl_max
    return int(lvl)
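# Quick check, assuming numpy as np: a 64x64 image admits at most
# log2(64) = 6 starlet levels, and an over-large request is clipped.
print(get_starlet_shape((3, 64, 64)))          # -> 6
print(get_starlet_shape((3, 64, 64), lvl=10))  # -> 6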
def BinarySplit(z, j):
    # split z into two halves along the last axis; for odd D the extra
    # element alternates between the halves with the parity of j
    D = z.shape[-1]
    d = D // 2
    if D % 2 == 1:
        d += int(j) % 2
    return np.array_split(z, [d], -1)
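# Sketch of the alternating split, assuming numpy as np:
z = np.arange(5)
print(BinarySplit(z, 0))  # -> [array([0, 1]), array([2, 3, 4])]
print(BinarySplit(z, 1))  # -> [array([0, 1, 2]), array([3, 4])]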
def getSigSeriesG(sts, nt, a, mu, sig):
    # sts has shape T x M; a, mu, sig are numbers
    gaus = a * np.exp(-(np.arange(nt) - mu)**2 / sig**2)
    nper = int(nt / sts.shape[0])
    # pad the repeated spike trains with zeros up to nt samples
    # (the original hardcoded `6` here, which assumed T == 6)
    stsRepeated = np.vstack([np.repeat(sts, nper, axis=0),
                             np.zeros((nt - nper * sts.shape[0], sts.shape[1]))])
    return (stsRepeated.T * gaus).T
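# Shape check for `getSigSeriesG`, assuming numpy as np: six spike-train rows
# are repeated to fill nt samples and modulated by a Gaussian envelope.
sts_demo = np.ones((6, 3))  # T=6 trains, M=3 cells
out = getSigSeriesG(sts_demo, nt=100, a=1.0, mu=50.0, sig=10.0)
print(out.shape)            # -> (100, 3)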
def _get_H_log_post(W1D, Wprior, H, y, X, testing=False):
    """Returns the multinomial Hessian (total or independent between classes)
    of the negative log posterior probability with C classes.

    Parameters
    ----------
    W1D : array-like, shape (C*p, )
        Flattened vector of parameters at which the negative log posterior
        is to be evaluated
    Wprior : array-like, shape (C, p)
        vector of prior means on the parameters to be fit
    H : array-like, shape (C*p, C*p) or, independent between classes, (C, p, p)
        array of prior Hessians (inverse covariance of the prior distribution
        of the parameters)
    y : array-like, shape (N, )
        vector of class labels in {0, 1, ..., C-1}
    X : array-like, shape (N, p)
        array of features

    Returns
    -------
    H_log_post : array-like, shape like `H`
        Hessian of the negative log posterior

    References
    ----------
    Chapter 8 of Murphy, K. 'Machine Learning: a Probabilistic Perspective',
    MIT Press (2012)
    Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning',
    Springer (2006)
    """
    # calculate the Hessian of the log likelihood
    C, p = Wprior.shape
    W = W1D.reshape(C, p)
    mu = _get_softmax_probs(X, W)  # shape (N, C)
    H_log_likelihood = np.zeros_like(H)
    if H.shape == (C, p, p):
        for c in range(C):
            s = mu[:, c] * (1 - mu[:, c])
            # equals np.outer(X, X*s)
            H_log_likelihood[c] = X.T @ (X * s.reshape(-1, 1)) + H[c]
    elif H.shape == (C * p, C * p):
        for c1 in range(C):
            for c2 in range(c1, C):
                s = mu[:, c1] * (int(c1 == c2) - mu[:, c2])
                m = X.T @ (X * s.reshape(-1, 1))  # equals np.outer(X, X*s)
                # c1, c2 sub-block
                H_log_likelihood[c1 * p:(c1 + 1) * p, c2 * p:(c2 + 1) * p] = m
                # H is symmetric
                H_log_likelihood[c2 * p:(c2 + 1) * p, c1 * p:(c1 + 1) * p] = m
    if H.shape == (C * p, C * p):
        H_log_post = H_log_likelihood + H
    else:
        # the per-class priors H[c] were already added above
        H_log_post = H_log_likelihood
    if testing:
        return H_log_post, H_log_likelihood, H
    return H_log_post
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height.

    Relies on a module-level axes object `ax`.
    """
    for rect in rects:
        height = rect.get_height()
        if height > 1:
            height = int(height)
        ax.annotate('{}'.format(height),
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom', fontsize=10)
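# A minimal sketch for `autolabel`, assuming matplotlib.pyplot as plt; note
# that the function reads the module-level `ax`, so the axes must be created
# at module scope before it is called.
fig, ax = plt.subplots()
rects_demo = ax.bar([0, 1, 2], [3.0, 7.5, 0.4])
autolabel(rects_demo)  # labels: 3, 7, 0.4 (heights > 1 are truncated to int)
plt.show()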
def contact_rate(self, t):
    if self.number_group == 1:
        return 1.
    else:
        # ###### definition of the contact rate
        # contact_full = np.block([ewm(contact_full, np.tile(1-high_risk_distribution, [number_age_group, 1])),
        #                          ewm(contact_full, np.tile(high_risk_distribution, [number_age_group, 1]))])
        # contact_full = np.tile(contact_full, [number_risk_group, 1])
        # contact_full = np.tile(contact_full, [2, 2])
        # contact_full = 5*np.ones((number_group, number_group))

        # day type from the calendar: 2 includes school contacts,
        # 1 adds work contacts, anything else is home + other only
        if self.calendar[int(np.floor(t))] == 2:
            contact = self.c_home + self.c_school + self.c_work + self.c_other
        elif self.calendar[int(np.floor(t))] == 1:
            contact = self.c_home + self.c_work + self.c_other
        else:
            contact = self.c_home + self.c_other
        # else:
        #     contact = c_home + 0.1 * (c_work + c_other)
        # if calendar[int(np.floor(t))] == 1:
        #     contact = c_home + 0.1*(c_work + c_other)
        # else:
        #     contact = c_home

        # construct the contact matrix by splitting each age group into two
        # by low and high risk proportion
        # contact = np.block([ewm(contact, np.tile(1 - high_risk_distribution, [number_age_group, 1])),
        #                     ewm(contact, np.tile(high_risk_distribution, [number_age_group, 1]))])
        # contact = np.tile(contact, [number_risk_group, 1])
        contact = np.tile(contact, [2, 2])

        # constant contact
        # contact = 10*np.ones((number_group, number_group))
        return contact
def get_callback_arg_dict(hparams):
    if hparams['advi_use'] is True:
        # keep a rolling buffer of 1% of the iteration budget (at least 2)
        buffer_len = int(max(
            0.01 * hparams['max_iters'] / hparams['advi_callback_iteration'], 2))
        delta_results = collections.deque(maxlen=buffer_len)
        return {"delta_results": delta_results, "hparams": hparams}
    else:
        return {}
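# Hedged example with a hypothetical hparams dict: with ADVI enabled the
# buffer holds 0.01 * 10000 / 5 = 20 entries.
hparams_demo = {'advi_use': True, 'max_iters': 10000, 'advi_callback_iteration': 5}
print(get_callback_arg_dict(hparams_demo)['delta_results'].maxlen)  # -> 20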
def _threshold(morph):
    """Find the threshold value for a given morphology"""
    _morph = morph[morph > 0]
    _bins = 50
    # decrease the bin count for sources with a small number of pixels
    if _morph.size < 500:
        _bins = max(int(_morph.size / 10), 1)
        if _bins == 1:
            return 0, _bins
    hist, bins = np.histogram(np.log10(_morph).reshape(-1), _bins)
    cutoff = np.where(hist == 0)[0]
    # if all of the pixels are used there is no need to threshold
    if len(cutoff) == 0:
        return 0, _bins
    return 10**bins[cutoff[-1]], _bins
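# Sketch of the histogram-based threshold, assuming numpy as np: a small
# morphology (400 positive pixels here) falls back to size/10 = 40 bins.
rng = np.random.default_rng(0)
morph_demo = rng.exponential(size=(20, 20))
thresh_demo, bins_demo = _threshold(morph_demo)
print(bins_demo)  # -> 40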
def ess_compute_Z(diagonal, num_peds, robot_mu_x, robot_mu_y,
                  ped_mu_x, ped_mu_y, cov_robot_x, cov_robot_y,
                  inv_cov_robot_x, inv_cov_robot_y, cov_ped_x, cov_ped_y,
                  inv_cov_ped_x, inv_cov_ped_y,
                  one_over_cov_sum_x, one_over_cov_sum_y, normalize):
    norm_delta0 = [0. for _ in range(num_peds)]

    T = np.size(robot_mu_x)
    for ped in range(num_peds):
        # if normalize == True:
        #     normalize_x = np.multiply(np.power(2*np.pi, -0.5), one_over_std_sum_x)
        #     normalize_y = np.multiply(np.power(2*np.pi, -0.5), one_over_std_sum_y)
        # else:
        normalize_x = 1.
        normalize_y = 1.

        vel_x = robot_mu_x - ped_mu_x[ped]
        vel_y = robot_mu_y - ped_mu_y[ped]
        vel_x_2 = np.power(vel_x, 2)
        vel_y_2 = np.power(vel_y, 2)

        one_over_var_sum_x = np.diag(one_over_cov_sum_x[ped])
        one_over_var_sum_y = np.diag(one_over_cov_sum_y[ped])

        quad_x = np.multiply(one_over_var_sum_x, vel_x_2)
        quad_y = np.multiply(one_over_var_sum_y, vel_y_2)

        Z_x = np.multiply(normalize_x, np.exp(-0.5 * quad_x))
        Z_y = np.multiply(normalize_y, np.exp(-0.5 * quad_y))
        Z = np.multiply(Z_x, Z_y)

        norm_delta0[ped] = np.abs(np.sum(np.log1p(-Z)))

    # effective sample size: inverse participation ratio of the
    # normalized interaction weights
    norm_delta0_normalized = norm_delta0 / np.sum(norm_delta0)
    ess = 1. / np.sum(np.power(norm_delta0_normalized, 2))
    ess = int(ess)
    top_Z_indices = np.argsort(norm_delta0_normalized)[::-1]
    return ess, top_Z_indices
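# The effective sample size above is the standard inverse participation
# ratio of the normalized weights; a quick standalone check, assuming
# numpy as np:
w = np.array([0.7, 0.2, 0.1])
print(1. / np.sum(w ** 2))  # ~1.85: the crowd is dominated by one pedestrian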
def sinc_interp(coord_hr, coord_lr, sample_lr):
    '''
    Parameters
    ----------
    coord_hr: array (2xN)
        Coordinates of the high resolution grid
    coord_lr: array (2xM)
        Coordinates of the low resolution grid
    sample_lr: array (M)
        Samples at the positions coord_lr

    Returns
    -------
    result:
        interpolated samples at positions coord_hr
    '''
    y_hr, x_hr = coord_hr
    y_lr, x_lr = coord_lr

    # grid spacings of the low resolution grid
    hy = np.abs(y_lr[1] - y_lr[0])
    hx = np.abs(x_lr[int(np.sqrt(np.size(x_lr))) + 1] - x_lr[0])
    assert hy != 0

    return np.array([sample_lr * sinc2D((y_hr[:, np.newaxis] - y_lr) / hy,
                                        (x_hr[:, np.newaxis] - x_lr) / hx)]).sum(axis=2)
    newBlf = scipy.sparse.coo_matrix((vals, (rows, cols)),
                                     shape=(shape[0] * M, shape[1] * M)).tocsr()
    return newBlf


if __name__ == "__main__":
    global img_file
    img_file = sys.argv[1]
    KS_file_name = sys.argv[2]
    Thickness_file_name = sys.argv[3]
    output_prefix = sys.argv[4]
    W_w = float(sys.argv[5])
    W_sparse = float(sys.argv[6])
    print('W_sparse', W_sparse)
    solve_choice = int(sys.argv[7])
    W_spatial = float(sys.argv[8])
    print('W_spatial', W_spatial)

    global save_for_application_path_prefix
    save_for_application_path_prefix = "./Application_Files/"

    W_neighbors = 0.0
    if solve_choice == 3 or solve_choice == 6:
        # solve per pixel with neighborhood info constraints
        W_neighbors = float(sys.argv[9])
        print('W_neighbors', W_neighbors)

    START = time.time()
    img = np.asarray(Image.open(img_file).convert('RGB'))
def match(self, model_frame, coverage='union'):

    if self.frame.dtype != model_frame.dtype:
        self.images = self.images.copy().astype(model_frame.dtype)
        if type(self.weights) is np.ndarray:
            self.weights = self.weights.copy().astype(model_frame.dtype)
        if self.frame._psfs is not None:
            self.frame._psfs.update_dtype(model_frame.dtype)

    # channels of model that are represented in this observation
    self._band_slice = slice(None)
    if self.frame.channels is not model_frame.channels:
        bmin = model_frame.channels.index(self.frame.channels[0])
        bmax = model_frame.channels.index(self.frame.channels[-1])
        self._band_slice = slice(bmin, bmax + 1)

    # Affine transform
    try:
        model_affine = model_frame.wcs.wcs.pc
    except AttributeError:
        model_affine = model_frame.wcs.cd
    try:
        self_affine = self.frame.wcs.wcs.pc
    except AttributeError:
        self_affine = self.frame.wcs.cd

    # pixel scale from the determinant of each affine matrix
    model_pix = np.sqrt(np.abs(model_affine[0, 0] * model_affine[1, 1]
                               - model_affine[0, 1] * model_affine[1, 0]))
    self_pix = np.sqrt(np.abs(self_affine[0, 0] * self_affine[1, 1]
                              - self_affine[0, 1] * self_affine[1, 0]))
    # Pixel scale ratio
    self.h = self_pix / model_pix

    # Vector giving the direction of the x-axis of each frame
    self_framevector = np.sum(self_affine, axis=0)[:2] / self_pix
    model_framevector = np.sum(model_affine, axis=0)[:2] / model_pix
    # normalisation
    self_framevector /= np.sum(self_framevector**2)**0.5
    model_framevector /= np.sum(model_framevector**2)**0.5

    # sin of the angle between datasets (normalised cross product)
    self.sin_rot = np.cross(self_framevector, model_framevector)
    # cos of the angle (normalised scalar product)
    self.cos_rot = np.dot(self_framevector, model_framevector)
    # Is the angle larger than machine precision?
    self.isrot = (np.abs(self.sin_rot)**2) > np.finfo(float).eps
    if not self.isrot:
        self.sin_rot = 0
        self.cos_rot = 1
        angle = None
    else:
        angle = (self.cos_rot, self.sin_rot)

    # Get pixel coordinates in each frame.
    coord_lr, coord_hr, coordhr_over = resampling.match_patches(
        model_frame.shape, self.frame.shape, model_frame.wcs, self.frame.wcs,
        isrot=self.isrot, coverage=coverage)

    # shape of the low resolution image in the intersection or union
    self.lr_shape = (
        np.max(coord_lr[0]) - np.min(coord_lr[0]) + 1,
        np.max(coord_lr[1]) - np.min(coord_lr[1]) + 1,
    )

    # BBox of the low resolution pixels in the model frame
    # 1) channels of model that are represented in this observation
    if self.frame.channels is not model_frame.channels:
        cmin = model_frame.channels.index(self.frame.channels[0])
        cmax = model_frame.channels.index(self.frame.channels[-1])
    else:
        cmin, cmax = 0, self.frame.C
    # 2) use the bounds of coord_lr
    self.bbox = Box.from_bounds(
        (cmin, cmax + 1),
        (np.min(coord_lr[0]).astype(int), np.max(coord_lr[0]).astype(int) + 1),
        (np.min(coord_lr[1]).astype(int), np.max(coord_lr[1]).astype(int) + 1),
    )
    self.slices = self.bbox.slices_for(model_frame.shape)

    # Coordinates for all model frame pixels
    self.frame_coord = (
        np.array(range(model_frame.Ny)),
        np.array(range(model_frame.Nx)),
    )

    diff_psf = self.build_diffkernel(model_frame, angle)

    # 1D convolutions of the model are done along the smaller axis; therefore,
    # the psf is convolved along the frame's longer axis.
    # the smaller frame axis:
    self.small_axis = self.frame.Nx <= self.frame.Ny

    self._fft_shape = fft._get_fft_shape(
        model_frame.psf,
        np.zeros(model_frame.shape),
        padding=3,
        axes=[-2, -1],
        max=True,
    )
    self.diff_psf = fft.Fourier(
        fft._pad(diff_psf.image, self._fft_shape, axes=(1, 2)))

    # center of the fft frame, corrected for parity mismatches
    center_y = int(self._fft_shape[0] / 2. - (self._fft_shape[0] - model_frame.Ny) / 2.) - \
        ((self._fft_shape[0] % 2) != 0) * ((model_frame.Ny % 2) == 0)
    center_x = int(self._fft_shape[1] / 2. - (self._fft_shape[1] - model_frame.Nx) / 2.) - \
        ((self._fft_shape[1] % 2) != 0) * ((model_frame.Nx % 2) == 0)

    if self.isrot:
        # Unrotated coordinates:
        Y_unrot = ((coord_hr[0] - center_y) * self.cos_rot +
                   (coord_hr[1] - center_x) * self.sin_rot).reshape(self.lr_shape)
        X_unrot = ((coord_hr[1] - center_x) * self.cos_rot -
                   (coord_hr[0] - center_y) * self.sin_rot).reshape(self.lr_shape)

        # Removing redundancy
        self.Y_unrot = Y_unrot[:, 0]
        self.X_unrot = X_unrot[0, :]

        if self.small_axis:
            self.shifts = [self.Y_unrot * self.cos_rot,
                           self.Y_unrot * self.sin_rot]
            self.other_shifts = [-self.sin_rot * self.X_unrot,
                                 self.cos_rot * self.X_unrot]
        else:
            self.shifts = [-self.sin_rot * self.X_unrot,
                           self.cos_rot * self.X_unrot]
            self.other_shifts = [self.Y_unrot * self.cos_rot,
                                 self.Y_unrot * self.sin_rot]

        axes = (1, 2)
    # aligned case.
    else:
        axes = [int(not self.small_axis) + 1]
        self.shifts = np.array(coord_hr)
        self.shifts[0] -= center_y
        self.shifts[1] -= center_x
        self.other_shifts = np.copy(self.shifts)

    # Computes the resampling/convolution matrix
    resconv_op = self.sinc_shift(self.diff_psf, self.shifts, axes)
    self._resconv_op = np.array(resconv_op, dtype=self.frame.dtype) * self.h**2

    if self.isrot:
        self._resconv_op = self._resconv_op.reshape(*self._resconv_op.shape[:2], -1)
        return self
    if self.small_axis:
        self._resconv_op = self._resconv_op.reshape(*self._resconv_op.shape[:2], -1)
        return self
    else:
        self._resconv_op = self._resconv_op.reshape(
            self._resconv_op.shape[0], -1, self._resconv_op.shape[-1])
        return self
def match(self, model_frame):
    """Match the observation to a frame

    Parameters
    ----------
    model_frame: `Frame`
        Frame to match to the observation
    """
    if self.frame.dtype != model_frame.dtype:
        self.images = self.images.copy().astype(model_frame.dtype)
        if type(self.weights) is np.ndarray:
            self.weights = self.weights.copy().astype(model_frame.dtype)
        if self.frame._psfs is not None:
            self.frame._psfs.update_dtype(model_frame.dtype)

    self.angle, self.h = interpolation.get_angles(self.frame.wcs, model_frame.wcs)
    # Is the angle larger than machine precision?
    self.isrot = (np.abs(self.angle[1]) ** 2) > np.finfo(float).eps
    if not self.isrot:
        self.angle = None

    # Get pixel coordinates in each frame.
    coord_lr, coord_hr = resampling.match_patches(self, model_frame, isrot=self.isrot)

    # shape of the low resolution image in the intersection or union
    lr_shape = (
        np.max(coord_lr[0]) - np.min(coord_lr[0]) + 1,
        np.max(coord_lr[1]) - np.min(coord_lr[1]) + 1,
    )

    # Coordinates for all model frame pixels
    self.frame_coord = (
        np.array(range(model_frame.Ny)),
        np.array(range(model_frame.Nx)),
    )

    diff_psf, target = self.build_diffkernel(model_frame)

    # 1D convolutions of the model are done along the smaller axis; therefore,
    # the psf is convolved along the frame's longer axis.
    # the smaller frame axis:
    self.small_axis = self.frame.Nx <= self.frame.Ny

    self._fft_shape = fft._get_fft_shape(
        target, np.zeros(model_frame.shape), padding=3, axes=[-2, -1], max=False,
    )
    # Cutting diff_psf if needed and keeping the parity
    if (self._fft_shape[-2] < diff_psf.shape[-2]) or (
            self._fft_shape[-1] < diff_psf.shape[-1]):
        diff_psf = fft._centered(
            diff_psf, np.array([diff_psf.shape[0] + 1, *self._fft_shape]) - 1)

    self._diff_kernels = fft.Fourier(
        fft._pad(diff_psf.image, self._fft_shape, axes=(-2, -1)))

    # center of the fft frame, corrected for parity mismatches
    center_y = (
        int(self._fft_shape[0] / 2.0 - (self._fft_shape[0] - model_frame.Ny) / 2.0)
        + ((self._fft_shape[0] % 2) != 0) * ((model_frame.Ny % 2) == 0)
        + model_frame.origin[-2]
    )
    center_x = (
        int(self._fft_shape[1] / 2.0 - (self._fft_shape[1] - model_frame.Nx) / 2.0)
        - ((self._fft_shape[1] % 2) != 0) * ((model_frame.Nx % 2) == 0)
        + model_frame.origin[-1]
    )

    if self.isrot:
        # Unrotated coordinates:
        Y_unrot = (
            (coord_hr[0] - center_y) * self.angle[0]
            - (coord_hr[1] - center_x) * self.angle[1]
        ).reshape(lr_shape)
        X_unrot = (
            (coord_hr[1] - center_x) * self.angle[0]
            + (coord_hr[0] - center_y) * self.angle[1]
        ).reshape(lr_shape)

        # Removing redundancy
        self.Y_unrot = Y_unrot[:, 0]
        self.X_unrot = X_unrot[0, :]

        if self.small_axis:
            self.shifts = np.array(
                [self.Y_unrot * self.angle[0], self.Y_unrot * self.angle[1]])
            self.other_shifts = np.array(
                [-self.angle[1] * self.X_unrot, self.angle[0] * self.X_unrot])
        else:
            self.shifts = np.array(
                [-self.angle[1] * self.X_unrot, self.angle[0] * self.X_unrot])
            self.other_shifts = np.array(
                [self.Y_unrot * self.angle[0], self.Y_unrot * self.angle[1]])

        axes = (1, 2)
    # aligned case.
    else:
        axes = [int(not self.small_axis) + 1]
        self.shifts = np.array(coord_hr)
        self.shifts[0] -= center_y
        self.shifts[1] -= center_x
        self.other_shifts = np.copy(self.shifts)

    # Computes the resampling/convolution matrix
    resconv_op = self.sinc_shift(self._diff_kernels, self.shifts, axes)
    self._resconv_op = np.array(resconv_op, dtype=self.frame.dtype) * self.h ** 2

    if self.isrot:
        self._resconv_op = self._resconv_op.reshape(*self._resconv_op.shape[:2], -1)
        return self
    if self.small_axis:
        self._resconv_op = self._resconv_op.reshape(*self._resconv_op.shape[:2], -1)
        return self
    else:
        self._resconv_op = self._resconv_op.reshape(
            self._resconv_op.shape[0], -1, self._resconv_op.shape[-1])
        return self
for i in range(2):
    plt.close('all')
    cont_i = deceased[i]
    low_risk = np.ceil(cont_i[:9])
    high_risk = np.ceil(cont_i[9:])

    x = np.arange(len(labels))  # the label locations
    width = 0.35                # the width of the bars

    fig, ax = plt.subplots()
    rects1 = ax.bar(x - width / 2, low_risk, width, label='low risk')
    rects2 = ax.bar(x + width / 2, high_risk, width, label='high risk')

    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel('# deaths', fontsize=16)
    title = titles[i] + ", total = " + str(int(np.sum(deceased[i])))
    ax.set_title(title, fontsize=16)
    ax.set_xticks(x)
    ax.set_xticklabels(labels, fontsize=10)
    ax.legend(fontsize=16)

    autolabel(rects1)
    autolabel(rects2)

    fig.tight_layout()
    plt.savefig(filenames[i])
    # plt.show()
    # plt.close()

# ########## plot the control measures
def d_ll(x, num_peds, ess, robot_mu_x, robot_mu_y, ped_mu_x, ped_mu_y,
         cov_robot_x, cov_robot_y, inv_cov_robot_x, inv_cov_robot_y,
         cov_ped_x, cov_ped_y, inv_cov_ped_x, inv_cov_ped_y,
         one_over_cov_sum_x, one_over_cov_sum_y,
         one_over_cov_sumij_x, one_over_cov_sumij_y, normalize, T):
    T = np.size(robot_mu_x)
    d_beta = np.zeros(2 * T * int(np.round(ess + 1)))
    d_llambda = np.zeros(2 * T * int(np.round(ess + 1)))

    # DERIVATIVE WRT ROBOT
    i = 2
    for ped in range(ess):
        vel_x = np.tile(x[:T], (T, 1)).T - np.tile(x[i*T:(i+1)*T], (T, 1))
        vel_y = np.tile(x[T:2*T], (T, 1)).T - np.tile(x[(i+1)*T:(i+2)*T], (T, 1))
        vel_x_2 = np.power(vel_x, 2)
        vel_y_2 = np.power(vel_y, 2)

        quad_x = np.multiply(one_over_cov_sum_x[ped], vel_x_2)
        quad_y = np.multiply(one_over_cov_sum_y[ped], vel_y_2)

        Z_x = np.exp(-0.5 * quad_x)
        Z_y = np.exp(-0.5 * quad_y)
        Z = np.multiply(Z_x, Z_y)
        X = np.divide(Z, 1. - Z)

        alpha_x = np.multiply(X, np.multiply(vel_x, one_over_cov_sum_x[ped]))
        alpha_y = np.multiply(X, np.multiply(vel_y, one_over_cov_sum_y[ped]))

        d_llambda[:T] = np.add(d_llambda[:T], np.sum(alpha_x, axis=1))
        d_llambda[T:2*T] = np.add(d_llambda[T:2*T], np.sum(alpha_y, axis=1))
        i = i + 2

    d_beta[:T] = -np.dot(x[:T] - robot_mu_x, inv_cov_robot_x)
    d_beta[T:2*T] = -np.dot(x[T:2*T] - robot_mu_y, inv_cov_robot_y)
    d_llambda[0:2*T] = np.add(d_llambda[0:2*T], d_beta[0:2*T])

    # DERIVATIVE WRT PED: ROBOT-PED DERIVATIVE, FIRST TERM
    i = 2
    for ped in range(ess):
        vel_x = np.tile(x[:T], (T, 1)) - np.tile(x[i*T:(i+1)*T], (T, 1)).T
        vel_y = np.tile(x[T:2*T], (T, 1)) - np.tile(x[(i+1)*T:(i+2)*T], (T, 1)).T
        vel_x_2 = np.power(vel_x, 2)
        vel_y_2 = np.power(vel_y, 2)

        quad_x = np.multiply(vel_x_2, one_over_cov_sum_x[ped])
        quad_y = np.multiply(vel_y_2, one_over_cov_sum_y[ped])

        Z_x = np.exp(-0.5 * quad_x)
        Z_y = np.exp(-0.5 * quad_y)
        Z = np.multiply(Z_x, Z_y)
        X = np.divide(Z, 1. - Z)

        alpha_x = np.multiply(X, np.multiply(vel_x, one_over_cov_sum_x[ped]))
        alpha_y = np.multiply(X, np.multiply(vel_y, one_over_cov_sum_y[ped]))

        d_llambda[i*T:(i+1)*T] = -np.sum(alpha_x, axis=1)
        d_llambda[(i+1)*T:(i+2)*T] = -np.sum(alpha_y, axis=1)
        i = i + 2

    # DERIVATIVE WRT PED: PED-PED DERIVATIVE, SECOND TERM
    i = 2
    j = 2
    for ped_i in range(ess):
        for ped_j in range(ess):
            if i != j:
                vel_x = np.tile(x[i*T:(i+1)*T], (T, 1)).T - np.tile(x[j*T:(j+1)*T], (T, 1))
                vel_y = np.tile(x[(i+1)*T:(i+2)*T], (T, 1)).T - np.tile(x[(j+1)*T:(j+2)*T], (T, 1))
                vel_x_2 = np.power(vel_x, 2)
                vel_y_2 = np.power(vel_y, 2)

                quad_x = np.multiply(vel_x_2, one_over_cov_sumij_x[ped_i][ped_j])
                quad_y = np.multiply(vel_y_2, one_over_cov_sumij_y[ped_i][ped_j])

                Z_x = np.exp(-0.5 * quad_x)
                Z_y = np.exp(-0.5 * quad_y)
                Z = np.multiply(Z_x, Z_y)
                X = np.divide(Z, 1. - Z)

                alpha_x = np.multiply(X, np.multiply(vel_x,
                                                     one_over_cov_sumij_x[ped_i][ped_j]))
                alpha_y = np.multiply(X, np.multiply(vel_y,
                                                     one_over_cov_sumij_y[ped_i][ped_j]))

                d_llambda[i*T:(i+1)*T] = np.add(d_llambda[i*T:(i+1)*T],
                                                np.sum(alpha_x, axis=1))
                d_llambda[(i+1)*T:(i+2)*T] = np.add(d_llambda[(i+1)*T:(i+2)*T],
                                                    np.sum(alpha_y, axis=1))
            j = j + 2
        i = i + 2
        j = 2

    # DERIVATIVE WRT PED: PED-PED DERIVATIVE, THIRD TERM
    i = 2
    for ped in range(ess):
        d_beta[i*T:(i+1)*T] = -np.dot(x[i*T:(i+1)*T] - ped_mu_x[ped],
                                      inv_cov_ped_x[ped])
        d_beta[(i+1)*T:(i+2)*T] = -np.dot(x[(i+1)*T:(i+2)*T] - ped_mu_y[ped],
                                          inv_cov_ped_y[ped])
        i = i + 2
    d_llambda[2*T:] = np.add(d_llambda[2*T:], d_beta[2*T:])

    return -1. * d_llambda
def fo_ess_compute_newton(diagonal, num_peds, robot_mu_x, robot_mu_y,
                          ped_mu_x, ped_mu_y, cov_robot_x, cov_robot_y,
                          inv_cov_robot_x, inv_cov_robot_y, cov_ped_x, cov_ped_y,
                          inv_cov_ped_x, inv_cov_ped_y,
                          one_over_cov_sum_x, one_over_cov_sum_y, normalize):
    delta0 = [0. for _ in range(num_peds)]
    norm_delta0 = [0. for _ in range(num_peds)]

    T = np.size(robot_mu_x)
    for ped in range(num_peds):
        # stack the robot and pedestrian means into one 4T state vector
        x0 = np.concatenate((robot_mu_x, robot_mu_y, ped_mu_x[ped], ped_mu_y[ped]))
        if diagonal:
            g_ll = fo_diag_ess.d_ll(
                x0, T, robot_mu_x, robot_mu_y, ped_mu_x[ped], ped_mu_y[ped],
                cov_robot_x, cov_robot_y, inv_cov_robot_x, inv_cov_robot_y,
                cov_ped_x[ped], cov_ped_y[ped],
                inv_cov_ped_x[ped], inv_cov_ped_y[ped],
                one_over_cov_sum_x[ped], one_over_cov_sum_y[ped], normalize)
            h_ll = fo_diag_ess.dd_ll(
                x0, T, robot_mu_x, robot_mu_y, ped_mu_x[ped], ped_mu_y[ped],
                cov_robot_x, cov_robot_y, inv_cov_robot_x, inv_cov_robot_y,
                cov_ped_x[ped], cov_ped_y[ped],
                inv_cov_ped_x[ped], inv_cov_ped_y[ped],
                one_over_cov_sum_x[ped], one_over_cov_sum_y[ped], normalize)
        else:
            g_ll = fo_dense_ess.d_ll(
                x0, T, robot_mu_x, robot_mu_y, ped_mu_x[ped], ped_mu_y[ped],
                cov_robot_x, cov_robot_y, inv_cov_robot_x, inv_cov_robot_y,
                cov_ped_x[ped], cov_ped_y[ped],
                inv_cov_ped_x[ped], inv_cov_ped_y[ped],
                one_over_cov_sum_x[ped], one_over_cov_sum_y[ped], normalize)
            h_ll = fo_dense_ess.dd_ll(
                x0, T, robot_mu_x, robot_mu_y, ped_mu_x[ped], ped_mu_y[ped],
                cov_robot_x, cov_robot_y, inv_cov_robot_x, inv_cov_robot_y,
                cov_ped_x[ped], cov_ped_y[ped],
                inv_cov_ped_x[ped], inv_cov_ped_y[ped],
                one_over_cov_sum_x[ped], one_over_cov_sum_y[ped], normalize)
        # magnitude of one Newton step around each pedestrian
        delta0[ped] = np.linalg.solve(h_ll, -g_ll)
        norm_delta0[ped] = np.linalg.norm(delta0[ped])

        # ############################ MINIMIZE ON EACH AGENT
        # x0 = np.concatenate((robot_mu_x, robot_mu_y, ped_mu_x[ped], ped_mu_y[ped]))
        # f = sp.optimize.minimize(diag_ll_ess, x0,
        #         args=(T, robot_mu_x, robot_mu_y, ped_mu_x[ped], ped_mu_y[ped],
        #               inv_cov_robot_x, inv_cov_robot_y,
        #               inv_cov_ped_x[ped], inv_cov_ped_y[ped],
        #               one_over_cov_sum_x[ped], one_over_cov_sum_y[ped],
        #               one_over_std_sum_x[ped], one_over_std_sum_y[ped]),
        #         method='trust-krylov',
        #         jac=fo_diag_ess.d_ll, hess=so_diag_ess.dd_ll)
        # norm_delta0[ped] = np.linalg.norm(f.x[:T] - robot_mu_x) + \
        #                    np.linalg.norm(f.x[T:2*T] - robot_mu_y)

    norm_delta0_normalized = norm_delta0 / np.sum(norm_delta0)
    ess = np.power(np.sum(np.power(norm_delta0_normalized, 2)), -1)
    if np.isnan(ess):
        ess = 1.
        print("ESS IS 0 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    else:
        ess = int(ess)
    top_Z_indices = np.argsort(norm_delta0_normalized)[::-1]

    return ess, top_Z_indices
def dd_ll(x, num_peds, ess, robot_mu_x, robot_mu_y, ped_mu_x, ped_mu_y,
          cov_robot_x, cov_robot_y, inv_cov_robot_x, inv_cov_robot_y,
          cov_ped_x, cov_ped_y, inv_cov_ped_x, inv_cov_ped_y,
          one_over_cov_sum_x, one_over_cov_sum_y,
          one_over_cov_sumij_x, one_over_cov_sumij_y, normalize, T):
    T = np.size(robot_mu_x)
    H = np.zeros((2*T*int(ess+1), 2*T*int(ess+1)), float)
    sum_alpha = np.zeros(2*T*int(np.round(ess+1)))

    # ROBOT DIAG AND OFF DIAG (ROBOT-PED) COMPUTATION
    i = 2
    for ped in range(ess):
        vel_x = np.tile(x[:T], (T, 1)).T - np.tile(x[i*T:(i+1)*T], (T, 1))
        vel_y = np.tile(x[T:2*T], (T, 1)).T - np.tile(x[(i+1)*T:(i+2)*T], (T, 1))
        vel_x_2 = np.power(vel_x, 2)
        vel_y_2 = np.power(vel_y, 2)
        vel_x_y = np.multiply(vel_x, vel_y)

        one_over_cov_x_y = np.multiply(one_over_cov_sum_x[ped],
                                       one_over_cov_sum_y[ped])
        quad_x = np.multiply(one_over_cov_sum_x[ped], vel_x_2)
        quad_y = np.multiply(one_over_cov_sum_y[ped], vel_y_2)

        Z_x = np.exp(-0.5 * quad_x)
        Z_y = np.exp(-0.5 * quad_y)
        Z = np.multiply(Z_x, Z_y)
        X = np.divide(Z, 1. - Z)
        X_2 = np.power(X, 2)
        X_plus_X2 = np.add(X, X_2)

        alpha_x = np.multiply(X, one_over_cov_sum_x[ped])
        alpha_x = np.add(alpha_x, -np.multiply(X_plus_X2, np.power(
            np.multiply(vel_x, one_over_cov_sum_x[ped]), 2)))
        alpha_y = np.multiply(X, one_over_cov_sum_y[ped])
        alpha_y = np.add(alpha_y, -np.multiply(X_plus_X2, np.power(
            np.multiply(vel_y, one_over_cov_sum_y[ped]), 2)))

        sum_alpha[:T] = np.add(sum_alpha[:T], np.sum(alpha_x, axis=1))
        sum_alpha[T:2*T] = np.add(sum_alpha[T:2*T], np.sum(alpha_y, axis=1))

        d_off_alpha = -np.multiply(X_plus_X2, np.multiply(vel_x_y, one_over_cov_x_y))
        # ROBOT OFF DIAG (ROBOT-PED) ENTRY
        H[:T, T:2*T] = np.add(H[:T, T:2*T], np.diag(np.sum(d_off_alpha, axis=1)))

        H[:T, i*T:(i+1)*T] = -1. * alpha_x
        H[i*T:(i+1)*T, :T] = H[:T, i*T:(i+1)*T].T
        H[T:2*T, (i+1)*T:(i+2)*T] = -1. * alpha_y
        H[(i+1)*T:(i+2)*T, T:2*T] = H[T:2*T, (i+1)*T:(i+2)*T].T

        H[T:2*T, i*T:(i+1)*T] = np.multiply(X_plus_X2,
                                            np.multiply(vel_x_y, one_over_cov_x_y))
        H[i*T:(i+1)*T, T:2*T] = H[T:2*T, i*T:(i+1)*T].T
        H[:T, (i+1)*T:(i+2)*T] = np.multiply(X_plus_X2,
                                             np.multiply(vel_x_y, one_over_cov_x_y))
        H[(i+1)*T:(i+2)*T, :T] = H[:T, (i+1)*T:(i+2)*T].T
        i = i + 2

    # ROBOT DIAG ENTRY
    H[:T, :T] = np.add(np.diag(sum_alpha[:T]), -1. * inv_cov_robot_x)
    H[T:2*T, T:2*T] = np.add(np.diag(sum_alpha[T:2*T]), -1. * inv_cov_robot_y)
    H[T:2*T, :T] = H[:T, T:2*T].T

    # PED i-PED i DIAG COMPUTATION: ROBOT-PED i, FO TERMS, TAU-T INDEXING
    i = 2
    for ped in range(ess):
        vel_x = np.tile(x[:T], (T, 1)) - np.tile(x[i*T:(i+1)*T], (T, 1)).T
        vel_y = np.tile(x[T:2*T], (T, 1)) - np.tile(x[(i+1)*T:(i+2)*T], (T, 1)).T
        vel_x_2 = np.power(vel_x, 2)
        vel_y_2 = np.power(vel_y, 2)
        vel_x_y = np.multiply(vel_x, vel_y)

        one_over_cov_x_y = np.multiply(one_over_cov_sum_x[ped],
                                       one_over_cov_sum_y[ped])
        quad_x = np.multiply(one_over_cov_sum_x[ped], vel_x_2)
        quad_y = np.multiply(one_over_cov_sum_y[ped], vel_y_2)

        Z_x = np.exp(-0.5 * quad_x)
        Z_y = np.exp(-0.5 * quad_y)
        Z = np.multiply(Z_x, Z_y)
        X = np.divide(Z, 1. - Z)
        X_2 = np.power(X, 2)
        X_plus_X2 = np.add(X, X_2)

        alpha_x = np.multiply(X, one_over_cov_sum_x[ped])
        alpha_x = np.add(alpha_x, -np.multiply(X_plus_X2, np.power(
            np.multiply(vel_x, one_over_cov_sum_x[ped]), 2)))
        alpha_y = np.multiply(X, one_over_cov_sum_y[ped])
        alpha_y = np.add(alpha_y, -np.multiply(X_plus_X2, np.power(
            np.multiply(vel_y, one_over_cov_sum_y[ped]), 2)))

        # PED i-PED i DIAG ENTRY, FO TERMS
        H[i*T:(i+1)*T, i*T:(i+1)*T] = np.diag(np.sum(alpha_x, axis=1)) - \
            inv_cov_ped_x[ped]
        H[(i+1)*T:(i+2)*T, (i+1)*T:(i+2)*T] = np.diag(np.sum(alpha_y, axis=1)) - \
            inv_cov_ped_y[ped]
        H[i*T:(i+1)*T, (i+1)*T:(i+2)*T] = -np.diag(np.sum(np.multiply(
            X_plus_X2, np.multiply(vel_x_y, one_over_cov_x_y)), axis=1))
        H[(i+1)*T:(i+2)*T, i*T:(i+1)*T] = H[i*T:(i+1)*T, (i+1)*T:(i+2)*T].T
        i = i + 2

    # PED i-PED i DIAG COMPUTATION: ROBOT TO PED i, SO TERMS, T-TAU INDEX
    # TAU,T INDEXING = np.tile(x[:T],(T,1)) - np.tile(x[i*T:(i+1)*T],(T,1)).T
    # T,TAU INDEX = np.tile(x[i*T:(i+1)*T],(T,1)).T - np.tile(x[j*T:(j+1)*T],(T,1))
    i = 2
    j = 2
    for ped_i in range(ess):
        for ped_j in range(ess):
            if i != j:
                vel_x = np.tile(x[i*T:(i+1)*T], (T, 1)).T - np.tile(x[j*T:(j+1)*T], (T, 1))
                vel_y = np.tile(x[(i+1)*T:(i+2)*T], (T, 1)).T - np.tile(x[(j+1)*T:(j+2)*T], (T, 1))
                vel_x_2 = np.power(vel_x, 2)
                vel_y_2 = np.power(vel_y, 2)
                vel_x_y = np.multiply(vel_x, vel_y)

                one_over_covij_x_y = np.multiply(one_over_cov_sumij_x[ped_i][ped_j],
                                                 one_over_cov_sumij_y[ped_i][ped_j])
                quad_x = np.multiply(one_over_cov_sumij_x[ped_i][ped_j], vel_x_2)
                quad_y = np.multiply(one_over_cov_sumij_y[ped_i][ped_j], vel_y_2)

                Z_x = np.exp(-0.5 * quad_x)
                Z_y = np.exp(-0.5 * quad_y)
                Z = np.multiply(Z_x, Z_y)
                X = np.divide(Z, 1. - Z)
                X_2 = np.power(X, 2)
                X_plus_X2 = np.add(X, X_2)

                alpha_x = np.multiply(X, one_over_cov_sumij_x[ped_i][ped_j])
                alpha_x = np.add(alpha_x, -np.multiply(X_plus_X2, np.power(
                    np.multiply(vel_x, one_over_cov_sumij_x[ped_i][ped_j]), 2)))
                alpha_y = np.multiply(X, one_over_cov_sumij_y[ped_i][ped_j])
                alpha_y = np.add(alpha_y, -np.multiply(X_plus_X2, np.power(
                    np.multiply(vel_y, one_over_cov_sumij_y[ped_i][ped_j]), 2)))

                H[i*T:(i+1)*T, i*T:(i+1)*T] = np.add(
                    H[i*T:(i+1)*T, i*T:(i+1)*T], np.diag(np.sum(alpha_x, axis=1)))
                H[(i+1)*T:(i+2)*T, (i+1)*T:(i+2)*T] = np.add(
                    H[(i+1)*T:(i+2)*T, (i+1)*T:(i+2)*T],
                    np.diag(np.sum(alpha_y, axis=1)))

                d_off_alpha = np.multiply(X_plus_X2,
                                          np.multiply(vel_x_y, one_over_covij_x_y))
                H[i*T:(i+1)*T, (i+1)*T:(i+2)*T] = np.add(
                    H[i*T:(i+1)*T, (i+1)*T:(i+2)*T],
                    -np.diag(np.sum(d_off_alpha, axis=1)))
            j = j + 2
        H[(i+1)*T:(i+2)*T, i*T:(i+1)*T] = H[i*T:(i+1)*T, (i+1)*T:(i+2)*T]
        i = i + 2
        j = 2

    # PED i-PED j OFF DIAG COMPUTATION
    i = 2
    j = 2
    for ped_i in range(ess):
        for ped_j in range(ess):
            if i != j:
                vel_x = np.tile(x[i*T:(i+1)*T], (T, 1)).T - np.tile(x[j*T:(j+1)*T], (T, 1))
                vel_y = np.tile(x[(i+1)*T:(i+2)*T], (T, 1)).T - np.tile(x[(j+1)*T:(j+2)*T], (T, 1))
                vel_x_2 = np.power(vel_x, 2)
                vel_y_2 = np.power(vel_y, 2)
                vel_x_y = np.multiply(vel_x, vel_y)

                one_over_covij_x_y = np.multiply(one_over_cov_sumij_x[ped_i][ped_j],
                                                 one_over_cov_sumij_y[ped_i][ped_j])
                quad_x = np.multiply(one_over_cov_sumij_x[ped_i][ped_j], vel_x_2)
                quad_y = np.multiply(one_over_cov_sumij_y[ped_i][ped_j], vel_y_2)

                Z_x = np.exp(-0.5 * quad_x)
                Z_y = np.exp(-0.5 * quad_y)
                Z = np.multiply(Z_x, Z_y)
                X = np.divide(Z, 1. - Z)
                X_2 = np.power(X, 2)
                X_plus_X2 = np.add(X, X_2)

                alpha_x = np.multiply(X, one_over_cov_sumij_x[ped_i][ped_j])
                alpha_x = np.add(alpha_x, -np.multiply(X_plus_X2, np.power(
                    np.multiply(vel_x, one_over_cov_sumij_x[ped_i][ped_j]), 2)))
                alpha_y = np.multiply(X, one_over_cov_sumij_y[ped_i][ped_j])
                alpha_y = np.add(alpha_y, -np.multiply(X_plus_X2, np.power(
                    np.multiply(vel_y, one_over_cov_sumij_y[ped_i][ped_j]), 2)))
                alpha_x_y = np.multiply(X_plus_X2,
                                        np.multiply(vel_x_y, one_over_covij_x_y))

                H[i*T:(i+1)*T, j*T:(j+1)*T] = -1. * alpha_x
                H[j*T:(j+1)*T, i*T:(i+1)*T] = H[i*T:(i+1)*T, j*T:(j+1)*T].T
                H[(i+1)*T:(i+2)*T, (j+1)*T:(j+2)*T] = -1. * alpha_y
                H[(j+1)*T:(j+2)*T, (i+1)*T:(i+2)*T] = \
                    H[(i+1)*T:(i+2)*T, (j+1)*T:(j+2)*T].T
                H[(i+1)*T:(i+2)*T, j*T:(j+1)*T] = alpha_x_y
                H[j*T:(j+1)*T, (i+1)*T:(i+2)*T] = H[(i+1)*T:(i+2)*T, j*T:(j+1)*T].T
                H[i*T:(i+1)*T, (j+1)*T:(j+2)*T] = alpha_x_y
                H[(j+1)*T:(j+2)*T, i*T:(i+1)*T] = H[i*T:(i+1)*T, (j+1)*T:(j+2)*T].T
            j = j + 2
        i = i + 2
        j = 2

    return -1. * H
daily = []
for i in range(number_sample):
    solution = solutions[i]
    ax.plot(dates_prediction, np.diff(solution[:, index]), '--', linewidth=0.5)
    daily.append(np.diff(solution[:, index]))

daily = np.array(daily)
number_days = len(daily[0, :])
daily_average = np.zeros(number_days)
daily_plus = np.zeros(number_days)
daily_minus = np.zeros(number_days)
for i in range(number_days):
    daily_sort = np.sort(daily[:, i])
    # daily_average[i], daily_plus[i], daily_minus[i] = mean_confidence_interval(daily[:, i])
    # mean and the empirical 2.5% / 97.5% quantiles of the samples
    daily_average[i], daily_plus[i], daily_minus[i] = \
        np.mean(daily_sort), daily_sort[int(2.5 / 100 * number_sample)], \
        daily_sort[int(97.5 / 100 * number_sample)]

ax.plot(dates_prediction, daily_average, '.-', linewidth=2, label=labels[3])
ax.fill_between(dates_prediction, daily_minus, daily_plus, color='gray', alpha=.2)

ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax.legend(loc='best')
ax.grid(True)
plt.title(daily_titles[ind])
plt.legend()
filename = filename_prex + daily_titles[ind] + ".pdf"
plt.savefig(filename)

# # plot total
fix, ax = plt.subplots()
def hyperopt_train_test(params):
    clf = rxn_estimator(np.float32(params[0]), np.float32(params[1]),
                        int(params[2]), other_param_dict)
    return cross_val_score(clf, X, y, cv=3).mean()
def AddJitterOp(inputs: anp.ndarray, initial_jitter_factor=INITIAL_JITTER_FACTOR,
                jitter_growth=JITTER_GROWTH, debug_log='false'):
    """
    Finds the smallest jitter to add to the diagonal of a square matrix to
    render the matrix positive definite (in that linalg.potrf works).

    Given input x (positive semi-definite matrix) and sigsq_init (nonneg
    scalar), find sigsq_final (nonneg scalar), so that:
        sigsq_final = sigsq_init + jitter, jitter >= 0,
        x + sigsq_final * Id positive definite (so that the potrf call works)
    We return the matrix x + sigsq_final * Id, for which potrf has not failed.

    For the gradient, the dependence of jitter on the inputs is ignored.

    The values tried for sigsq_final are:
        sigsq_init, sigsq_init + initial_jitter * (jitter_growth ** k),
        k = 0, 1, 2, ...,
        initial_jitter = initial_jitter_factor * max(mean(diag(x)), 1)

    Note: The scaling of initial_jitter with mean(diag(x)) is taken from GPy.
    The rationale is that the largest eigenvalue of x is >= mean(diag(x)),
    and likely of this magnitude.

    There is no guarantee that the Cholesky factor returned is
    well-conditioned enough for subsequent computations to be reliable. A
    better solution would be to estimate the condition number of the Cholesky
    factor, and to add jitter until this is bounded below a threshold we
    tolerate. See

        Higham, N.
        A Survey of Condition Number Estimation for Triangular Matrices
        MIMS EPrint: 2007.10

    Algorithm 4.1 could work for us.
    """
    assert initial_jitter_factor > 0. and jitter_growth > 1.

    n_square = inputs.shape[0] - 1
    n = int(anp.sqrt(n_square))
    assert n_square % n == 0 and n_square // n == n, "x must be square matrix, shape (n, n)"
    x, sigsq_init = anp.reshape(inputs[:-1], (n, -1)), inputs[-1]

    def _get_constant_identity(x, constant):
        n, _ = x.shape
        return anp.diag(anp.ones((n,)) * constant)

    def _get_jitter_upperbound(x):
        # To define a safeguard in the while-loop of the forward pass, we
        # define an upper bound on the jitter we can reasonably add. The
        # bound is quite generous, and is dependent on the scale of the input
        # x (the scale is captured via the trace of x); the primary goal is
        # to avoid any infinite while-loop.
        return JITTER_UPPERBOUND_FACTOR * max(1., anp.mean(anp.diag(x)))

    jitter = 0.
    jitter_upperbound = _get_jitter_upperbound(x)
    must_increase_jitter = True
    x_plus_constant = None

    while must_increase_jitter and jitter <= jitter_upperbound:
        try:
            x_plus_constant = x + _get_constant_identity(x, sigsq_init + jitter)
            L = anp.linalg.cholesky(x_plus_constant)
            must_increase_jitter = False
        except anp.linalg.LinAlgError:
            if debug_log == 'true':
                logger.info("sigsq = {} does not work".format(sigsq_init + jitter))
            if jitter == 0.0:
                jitter = initial_jitter_factor * max(1., anp.mean(anp.diag(x)))
            else:
                jitter = jitter * jitter_growth

    assert not must_increase_jitter, \
        "The jitter ({}) has reached its upperbound ({}) while the Cholesky " \
        "of the input matrix still cannot be computed.".format(jitter, jitter_upperbound)

    if debug_log == 'true':
        logger.info("sigsq_final = {}".format(sigsq_init + jitter))

    return x_plus_constant
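# Minimal sketch of how `AddJitterOp` can be driven, assuming autograd.numpy
# is imported as `anp`: the (n, n) matrix and the initial sigma^2 are packed
# into one flat vector of length n*n + 1.
x_demo = anp.eye(3)  # already positive definite
inputs_demo = anp.concatenate((x_demo.flatten(), anp.array([1e-3])))
out = AddJitterOp(inputs_demo)
print(out.shape)     # -> (3, 3), i.e. x + sigsq_final * Id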
def match(self, model_frame):
    """Match the observation to a frame

    Parameters
    ----------
    model_frame: `Frame`
        Frame to match to the observation
    """
    self.model_frame = model_frame

    # check dtype consistency
    if self.frame.dtype != model_frame.dtype:
        self.frame.dtype = model_frame.dtype
        self.images = self.images.copy().astype(model_frame.dtype)
        if type(self.weights) is np.ndarray:
            self.weights = self.weights.copy().astype(model_frame.dtype)

    # determine which channels are covered by data;
    # this has to be done first because convolution is only determined for these
    self._channel_map = self.get_channel_map_for(model_frame)

    # check if data is rotated wrt model_frame
    self.angle, self.h = interpolation.get_angles(self.frame.wcs, model_frame.wcs)
    self.isrot = (np.abs(self.angle[1]) ** 2) > np.finfo(float).eps

    # Get pixel coordinates in each frame
    # TODO: can this be done with frame.convert_pixel_to???
    # If so, we can remove all code in resampling
    coord_lr, coord_hr = resampling.match_patches(self, model_frame, isrot=self.isrot)

    # shape of the low resolution image in the intersection or union
    lr_shape = (
        np.max(coord_lr[0]) - np.min(coord_lr[0]) + 1,
        np.max(coord_lr[1]) - np.min(coord_lr[1]) + 1,
    )

    # TODO: should coords define a _slices_for_model/images?

    # compute the difference kernel in model_frame pixels
    diff_psf, target = self.build_diffkernel(model_frame)

    # 1D convolutions of the model are done along the smaller axis; therefore,
    # the psf is convolved along the frame's longer axis.
    # the smaller frame axis:
    self.small_axis = self.frame.Nx <= self.frame.Ny

    self._fft_shape = fft._get_fft_shape(
        target, np.zeros(model_frame.shape), padding=3, axes=[-2, -1], max=False,
    )
    # Cutting diff_psf if needed and keeping the parity
    if (self._fft_shape[-2] < diff_psf.shape[-2]) or (
            self._fft_shape[-1] < diff_psf.shape[-1]):
        diff_psf = fft._centered(
            diff_psf, np.array([diff_psf.shape[0] + 1, *self._fft_shape]) - 1)

    self._diff_kernels = fft.Fourier(
        fft._pad(diff_psf.image, self._fft_shape, axes=(-2, -1)))

    # center of the fft frame, corrected for parity mismatches
    center_y = (int(self._fft_shape[0] / 2.0
                    - (self._fft_shape[0] - model_frame.Ny) / 2.0)
                + ((self._fft_shape[0] % 2) != 0) * ((model_frame.Ny % 2) == 0)
                + model_frame.origin[-2])
    center_x = (int(self._fft_shape[1] / 2.0
                    - (self._fft_shape[1] - model_frame.Nx) / 2.0)
                - ((self._fft_shape[1] % 2) != 0) * ((model_frame.Nx % 2) == 0)
                + model_frame.origin[-1])

    if self.isrot:
        # Unrotated coordinates:
        Y_unrot = ((coord_hr[0] - center_y) * self.angle[0]
                   - (coord_hr[1] - center_x) * self.angle[1]).reshape(lr_shape)
        X_unrot = ((coord_hr[1] - center_x) * self.angle[0]
                   + (coord_hr[0] - center_y) * self.angle[1]).reshape(lr_shape)

        # Removing redundancy
        self.Y_unrot = Y_unrot[:, 0]
        self.X_unrot = X_unrot[0, :]

        if self.small_axis:
            self.shifts = np.array([self.Y_unrot * self.angle[0],
                                    self.Y_unrot * self.angle[1]])
            self.other_shifts = np.array([-self.angle[1] * self.X_unrot,
                                          self.angle[0] * self.X_unrot])
        else:
            self.shifts = np.array([-self.angle[1] * self.X_unrot,
                                    self.angle[0] * self.X_unrot])
            self.other_shifts = np.array([self.Y_unrot * self.angle[0],
                                          self.Y_unrot * self.angle[1]])

        axes = (1, 2)
    # aligned case.
    else:
        axes = [int(not self.small_axis) + 1]
        self.shifts = np.array(coord_hr)
        self.shifts[0] -= center_y
        self.shifts[1] -= center_x
        self.other_shifts = np.copy(self.shifts)

    # Computes the resampling/convolution matrix
    resconv_op = self.sinc_shift(self._diff_kernels, self.shifts, axes)
    self._resconv_op = np.array(resconv_op, dtype=self.frame.dtype) * self.h**2

    if self.isrot:
        self._resconv_op = self._resconv_op.reshape(*self._resconv_op.shape[:2], -1)
        return self
    if self.small_axis:
        self._resconv_op = self._resconv_op.reshape(*self._resconv_op.shape[:2], -1)
        return self
    else:
        self._resconv_op = self._resconv_op.reshape(
            self._resconv_op.shape[0], -1, self._resconv_op.shape[-1])
        return self
def extractPts(pts, idx):
    # six control points around `idx`: two before, four after
    return pts[int(idx) - 2:int(idx) + 4]
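# Usage sketch, assuming numpy as np:
pts_demo = np.arange(10)
print(extractPts(pts_demo, 4.0))  # -> [2 3 4 5 6 7]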
def computeDist(sample):
    # evaluate the spline at `sample` and look up the obstacle distance
    # at the (clipped) pixel it lands on
    p = bspline(sample[1], extractPts(pts, sample[0] + startIdx))
    return distObs[np.clip(int(p[0]), 0, distObs.shape[0] - 1),
                   np.clip(int(p[1]), 0, distObs.shape[1] - 1)]