def direction_to_angles(v: ArrayLike) -> np.ndarray:
    """
    Convert a cartesian unit vector to a zenith-azimuth pair.

    Parameters
    ----------
    v : array-like
        A sequence of 3-vectors (shape (N, 3)) [unitless].

    Returns
    -------
    ndarray
        A sequence of 2-vectors containing zenith and azimuth angles, where
        zenith = 0 corresponds to +z direction (shape (N, 2)) [rad].
    """
    v = np.atleast_1d(v)
    if v.ndim < 2:
        v = v.reshape((v.size // 3, 3))
    if v.ndim > 2 or v.shape[1] != 3:
        raise ValueError(f"array must be of shape (N, 3), got {v.shape}")

    v = v / np.linalg.norm(v, axis=-1).reshape(len(v), 1)
    theta = np.arccos(v[..., 2])
    phi = np.arctan2(v[..., 1], v[..., 0])

    return np.vstack((theta, phi)).T
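
# Usage sketch (illustrative, not part of the original module): the +z
# direction maps to zenith 0, and a vector along +x maps to zenith pi/2,
# azimuth 0.
#
#   >>> direction_to_angles([0, 0, 1])
#   array([[0., 0.]])
#   >>> direction_to_angles([1, 0, 0])
#   array([[1.57079633, 0.        ]])
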
def uniform_hemisphere_to_square(v: ArrayLike) -> np.ndarray:
    """
    Inverse of the mapping square_to_uniform_hemisphere.

    Parameters
    ----------
    v : array-like
        A (N, 3) array of vectors on the unit sphere.

    Returns
    -------
    ndarray
        Corresponding coordinates on the [0, 1]² square as a (N, 2) array.

    Notes
    -----
    The function tries to be flexible with (N, 1) and (N,) arrays and
    attempts reshaping them to (N/3, 3). This, in particular, means that the
    following call will produce the expected result:

    .. code:: python

       uniform_hemisphere_to_square((0, 0, 1))
    """
    # Matches Mitsuba implementation
    v = np.atleast_1d(v)
    if v.ndim < 2:
        v = v.reshape((v.size // 3, 3))
    if v.ndim > 2 or v.shape[1] != 3:
        raise ValueError(f"array must be of shape (N, 3), got {v.shape}")

    p = v[..., 0:2]
    return uniform_disk_to_square_concentric(
        p / np.sqrt(v[..., 2] + 1.0).reshape((len(p), 1)))
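
# Round-trip sketch (illustrative): the hemisphere pole maps back to the
# centre of the unit square.
#
#   >>> uniform_hemisphere_to_square((0, 0, 1))
#   array([[0.5, 0.5]])
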
def set_adiacenta(self, M_adiacenta: npt.ArrayLike):
    """
    :param M_adiacenta: adjacency matrix
    """
    # Go through np.asarray so that plain nested lists are accepted too.
    self.M_adiacenta = np.asarray(M_adiacenta).astype('int')
    self.nodes = self.get_nodes()
    self.edges = self.get_edges()
def nan_mask(data: npt.ArrayLike) -> np.ndarray:
    """
    Replaces any masked array values with NaNs.

    As a consequence of filling the mask with NaNs, non-float arrays will be
    cast to float.

    Parameters
    ----------
    data : ArrayLike
        The masked array to be filled with NaNs.

    Returns
    -------
    ndarray
        The `data` with masked values replaced with NaNs.

    Notes
    -----
    .. versionadded:: 0.1.0

    """
    if np.ma.isMaskedArray(data):
        if data.dtype.char not in np.typecodes["Float"]:
            dmsg = (
                f"converting from '{np.typename(data.dtype.char)}' "
                f"to '{np.typename('f')}'"
            )
            logger.debug(dmsg)
            data = ma.asanyarray(data, dtype=float)

        data = data.filled(np.nan)

    return data
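
# Usage sketch (illustrative): masked entries come back as NaN, and the
# integer input is promoted to float so NaN is representable.
#
#   >>> nan_mask(np.ma.masked_array([1, 2, 3], mask=[False, True, False]))
#   array([ 1., nan,  3.])
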
def _accumulate_sufficient_statistics(self, stats: dict, obs: ArrayLike,
                                      framelogprob: ArrayLike,
                                      posteriors: ArrayLike,
                                      fwdlattice: ArrayLike,
                                      bwdlattice: ArrayLike):
    """
    Update sufficient statistics from a given sample.

    Parameters
    ----------
    stats : dict
        Sufficient statistics as returned by
        _initialize_sufficient_statistics().
    obs : ArrayLike
        Jump distance sequence.
    framelogprob : ArrayLike
        Log-probabilities of each sample under each of the model states.
    posteriors : ArrayLike
        Posterior probabilities of each sample being generated by each of
        the model states.
    fwdlattice : ArrayLike
        Log-forward probabilities.
    bwdlattice : ArrayLike
        Log-backward probabilities.

    Returns
    -------
    None.
    """
    super()._accumulate_sufficient_statistics(
        stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice)
    if 'd' in self.params:
        stats['post'] += posteriors.sum(axis=0)
        stats['obs'] += np.dot(posteriors.T, obs**2)
def fma(a: ArrayLike, b: ArrayLike, c: ArrayLike) -> np.ndarray:
    """Compute ``a * b + c`` with a single rounding (fused multiply-add)."""
    a = np.asarray(a)
    b = np.asarray(b)
    c = np.asarray(c)

    # Promote all operands to a common floating-point type, then dispatch to
    # the matching C fma variant.
    dtype = np.find_common_type([], [a.dtype, b.dtype, c.dtype])
    a = a.astype(dtype)
    b = b.astype(dtype)
    c = c.astype(dtype)

    if dtype == np.single:
        return _pyfma.fmaf(a, b, c)
    elif dtype == np.double:
        return _pyfma.fma(a, b, c)

    assert dtype == np.longdouble
    return _pyfma.fmal(a, b, c)
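
# Usage sketch (illustrative): a fused multiply-add evaluates a * b + c with
# a single rounding step.
#
#   >>> float(fma(2.0, 3.0, 1.0))
#   7.0
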
def compare_render(orig_data: ArrayLike,
                   rendered_data: ArrayLike,
                   previous_render: Optional[ArrayLike] = None,
                   atol: Optional[float] = 1.0):
    """Compare an expected original array with the rendered result.

    Parameters
    ----------
    orig_data
        Expected output result array. This will be converted to an RGBA
        array to be compared against the rendered data.
    rendered_data
        Actual rendered result as an RGBA 8-bit unsigned array.
    previous_render
        Previous instance of a render that the current render should not be
        equal to.
    atol
        Absolute tolerance to be passed to
        :func:`numpy.testing.assert_allclose`.

    """
    predicted = make_rgba(orig_data)
    np.testing.assert_allclose(rendered_data.astype(float),
                               predicted.astype(float), atol=atol)

    if previous_render is not None:
        # assert not allclose
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_data, previous_render, atol=10)
def angles_to_direction(angles: ArrayLike) -> np.ndarray:
    r"""
    Convert a zenith and azimuth angle pair to a direction unit vector.

    Parameters
    ----------
    angles : array-like
        A (N, 2) array of zenith and azimuth angles [radian]. For the zenith
        angle :math:`\theta`, 0 corresponds to zenith, :math:`\pi/2`
        corresponds to the XY plane and :math:`\pi` corresponds to nadir.
        Negative values are allowed; :math:`(\theta, \varphi)` then maps to
        :math:`(| \theta |, \varphi + \pi)`. For the azimuth angle
        :math:`\varphi`, 0 corresponds to the X axis and :math:`\pi / 2`
        corresponds to the Y axis (*i.e.* rotation is counter-clockwise).

    Returns
    -------
    ndarray
        Direction corresponding to the angular parameters [unitless].
    """
    angles = np.atleast_1d(angles)
    if angles.ndim < 2:
        angles = angles.reshape((angles.size // 2, 2))
    if angles.ndim > 2 or angles.shape[1] != 2:
        raise ValueError(f"array must be of shape (N, 2), got {angles.shape}")

    negative_zenith = angles[:, 0] < 0
    angles[negative_zenith, 0] *= -1
    angles[negative_zenith, 1] += np.pi

    return cos_angle_to_direction(np.cos(angles[:, 0]), angles[:, 1])
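
# Usage sketch (illustrative, assuming cos_angle_to_direction follows the
# convention documented above): zenith 0 maps to +z.
#
#   >>> angles_to_direction((0.0, 0.0))
#   array([[0., 0., 1.]])
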
def make_rgba(data_in: ArrayLike) -> ArrayLike:
    """Convert any array to an RGBA array.

    RGBA arrays have 3 dimensions where the last represents the channels. If
    an alpha channel needs to be added it will be made completely opaque.

    Returns
    -------
    3D RGBA unsigned 8-bit array

    """
    max_val = max_for_dtype(data_in.dtype)
    if data_in.ndim == 3 and data_in.shape[-1] == 1:
        data_in = data_in.squeeze()

    if data_in.ndim == 2:
        # Grayscale: replicate to four channels, then make alpha opaque.
        out = np.stack([data_in] * 4, axis=2)
        out[:, :, 3] = max_val
    elif data_in.shape[-1] == 3:
        # RGB: append an opaque alpha channel.
        out = np.concatenate((data_in, np.ones(
            (*data_in.shape[:2], 1)) * max_val), axis=2)
    else:
        out = data_in
    return np.round((out.astype(float) * 255 / max_val)).astype(np.uint8)
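
# Usage sketch (illustrative, assuming max_for_dtype returns the dtype's
# maximum, i.e. 255 for uint8): a 2D grayscale array gains RGB channels and
# a fully opaque alpha channel.
#
#   >>> rgba = make_rgba(np.zeros((4, 4), dtype=np.uint8))
#   >>> rgba.shape, int(rgba[0, 0, 3])
#   ((4, 4, 4), 255)
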
def position(self, position: npt.ArrayLike):
    position = np.asarray(position)

    if np.any((position < self.min_position) |
              (self.max_position < position)):
        raise ValueError("The position exceeds the robot's limits.")

    assert self.model.to_gazebo().reset_joint_positions(position.tolist())
def velocity(self, velocity: npt.ArrayLike):
    velocity = np.asarray(velocity)

    if np.any((velocity < self.min_velocity) |
              (self.max_velocity < velocity)):
        raise ValueError("The velocity exceeds the robot's limits.")

    assert self.model.to_gazebo().reset_joint_velocities(velocity.tolist())
def target_velocity(self, velocity: npt.ArrayLike):
    velocity = np.asarray(velocity)

    if np.any((velocity < self.min_velocity) |
              (self.max_velocity < velocity)):
        raise ValueError("The target velocity exceeds the robot's limits.")

    assert self.model.set_joint_velocity_targets(velocity.tolist())
def target_acceleration(self, acceleration: npt.ArrayLike):
    acceleration = np.asarray(acceleration)

    if np.any((acceleration < self.min_acceleration) |
              (self.max_acceleration < acceleration)):
        raise ValueError(
            "The target acceleration exceeds the robot's limits.")

    assert self.model.set_joint_acceleration_targets(acceleration.tolist())
def pmf(self, X: npt.ArrayLike) -> np.ndarray:
    X = np.array(X)
    try:
        return np.apply_along_axis(self._prob_of, 1, X)
    except np.AxisError:
        # Deal with 0-d arrays
        X = X.reshape(-1, 1)
        return np.apply_along_axis(self._prob_of, 1, X)
def findZero(matrix: npt.ArrayLike):
    matrix = np.asarray(matrix)
    n, m = matrix.shape
    flatMatrix = matrix.reshape(n * m)

    # 1. find the zero
    zeroPosition = np.where(flatMatrix == 0)[0][0]

    # 2. convert the flat index to a (row, column) pair; each row holds
    # m entries
    zeroPosition = [0, zeroPosition]
    while zeroPosition[1] > m - 1:
        zeroPosition[1] = zeroPosition[1] - m
        zeroPosition[0] += 1

    return tuple(zeroPosition)
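
# Usage sketch (illustrative): locate the zero entry of a board, including
# non-square boards.
#
#   >>> findZero(np.array([[1, 2, 0], [3, 4, 5]]))
#   (0, 2)
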
def build_thumbnail(image_array: npt.ArrayLike, thumbnail_dir: Path):
    # Shift the data to be strictly positive, compress it logarithmically
    # and map it to the 0-205 range before auto-contrasting.
    image_array = image_array - np.min(image_array) + 1.001
    image_array = np.log(image_array)
    image_array = 205 * image_array / np.max(image_array)
    auto_contrast_image = Image.fromarray(image_array.astype("uint8"))
    auto_contrast_image = ImageOps.autocontrast(auto_contrast_image,
                                                cutoff=0.1)

    filename = str(uuid4()) + ".png"
    file = thumbnail_dir / Path(filename)
    auto_contrast_image.save(file, format="PNG")
    return file
def matrix_to_tex_string(matrix: npt.ArrayLike) -> str:
    matrix = np.array(matrix).astype("str")
    if matrix.ndim == 1:
        matrix = matrix.reshape((matrix.size, 1))
    n_rows, n_cols = matrix.shape
    prefix = "\\left[ \\begin{array}{%s}" % ("c" * n_cols)
    suffix = "\\end{array} \\right]"
    rows = [" & ".join(row) for row in matrix]
    return prefix + " \\\\ ".join(rows) + suffix
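
# Usage sketch (illustrative): a 2x2 integer matrix becomes a TeX array
# environment.
#
#   >>> matrix_to_tex_string([[1, 2], [3, 4]])
#   '\\left[ \\begin{array}{cc}1 & 2 \\\\ 3 & 4\\end{array} \\right]'
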
def get_sampling_w(t: npt.ArrayLike,
                   oversampling: Optional[int] = 8,
                   max_freq: Optional[int] = 1) -> npt.ArrayLike:
    '''
    Get sampling frequencies for a time-series.

    Args:
        t: Sampling times
        oversampling: Oversampling factor
        max_freq: Maximum frequency scaling factor. Any value over 1 will
            ignore the Nyquist frequency limit

    Returns:
        Sampling frequencies spaced by 1/(T * oversampling), where T is the
        interval spanned by t
    '''
    T = t.max() - t.min()
    N = t.shape[0]
    return np.arange(1 / (T * oversampling), max_freq * N / (2 * T),
                     1 / (T * oversampling))
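
# Usage sketch (illustrative): for 10 samples spanning 9 time units with the
# default oversampling of 8, the grid starts at 1 / (9 * 8) = 1/72.
#
#   >>> w = get_sampling_w(np.arange(10.0))
#   >>> float(w[0])
#   0.013888888888888888
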
def SetData(self, binding, data: ArrayLike):
    # Shader must have been created
    if self.__shader is not None:
        # Convert np array to bytes
        packed = data.tobytes()

        # Either update or create buffer
        if binding in self.__SSBOs:
            ssbo = self.__SSBOs[binding][0]
            CS.UpdateSSBO(ssbo, packed)
        else:
            ssbo = CS.NewSSBO(packed)
            CS.UseSSBO(ssbo, self.__shader, binding)

        # Store info
        self.__SSBOs[binding] = (ssbo, data.shape, data.dtype, data.nbytes)
def corr(y1: ArrayLike,
         y2: ArrayLike,
         axis: Union[None, int, Tuple[int]] = -1,
         eps: float = 1e-8,
         **kwargs) -> np.ndarray:
    """
    Compute the correlation between two NumPy arrays along the specified
    dimension(s).

    Args:
        y1: first NumPy array
        y2: second NumPy array
        axis: dimension(s) along which the correlation is computed. Any
            valid NumPy axis spec works here
        eps: offset added to the standard deviation to avoid division by
            very small numbers (default 1e-8)
        **kwargs: passed to the final numpy.mean operation over the
            standardized y1 * y2

    Returns:
        correlation array
    """
    # Standardize both arrays, then average their elementwise product.
    y1 = (y1 - y1.mean(axis=axis, keepdims=True)) / (
        y1.std(axis=axis, keepdims=True, ddof=0) + eps)
    y2 = (y2 - y2.mean(axis=axis, keepdims=True)) / (
        y2.std(axis=axis, keepdims=True, ddof=0) + eps)
    return (y1 * y2).mean(axis=axis, **kwargs)
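
# Usage sketch (illustrative): identical signals correlate to ~1 and a sign
# flip to ~-1 (the eps offset keeps the values marginally below unity).
#
#   >>> x = np.array([1.0, 2.0, 3.0, 4.0])
#   >>> round(float(corr(x, x)), 4), round(float(corr(x, -x)), 4)
#   (1.0, -1.0)
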
def _asarray(x: ArrayLike, dtype: Optional[DTypeLike] = None) -> np.ndarray:
    """Convert an array-like to an array.

    Unlike np.asarray, this will reject astropy Quantities with dimensions
    and convert dimensionless quantities correctly even if they have scale.
    When a dtype is specified, uses ``same_kind`` casting.
    """
    if isinstance(x, u.Quantity):
        array = x.to_value(u.dimensionless_unscaled)
    else:
        array = np.asarray(x)
    if dtype is not None:
        array = array.astype(dtype, copy=False, casting='same_kind')
    return array
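
# Usage sketch (illustrative): dimensionless Quantities are converted with
# their scale applied, while dimensional ones raise.
#
#   >>> import astropy.units as u
#   >>> float(_asarray(100.0 * u.percent))
#   1.0
#   >>> _asarray(1.0 * u.m)  # raises u.UnitConversionError
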
def decryption_vigenere(keys: npt.ArrayLike, ct_numbers: npt.ArrayLike,
                        current_interrupter: npt.ArrayLike) -> np.ndarray:
    mt = ct_numbers.copy()
    if len(mt.shape) == 1:
        mt = np.array([mt])
    if len(keys.shape) == 1:
        keys = np.array([keys])

    len_keys = keys.shape[1]
    # Only positions not flagged by the interrupter are shifted by the key.
    indices = np.flatnonzero(np.logical_not(current_interrupter))
    for s, t in enumerate(indices):
        mt[:, t] = mt[:, t] - keys[:, s % len_keys]
    return np.remainder(mt, 29)
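
# Usage sketch (illustrative): with a length-2 key and no interrupters, the
# key is subtracted cyclically modulo 29.
#
#   >>> keys = np.array([[1, 2]])
#   >>> ct = np.array([[5, 7, 9]])
#   >>> decryption_vigenere(keys, ct, np.array([0, 0, 0]))
#   array([[4, 5, 8]])
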
def square_to_uniform_disk_concentric(sample: ArrayLike) -> np.ndarray:
    """
    Low-distortion concentric square to disk mapping.

    Parameters
    ----------
    sample : array-like
        A (N, 2) array of sample values.

    Returns
    -------
    ndarray
        Sampled coordinates on the unit disk as a (N, 2) array.

    Notes
    -----
    The function tries to be flexible with (N, 1) and (N,) arrays and
    attempts reshaping them to (N/2, 2). This, in particular, means that the
    following call will produce the expected result:

    .. code:: python

       square_to_uniform_disk_concentric((0.5, 0.5))
    """
    # Matches Mitsuba implementation
    sample = np.atleast_1d(sample)
    if sample.ndim < 2:
        sample = sample.reshape((sample.size // 2, 2))
    if sample.ndim > 2 or sample.shape[1] != 2:
        raise ValueError(f"array must be of shape (N, 2), got {sample.shape}")

    x: ArrayLike = 2.0 * sample[..., 0] - 1.0
    y: ArrayLike = 2.0 * sample[..., 1] - 1.0

    is_zero = np.logical_and(x == 0.0, y == 0.0)
    quadrant_1_or_3 = np.abs(x) < np.abs(y)

    r = np.where(quadrant_1_or_3, y, x)
    rp = np.where(quadrant_1_or_3, x, y)

    phi = np.empty_like(r)
    phi[~is_zero] = 0.25 * np.pi * (rp[~is_zero] / r[~is_zero])
    phi[quadrant_1_or_3] = 0.5 * np.pi - phi[quadrant_1_or_3]
    phi[is_zero] = 0.0

    s, c = np.sin(phi), np.cos(phi)
    return np.vstack((r * c, r * s)).T
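
# Usage sketch (illustrative): the centre of the square maps to the centre
# of the disk, and the midpoint of the right edge lands on the disk rim.
#
#   >>> square_to_uniform_disk_concentric((0.5, 0.5))
#   array([[0., 0.]])
#   >>> square_to_uniform_disk_concentric((1.0, 0.5))
#   array([[1., 0.]])
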
def uniform_disk_to_square_concentric(p: ArrayLike) -> np.ndarray:
    """
    Inverse of the mapping square_to_uniform_disk_concentric.

    Parameters
    ----------
    p : array-like
        A (N, 2) array of vectors on the unit disk.

    Returns
    -------
    ndarray
        Corresponding coordinates on the [0, 1]² square as a (N, 2) array.

    Notes
    -----
    The function tries to be flexible with (N, 1) and (N,) arrays and
    attempts reshaping them to (N/2, 2). This, in particular, means that the
    following call will produce the expected result:

    .. code:: python

       uniform_disk_to_square_concentric((0, 0))
    """
    # Matches Mitsuba implementation
    p = np.atleast_1d(p)
    if p.ndim < 2:
        p = p.reshape((p.size // 2, 2))
    if p.ndim > 2 or p.shape[1] != 2:
        raise ValueError(f"array must be of shape (N, 2), got {p.shape}")

    quadrant_0_or_2 = np.abs(p[..., 0]) > np.abs(p[..., 1])
    r_sign = np.where(quadrant_0_or_2, p[..., 0], p[..., 1])
    r = np.copysign(np.linalg.norm(p, axis=-1), r_sign)

    phi = np.arctan2(p[..., 1] * np.sign(r_sign),
                     p[..., 0] * np.sign(r_sign))

    t = 4.0 / np.pi * phi
    t = np.where(quadrant_0_or_2, t, 2.0 - t) * r

    a = np.where(quadrant_0_or_2, r, t)
    b = np.where(quadrant_0_or_2, t, r)

    return np.vstack(((a + 1.0) * 0.5, (b + 1.0) * 0.5)).T
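
# Round-trip sketch (illustrative): composing the two concentric mappings
# recovers the original square sample.
#
#   >>> s = np.array([[0.25, 0.75]])
#   >>> np.allclose(uniform_disk_to_square_concentric(
#   ...     square_to_uniform_disk_concentric(s)), s)
#   True
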
def _interpolate_frames(data: Union[Nifti1Image, npt.ArrayLike],
                        mask: npt.ArrayLike, censor: npt.ArrayLike,
                        t_r: float) -> Union[Nifti1Image, npt.ArrayLike]:
    '''
    Interpolate the censored frames of `data` using Lomb-Scargle non-uniform
    spectral interpolation on the retained frames in `mask`.

    Arguments:
        data: Input data to censor where the last index denotes time-points
            [N1 x ... x T]
        mask: Non-censored array indices from uncensored data
        censor: Censored array indices that were removed from `data`
        t_r: Repetition time
    '''
    if not censor.any():
        return data

    is_nifti = False
    if isinstance(data, Nifti1Image):
        sgls = _image_to_signals(data)
        is_nifti = True
    else:
        sgls = data

    t_num_samples = len(censor) + len(mask)

    # lombscargle_interpolate expects already censored data
    if sgls.shape[1] == t_num_samples:
        sgls = sgls[:, mask]

    t = np.arange(0, t_num_samples) * t_r
    interp_vals = lombscargle_interpolate(t=t[mask], x=sgls, s=t)

    res = np.empty((sgls.shape[0], t_num_samples), dtype=sgls.dtype)
    res[:, mask] = sgls
    res[:, censor] = interp_vals[:, censor]
    res = res.reshape((*data.shape[:-1], t_num_samples))

    if is_nifti:
        return nimg.new_img_like(data, res, copy_header=True)

    return res
def pdf_given_y(self, x: npt.ArrayLike, y: npt.ArrayLike) -> np.ndarray:
    x = np.array(x)
    y = np.array(y)
    if y.ndim == 2 or y.ndim == 0:
        y = y.reshape(-1)

    p_x_given_y = np.empty(shape=x.shape[0])
    labels = np.unique(y, axis=0)
    for label in labels:
        try:
            kde = self._get_cond_kde(label)
            p_x_given_y[y == label] = np.exp(
                kde.score_samples(x[y == label]))
        except KeyError:
            # No KDE was fitted for this label, so assign zero probability.
            p_x_given_y[y == label] = 0

    return p_x_given_y
def decryption_autokey(keys: npt.ArrayLike, ct_numbers: npt.ArrayLike,
                       current_interrupter: npt.ArrayLike) -> np.ndarray:
    mt = ct_numbers.copy()
    len_keys = keys.shape[1]
    indices = np.flatnonzero(np.logical_not(current_interrupter))

    # The first key-length block is decrypted with the key itself ...
    mt[:, 0:len_keys] = np.remainder(
        mt[:, 0:len_keys] - keys[:, indices[0:len_keys]], 29)

    # ... and every following block is decrypted with the previously
    # recovered plaintext (autokey).
    step_size = np.arange(len_keys, indices.shape[0], len_keys)
    if step_size[-1] != mt.shape[1]:
        step_size = np.concatenate((step_size, [indices.shape[0]]))
    diff_step_size = np.cumsum(np.concatenate(([0], np.diff(step_size))))
    for index in range(step_size.shape[0] - 1):
        mt[:, indices[step_size[index]:step_size[index + 1]]] = \
            np.remainder(
                mt[:, indices[step_size[index]:step_size[index + 1]]] -
                mt[:, diff_step_size[index]:diff_step_size[index + 1]], 29)
    return mt
def wrap(
    longitudes: npt.ArrayLike,
    base: Optional[float] = -180.0,
    period: Optional[float] = 360.0,
    decimals: Optional[int] = 8,
) -> np.ndarray:
    """
    Transform the longitude values to be within the half-open interval
    [base, base + period).

    Parameters
    ----------
    longitudes : ArrayLike
        One or more longitude values (degrees) to be wrapped.
    base : float, default=-180.0
        The start limit (degrees) of the interval.
    period : float, default=360.0
        The end limit (degrees) of the interval expressed as a length
        from the `base`.
    decimals : int, default=8
        The number of decimal places the longitudes are rounded to before
        wrapping.

    Returns
    -------
    ndarray
        The transformed longitude values.

    Notes
    -----
    .. versionadded:: 0.1.0

    """
    #
    # TODO: support radians
    #
    if not isinstance(longitudes, Iterable):
        longitudes = [longitudes]

    longitudes = np.round(longitudes, decimals=decimals)
    result = ((longitudes.astype(np.float64) - base + period * 2) % period) \
        + base

    return result
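
# Usage sketch (illustrative): values outside [-180, 180) are wrapped back
# into range.
#
#   >>> wrap(370.0)
#   array([10.])
#   >>> wrap([-190.0, 190.0])
#   array([ 170., -170.])
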
def _get_censor_mask(
        self, fds: npt.ArrayLike,
        fd_thres: float) -> tuple[npt.ArrayLike, npt.ArrayLike]:
    """
    Apply Powers et al. 2014 censoring method using FD trace.

    Performs initial masking using fd_thres. Then checks for blocks with
    less than a set number of contiguous frames. If a block of volumes is
    less than the number of required contiguous frames, it is masked out.
    """
    initial_mask = fds.to_numpy() <= fd_thres
    under_min_contiguous = np.zeros_like(initial_mask)
    start_ind = None

    # If no frames are censored, return the full set
    if not np.any(np.logical_not(initial_mask)):
        return np.where(initial_mask)[0], np.array([], dtype=int)

    for i in np.arange(0, len(initial_mask)):
        if initial_mask[i] == 1:
            if start_ind is not None:
                continue
            else:
                start_ind = i
        if initial_mask[i] == 0 and start_ind is not None:
            if i - start_ind < self._min_contiguous:
                under_min_contiguous[start_ind:i] = 1
            start_ind = None

    # Apply the same length check to a trailing block of retained frames
    if start_ind is not None and \
            len(initial_mask) - start_ind < self._min_contiguous:
        under_min_contiguous[start_ind:] = 1

    mask_frames = initial_mask & np.logical_not(under_min_contiguous)
    return (np.where(mask_frames)[0],
            np.where(np.logical_not(mask_frames))[0])
def square_to_uniform_hemisphere(sample: ArrayLike) -> np.ndarray:
    """
    Uniformly sample a vector on the unit hemisphere with respect to solid
    angles.

    Parameters
    ----------
    sample : array-like
        A (N, 2) array of sample values.

    Returns
    -------
    ndarray
        Sampled coordinates on the unit hemisphere as a (N, 3) array.

    Notes
    -----
    The function tries to be flexible with (N, 1) and (N,) arrays and
    attempts reshaping them to (N/2, 2). This, in particular, means that the
    following call will produce the expected result:

    .. code:: python

       square_to_uniform_hemisphere((0.5, 0.5))
    """
    # Matches Mitsuba implementation
    sample = np.atleast_1d(sample)
    if sample.ndim < 2:
        sample = sample.reshape((sample.size // 2, 2))
    if sample.ndim > 2 or sample.shape[1] != 2:
        raise ValueError(f"array must be of shape (N, 2), got {sample.shape}")

    p = square_to_uniform_disk_concentric(sample)
    z = 1.0 - np.multiply(p, p).sum(axis=1)
    p *= np.sqrt(z + 1.0).reshape((len(p), 1))
    return np.vstack((p[..., 0], p[..., 1], z)).T
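
# Usage sketch (illustrative): the centre of the square maps to the pole of
# the hemisphere.
#
#   >>> square_to_uniform_hemisphere((0.5, 0.5))
#   array([[0., 0., 1.]])
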