def _minimise_l_bfgs_b(
    f, vs, f_calls=10000, iters=1000, trace=False, names=None, jit=False
):
    names = _convert_and_validate_names(names)

    # Run function once to ensure that all variables are initialised and
    # available.
    val_init = f(vs)

    # SciPy doesn't perform zero iterations, so handle that edge case
    # manually.
    if iters == 0 or f_calls == 0:
        return B.to_numpy(val_init)

    # Extract initial value.
    x0 = B.to_numpy(vs.get_latent_vector(*names))

    # The optimiser expects to get `float64`s.
    def _convert(*xs):
        return [B.cast(np.float64, B.to_numpy(x)) for x in xs]

    # Wrap the function and get the list of function evaluations.
    f_vals, f_wrapped = wrap_f(vs, names, f, jit, _convert)

    # Perform optimisation routine.
    def perform_minimisation(callback_=lambda _: None):
        return fmin_l_bfgs_b(
            func=f_wrapped,
            x0=x0,
            maxiter=iters,
            maxfun=f_calls,
            callback=callback_,
            disp=0,
        )

    if trace:
        # Print progress during minimisation.
        with out.Progress(
            name='Minimisation of "{}"'.format(f.__name__), total=iters
        ) as progress:

            def callback(_):
                progress({"Objective value": np.min(f_vals)})

            x_opt, val_opt, info = perform_minimisation(callback)

        with out.Section("Termination message"):
            out.out(convert(info["task"], str))
    else:
        # Don't print progress; simply perform minimisation.
        x_opt, val_opt, info = perform_minimisation()

    vs.set_latent_vector(x_opt, *names)  # Assign optimum.

    return val_opt  # Return optimal value.
def approx(x, y, rtol=1e-7, atol=1e-12):
    """Assert that two objects are numerically close.

    Args:
        x (object): First object.
        y (object): Second object.
        rtol (float, optional): Relative tolerance. Defaults to `1e-7`.
        atol (float, optional): Absolute tolerance. Defaults to `1e-12`.
    """
    # Convert both objects to NumPy, then compare. Calling `approx` here would
    # recurse indefinitely without multiple dispatch, so delegate directly.
    assert_allclose(B.to_numpy(x), B.to_numpy(y), rtol=rtol, atol=atol)
def approx(a, b, rtol=1e-7, atol=1e-12):
    """Assert that two objects are approximately equal.

    Args:
        a (object): First object.
        b (object): Second object.
        rtol (:obj:`float`, optional): Relative tolerance. Defaults to `1e-7`.
        atol (:obj:`float`, optional): Absolute tolerance. Defaults to `1e-12`.
    """
    assert_allclose(B.to_numpy(a), B.to_numpy(b), rtol=rtol, atol=atol)
def minimise_l_bfgs_b(f, vs, f_calls=10000, iters=1000, trace=False, names=None):
    names = [] if names is None else names

    # Run function once to ensure that all variables are initialised and
    # available.
    val_init = f(vs)

    # SciPy doesn't perform zero iterations, so handle that edge case
    # manually.
    if iters == 0 or f_calls == 0:
        return B.to_numpy(val_init)

    # Extract initial value.
    x0 = B.to_numpy(vs.get_vector(*names))

    # Wrap the function and get the list of function evaluations.
    f_vals, f_wrapped = wrap_f(vs, names, f)

    # Perform optimisation routine.
    def perform_minimisation(callback_=lambda _: None):
        return fmin_l_bfgs_b(
            func=f_wrapped,
            x0=x0,
            maxiter=iters,
            maxfun=f_calls,
            callback=callback_,
            disp=0,
        )

    if trace:
        # Print progress during minimisation.
        with out.Progress(
            name='Minimisation of "{}"'.format(f.__name__), total=iters
        ) as progress:

            def callback(_):
                progress({'Objective value': np.min(f_vals)})

            x_opt, val_opt, info = perform_minimisation(callback)

        with out.Section('Termination message'):
            out.out(info['task'].decode('utf-8'))
    else:
        # Don't print progress; simply perform minimisation.
        x_opt, val_opt, info = perform_minimisation()

    vs.set_vector(x_opt, *names)  # Assign optimum.

    return val_opt  # Return optimal value.
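# Standalone sketch of the contract that `minimise_l_bfgs_b` builds on: SciPy's
# `fmin_l_bfgs_b` expects a function returning the objective value together
# with its gradient, which is what the `wrap_f` helper (not shown here) is
# assumed to produce. This example minimises a quadratic directly, without the
# `vs` machinery; the objective is made up for illustration.
def _example_fmin_l_bfgs_b():
    import numpy as np
    from scipy.optimize import fmin_l_bfgs_b

    def f_wrapped(x):
        # Objective `sum((x - 5)^2)` and its gradient.
        return np.sum((x - 5.0) ** 2), 2.0 * (x - 5.0)

    x_opt, val_opt, info = fmin_l_bfgs_b(func=f_wrapped, x0=np.zeros(3), disp=0)
    assert np.allclose(x_opt, 5.0)
    return val_opt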
def test_shape():
    shape = Shape(5, 2, 3)

    # Test indexing.
    assert shape[0] == 5
    assert shape[1] == 2
    assert shape[2] == 3
    assert isinstance(shape[0:1], Shape)
    assert shape[0:2] == Shape(5, 2)

    # Test comparisons.
    assert shape == Shape(5, 2, 3)
    assert shape != Shape(5, 2, 4)

    # Test concatenation with another shape.
    shape2 = Shape(7, 8, 9)
    assert shape + shape2 == Shape(5, 2, 3, 7, 8, 9)
    assert shape.__radd__(shape2) == Shape(7, 8, 9, 5, 2, 3)
    assert isinstance((shape + shape2).dims[0], int)
    assert isinstance((shape.__radd__(shape2)).dims[0], int)

    # Test concatenation with a tuple.
    assert shape + (7, 8, 9) == Shape(5, 2, 3, 7, 8, 9)
    assert (7, 8, 9) + shape == Shape(7, 8, 9, 5, 2, 3)
    assert isinstance((shape + (7, 8, 9)).dims[0], int)
    assert isinstance(((7, 8, 9) + shape).dims[0], int)

    # Test conversion of doubly wrapped indices.
    assert isinstance(Shape(Dimension(1)).dims[0], int)

    # Test other operations.
    assert reversed(shape) == Shape(3, 2, 5)
    assert len(shape) == 3
    assert tuple(shape) == (Dimension(5), Dimension(2), Dimension(3))

    # Test representation.
    assert str(Shape()) == "()"
    assert repr(Shape()) == "Shape()"
    assert str(Shape(1)) == "(1,)"
    assert repr(Shape(1)) == "Shape(1)"
    assert str(Shape(1, 2)) == "(1, 2)"
    assert repr(Shape(1, 2)) == "Shape(1, 2)"

    # Test hashing.
    assert hash(Shape(1, 2)) == hash((1, 2))

    # Test conversion to NumPy.
    assert isinstance(B.to_numpy(Shape(1, 2)), tuple)
    assert B.to_numpy(Shape(1, 2)) == (1, 2)
def assert_positive_definite(x):
    """Assert that a matrix is positive definite by testing that its Cholesky
    decomposition computes.

    Args:
        x (matrix): Matrix that should be positive definite.
    """
    np.linalg.cholesky(B.to_numpy(x))
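# Quick usage sketch for `assert_positive_definite`, assuming `B.to_numpy`
# passes NumPy arrays through unchanged: a Gram matrix of the form
# `A @ A.T + eps * I` is positive definite, whereas a matrix with a negative
# eigenvalue makes the Cholesky decomposition, and hence the assertion, fail.
def _example_assert_positive_definite():
    import numpy as np

    a = np.random.randn(4, 4)
    assert_positive_definite(a @ a.T + 1e-6 * np.eye(4))  # Passes.

    try:
        assert_positive_definite(-np.eye(4))  # Not positive definite.
    except np.linalg.LinAlgError:
        pass  # `np.linalg.cholesky` raises, as expected.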
def project(self, x, y):
    """Project data.

    Args:
        x (matrix): Locations of data.
        y (matrix): Observations of data.

    Returns:
        tuple: Tuple containing the locations of the projection, the
            projection, weights associated with the projection, and a
            regularisation term.
    """
    n = B.shape(x)[0]
    available = ~B.isnan(B.to_numpy(y))

    # Optimise the case where all data is available.
    if B.all(available):
        return self._project_pattern(x, y, (True,) * self.p)

    # Extract patterns.
    patterns = list(set(map(tuple, list(available))))

    if len(patterns) > 30:
        warnings.warn(
            f"Detected {len(patterns)} patterns, which is more "
            f"than 30 and can be slow.",
            category=UserWarning,
        )

    # Per pattern, find data points that belong to it.
    patterns_inds = [[] for _ in range(len(patterns))]
    for i in range(n):
        patterns_inds[patterns.index(tuple(available[i]))].append(i)

    # Per pattern, perform the projection.
    proj_xs = []
    proj_ys = []
    proj_ws = []
    total_reg = 0

    for pattern, pattern_inds in zip(patterns, patterns_inds):
        proj_x, proj_y, proj_w, reg = self._project_pattern(
            B.take(x, pattern_inds), B.take(y, pattern_inds), pattern
        )
        proj_xs.append(proj_x)
        proj_ys.append(proj_y)
        proj_ws.append(proj_w)
        total_reg = total_reg + reg

    return (
        B.concat(*proj_xs, axis=0),
        B.concat(*proj_ys, axis=0),
        B.concat(*proj_ws, axis=0),
        total_reg,
    )
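# Standalone sketch of the missingness-pattern grouping that `project` relies
# on: rows of `y` are grouped by which of their entries are observed, so that
# each group can be projected with a single pattern. Pure NumPy; the data is
# made up for illustration.
def _example_missingness_patterns():
    import numpy as np

    y = np.array(
        [
            [1.0, np.nan, 3.0],
            [4.0, 5.0, 6.0],
            [7.0, np.nan, 9.0],
        ]
    )
    available = ~np.isnan(y)

    # Collect the unique patterns of available entries.
    patterns = list(set(map(tuple, available)))

    # Group row indices by pattern, mirroring the loop in `project`.
    patterns_inds = [[] for _ in patterns]
    for i in range(y.shape[0]):
        patterns_inds[patterns.index(tuple(available[i]))].append(i)

    # Rows 0 and 2 share the pattern (True, False, True); row 1 is complete.
    assert sorted(map(sorted, patterns_inds)) == [[0, 2], [1]]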
def min_phase(h):
    """Minimum phase transform using the Hilbert transform.

    Args:
        h (vector): Filter to transform.

    Returns:
        vector: Minimum phase filter version of `h`.
    """
    h = B.to_numpy(h)
    spec = np.fft.fft(h)
    phase = np.imag(-hilbert(np.log(np.abs(spec))))
    return np.real(np.fft.ifft(np.abs(spec) * np.exp(1j * phase)))
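# Usage sketch for `min_phase`: the transform keeps the magnitude spectrum of
# the filter while moving its energy towards the start. The filter here is
# made up, and `hilbert` is assumed to be `scipy.signal.hilbert`, as used in
# the function above.
def _example_min_phase():
    import numpy as np

    h = np.array([0.1, 0.2, 1.0, 0.2, 0.1])
    h_min = min_phase(h)

    # The magnitude spectrum is (numerically) unchanged.
    assert np.allclose(np.abs(np.fft.fft(h)), np.abs(np.fft.fft(h_min)))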
def summarise_samples(x, samples, db=False):
    """Summarise samples.

    Args:
        x (vector): Inputs of samples.
        samples (tensor): Samples, with the first dimension corresponding to
            different samples.
        db (bool, optional): Convert to decibels.

    Returns:
        :class:`collections.namedtuple`: Named tuple containing various
            statistics of the samples.
    """
    x, samples = B.to_numpy(x, samples)
    random_inds = np.random.permutation(B.shape(samples)[0])[:3]

    def transform(x):
        if db:
            return 10 * np.log10(x)
        else:
            return x

    perm = tuple(reversed(range(B.rank(samples))))  # Reverse all dimensions.

    return collect(
        x=B.to_numpy(x),
        mean=transform(B.mean(samples, axis=0)),
        var=transform(B.std(samples, axis=0)) ** 2,
        err_68_lower=transform(B.quantile(samples, 0.32, axis=0)),
        err_68_upper=transform(B.quantile(samples, 1 - 0.32, axis=0)),
        err_95_lower=transform(B.quantile(samples, 0.025, axis=0)),
        err_95_upper=transform(B.quantile(samples, 1 - 0.025, axis=0)),
        err_99_lower=transform(B.quantile(samples, 0.0015, axis=0)),
        err_99_upper=transform(B.quantile(samples, 1 - 0.0015, axis=0)),
        samples=transform(B.transpose(samples, perm=perm)[..., random_inds]),
        all_samples=transform(B.transpose(samples, perm=perm)),
    )
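# `summarise_samples` relies on a `collect` helper that is not shown here. A
# minimal sketch consistent with its use above, assuming it simply packs
# keyword arguments into a named tuple:
def collect(name="Quantities", **kw_args):
    from collections import namedtuple

    return namedtuple(name, kw_args)(**kw_args)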
def autocorr(x, lags=None, cov=False, window=False):
    """Estimate the autocorrelation.

    Args:
        x (vector): Time series to estimate autocorrelation of.
        lags (int, optional): Number of lags. Defaults to all lags.
        cov (bool, optional): Compute covariances rather than correlations.
            Defaults to `False`.
        window (bool, optional): Apply a triangular window to the estimate.
            Defaults to `False`.

    Returns:
        vector: Autocorrelation.
    """
    # Convert to NumPy for compatibility with frameworks.
    x = B.to_numpy(x)

    # Compute autocovariance.
    x = np.reshape(x, -1)  # Flatten the input.
    x = x - np.mean(x)
    k = np.correlate(x, x, mode="full")
    k = k[k.size // 2:]

    if window:
        # Do not undo the triangular window.
        k = k / x.size
    else:
        # Divide by the precise numbers of estimates.
        k = k / np.arange(x.size, 0, -1)

    # Get the right number of lags.
    if lags is not None:
        k = k[:lags + 1]

    # Divide by estimate of variance if computing correlations.
    if not cov:
        k = k / k[0]

    return k
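# Usage sketch for `autocorr`: for white noise, the autocorrelation should be
# one at lag zero and close to zero at other lags. The data is made up and
# seeded for reproducibility.
def _example_autocorr():
    import numpy as np

    np.random.seed(0)
    x = np.random.randn(10_000)
    k = autocorr(x, lags=5)

    assert k.shape == (6,)  # Lags 0 through 5.
    assert np.isclose(k[0], 1.0)  # Unit correlation at lag zero.
    assert np.all(np.abs(k[1:]) < 0.05)  # Approximately uncorrelated.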
def test_to_numpy_list(check_lazy_shapes):
    x = B.to_numpy([tf.constant(1)])
    assert isinstance(x[0], (B.Number, B.NPNumeric))
def test_to_numpy_dense(dense1):
    assert isinstance(B.to_numpy(dense1), B.NP)
    approx(B.to_numpy(dense1), B.dense(dense1))
def _minimise_adam(
    f,
    vs,
    iters=1000,
    rate=1e-3,
    beta1=0.9,
    beta2=0.999,
    epsilon=1e-8,
    local_rates=True,
    trace=False,
    names=None,
    jit=False,
):
    names = _convert_and_validate_names(names)

    # Run function once to ensure that all variables are initialised and
    # available.
    val_init = f(vs)

    # Handle the edge case of zero iterations.
    if iters == 0:
        return B.to_numpy(val_init)

    # Extract initial value.
    x0 = B.to_numpy(vs.get_latent_vector(*names))

    # Wrap the function.
    _, f_wrapped = wrap_f(vs, names, f, jit, B.to_numpy)

    def perform_minimisation(callback_=lambda _: None):
        # Perform optimisation routine.
        x = x0
        obj_value = None
        adam = ADAM(
            rate=rate,
            beta1=beta1,
            beta2=beta2,
            epsilon=epsilon,
            local_rates=local_rates,
        )

        for i in range(iters):
            obj_value, grad = f_wrapped(x)
            callback_(obj_value)
            x = adam.step(x, grad)

        return x, obj_value

    if trace:
        # Print progress during minimisation.
        with out.Progress(
            name='Minimisation of "{}"'.format(f.__name__), total=iters
        ) as progress:

            def callback(obj_value):
                progress({"Objective value": obj_value})

            x_opt, obj_value = perform_minimisation(callback)
    else:
        x_opt, obj_value = perform_minimisation()

    vs.set_latent_vector(x_opt, *names)  # Assign optimum.

    return obj_value  # Return last objective value.
def test_to_numpy_tuple(check_lazy_shapes):
    x = B.to_numpy((tf.constant(1),))
    assert isinstance(x[0], (B.Number, B.NPNumeric))
def test_to_numpy_dict(check_lazy_shapes):
    x = B.to_numpy({"a": tf.constant(1)})
    assert isinstance(x["a"], (B.Number, B.NPNumeric))
def _convert(*xs):
    return [B.cast(np.float64, B.to_numpy(x)) for x in xs]
def approx(x, y, **kw_args):
    # Convert both objects to NumPy, then compare. Calling `approx` here would
    # recurse indefinitely without multiple dispatch, so delegate directly.
    assert_allclose(B.to_numpy(x), B.to_numpy(y), **kw_args)
def approx(x, y, rtol=1e-7, atol=0):
    assert_allclose(B.to_numpy(x), B.to_numpy(y), rtol=rtol, atol=atol)
def minimise_adam(
    f,
    vs,
    iters=1000,
    rate=1e-3,
    beta1=0.9,
    beta2=0.999,
    epsilon=1e-8,
    trace=False,
    names=None,
):
    names = [] if names is None else names

    # Run function once to ensure that all variables are initialised and
    # available.
    val_init = f(vs)

    # Handle the edge case of zero iterations.
    if iters == 0:
        return B.to_numpy(val_init)

    # Extract initial value.
    x0 = B.to_numpy(vs.get_vector(*names))

    # Wrap the function.
    _, f_wrapped = wrap_f(vs, names, f)

    def perform_minimisation(callback_=lambda _: None):
        # Perform optimisation routine.
        x = x0
        obj_value = None
        m = np.zeros_like(x0)
        v = np.zeros_like(x0)

        for i in range(iters):
            obj_value, grad = f_wrapped(x)
            callback_(obj_value)

            # Update estimates of moments.
            m = beta1 * m + (1 - beta1) * grad
            v = beta2 * v + (1 - beta2) * grad ** 2

            # Correct for bias of initialisation.
            m_corr = m / (1 - beta1 ** (i + 1))
            v_corr = v / (1 - beta2 ** (i + 1))

            # Perform update.
            x = x - rate * m_corr / (v_corr ** 0.5 + epsilon)

        return x, obj_value

    if trace:
        # Print progress during minimisation.
        with out.Progress(
            name='Minimisation of "{}"'.format(f.__name__), total=iters
        ) as progress:

            def callback(obj_value):
                progress({'Objective value': obj_value})

            x_opt, obj_value = perform_minimisation(callback)
    else:
        x_opt, obj_value = perform_minimisation()

    vs.set_vector(x_opt, *names)  # Assign optimum.

    return obj_value  # Return last objective value.
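# Standalone sketch of the Adam update used in `minimise_adam`, applied to the
# simple quadratic `0.5 * x^2` (gradient `x`) without the `vs`/`wrap_f`
# machinery. The rate and iteration count are made up for illustration.
def _example_adam_quadratic():
    import numpy as np

    rate, beta1, beta2, epsilon = 1e-1, 0.9, 0.999, 1e-8
    x = np.array([5.0])
    m = np.zeros_like(x)
    v = np.zeros_like(x)

    for i in range(500):
        grad = x  # Gradient of `0.5 * x ** 2`.
        m = beta1 * m + (1 - beta1) * grad
        v = beta2 * v + (1 - beta2) * grad ** 2
        m_corr = m / (1 - beta1 ** (i + 1))
        v_corr = v / (1 - beta2 ** (i + 1))
        x = x - rate * m_corr / (v_corr ** 0.5 + epsilon)

    # Has moved from `5` to within the oscillation band around the minimum.
    assert abs(x[0]) < 1.0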
def format(x: B.Numeric, info: bool):
    # Convert to NumPy first; the resulting call is assumed to be resolved by
    # multiple dispatch to a NumPy-specific `format` method, so this does not
    # recurse indefinitely.
    return format(B.to_numpy(x), info)
def approx(x, y, atol=1e-12, rtol=1e-8):
    assert_allclose(*B.to_numpy(x, y), atol=atol, rtol=rtol)
def _convert(x: B.Numeric):
    return B.squeeze(B.to_numpy(x))
def test_to_numpy_multiple_objects(check_lazy_shapes):
    assert B.to_numpy(tf.constant(1), tf.constant(1)) == (1, 1)
def estimate_psd(t, k, n_zero=2_000, db=False):
    """Estimate the PSD from samples of the kernel.

    Args:
        t (vector): Time points of the kernel, which should be a linear space
            starting from the origin.
        k (vector): Kernel.
        n_zero (int, optional): Zero padding. Defaults to `2_000`.
        db (bool, optional): Convert to decibel. Defaults to `False`.

    Returns:
        tuple: Frequencies and PSD, correctly scaled.
    """
    # Convert to NumPy for compatibility with frameworks.
    t, k = B.to_numpy(t, k)

    if t[0] != 0:
        raise ValueError("Time points must start at zero.")

    # Perform zero padding.
    k = B.concat(k, B.zeros(n_zero))

    # Symmetrise and Fourier transform.
    k_symmetric = B.concat(k, k[1:-1][::-1])
    psd = np.fft.fft(k_symmetric)
    freqs = np.fft.fftfreq(len(psd)) / (t[1] - t[0])

    # Should be real and positive, but the numerics may not be in our favour.
    psd = np.abs(np.real(psd))

    # Scale by the sample spacing so that the estimate approximates the
    # continuous-time Fourier transform (standard Riemann-sum scaling; an
    # assumption here, along with the return value).
    psd = psd * (t[1] - t[0])

    # Convert to decibels if requested.
    if db:
        psd = 10 * np.log10(psd)

    return freqs, psd
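# Usage sketch for `estimate_psd`, assuming the completion above: an
# exponential kernel `exp(-t)` has a Lorentzian PSD, so the estimate should be
# nonnegative everywhere and peak at zero frequency. The inputs are made up.
def _example_estimate_psd():
    import numpy as np

    t = np.linspace(0, 10, 500)
    k = np.exp(-t)
    freqs, psd = estimate_psd(t, k)

    assert len(freqs) == len(psd)
    assert np.all(psd >= 0)
    assert psd[np.argmin(np.abs(freqs))] == np.max(psd)  # Peak at zero freq.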
def to_numpy(a: AbstractMatrix):
    return B.to_numpy(B.dense(a))