import numpy as np
from itertools import combinations
from scipy.spatial.distance import cdist
# nchoosek (a binomial-coefficient helper) is assumed to be provided by the
# surrounding pyapprox utilities module.


def downselect_morris_trajectories(samples, ntrajectories):
    """Select the ntrajectories most spread-out trajectories from a set of
    candidate Morris trajectories by brute-force maximization of a
    pairwise-distance criterion."""
    nvars = samples.shape[0]
    assert samples.shape[1] % (nvars+1) == 0
    ncandidate_trajectories = samples.shape[1]//(nvars+1)
    # assert 10*ntrajectories <= ncandidate_trajectories
    trajectories = np.reshape(
        samples, (nvars, nvars+1, ncandidate_trajectories), order='F')

    # distance between two candidate trajectories: sum of all point-to-point
    # Euclidean distances between their nvars+1 points
    distances = np.zeros((ncandidate_trajectories, ncandidate_trajectories))
    for ii in range(ncandidate_trajectories):
        for jj in range(ii+1):
            distances[ii, jj] = cdist(
                trajectories[:, :, ii].T, trajectories[:, :, jj].T).sum()
            distances[jj, ii] = distances[ii, jj]

    get_combinations = combinations(
        np.arange(ncandidate_trajectories), ntrajectories)
    ncombinations = nchoosek(ncandidate_trajectories, ntrajectories)
    print('ncombinations', ncombinations)

    # find the combination of trajectories that maximizes the root sum of
    # squared pairwise distances
    # values = np.empty(ncombinations)
    best_index = None
    best_value = -np.inf
    for ii, index in enumerate(get_combinations):
        value = np.sqrt(np.sum(
            [distances[ix[0], ix[1]]**2 for ix in combinations(index, 2)]))
        if value > best_value:
            best_value = value
            best_index = index

    samples = trajectories[:, :, best_index].reshape(
        nvars, ntrajectories*(nvars+1), order='F')
    return samples
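# A minimal usage sketch (assumption: uniform random placeholder samples
# stand in for real Morris trajectories generated elsewhere; only the shape
# contract of nvars x ncandidates*(nvars+1) columns is exercised here).
if __name__ == '__main__':
    nvars, ncandidates, ntrajectories = 3, 20, 4
    candidate_samples = np.random.uniform(
        0., 1., (nvars, ncandidates*(nvars+1)))
    subset = downselect_morris_trajectories(candidate_samples, ntrajectories)
    assert subset.shape == (nvars, ntrajectories*(nvars+1))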
def test_barycentric_weights_1d(self):
    eps = 1e-12

    # test barycentric weights for uniform points using direct calculation
    abscissa = np.linspace(-1, 1., 5)
    weights = compute_barycentric_weights_1d(
        abscissa, normalize_weights=False)
    n = abscissa.shape[0] - 1
    h = 2. / n
    true_weights = np.empty((n + 1), np.double)
    for j in range(n + 1):
        true_weights[j] = (-1.)**(n - j) * nchoosek(
            n, j) / (h**n * factorial(n))
    assert np.allclose(true_weights, weights, eps)

    # test barycentric weights for uniform points using the analytical
    # formula and with scaling on
    weights = compute_barycentric_weights_1d(
        abscissa, interval_length=1, normalize_weights=False)
    weights_analytical = equidistant_barycentric_weights(5)
    ratio = weights / weights_analytical
    # assert the two weights arrays differ by only a constant factor
    assert np.allclose(np.min(ratio), np.max(ratio))

    # test barycentric weights for clenshaw curtis points
    level = 7
    abscissa, tmp = clenshaw_curtis_pts_wts_1D(level)
    n = abscissa.shape[0]
    weights = compute_barycentric_weights_1d(
        abscissa, normalize_weights=False, interval_length=2)
    true_weights = np.empty((n), np.double)
    true_weights[0] = true_weights[n - 1] = 0.5
    true_weights[1:n - 1] = [(-1)**ii for ii in range(1, n - 1)]
    factor = true_weights[1] / weights[1]
    assert np.allclose(true_weights / factor, weights, atol=eps)

    # check barycentric weights are correctly computed regardless of the
    # order of the points. Eventually ordering can affect numerical
    # stability, but not until very high level
    abscissa, tmp = clenshaw_curtis_in_polynomial_order(level)
    II = np.argsort(abscissa)
    n = abscissa.shape[0]
    weights = compute_barycentric_weights_1d(
        abscissa, normalize_weights=False,
        interval_length=abscissa.max() - abscissa.min())
    true_weights = np.empty((n), np.double)
    true_weights[0] = true_weights[n - 1] = 0.5
    true_weights[1:n - 1] = [(-1)**ii for ii in range(1, n - 1)]
    factor = true_weights[1] / weights[II][1]
    assert np.allclose(true_weights / factor, weights[II], eps)

    # inspect the spread (conditioning) of the weights for a large number
    # of gauss hermite points
    num_samples = 65
    abscissa, tmp = gauss_hermite_pts_wts_1D(num_samples)
    weights = compute_barycentric_weights_1d(
        abscissa, normalize_weights=False,
        interval_length=abscissa.max() - abscissa.min())
    print(weights)
    print(np.absolute(weights).max(), np.absolute(weights).min())
    print(np.absolute(weights).max() / np.absolute(weights).min())
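# A minimal standalone sketch (not the pyapprox API) of how 1D barycentric
# weights are used once computed: the second barycentric formula
#     p(x) = (sum_j w_j f_j / (x - x_j)) / (sum_j w_j / (x - x_j)),
# evaluated with plain numpy. The equidistant weights below use the analytic
# form w_j proportional to (-1)**j * binomial(n, j) exercised in the test
# above; any common scale factor cancels in the formula.
import numpy as np
from math import comb


def barycentric_interpolate_1d(abscissa, weights, fvals, eval_pts):
    # differences between evaluation points and interpolation nodes
    diff = eval_pts[:, None] - abscissa[None, :]
    # flag evaluation points that coincide with a node to avoid division
    # by zero
    exact = np.isclose(diff, 0.)
    diff[exact] = 1.0
    terms = weights[None, :]/diff
    vals = terms.dot(fvals)/terms.sum(axis=1)
    # where an evaluation point equals a node, return the nodal value exactly
    rows, cols = np.nonzero(exact)
    vals[rows] = fvals[cols]
    return vals


# example: interpolate cos on 5 equidistant points in [-1, 1]
nodes = np.linspace(-1, 1, 5)
bary_weights = np.array([(-1.)**jj*comb(4, jj) for jj in range(5)], float)
eval_pts = np.linspace(-1, 1, 11)
approx_vals = barycentric_interpolate_1d(
    nodes, bary_weights, np.cos(nodes), eval_pts)
assert np.allclose(approx_vals, np.cos(eval_pts), atol=1e-2)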