def evaluate_function(self, indices):
    """
    Evaluates the target function at the new point(s) and records the result.

    Parameters
    ----------
    indices:
        A single grid index (sequence of ints) when ``self.batch_update``
        is False, otherwise a list of such indices.

    Side effects
    ------------
    Writes the measured/computed value(s) into ``self.y_sparse``,
    recomputes ``self.X_sparse`` from it, and appends a snapshot of
    ``self.y_sparse`` to ``self.target_func_vals``.
    """
    # Normalize to a list of points so the loop below handles both the
    # single-point and batch modes uniformly.
    indices = [indices] if not self.batch_update else indices
    if self.simulate_measurement:
        # "Measurement" is simulated by reading from the known ground truth.
        for idx in indices:
            self.y_sparse[tuple(idx)] = self.y_true[tuple(idx)]
    else:
        for idx in indices:
            if self.extent is not None:
                # Shift grid indices by each dimension's lower extent bound
                # to obtain coordinates in the target function's own frame.
                _idx = tuple(i + e[0] for i, e in zip(idx, self.extent))
            else:
                _idx = tuple(idx)
            # Fix: the original non-batch/extent-None path passed the wrapped
            # list to target_function and indexed y_sparse with a tuple
            # containing a list; evaluate per point like every other branch.
            self.y_sparse[tuple(idx)] = self.target_function(_idx)
    self.X_sparse = gprutils.get_sparse_grid(self.y_sparse, self.extent)
    self.target_func_vals.append(self.y_sparse.copy())
    return
def test_boptim(acqf, result):
    """Run the Bayesian optimizer for 20 exploration steps and compare the
    final sampled target values against a stored reference array."""
    seed_grid = initial_seed()
    grid_full = gprutils.get_full_grid(seed_grid)
    grid_sparse = gprutils.get_sparse_grid(seed_grid)
    reference = np.load(result)
    optimizer = boptimizer(
        grid_sparse, seed_grid, grid_full, trial_func,
        acquisition_function=acqf,
        exploration_steps=20,
        use_gpu=False, verbose=1)
    optimizer.run()
    assert_allclose(optimizer.target_func_vals[-1], reference)
def test_skgpr_2d(kernel):
    """Smoke test only (kept tiny due to compute cost): 2D structured-kernel
    GP reconstruction completes and yields finite, correctly-shaped output."""
    raw = get_dummy_data()
    sparse_grid = gprutils.get_sparse_grid(raw)
    full_grid = gprutils.get_full_grid(raw)
    recon = skgpr.skreconstructor(
        sparse_grid, raw, full_grid, kernel=kernel,
        learning_rate=0.1, iterations=2,
        use_gpu=False, verbose=False)
    mean, sd, _ = recon.run()
    assert_(mean.shape == sd.shape == raw.shape)
    assert_(not np.isnan(mean).any())
    assert_(not np.isnan(sd).any())
def test_gpr_3d(kernel):
    """Smoke test only (kept tiny due to compute cost): 3D GP reconstruction
    completes and yields finite outputs of the flattened input shape."""
    data = np.load(test_data3d)
    sparse_grid = gprutils.get_sparse_grid(data)
    full_grid = gprutils.get_full_grid(data)
    model = gpr.reconstructor(
        sparse_grid, data, full_grid, kernel=kernel,
        lengthscale=None, indpoints=50,
        learning_rate=0.1, iterations=2,
        use_gpu=False, verbose=True)
    mean, sd, _ = model.run()
    assert_(mean.shape == sd.shape == data.flatten().shape)
    assert_(not np.isnan(mean).any())
    assert_(not np.isnan(sd).any())
def test_gpr_2d(kernel):
    """Full 2D GP reconstruction (200 iterations), checked against a stored
    expected result via SSIM and Frobenius-norm distance."""
    data = np.load(test_data2d)
    expected = np.load(test2d_expected_result)
    sparse_grid = gprutils.get_sparse_grid(data)
    full_grid = gprutils.get_full_grid(data)
    mean, _, _ = gpr.reconstructor(
        sparse_grid, data, full_grid, kernel=kernel,
        lengthscale=[[1., 1.], [4., 4.]], indpoints=250,
        learning_rate=0.1, iterations=200,
        use_gpu=False, verbose=False).run()
    assert_(ssim(mean, expected) > 0.95)
    assert_(np.linalg.norm(mean - expected) < 3)
def test_skgpr_3d(kernel):
    """Smoke test only (kept tiny due to compute cost): 3D structured-kernel
    GP reconstruction completes with finite outputs of the flattened shape."""
    data = np.load(test_data3d)
    sparse_grid = gprutils.get_sparse_grid(data)
    full_grid = gprutils.get_full_grid(data)
    (mean, sd), _ = skgpr.skreconstructor(
        sparse_grid, data, full_grid, kernel=kernel,
        lengthscale=None, grid_points_ratio=.25,
        learning_rate=0.1, iterations=2, num_batches=100,
        calculate_sd=True, use_gpu=False, verbose=True).run()
    assert_(mean.shape == sd.shape == data.flatten().shape)
    assert_(not np.isnan(mean).any())
    assert_(not np.isnan(sd).any())
def test_skgpr_2d(kernel):
    """2D structured-kernel GP reconstruction (20 iterations), checked against
    a stored expected result via SSIM and Frobenius-norm distance."""
    data = np.load(test_data)
    expected = np.load(test_expected_result)
    sparse_grid = gprutils.get_sparse_grid(data)
    full_grid = gprutils.get_full_grid(data)
    mean, _ = skgpr.skreconstructor(
        sparse_grid, data, full_grid, kernel=kernel,
        lengthscale=[[1., 1.], [4., 4.]], grid_points_ratio=1.,
        learning_rate=0.1, iterations=20, calculate_sd=False,
        num_batches=1, use_gpu=False, verbose=False).run()
    assert_(ssim(mean, expected) > 0.98)
    assert_(np.linalg.norm(mean - expected) < 1)
def evaluate_function(self, indices, y_measured=None):
    """
    Evaluates the target function at the new point(s).

    ``indices`` is a single grid index when ``self.batch_update`` is False,
    otherwise a list of indices. If ``y_measured`` is given, values are read
    from it instead of calling the target function; if simulation mode is on,
    values come from ``self.y_true``. Updates ``self.y_sparse``, recomputes
    ``self.X_sparse`` and appends a snapshot to ``self.target_func_vals``.
    """
    # Wrap a lone index so every mode iterates over a list of points.
    points = indices if self.batch_update else [indices]
    if self.simulate_measurement:
        # Simulated measurement: copy values straight from the ground truth.
        for point in points:
            key = tuple(point)
            self.y_sparse[key] = self.y_true[key]
    elif y_measured is not None:
        # Externally supplied measurements take precedence over evaluation.
        for point in points:
            key = tuple(point)
            self.y_sparse[key] = y_measured[key]
    else:
        for point in points:
            if self.extent is None:
                query = tuple(point)
            else:
                # Offset each grid index by its dimension's lower extent
                # bound before querying the target function.
                query = tuple(i + bounds[0]
                              for i, bounds in zip(point, self.extent))
            self.y_sparse[tuple(point)] = self.target_function(query)
    self.X_sparse = gprutils.get_sparse_grid(self.y_sparse, self.extent)
    self.target_func_vals.append(self.y_sparse.copy())
    return
type=str, help="Directory to save outputs")
args = parser.parse_args()
# Load "ground truth" data (N x M x L spectroscopic grid)
# (in real experiment we will just get an empty array)
R_true = np.load(args.FILEPATH)
# NOTE(review): `np.isnan(...).any()` returns a numpy `bool_`, and
# `np.bool_(False) is False` is itself False, so this normalization branch
# may never execute; `not np.isnan(R_true).any()` looks like the intent —
# confirm before changing.
if args.NORMALIZE and np.isnan(R_true).any() is False:
    R_true = (R_true - np.amin(R_true)) / np.ptp(R_true)
# Make initial set of measurements for exploration analysis.
# Let's start with "opening" several points along each edge
R = R_true * 0
R[R == 0] = np.nan  # everything except the opened edge points stays NaN
R = gprutils.open_edge_points(R, R_true)
# Get sparse and full grid indices
X = gprutils.get_sparse_grid(R)
X_true = gprutils.get_full_grid(R)
dist_edge = [0, 0]  # set to non-zero vals when edge points are not "opened"
# Construct lengthscale constraints for all 3 dimensions
LENGTH_CONSTR = [[float(args.LENGTH_CONSTR_MIN) for i in range(3)],
                 [float(args.LENGTH_CONSTR_MAX) for i in range(3)]]
# Run exploratory analysis
uncert_idx_all, uncert_val_all, mean_all, sd_all, R_all = [], [], [], [], []
if not os.path.exists(args.SAVEDIR):
    os.makedirs(args.SAVEDIR)
indpts_r = args.INDUCING_POINTS_RATIO
for i in range(args.ESTEPS):
    print('Exploration step {}/{}'.format(i, args.ESTEPS))
    # Make the number of inducing points dependent on the number of datapoints
    indpoints = len(gprutils.prepare_training_data(X, R)[0]) // indpts_r
    # clip to make sure it fits into GPU memory
    indpoints = 2000 if indpoints > 2000 else indpoints
    # (exploration-loop body continues beyond this chunk)