def main():
    # Load data.
    G = torch.from_numpy(
            np.load(os.path.join(data_folder, "F_full_surface.npy"))).float().detach()
    grid = Grid.load(os.path.join(data_folder, "grid.pickle"))
    volcano_coords = torch.from_numpy(grid.cells).float().detach()
    data_coords = torch.from_numpy(
            np.load(os.path.join(data_folder, "surface_data_coords.npy"))).float()
    ground_truth = torch.from_numpy(np.load(ground_truth_path))

    # Dictionary between the original Niklas data and our discretization.
    niklas_data_inds = torch.from_numpy(
            np.load(os.path.join(data_folder, "niklas_data_inds_insurf.npy"))).long()
    niklas_coords = data_coords[niklas_data_inds].numpy()

    # --------------------------------
    # DEFINITION OF THE EXCURSION SET.
    # --------------------------------
    THRESHOLD_low = 700.0
    excursion_inds = (ground_truth >= THRESHOLD_low).nonzero()[:, 0]

    # Load results.
    visited_inds_IVR = np.load(os.path.join(
            results_folder_IVR, "visited_inds.npy"))
    visited_inds_wIVR = np.load(os.path.join(
            results_folder_wIVR, "visited_inds.npy"))

    # Observation operators.
    # G_stacked_IVR = G[visited_inds_IVR, :]
    G_stacked_wIVR = G[visited_inds_wIVR, :]

    # Reload the GPs.
    # gpIVR = UpdatableGP.load(os.path.join(results_folder_IVR, "gp_state.pkl"))
    # gpwIVR = UpdatableGP.load(os.path.join(results_folder_wIVR, "gp_state.pkl"))
    gpINFILL = UpdatableGP.load(os.path.join(results_folder_INFILL, "gp_state.pkl"))

    # Produce posterior realizations.
    for reskrig_sample_nr in range(300, 400):
        prior_realization = torch.from_numpy(np.load(
                os.path.join(reskrig_samples_folder,
                    "prior_sample_{}.npy".format(reskrig_sample_nr))))
        myReal = UpdatableRealization.bootstrap(prior_realization,
                G_stacked_wIVR, data_std=0.1, gp_module=gpINFILL)
        np.save(
                os.path.join(results_folder_wIVR,
                    "Cond_Reals_Infill/conditional_real_{}.npy".format(reskrig_sample_nr)),
                myReal._realization.detach().cpu().numpy())
def main():
    # Load data.
    G = torch.from_numpy(
            np.load(os.path.join(data_folder, "F_full_surface.npy"))).float().detach()
    grid = Grid.load(os.path.join(data_folder, "grid.pickle"))
    volcano_coords = torch.from_numpy(grid.cells).float().detach()
    data_coords = torch.from_numpy(
            np.load(os.path.join(data_folder, "surface_data_coords.npy"))).float()
    ground_truth = torch.from_numpy(np.load(ground_truth_path))

    # Dictionary between the original Niklas data and our discretization.
    niklas_data_inds = torch.from_numpy(
            np.load(os.path.join(data_folder, "niklas_data_inds_insurf.npy"))).long()
    niklas_coords = data_coords[niklas_data_inds].numpy()

    # --------------------------------
    # DEFINITION OF THE EXCURSION SET.
    # --------------------------------
    THRESHOLD_low = 700.0
    excursion_inds = (ground_truth >= THRESHOLD_low).nonzero()[:, 0]

    # Reload the GP.
    gpINFILL = UpdatableGP.load(os.path.join(results_folder_INFILL, "gp_state.pkl"))

    # Load results.
    visited_inds_INFILL = np.load(os.path.join(
            results_folder_INFILL, "visited_inds.npy"))

    # The GP state is only saved every 10 iterations, so truncate the visited
    # indices to the data that has actually been assimilated into the saved GP.
    n_data = len(gpINFILL.covariance.inversion_ops)
    visited_inds_INFILL = visited_inds_INFILL[:n_data]

    # Observation operator.
    G_stacked_INFILL = G[visited_inds_INFILL, :]

    # Produce posterior realizations.
    for reskrig_sample_nr in range(200, 400):
        prior_realization = torch.from_numpy(np.load(
                os.path.join(reskrig_samples_folder,
                    "prior_sample_{}.npy".format(reskrig_sample_nr))))
        myReal = UpdatableRealization.bootstrap(prior_realization,
                G_stacked_INFILL, data_std=0.1, gp_module=gpINFILL)
        np.save(
                os.path.join(output_folder,
                    "conditional_real_{}.npy".format(reskrig_sample_nr)),
                myReal._realization.detach().cpu().numpy())
        """
        irregular_array_to_point_cloud(volcano_coords.numpy(),
                myReal._realization.detach().cpu().numpy(),
                os.path.join(results_folder_wIVR,
                    "Cond_Reals/conditional_real_{}.vtk".format(reskrig_sample_nr)),
                fill_nan_val=-20000.0)
        """
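# --------------------------------------------------------------------------
# Usage note (not part of the original script): the conditional realizations
# saved above can be turned into a Monte Carlo estimate of the excursion
# probability P(field >= THRESHOLD_low) at each cell, i.e. the fraction of
# realizations exceeding the threshold. This is a minimal sketch assuming the
# files were written by the loop above into output_folder; the helper name
# empirical_excursion_probability is introduced here for illustration only.
# --------------------------------------------------------------------------
import os
import numpy as np

def empirical_excursion_probability(folder, sample_range, threshold=700.0):
    """ Fraction of conditional realizations exceeding threshold, per cell. """
    count, n_samples = None, 0
    for sample_nr in sample_range:
        real = np.load(os.path.join(
                folder, "conditional_real_{}.npy".format(sample_nr)))
        indicator = (real >= threshold).astype(float)
        count = indicator if count is None else count + indicator
        n_samples += 1
    return count / n_samples

# Example (same sample indices as the loop above):
# excu_proba = empirical_excursion_probability(output_folder, range(200, 400))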
def run(self, start_ind, n_steps, data_std,
        output_folder=None, max_step=None, min_step=None,
        restart_from_save=None):
    """ Run the strategy. Note that this works with any criterion for
    choosing the next point; the only requirement is that
    self.get_next_ind is defined before running the strategy.

    Parameters
    ----------
    start_ind: int
        Index of the cell from which to start the run.
    n_steps: int
        Number of steps to run the strategy for.
    data_std: float
        Standard deviation of the observation noise (homoscedastic).
    output_folder: string
        Path to the folder where results are saved.
    max_step: float
        If provided, then instead of only walking to neighbors at each
        step, can go to any cell within distance max_step.
    min_step: float
        If provided, then only consider neighbors farther away than
        min_step (must be used in conjunction with max_step).
    restart_from_save: string
        If a path to a folder is provided, then restart the run from the
        saved data and finish it.

    """
    if restart_from_save is None:
        self.current_ind = start_ind
        self.visited_inds = []
        self.observed_data = []
        self.n_steps = n_steps
        self.data_std = data_std
        self.max_step = max_step
        self.min_step = min_step
    else:
        self.visited_inds = list(np.load(os.path.join(output_folder,
                "visited_inds.npy")))
        i = len(self.visited_inds) - 1
        print("Restarting from step {}.".format(i))
        self.observed_data = list(np.load(os.path.join(output_folder,
                "observed_data.npy"), allow_pickle=True))
        self.gp = UpdatableGP.load(os.path.join(output_folder, "gp_state.pkl"))
        print(self.gp.mean.m)
        metadata = np.load(os.path.join(output_folder, "metadata.npy"),
                allow_pickle='TRUE').item()
        self.current_ind = metadata['next_ind_to_visit'].item()
        print(self.current_ind)
        self.max_step = metadata['max_step']
        self.min_step = metadata.get('min_step', None)
        self.data_std = metadata['data_std']
        # Remaining steps to perform.
        self.n_steps = metadata['remaining_steps']

    # Change the get_neighbors routine if we can jump more than one step.
    if self.max_step is not None:
        self.get_neighbors = lambda x: self.get_neighbors_bigstep(
                x, r=self.max_step, rmin=self.min_step)
    else:
        self.get_neighbors = lambda x: self.get_nearest_neighbors(x)

    for i in range(self.n_steps):
        # Observe at the current location and update the model.
        self.visited_inds.append(self.current_ind)
        y = self.data_feed(self.current_ind).detach().float()
        self.observed_data.append(y.detach().float().numpy())

        # Make sure that the observation operator has
        # at least two dimensions.
        G = self.G[self.current_ind, :]
        if len(G.shape) <= 1:
            G = G.reshape(1, -1)
        self.gp.update(G, y, data_std)

        # Update the conditional realizations.
        # Since by default it is an empty list, everything works
        # as intended.
        for real in self.realizations:
            real.update(G, y, data_std)

        # Extract the current coverage function (after observing at the
        # current location).
        self.current_coverage = self.gp.coverage(self.lower, self.upper)

        # Now evaluate where to go next.
        next_ind = self.get_next_ind()
        self.current_ind = next_ind
        print("Go to cell {} for step {}.".format(self.current_ind,
                len(self.visited_inds)))

        # Save the coverage at each iteration.
        if output_folder is not None:
            self.save_state(output_folder, coverage_only=True)

        # Save the full state every 3 iterations.
        if i % 3 == 0 and output_folder is not None:
            self.save_state(output_folder)

    return self.visited_inds, self.observed_data
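# --------------------------------------------------------------------------
# Usage sketch (not part of the original file): how run() is typically
# invoked. The strategy class name MyopicStrategy and its constructor are
# assumptions, since only the run() method is shown above; the keyword
# arguments mirror the signature and docstring of run().
# --------------------------------------------------------------------------
# strategy = MyopicStrategy(...)  # hypothetical constructor, not shown here.
#
# visited_inds, observed_data = strategy.run(
#         start_ind=0,              # cell index at which to start
#         n_steps=100,              # number of observation steps
#         data_std=0.1,             # homoscedastic observation noise std
#         output_folder="./results",
#         max_step=310.0,           # allow jumps to any cell within this distance
#         min_step=None,
#         restart_from_save=None)   # or "./results" to resume a saved run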