def design_random(problem, field, n_iterations, nodes=None):
    """Sequential design baseline: pick each new node uniformly at random.

    Parameters
    ----------
    problem : object exposing ``grid``, ``data`` and ``evaluate_model(index)``.
    field : prior field providing ``condition_to(nodes, grid)``; the
        conditioned field provides ``estimate_loglikelihood(grid, data)``.
    n_iterations : int, number of nodes to add to the design.
    nodes : optional initial ``Nodes``; the caller's object is not modified
        (a deep copy is used, consistent with the other design_* functions).

    Returns
    -------
    loglikelihoods : (n_sample, n_iterations + 1) array; column j holds the
        log-likelihood estimate after j added nodes.
    nodes : the grown ``Nodes`` object.
    n_eval : array with the total number of model evaluations per column.
    """
    print('design_random ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # copy so the caller's design is not overwritten (same convention as
        # design_old / design_map / design_linearized)
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size
    grid = problem.grid
    n_sample = grid.shape[0]
    this_field = field.condition_to(nodes, grid)
    loglikelihoods = np.zeros((n_sample, n_iterations + 1))
    loglikelihoods[:, 0] = this_field.estimate_loglikelihood(grid, problem.data)
    for i_iteration in range(n_iterations):
        print('.', end='')
        # choose a random grid point that is not already in the design
        new_index = pick_random_node_without_duplicates(n_sample, nodes)
        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)
        this_field = field.condition_to(nodes, grid)
        this_ll = this_field.estimate_loglikelihood(grid, problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
def design_old(problem, field, n_iterations, nodes=None):
    """Sequential design using the old linearized node-selection criterion.

    Parameters
    ----------
    problem : object exposing ``data`` and ``evaluate_model(index)``.
    field : prior field providing ``condition_to(nodes)`` and ``n_sample``.
    n_iterations : int, number of nodes to add to the design.
    nodes : optional initial ``Nodes``; the caller's object is not modified
        (a deep copy is used).

    Returns
    -------
    loglikelihoods : (field.n_sample, n_iterations + 1) array; column j holds
        the log-likelihood estimate after j added nodes.
    nodes : the grown ``Nodes`` object.
    n_eval : array with the total number of model evaluations per column.
    """
    # fixed: banner previously said 'design_linearized ' (copy-paste defect),
    # which made progress output indistinguishable from design_linearized
    print('design_old ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # make a copy in order to not overwrite the node
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size
    this_field = field.condition_to(nodes)
    loglikelihoods = np.zeros((field.n_sample, n_iterations + 1))
    loglikelihoods[:, 0] = this_field.estimate_loglikelihood(problem.data)
    for i_iteration in range(n_iterations):
        print('.', end='')
        new_index = find_optimal_node_linear_old(nodes, this_field, problem.data)
        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)
        this_field = field.condition_to(nodes)
        this_ll = this_field.estimate_loglikelihood(problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
def design_hybrid(problem, field, n_iterations, nodes=None):
    """Sequential design mixing sampled and linearized selection criteria.

    Same as design_sampled, but once the posterior weight concentrates almost
    entirely on one subfield (``is_almost_gpe``), all other subfields are
    discarded and the cheaper linearized criterion is used on the MAP field.

    Parameters
    ----------
    problem : object exposing ``data`` and ``evaluate_model(index)``.
    field : prior field providing ``condition_to(nodes)``, ``n_sample``; the
        conditioned field provides ``is_almost_gpe()`` and ``get_map_field()``.
    n_iterations : int, number of nodes to add to the design.
    nodes : optional initial ``Nodes``; the caller's object is not modified
        (a deep copy is used, consistent with the other design_* functions).

    Returns
    -------
    loglikelihoods : (field.n_sample, n_iterations + 1) array.
    nodes : the grown ``Nodes`` object.
    n_eval : array with the total number of model evaluations per column.
    """
    print('design_hybrid ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # copy so the caller's design is not overwritten
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size
    this_field = field.condition_to(nodes)
    loglikelihoods = np.zeros((field.n_sample, n_iterations + 1))
    loglikelihoods[:, 0] = this_field.estimate_loglikelihood(problem.data)
    for i_iteration in range(n_iterations):
        print('.', end='')
        if this_field.is_almost_gpe():
            # weights nearly concentrated on one subfield: use the fast
            # linearized criterion on the MAP field only
            map_field = this_field.get_map_field()
            new_index = find_optimal_node_linear(nodes, map_field, problem.data)
        else:
            new_index = find_optimal_node_sampled(nodes, this_field, problem.data)
        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)
        this_field = field.condition_to(nodes)
        this_ll = this_field.estimate_loglikelihood(problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
def design_sampled(problem, field, n_iterations, nodes=None, use_dimension_trick=False):
    """Sequential design using the sampled node-selection criterion.

    Parameters
    ----------
    problem : object exposing ``data`` and ``evaluate_model(index)``.
    field : prior field providing ``condition_to(nodes)`` and ``n_sample``.
    n_iterations : int, number of nodes to add to the design.
    nodes : optional initial ``Nodes``; the caller's object is not modified
        (a deep copy is used, consistent with the other design_* functions).
    use_dimension_trick : currently unused; kept for interface compatibility.

    Returns
    -------
    loglikelihoods : (field.n_sample, n_iterations + 1) array.
    nodes : the grown ``Nodes`` object.
    n_eval : array with the total number of model evaluations per column.
    """
    print('design_sampled ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # copy so the caller's design is not overwritten
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size
    this_field = field.condition_to(nodes)
    loglikelihoods = np.zeros((field.n_sample, n_iterations + 1))
    loglikelihoods[:, 0] = this_field.estimate_loglikelihood(problem.data)
    for i_iteration in range(n_iterations):
        print('.', end='')
        new_index = find_optimal_node_sampled(nodes, this_field, problem.data)
        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)
        this_field = field.condition_to(nodes)
        this_ll = this_field.estimate_loglikelihood(problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
def design_map(problem, fields, n_iterations, nodes=None, n_subsample=None):
    """Sequential design on the MAP field, re-selected after every evaluation.

    Each iteration rebuilds the MAP prior field from ``fields`` given the
    current nodes, discretizes it on a (sub)grid, and picks the next node
    with the linearized criterion.

    Parameters
    ----------
    problem : object exposing ``grid``, ``data`` and ``evaluate_model(index)``.
    fields : collection providing ``get_map_field(nodes, grid)``.
    n_iterations : int, number of nodes to add to the design.
    nodes : optional initial ``Nodes``; the caller's object is not modified
        (a deep copy is used).
    n_subsample : optional subgrid size passed to ``make_subgrid``.

    Returns
    -------
    loglikelihoods : (n_sample, n_iterations + 1) array.
    nodes : the grown ``Nodes`` object.
    n_eval : array with the total number of model evaluations per column.

    Raises
    ------
    Warning
        Re-raised if node selection emits a warning, after printing the
        subgrid indices and current design for diagnosis.
    """
    print('design_map ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # copy so the caller's design is not overwritten
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size
    grid = problem.grid
    n_sample = grid.shape[0]
    this_prior_field = fields.get_map_field(nodes, grid)
    this_field = this_prior_field.condition_to(nodes, grid)
    loglikelihoods = np.zeros((n_sample, n_iterations + 1))
    loglikelihoods[:, 0] = this_field.estimate_loglikelihood(grid, problem.data)
    for i_iteration in range(n_iterations):
        print('.', end='')
        subgrid, subindex = make_subgrid(grid, n_subsample, nodes)
        discrete_field = this_field.discretize(subgrid)
        with warnings.catch_warnings():
            # promote warnings so numerical trouble in the criterion is caught
            warnings.filterwarnings('error')
            try:
                new_sub_index = find_optimal_node_linear(
                    discrete_field, problem.data)
                # map subgrid index back to the global grid
                new_index = subindex[new_sub_index]
            except Warning:
                # print diagnostics, then re-raise: previously execution fell
                # through with new_index unbound (NameError on the first
                # iteration) or silently stale (re-evaluating the last node)
                print(subindex)
                print(nodes.idx)
                raise
        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)
        this_prior_field = fields.get_map_field(nodes, grid)
        this_field = this_prior_field.condition_to(nodes, grid)
        this_ll = this_field.estimate_loglikelihood(grid, problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
def design_linearized(problem, field, n_iterations, nodes=None, n_subsample=None):
    """Sequential design using the linearized criterion on a discretized field.

    Parameters
    ----------
    problem : object exposing ``grid``, ``data`` and ``evaluate_model(index)``.
    field : prior field providing ``condition_to(nodes, grid)``.
    n_iterations : int, number of nodes to add to the design.
    nodes : optional initial ``Nodes``; the caller's object is left untouched
        (a deep copy is grown instead).
    n_subsample : optional subgrid size passed to ``make_subgrid``.

    Returns
    -------
    loglikelihoods : (n_sample, n_iterations + 1) array, one column per
        design size.
    nodes : the grown ``Nodes`` object.
    n_eval : array with the total number of model evaluations per column.
    """
    print('design_linearized ', end='')
    nodes = Nodes() if nodes is None else copy.deepcopy(nodes)
    n_eval = nodes.idx.size + np.arange(n_iterations + 1)
    grid = problem.grid
    posterior = field.condition_to(nodes, grid)
    loglikelihoods = np.zeros((grid.shape[0], n_iterations + 1))
    loglikelihoods[:, 0] = posterior.estimate_loglikelihood(grid, problem.data)
    for step in range(1, n_iterations + 1):
        print('.', end='')
        subgrid, subindex = make_subgrid(grid, n_subsample, nodes)
        chosen = find_optimal_node_linear(posterior.discretize(subgrid),
                                          problem.data)
        # translate the subgrid-local choice into a global grid index
        global_index = subindex[chosen]
        nodes.append(global_index, problem.evaluate_model(global_index))
        posterior = field.condition_to(nodes, grid)
        loglikelihoods[:, step] = posterior.estimate_loglikelihood(
            grid, problem.data)
    print('')
    return loglikelihoods, nodes, n_eval
def design_heuristic(problem, field, n_iterations, nodes=None):
    """Sequential design choosing each node by a heuristic criterion.

    Parameters
    ----------
    problem : object exposing ``data`` and ``evaluate_model(index)``.
    field : prior field providing ``condition_to(nodes)`` and ``n_sample``.
    n_iterations : int, number of nodes to add to the design.
    nodes : optional initial ``Nodes``; the caller's object is not modified
        (a deep copy is used, consistent with the other design_* functions).

    Returns
    -------
    loglikelihoods : (field.n_sample, n_iterations + 1) array.
    nodes : the grown ``Nodes`` object.
    n_eval : array with the total number of model evaluations per column.
    """
    print('design_heuristic ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # copy so the caller's design is not overwritten
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size
    this_field = field.condition_to(nodes)
    loglikelihoods = np.zeros((field.n_sample, n_iterations + 1))
    loglikelihoods[:, 0] = this_field.estimate_loglikelihood(problem.data)
    for i_iteration in range(n_iterations):
        print('.', end='')
        # choose index according to heuristic
        new_index = find_heuristic_node(this_field, problem.data)
        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)
        this_field = field.condition_to(nodes)
        this_ll = this_field.estimate_loglikelihood(problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval