コード例 #1
0
def design_old(problem, field, n_iterations, nodes=None):
    """Sequential design using the old linearized node-selection criterion.

    Parameters
    ----------
    problem : object exposing ``data`` and ``evaluate_model(index)``.
    field : prior field exposing ``condition_to(nodes)`` and ``n_sample``.
    n_iterations : number of nodes to add sequentially.
    nodes : optional starting ``Nodes``; deep-copied so the caller's
        object is not modified.

    Returns
    -------
    loglikelihoods : (field.n_sample, n_iterations + 1) array; column 0 is
        the estimate after the initial conditioning, column i+1 after the
        i-th added node.
    nodes : the extended ``Nodes`` object.
    n_eval : cumulative number of model evaluations at each stage.
    """
    # fix: label previously read 'design_linearized   ' (copy-paste from
    # design_linearized); use this function's own name for progress output.
    print('design_old          ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # make a copy in order to not overwrite the caller's nodes
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size

    this_field = field.condition_to(nodes)
    loglikelihoods = np.zeros((field.n_sample, n_iterations + 1))
    loglikelihoods[:, 0] = this_field.estimate_loglikelihood(problem.data)

    for i_iteration in range(n_iterations):
        print('.', end='')
        new_index = find_optimal_node_linear_old(nodes, this_field,
                                                 problem.data)
        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)
        this_field = field.condition_to(nodes)

        this_ll = this_field.estimate_loglikelihood(problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
コード例 #2
0
def design_hybrid(problem, field, n_iterations, nodes=None):
    """Sequential design switching between sampled and linearized criteria.

    Same as design_sampled, but if almost all posterior weight is
    concentrated on one subfield (``field.is_almost_gpe()``), the other
    subfields are discarded and the cheaper linearized criterion is applied
    to the MAP subfield.

    Parameters
    ----------
    problem : object exposing ``data`` and ``evaluate_model(index)``.
    field : prior field exposing ``condition_to``, ``n_sample``,
        ``is_almost_gpe`` and ``get_map_field``.
    n_iterations : number of nodes to add sequentially.
    nodes : optional starting ``Nodes``; deep-copied so the caller's
        object is not modified.

    Returns
    -------
    (loglikelihoods, nodes, n_eval), as in the other design_* routines.
    """
    print('design_hybrid       ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # fix: deep-copy so the caller's nodes are not mutated in place
        # (consistent with design_old / design_map / design_linearized)
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size

    this_field = field.condition_to(nodes)
    loglikelihoods = np.zeros((field.n_sample, n_iterations + 1))
    loglikelihoods[:, 0] = this_field.estimate_loglikelihood(problem.data)

    for i_iteration in range(n_iterations):
        print('.', end='')

        if this_field.is_almost_gpe():
            # weight concentrated on one subfield: linearized criterion on
            # the MAP subfield
            map_field = this_field.get_map_field()
            new_index = find_optimal_node_linear(nodes, map_field,
                                                 problem.data)
        else:
            new_index = find_optimal_node_sampled(nodes, this_field,
                                                  problem.data)
        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)
        this_field = field.condition_to(nodes)

        this_ll = this_field.estimate_loglikelihood(problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
コード例 #3
0
def design_random(problem, field, n_iterations, nodes=None):
    """Baseline sequential design: add uniformly random (non-duplicate) nodes.

    Parameters
    ----------
    problem : object exposing ``grid``, ``data`` and ``evaluate_model(index)``.
    field : prior field exposing ``condition_to(nodes, grid)``.
    n_iterations : number of nodes to add sequentially.
    nodes : optional starting ``Nodes``; deep-copied so the caller's
        object is not modified.

    Returns
    -------
    (loglikelihoods, nodes, n_eval), as in the other design_* routines;
    loglikelihoods has one row per grid point.
    """
    print('design_random       ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # fix: deep-copy so the caller's nodes are not mutated in place
        # (consistent with design_old / design_map / design_linearized)
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size

    grid = problem.grid
    n_sample = grid.shape[0]

    this_field = field.condition_to(nodes, grid)
    loglikelihoods = np.zeros((n_sample, n_iterations + 1))
    loglikelihoods[:,
                   0] = this_field.estimate_loglikelihood(grid, problem.data)

    for i_iteration in range(n_iterations):
        print('.', end='')
        # choose random point (skipping indices already in nodes)
        new_index = pick_random_node_without_duplicates(n_sample, nodes)

        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)

        this_field = field.condition_to(nodes, grid)
        this_ll = this_field.estimate_loglikelihood(grid, problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
コード例 #4
0
def design_sampled(problem,
                   field,
                   n_iterations,
                   nodes=None,
                   use_dimension_trick=False):
    """Sequential design using the sampled (fully Bayesian) criterion.

    Parameters
    ----------
    problem : object exposing ``data`` and ``evaluate_model(index)``.
    field : prior field exposing ``condition_to(nodes)`` and ``n_sample``.
    n_iterations : number of nodes to add sequentially.
    nodes : optional starting ``Nodes``; deep-copied so the caller's
        object is not modified.
    use_dimension_trick : accepted for interface compatibility but
        currently unused in this function body.

    Returns
    -------
    (loglikelihoods, nodes, n_eval), as in the other design_* routines.
    """
    print('design_sampled      ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # fix: deep-copy so the caller's nodes are not mutated in place
        # (consistent with design_old / design_map / design_linearized)
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size

    this_field = field.condition_to(nodes)
    loglikelihoods = np.zeros((field.n_sample, n_iterations + 1))
    loglikelihoods[:, 0] = this_field.estimate_loglikelihood(problem.data)

    for i_iteration in range(n_iterations):
        print('.', end='')
        new_index = find_optimal_node_sampled(nodes, this_field, problem.data)
        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)
        this_field = field.condition_to(nodes)

        this_ll = this_field.estimate_loglikelihood(problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
コード例 #5
0
def design_map(problem, fields, n_iterations, nodes=None, n_subsample=None):
    """Sequential design on the MAP field of a field mixture.

    At every iteration the mixture is collapsed to its MAP field given the
    current nodes, and the linearized criterion is evaluated on a random
    subgrid.

    Parameters
    ----------
    problem : object exposing ``grid``, ``data`` and ``evaluate_model(index)``.
    fields : field mixture exposing ``get_map_field(nodes, grid)``.
    n_iterations : number of nodes to add sequentially.
    nodes : optional starting ``Nodes``; deep-copied so the caller's
        object is not modified.
    n_subsample : subgrid size forwarded to ``make_subgrid``.

    Returns
    -------
    (loglikelihoods, nodes, n_eval), as in the other design_* routines.
    """
    print('design_map          ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        nodes = copy.deepcopy(nodes)  # do not mutate the caller's nodes
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size

    grid = problem.grid
    n_sample = grid.shape[0]

    this_prior_field = fields.get_map_field(nodes, grid)

    this_field = this_prior_field.condition_to(nodes, grid)

    loglikelihoods = np.zeros((n_sample, n_iterations + 1))
    loglikelihoods[:,
                   0] = this_field.estimate_loglikelihood(grid, problem.data)

    for i_iteration in range(n_iterations):
        print('.', end='')

        subgrid, subindex = make_subgrid(grid, n_subsample, nodes)
        discrete_field = this_field.discretize(subgrid)

        # escalate numerical warnings in the criterion to errors so a bad
        # design point is never silently selected
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                new_sub_index = find_optimal_node_linear(
                    discrete_field, problem.data)
                new_index = subindex[new_sub_index]
            except Warning:
                # fix: previously the warning was swallowed, leaving
                # new_index unbound (NameError on the first iteration) or
                # stale (a duplicate node appended). Print diagnostics,
                # then re-raise.
                print(subindex)
                print(nodes.idx)
                raise

        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)

        this_prior_field = fields.get_map_field(nodes, grid)
        this_field = this_prior_field.condition_to(nodes, grid)

        this_ll = this_field.estimate_loglikelihood(grid, problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
コード例 #6
0
def design_linearized(problem,
                      field,
                      n_iterations,
                      nodes=None,
                      n_subsample=None):
    """Sequential design with the linearized criterion on a random subgrid.

    At each step the conditioned field is discretized on a subgrid, the
    linearized criterion picks the locally best node, and its index is
    mapped back to the global grid before the model is evaluated there.
    The supplied ``nodes`` are deep-copied and never mutated in place.

    Returns (loglikelihoods, nodes, n_eval), as in the other design_*
    routines.
    """
    print('design_linearized   ', end='')
    nodes = Nodes() if nodes is None else copy.deepcopy(nodes)

    n_eval = nodes.idx.size + np.arange(n_iterations + 1)

    grid = problem.grid
    cond = field.condition_to(nodes, grid)

    loglikelihoods = np.zeros((grid.shape[0], n_iterations + 1))
    loglikelihoods[:, 0] = cond.estimate_loglikelihood(grid, problem.data)

    for step in range(n_iterations):
        print('.', end='')

        # optimize on a subgrid, then translate the winner back to the
        # global grid index
        subgrid, subindex = make_subgrid(grid, n_subsample, nodes)
        local_best = find_optimal_node_linear(cond.discretize(subgrid),
                                              problem.data)
        global_idx = subindex[local_best]

        nodes.append(global_idx, problem.evaluate_model(global_idx))
        cond = field.condition_to(nodes, grid)
        loglikelihoods[:, step + 1] = cond.estimate_loglikelihood(
            grid, problem.data)
    print('')
    return loglikelihoods, nodes, n_eval
コード例 #7
0
def design_heuristic(problem, field, n_iterations, nodes=None):
    """Sequential design choosing nodes via ``find_heuristic_node``.

    Parameters
    ----------
    problem : object exposing ``data`` and ``evaluate_model(index)``.
    field : prior field exposing ``condition_to(nodes)`` and ``n_sample``.
    n_iterations : number of nodes to add sequentially.
    nodes : optional starting ``Nodes``; deep-copied so the caller's
        object is not modified.

    Returns
    -------
    (loglikelihoods, nodes, n_eval), as in the other design_* routines.
    """
    print('design_heuristic    ', end='')
    if nodes is None:
        nodes = Nodes()
    else:
        # fix: deep-copy so the caller's nodes are not mutated in place
        # (consistent with design_old / design_map / design_linearized)
        nodes = copy.deepcopy(nodes)
    n_eval = np.arange(n_iterations + 1) + nodes.idx.size

    this_field = field.condition_to(nodes)
    loglikelihoods = np.zeros((field.n_sample, n_iterations + 1))
    loglikelihoods[:, 0] = this_field.estimate_loglikelihood(problem.data)

    for i_iteration in range(n_iterations):
        print('.', end='')
        # choose index according to heuristic
        new_index = find_heuristic_node(this_field, problem.data)

        y = problem.evaluate_model(new_index)
        nodes.append(new_index, y)

        this_field = field.condition_to(nodes)
        this_ll = this_field.estimate_loglikelihood(problem.data)
        loglikelihoods[:, i_iteration + 1] = this_ll
    print('')
    return loglikelihoods, nodes, n_eval
コード例 #8
0
def approximate_criterion_sampled(index_list, field, data, n_subdivision=51):
    """Sampled design criterion over candidate indices.

    For each candidate index, conditions the field on each quadrature
    value of the hypothetical observation and scores the candidate by the
    mean (over samples) of the quadrature-weighted variance of the
    resulting likelihood estimates. Indices not in ``index_list`` keep a
    score of ``-inf``.
    """
    criterion = np.full(field.n_sample, -np.inf)
    for idx in index_list:
        y_values, weights = field.quadrature_at_index(idx, n_subdivision)
        likelihoods = np.array([
            field.condition_to(Nodes(idx, y)).estimate_likelihood(data)
            for y in y_values
        ])

        # quadrature-weighted mean and variance over the hypothetical
        # observation values (equivalent to likelihoods.var(axis=0).mean()
        # in the uniform-weight case)
        mean_l = np.average(likelihoods, axis=0, weights=weights)
        var_l = np.average((likelihoods - mean_l[np.newaxis, :])**2,
                           axis=0,
                           weights=weights)
        criterion[idx] = var_l.mean()
    return criterion
コード例 #9
0
def approximate_criterion_sampled_trick(index_list,
                                        field,
                                        data,
                                        n_subdivision=51):
    """Sampled criterion using the "dimension trick".

    The trick: the accuracy of one n-dimensional quadrature grid is
    obtained by computing n one-dimensional integrals instead. Each
    candidate index is scored by the clipped variance of the likelihood,
    assembled from componentwise likelihood factors. Indices not in
    ``index_list`` keep a score of ``-inf``.
    """
    current_likelihood = field.estimate_likelihood(data)
    criterion = np.full(field.n_sample, -np.inf)

    for idx in index_list:
        componentwise = []
        for y in field.y_list(idx, n_subdivision):
            conditioned = field.condition_to(Nodes(idx, y))
            componentwise.append(
                conditioned.estimate_componentwise_likelihood(data))

        # E[L^2] per component, then product over components gives the
        # second moment; subtracting the squared current likelihood yields
        # the variance, clipped at zero against numerical noise.
        second_moment = (np.array(componentwise)**2).mean(axis=0)
        variance = np.prod(second_moment, axis=1) - current_likelihood**2
        criterion[idx] = variance.clip(min=0).sum() / field.n_sample
    return criterion
コード例 #10
0
def select_model(problem,
                 fields,
                 n_iters,
                 nodes=None,
                 crit='sigma',
                 n_subsample=None,
                 n_realizations=1000,
                 starting_phase=0):
    """Sequential design with model selection.

    At each iteration, every model proposes its best next node (linearized
    criterion on a random subgrid); the allocation criterion then decides
    which single model gets to evaluate its proposal.

    Parameters
    ----------
    problem : exposes ``n_models``, ``grids``, ``data`` and
        ``evaluate_model(i_model, index)``.
    fields : one prior field per model; ``AbstractMix`` entries are
        collapsed to their MAP field before conditioning.
    n_iters : number of model evaluations to allocate in total.
    nodes : optional list of per-model ``Nodes``; given entries are
        deep-copied so callers keep their objects unchanged.
    crit : allocation criterion: 'kldiv', 'sigma', 'alt' (even
        alternation) or 'refine' (reuse the refinement criterion).
    n_subsample : subgrid size forwarded to ``make_subgrid``.
    n_realizations : realizations drawn per model to estimate the spread
        of the log-BME.
    starting_phase : number of full alternation rounds (times n_models)
        before ``crit`` takes over.

    Returns
    -------
    (lbmes, model_idx, n_eval, criteria)

    Raises
    ------
    NotImplementedError : for an unknown ``crit`` value.
    """
    print('select_model  ', end='')
    n_models = problem.n_models
    n_eval = np.arange(n_iters + 1)  # todo: add nodes.idx.size
    grids = problem.grids

    # create empty nodes if necessary; deep-copy given ones
    if nodes is None:
        nodes = [Nodes() for m in range(n_models)]
    else:
        for i_model in range(n_models):
            if nodes[i_model] is None:
                nodes[i_model] = Nodes()
            else:
                nodes[i_model] = copy.deepcopy(nodes[i_model])

    # condition prior fields to given nodes and compute iteration-zero-bme
    conditioned_fields = []
    lbmes = np.zeros((n_models, n_iters + 1))
    for i in range(n_models):
        if (isinstance(fields[i], AbstractMix)):
            this_prior_field = fields[i].get_map_field(nodes[i], grids[i])
            this_field = this_prior_field.condition_to(nodes[i], grids[i])
        else:
            this_field = fields[i].condition_to(nodes[i], grids[i])

        conditioned_fields.append(this_field)
        lbmes[i, 0] = this_field.estimate_lbme(grids[i], problem.data)

    model_idx = []
    criteria = np.zeros((n_models, n_iters))
    lbme_realizations = np.zeros((n_models, n_realizations))
    for i in range(n_iters):
        # compute criterion (for finding next node)
        next_node_idx = []
        crit_max = []
        for i_model, this_field in enumerate(conditioned_fields):
            subgrid, subindex = make_subgrid(grids[i_model], n_subsample,
                                             nodes[i_model])
            discrete_field = this_field.discretize(subgrid)

            realizations = discrete_field.draw_many_realizations(
                n_realizations)
            lbme_realizations[i_model, :] = compute_lbme_over_realizations(
                realizations, grids[i_model], problem.data)

            this_criterion = compute_criterion_linear(discrete_field,
                                                      problem.data)
            new_index = subindex[np.argmax(this_criterion)]
            next_node_idx.append(new_index)
            crit_max.append(np.max(this_criterion))

        # compute criterion (for allocation)
        if (crit == 'kldiv'):
            criteria[:, i] = compute_kl_distances(lbme_realizations, lbmes[:,
                                                                           i])
        elif (crit == 'sigma'):
            criteria[:, i] = compute_sigmas(lbme_realizations)
        elif (crit == 'alt'):
            criteria[i % n_models, i] = 1
        elif (crit == 'refine'):  # use refinement criterion of seq. des.
            criteria[:, i] = np.array(crit_max)
        else:
            # fix: message previously omitted the supported 'kldiv' option
            raise NotImplementedError(
                'Unknown criterion given. please use kldiv, sigma, alt or '
                'refine')

        # apply criteria, but only after starting phase
        # in starting phase, use even alternation
        if i >= n_models * starting_phase:
            i_max = criteria[:, i].argmax()
        else:
            i_max = i % n_models
            print('_', end='')

        print(i_max, end='')
        new_index = next_node_idx[i_max]
        model_idx.append(i_max)

        # advance respective model
        y = problem.evaluate_model(i_max, new_index)
        nodes[i_max].append(new_index, y)

        # map-estimate, if necessary
        if (isinstance(fields[i_max], AbstractMix)):
            this_prior_field = fields[i_max].get_map_field(
                nodes[i_max], grids[i_max])
            this_field = this_prior_field.condition_to(nodes[i_max],
                                                       grids[i_max])
        else:
            this_field = fields[i_max].condition_to(nodes[i_max], grids[i_max])
        conditioned_fields[i_max] = this_field

        # carry the previous column forward; only the advanced model's
        # lbme is re-estimated
        lbmes[:, i + 1] = lbmes[:, i]
        lbmes[i_max, i + 1] = this_field.estimate_lbme(grids[i_max],
                                                       problem.data)

    print('')

    return lbmes, np.array(model_idx), n_eval, criteria
コード例 #11
0
def select_model_spacefilling(problem,
                              fields,
                              n_iters,
                              nodes=None,
                              n_subsample=None):
    """Baseline allocation: alternate models evenly, place nodes space-filling.

    Each iteration advances model ``i % n_models`` and places the new node
    at the variance-minimizing point of a random subgrid (no allocation
    criterion).

    Parameters
    ----------
    problem : exposes ``n_models``, ``grids``, ``data`` and
        ``evaluate_model(i_model, index)``.
    fields : one prior field per model; ``AbstractMix`` entries are
        collapsed to their MAP field before conditioning.
    n_iters : total number of model evaluations.
    nodes : optional list of per-model ``Nodes``; given entries are
        deep-copied.
    n_subsample : subgrid size forwarded to ``make_subgrid``.

    Returns
    -------
    (lbmes, model_idx, n_eval), analogous to select_model but without the
    criteria array.
    """
    print('space filling ', end='')
    n_models = problem.n_models
    n_eval = np.arange(n_iters + 1)  # todo: add nodes.idx.size
    grids = problem.grids

    # create empty nodes if necessary
    if nodes is None:
        nodes = [Nodes() for m in range(n_models)]
    else:
        for i_model in range(n_models):
            if nodes[i_model] is None:
                nodes[i_model] = Nodes()
            else:
                nodes[i_model] = copy.deepcopy(nodes[i_model])

    # condition prior fields to given nodes and compute iteration-zero-bme
    conditioned_fields = []
    lbmes = np.zeros((n_models, n_iters + 1))
    for i in range(n_models):
        if (isinstance(fields[i], AbstractMix)):
            this_prior_field = fields[i].get_map_field(nodes[i], grids[i])
            this_field = this_prior_field.condition_to(nodes[i], grids[i])
        else:
            this_field = fields[i].condition_to(nodes[i], grids[i])

        conditioned_fields.append(this_field)
        lbmes[i, 0] = this_field.estimate_lbme(grids[i], problem.data)

    model_idx = []
    for i in range(n_iters):
        i_max = i % n_models  # even alternation between models
        model_idx.append(i_max)
        print(i_max, end='')

        this_field = conditioned_fields[i_max]
        # choose variance-minimizing point
        subgrid, subindex = make_subgrid(grids[i_max], n_subsample,
                                         nodes[i_max])
        discrete_field = this_field.discretize(subgrid)

        diag_c = np.diag(discrete_field.c)
        criterion = np.zeros_like(diag_c)
        # mask guards against division by zero variance; fix: reuse diag_c
        # instead of recomputing np.diag(discrete_field.c)
        mask = (diag_c > 0)
        criterion[mask] = np.sum(discrete_field.c**2,
                                 axis=0)[mask] / diag_c[mask]
        new_index = subindex[np.argmax(criterion)]

        y = problem.evaluate_model(i_max, new_index)
        nodes[i_max].append(new_index, y)

        # map-estimate, if necessary
        if (isinstance(fields[i_max], AbstractMix)):
            this_prior_field = fields[i_max].get_map_field(
                nodes[i_max], grids[i_max])
            this_field = this_prior_field.condition_to(nodes[i_max],
                                                       grids[i_max])
        else:
            this_field = fields[i_max].condition_to(nodes[i_max], grids[i_max])
        conditioned_fields[i_max] = this_field

        # carry the previous column forward; only the advanced model's
        # lbme is re-estimated
        lbmes[:, i + 1] = lbmes[:, i]
        lbmes[i_max, i + 1] = this_field.estimate_lbme(grids[i_max],
                                                       problem.data)

    print('')

    return lbmes, np.array(model_idx), n_eval
コード例 #12
0
def select_model_old(subproblems,
                     problems,
                     fields,
                     n_iters,
                     nodes=None,
                     params=None,
                     crit='sigma',
                     n_realizations=1000):
    """Legacy model-selection loop over per-model subproblems.

    Alternates (per ``crit``) between models, advancing one model per
    iteration via ``advance_one_model`` / ``advance_one_model_map`` and
    tracking both a design-based and a reference log-BME per model.

    NOTE(review): unlike select_model, caller-supplied Nodes are NOT
    deep-copied here, so they are mutated in place. An unrecognized
    ``crit`` silently reuses the previous ``i_max`` (initially 0) instead
    of raising. When fields are not AbstractMix, ``params`` must be
    provided (``params[m]`` would raise on the default None) — TODO
    confirm against callers.

    Returns (lbmes, model_idx, n_eval); model_idx holds 1-based model
    labels, with entry 0 left at 0.
    """
    n_models = len(subproblems)
    n_eval = np.arange(n_iters + 1)
    # the full grid of the first subproblem is used as the sample count
    n_subsample = subproblems[0].grid.shape[0]

    # create empty nodes where missing (existing ones are kept as-is)
    if nodes is None:
        nodes = np.array([Nodes() for m in range(n_models)])
    else:
        for nodes_idx in range(n_models):
            if nodes[nodes_idx] is None:
                nodes[nodes_idx] = Nodes()

    # condition each prior field to its nodes and compute iteration-zero
    # log-BMEs; mixture priors are first collapsed to their MAP field
    conditioned_fields = []
    lbmes = np.zeros((n_models, n_iters + 1))
    if (isinstance(fields[0], AbstractMix)):
        print('select_model_' + crit + '_map   ', end='')
        for m in range(n_models):
            map_field, map_params = get_field_and_params(fields[m], nodes[m])
            conditioned_fields.append(map_field.condition_to(nodes[m]))
            lbmes[m, 0] = estimate_lbme(subproblems[m], problems[m],
                                        map_params[0], map_params[1], nodes[m])
    else:
        print('select_model_' + crit + '   ', end='')
        for m in range(n_models):
            conditioned_fields.append(fields[m].condition_to(nodes[m]))
            lbmes[m, 0] = estimate_lbme(subproblems[m], problems[m],
                                        params[m][0], params[m][1], nodes[m])

    # field realizations used to estimate the spread of the lbme estimates
    realizations = np.array([
        conditioned_fields[m].draw_many_realizations(n_realizations)
        for m in range(n_models)
    ])

    initial_ll_estimates = np.zeros((n_models, n_subsample))
    design_lbmes = np.zeros((n_models, n_iters + 1))
    lbme_realizations = np.zeros((n_models, n_realizations))
    for m in range(n_models):
        initial_ll_estimates[
            m, :] = conditioned_fields[m].estimate_loglikelihood(
                subproblems[m].data)
        #initial_l_estimates = conditioned_fields[m].estimate_likelihood(subproblems[m].data)
        design_lbmes[m, 0] = compute_lbme_from_ll(initial_ll_estimates[m])
        lbme_realizations[m, :] = compute_lbme_over_realizations(
            subproblems[m], realizations[m])
    #design_bmes[:,0] = np.mean(initial_l_estimates, axis=1)

    i_max = 0
    model_idx = np.zeros(n_iters + 1)
    criteria = np.zeros((n_models, n_iters))
    for i in range(n_iters):
        print('.', end='')
        # only the model advanced last iteration needs fresh realizations
        if (i > 0):
            realizations[i_max] = conditioned_fields[
                i_max].draw_many_realizations(n_realizations)
            lbme_realizations[i_max] = compute_lbme_over_realizations(
                subproblems[i_max], realizations[i_max])

        # allocation criterion: which model to advance this iteration
        if (crit == 'kldiv'):
            criteria[:, i] = compute_kl_distances(lbme_realizations,
                                                  design_lbmes[:, i])
            i_max = criteria[:, i].argmax()
        elif (crit == 'sigma'):
            criteria[:, i] = compute_sigmas(lbme_realizations)
            i_max = criteria[:, i].argmax()
        elif (crit == 'alt'):
            i_max = i % n_models

        model_idx[i + 1] = i_max + 1  # 1-based model label

        # advance the selected model by one node (MAP variant for mixtures)
        if (isinstance(fields[0], AbstractMix)):
            ll_estimate, this_params = advance_one_model_map(
                conditioned_fields, i_max, subproblems, fields, nodes)
        else:
            ll_estimate = advance_one_model(conditioned_fields, i_max,
                                            subproblems, fields, nodes)
            this_params = params[i_max]

        # update both lbme tracks for the advanced model only
        design_lbme_estimate = compute_lbme_from_ll(ll_estimate)
        design_lbmes = update_lbmes(design_lbmes, design_lbme_estimate, i_max,
                                    i)
        lbme_estimate = estimate_lbme(subproblems[i_max], problems[i_max],
                                      this_params[0], this_params[1],
                                      nodes[i_max])
        lbmes = update_lbmes(lbmes, lbme_estimate, i_max, i)

    print('')
    return lbmes, model_idx, n_eval