Example No. 1
def inputs():
    out = {}
    disc_fac = 0.9999
    num_states = 300
    num_buses = 200
    num_periods = 1000
    scale = 0.001
    init_dict = {
        "groups": "group_4",
        "binsize": 5000,
        "model_specifications": {
            "discount_factor": disc_fac,
            "number_states": num_states,
            "maint_cost_func": "linear",
            "cost_scale": scale,
        },
        "optimizer": {"approach": "NFXP", "algorithm": "scipy_L-BFGS-B"},
        "simulation": {
            "discount_factor": disc_fac,
            "seed": 123,
            "buses": num_buses,
            "periods": num_periods,
        },
    }
    out["trans_base"] = np.loadtxt(TEST_FOLDER + "repl_test_trans.txt")
    out["params_base"] = np.loadtxt(TEST_FOLDER + "repl_params_linear.txt")
    trans_mat = create_transition_matrix(num_states, out["trans_base"])
    costs = calc_obs_costs(num_states, lin_cost, out["params_base"], scale)
    ev_known = calc_fixp(trans_mat, costs, disc_fac)[0]
    df = simulate(init_dict["simulation"], ev_known, costs, trans_mat)
    result_trans, result_fixp = estimate(init_dict, df)
    out["trans_est"] = result_trans["x"]
    out["params_est"] = result_fixp["x"]
    out["status"] = result_fixp["status"]
    return out
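
Note: this snippet assumes numpy and several ruspy helpers are already in scope. A minimal import sketch, assuming the module layout of the ruspy package (the paths should be verified against the installed version; the TEST_FOLDER value is a hypothetical placeholder, since the original constant is defined elsewhere in the test suite):

import numpy as np

from ruspy.estimation.estimation import estimate
from ruspy.estimation.estimation_transitions import create_transition_matrix
from ruspy.model_code.cost_functions import calc_obs_costs, lin_cost
from ruspy.model_code.fix_point_alg import calc_fixp
from ruspy.simulation.simulation import simulate

# Hypothetical placeholder; the original test suite defines its own path.
TEST_FOLDER = "resources/"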
Example No. 2
def test_regression_simulation(inputs):
    init_dict = random_init(inputs)

    # Draw parameters
    param_1 = np.random.normal(17.5, 2)
    param_2 = np.random.normal(21, 2)
    param_3 = np.random.normal(-2, 0.1)
    param_4 = np.random.normal(0.2, 0.1)
    params = np.array([param_1, param_2, param_3, param_4])

    disc_fac = init_dict["simulation"]["discount_factor"]
    probs = np.array(init_dict["simulation"]["known_trans"])
    num_states = 800

    trans_mat = create_transition_matrix(num_states, probs)

    costs = calc_obs_costs(num_states, cubic_costs, params, 0.00001)

    ev = calc_fixp(trans_mat, costs, disc_fac)[0]

    df = simulate(init_dict["simulation"], ev, costs, trans_mat)

    v_disc = discount_utility(df, disc_fac)

    assert_allclose(v_disc / ev[0], 1, rtol=1e-02)
Example No. 3
def simulate_data(
    seed,
    disc_fac,
    num_buses,
    num_periods,
    num_states,
    cost_params,
    trans_params,
    cost_func,
    scale,
):
    """
    Simulates a single data set with a given specification using the ``simulate``
    function of ruspy.

    Parameters
    ----------
    seed : int
        Seed for the simulation function.
    disc_fac : float
        The discount factor in the Rust model.
    num_buses : int
        The number of buses that should be simulated.
    num_periods : int
        The number of periods that should be simulated for each bus.
    num_states : int
        The number of states into which the mileage is discretized.
    cost_params : np.array
        The cost parameters for which the data is simulated.
    trans_params : np.array
        The transition parameters for which the data is simulated.
    cost_func : callable
        The cost function that underlies the data generating process.
    scale : float
        The scale of the cost function.

    Returns
    -------
    df : pd.DataFrame
        Simulated data set for the given data generating process.

    """
    init_dict = {
        "simulation": {
            "discount_factor": disc_fac,
            "periods": num_periods,
            "seed": seed,
            "buses": num_buses,
        },
    }

    costs = calc_obs_costs(num_states, cost_func, cost_params, scale)
    trans_mat = create_transition_matrix(num_states, trans_params)
    ev = calc_fixp(trans_mat, costs, disc_fac)[0]
    df = simulate(init_dict["simulation"], ev, costs, trans_mat)

    return df
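
For illustration, a hedged usage sketch of ``simulate_data``; the numbers are arbitrary, and ``lin_cost`` is assumed to be the linear cost function from ruspy's cost functions module (as in Example No. 1):

import numpy as np

# Illustrative values only; trans_params must sum to one.
df = simulate_data(
    seed=123,
    disc_fac=0.9999,
    num_buses=50,
    num_periods=120,
    num_states=200,
    cost_params=np.array([10.0, 2.3]),       # replacement cost, maintenance slope
    trans_params=np.array([0.4, 0.5, 0.1]),
    cost_func=lin_cost,
    scale=0.001,
)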
Example No. 4
def loglike_cost_params_individual(
    params,
    maint_func,
    maint_func_dev,
    num_states,
    trans_mat,
    state_mat,
    decision_mat,
    disc_fac,
    scale,
    alg_details,
):
    """
    This is the individual log-likelihood function for the estimation of the cost
    parameters needed for the BHHH optimizer.


    Parameters
    ----------
    params : pandas.DataFrame
        see :ref:`params`
    maint_func: func
        see :ref:`maint_func`
    num_states : int
        The size of the state space.
    disc_fac : numpy.float
        see :ref:`disc_fac`
    trans_mat : numpy.array
        see :ref:`trans_mat`
    state_mat : numpy.array
        see :ref:`state_mat`
    decision_mat : numpy.array
        see :ref:`decision_mat`

    Returns
    -------
    log_like : numpy.array
        An array of length num_buses * num_periods containing the negative
        log-likelihood contributions of the individuals.


    """
    params = params["value"].to_numpy()
    costs = calc_obs_costs(num_states, maint_func, params, scale)

    ev, contr_step_count, newt_kant_step_count = get_ev(
        params, trans_mat, costs, disc_fac, alg_details)
    config.total_contr_count += contr_step_count
    config.total_newt_kant_count += newt_kant_step_count

    p_choice = choice_prob_gumbel(ev, costs, disc_fac)
    log_like = like_hood_data_individual(np.log(p_choice), decision_mat,
                                         state_mat)
    return log_like
Example No. 5
def inputs_sim(inputs):
    out = {}
    out["init_dict"] = init_dict = random_init(inputs)["simulation"]

    # Draw parameters
    param1 = np.random.normal(10.0, 2)
    param2 = np.random.normal(2.3, 0.5)
    params = np.array([param1, param2])

    disc_fac = init_dict["discount_factor"]
    probs = np.array(init_dict["known_trans"])
    num_states = 300

    out["trans_mat"] = trans_mat = create_transition_matrix(num_states, probs)
    out["costs"] = costs = calc_obs_costs(num_states, lin_cost, params, 0.001)
    out["ev"] = ev = calc_fixp(trans_mat, costs, disc_fac)[0]
    out["df"] = simulate(
        init_dict,
        ev,
        costs,
        trans_mat,
    )
    return out
Example No. 6
def mpec_constraint_derivative(
    maint_func,
    maint_func_dev,
    num_states,
    num_params,
    disc_fac,
    scale,
    trans_mat,
    mpec_params,
):
    """
    Calculating the analytical Jacobian of the MPEC constraint.

    Parameters
    ----------
    maint_func: func
        see :ref:`maint_func`
    maint_func_dev: func
        see :ref:`maint_func`
    num_states : int
        The size of the state space.
    num_params : int
        Length of cost parameter vector.
    disc_fac : numpy.float
        see :ref:`disc_fac`
    scale : numpy.float
        see :ref:`scale`
    trans_mat : numpy.array
        see :ref:`trans_mat`
    mpec_params : numpy.array
        see :ref:`mpec_params`

    Returns
    -------
    jacobian : numpy.array
        Jacobian of the MPEC constraint.

    """
    # Build a vector representing 1 divided by the sum of the recentered
    # exponentials that appear on the right hand side of the MPEC constraint
    ev = mpec_params[0:num_states]
    obs_costs = calc_obs_costs(num_states, maint_func,
                               mpec_params[num_states:], scale)

    maint_value = disc_fac * ev - obs_costs[:, 0]
    repl_value = disc_fac * ev[0] - obs_costs[0, 1] - obs_costs[0, 0]

    # The maximum must include the scalar repl_value as well; passing it as the
    # second argument of np.array (the dtype slot) would raise a TypeError.
    ev_max = max(np.max(maint_value), repl_value)

    exp_centered_maint_value = np.exp(maint_value - ev_max)
    exp_centered_repl_value = np.exp(repl_value - ev_max)
    log_sum_denom = 1 / (exp_centered_maint_value + exp_centered_repl_value)

    jacobian = np.zeros((num_states, num_states + num_params))

    # Calculate derivative to EV(0)
    jacobian[:, 0] = np.dot(disc_fac * exp_centered_repl_value * trans_mat,
                            log_sum_denom)
    jacobian[0, 0] = (jacobian[0, 0] +
                      (1 - log_sum_denom[0] * exp_centered_repl_value) *
                      disc_fac * trans_mat[0, 0])
    # Calculate derivative to EV(1) until EV(num_states)
    jacobian[:, 1:num_states] = (trans_mat[:, 1:] * log_sum_denom[1:] *
                                 disc_fac * exp_centered_maint_value[1:])
    # Calculate derivative to RC
    jacobian[:, num_states] = np.dot(trans_mat,
                                     -exp_centered_repl_value * log_sum_denom)
    # Calculate derivative to maintenance cost parameters
    log_sum_denom_temp = np.reshape(log_sum_denom, (num_states, 1))
    maint_cost_difference_dev = np.reshape(
        (-exp_centered_maint_value * maint_func_dev(num_states, scale).T).T -
        exp_centered_repl_value * maint_func_dev(num_states, scale)[0],
        (num_states, num_params - 1),
    )

    jacobian[:, num_states + 1:] = np.reshape(
        np.dot(trans_mat, log_sum_denom_temp * maint_cost_difference_dev),
        (num_states, num_params - 1),
    )
    # Calculate the Jacobian of EV
    ev_jacobian = np.hstack(
        (np.eye(num_states), np.zeros((num_states, num_params))))

    jacobian = jacobian - ev_jacobian

    return jacobian
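
Analytical Jacobians like this one are easy to get subtly wrong, so it is worth cross-checking them against finite differences. Below is a self-contained toy sketch of that pattern, using the same ``approx_derivative`` helper the later examples import (from ``scipy.optimize._numdiff``, a private SciPy module); the toy function and values are purely illustrative:

import numpy as np
from scipy.optimize._numdiff import approx_derivative

# Toy function f(x) = T @ log(x); its analytical Jacobian is T with each
# column j divided by x[j].
trans_mat_toy = np.array([[0.6, 0.4], [0.3, 0.7]])
x0 = np.array([1.5, 2.0])

def fun(x):
    return trans_mat_toy @ np.log(x)

jac_analytical = trans_mat_toy / x0
jac_numerical = approx_derivative(fun, x0, method="2-point")
assert np.allclose(jac_analytical, jac_numerical, atol=1e-6)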
Example No. 7
def mpec_loglike_cost_params_derivative(
    maint_func,
    maint_func_dev,
    num_states,
    num_params,
    disc_fac,
    scale,
    decision_mat,
    state_mat,
    mpec_params,
):
    """
    Computing the analytical gradient of the objective function for MPEC.

    Parameters
    ----------
    maint_func: func
        see :ref:`maint_func`
    maint_func_dev: func
        see :ref:`maint_func`
    num_states : int
        The size of the state space.
    num_params : int
        Length of cost parameter vector.
    disc_fac : numpy.float
        see :ref:`disc_fac`
    scale : numpy.float
        see :ref:`scale`
    decision_mat : numpy.array
        see :ref:`decision_mat`
    state_mat : numpy.array
        see :ref:`state_mat`
    mpec_params : numpy.array
        see :ref:`mpec_params`

    Returns
    -------
    gradient : numpy.array
        Vector that holds the derivative of the negative log likelihood function
        to the parameters.

    """
    # Calculate choice probabilities
    costs = calc_obs_costs(num_states, maint_func, mpec_params[num_states:],
                           scale)
    p_choice = choice_prob_gumbel(mpec_params[0:num_states], costs, disc_fac)

    # calculate the derivative based on the model
    derivative_both = mpec_loglike_cost_params_derivative_model(
        num_states, num_params, disc_fac, scale, maint_func_dev, p_choice)

    # Calculate actual gradient depending on the given data
    # get decision matrix into the needed shape
    decision_mat_temp = np.vstack((
        np.tile(decision_mat[0], (num_states + num_params, 1)),
        np.tile(decision_mat[1], (num_states + num_params, 1)),
    ))

    # calculate the gradient
    gradient_temp = -np.sum(
        decision_mat_temp * np.dot(derivative_both, state_mat), axis=1)
    # bring the calculated gradient into the correct shape
    gradient = np.reshape(gradient_temp, (num_states + num_params, 2),
                          order="F").sum(axis=1)

    return gradient
Example No. 8
def mpec_loglike_cost_params(
    maint_func,
    maint_func_dev,
    num_states,
    num_params,
    state_mat,
    decision_mat,
    disc_fac,
    scale,
    gradient,
    mpec_params,
    grad,
):
    """
    Calculate the negative partial log likelihood for MPEC depending on cost parameters
    as well as the discretized expected values.

    Parameters
    ----------
    maint_func: func
        see :ref:`maint_func`
    maint_func_dev: func
        see :ref:`maint_func`
    num_states : int
        The size of the state space.
    num_params : int
        Length of cost parameter vector.
    state_mat : numpy.array
        see :ref:`state_mat`
    decision_mat : numpy.array
        see :ref:`decision_mat`
    disc_fac : numpy.float
        see :ref:`disc_fac`
    scale : numpy.float
        see :ref:`scale`
    gradient : str
        Indicates whether analytical or numerical gradient should be used.
    mpec_params : numpy.array
        see :ref:`mpec_params`
    grad : numpy.array, optional
        The gradient of the function. The default is np.array([]).

    Returns
    -------
    log_like: float
        Contains the negative partial log likelihood for the given parameters.

    """
    if grad.size > 0:
        if gradient == "No":
            # numerical gradient
            partial_loglike_mpec = partial(
                mpec_loglike_cost_params,
                maint_func,
                maint_func_dev,
                num_states,
                num_params,
                state_mat,
                decision_mat,
                disc_fac,
                scale,
                gradient,
                grad=np.array([]),
            )
            grad[:] = approx_derivative(partial_loglike_mpec,
                                        mpec_params,
                                        method="2-point")
        else:
            # analytical gradient
            grad[:] = mpec_loglike_cost_params_derivative(
                maint_func,
                maint_func_dev,
                num_states,
                num_params,
                disc_fac,
                scale,
                decision_mat,
                state_mat,
                mpec_params,
            )

    costs = calc_obs_costs(num_states, maint_func, mpec_params[num_states:],
                           scale)
    p_choice = choice_prob_gumbel(mpec_params[0:num_states], costs, disc_fac)
    log_like = like_hood_data(np.log(p_choice), decision_mat, state_mat)
    return float(log_like)
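
The trailing ``grad`` argument follows the convention of the nlopt Python bindings: the solver hands the objective a preallocated gradient array to be filled in place (it is empty when the algorithm is derivative-free). A self-contained toy sketch of that convention, assuming the ``nlopt`` bindings are installed:

import numpy as np
import nlopt

def objective(x, grad):
    # nlopt passes `grad` preallocated; fill it in place when requested.
    if grad.size > 0:
        grad[:] = 2 * x
    return float(np.dot(x, x))  # minimize ||x||^2

opt = nlopt.opt(nlopt.LD_LBFGS, 2)
opt.set_min_objective(objective)
opt.set_xtol_rel(1e-8)
x_opt = opt.optimize(np.array([1.0, -0.5]))  # converges to the origin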
Example No. 9
def mpec_constraint(
    maint_func,
    maint_func_dev,
    num_states,
    num_params,
    trans_mat,
    disc_fac,
    scale,
    gradient,
    result,
    mpec_params,
    grad,
):
    """
    Calculate the constraint of MPEC.

    Parameters
    ----------
    maint_func: func
        see :ref:`maint_func`
    maint_func_dev: func
        see :ref:`maint_func`
    num_states : int
        The size of the state space.
    num_params : int
        The number of parameters to be estimated.
    trans_mat : numpy.array
        see :ref:`trans_mat`
    disc_fac : numpy.float
        see :ref:`disc_fac`
    scale : numpy.float
        see :ref:`scale`
    gradient : str
        Indicates whether analytical or numerical gradient should be used.
    result : numpy.array
        Contains the left hand side of the constraint minus the right hand side
        for the nlopt solver. This should be zero for the constraint to hold.
    mpec_params : numpy.array
        see :ref:`mpec_params`
    grad : numpy.array, optional
        The gradient of the function. The default is np.array([]).

    Returns
    -------
    None.

    """
    if grad.size > 0:
        if gradient == "No":
            # numerical jacobian
            partial_constr_mpec_deriv = wrap_nlopt_constraint(
                mpec_constraint,
                args=(
                    maint_func,
                    maint_func_dev,
                    num_states,
                    num_params,
                    trans_mat,
                    disc_fac,
                    scale,
                    gradient,
                ),
            )
            grad[:, :] = approx_derivative(partial_constr_mpec_deriv,
                                           mpec_params,
                                           method="2-point")
        else:
            # analytical jacobian
            grad[:, :] = mpec_constraint_derivative(
                maint_func,
                maint_func_dev,
                num_states,
                num_params,
                disc_fac,
                scale,
                trans_mat,
                mpec_params,
            )

    ev = mpec_params[0:num_states]
    obs_costs = calc_obs_costs(num_states, maint_func,
                               mpec_params[num_states:], scale)

    maint_value = disc_fac * ev - obs_costs[:, 0]
    repl_value = disc_fac * ev[0] - obs_costs[0, 1] - obs_costs[0, 0]

    # Select the maximal value to rescale the value vector before applying the
    # exponential function.
    ev_max = max(np.max(maint_value), repl_value)

    log_sum = ev_max + np.log(
        np.exp(maint_value - ev_max) + np.exp(repl_value - ev_max))

    ev_new = np.dot(trans_mat, log_sum)
    if result.size > 0:
        result[:] = ev_new - ev
    return ev_new - ev
Example No. 10
def test_cost_func(inputs, outputs):
    assert_array_almost_equal(
        calc_obs_costs(inputs["nstates"], inputs["cost_fct"], inputs["params"],
                       0.001),
        outputs["costs"],
    )
Example No. 11
def estimate_mpec_ipopt(
    disc_fac,
    num_states,
    maint_func,
    maint_func_dev,
    num_params,
    scale,
    decision_mat,
    trans_mat,
    state_mat,
    optimizer_options,
    transition_results,
):
    """
    Estimation function of Mathematical Programming with Equilibrium Constraints
    (MPEC) in ruspy.


    Parameters
    ----------
    disc_fac : numpy.float
        see :ref:`disc_fac`
    num_states : int
        The size of the state space.
    maint_func: func
        see :ref: `maint_func`
    maint_func_dev: func
        see :ref: `maint_func_dev`
    num_params : int
        The number of parameters to be estimated.
    scale : numpy.float
        see :ref:`scale`
    decision_mat : numpy.array
        see :ref:`decision_mat`
    trans_mat : numpy.array
        see :ref:`trans_mat`
    state_mat : numpy.array
        see :ref:`state_mat`
    optimizer_options : dict
        The options chosen for the optimization algorithm in the initialization
        dictionary.
    transition_results : dict
        The results from ``estimate_transitions``.

    Returns
    -------
    transition_results : dictionary
        see :ref:`result_trans`
    mpec_cost_parameters : dictionary
        see :ref:`result_costs`


    """

    if not optional_package_is_available:
        raise NotImplementedError(
            """To use this you need to install cyipopt. If you are mac or Linux user
            the command is $ conda install -c conda-forge cyipopt. If you use
            Windows you have to install from source. A description can be found
            here: https://github.com/matthias-k/cyipopt""")

    del optimizer_options["algorithm"]
    gradient = optimizer_options.pop("derivative")
    params = optimizer_options.pop("params")
    lower_bounds = optimizer_options.pop("set_lower_bounds")
    upper_bounds = optimizer_options.pop("set_upper_bounds")
    bounds = np.vstack((lower_bounds, upper_bounds)).T
    bounds = list(map(tuple, bounds))
    if "get_expected_values" in optimizer_options:
        get_expected_values = optimizer_options.pop("get_expected_values")
    else:
        get_expected_values = "No"

    n_evaluations, neg_criterion = wrap_ipopt_likelihood(
        mpec_loglike_cost_params,
        args=(
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            state_mat,
            decision_mat,
            disc_fac,
            scale,
        ),
    )

    constraint_func = wrap_ipopt_constraint(
        mpec_constraint,
        args=(
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            trans_mat,
            disc_fac,
            scale,
        ),
    )

    if gradient == "No":

        def approx_gradient(params):
            fun = approx_derivative(neg_criterion, params, method="2-point")
            return fun

        gradient_func = approx_gradient

        def approx_jacobian(params):
            fun = approx_derivative(constraint_func, params, method="2-point")
            return fun

        jacobian_func = approx_jacobian
    else:
        gradient_func = partial(
            mpec_loglike_cost_params_derivative,
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            disc_fac,
            scale,
            decision_mat,
            state_mat,
        )
        jacobian_func = partial(
            mpec_constraint_derivative,
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            disc_fac,
            scale,
            trans_mat,
        )

    constraints = {
        "type": "eq",
        "fun": constraint_func,
        "jac": jacobian_func,
    }

    tic = time.perf_counter()
    if get_expected_values == "Yes":
        obs_costs = calc_obs_costs(num_states, maint_func, params, scale)
        ev = calc_fixp(trans_mat, obs_costs, disc_fac)[0]
        params = np.concatenate((ev, params))
    results_ipopt = minimize_ipopt(
        neg_criterion,
        params,
        bounds=bounds,
        jac=gradient_func,
        constraints=constraints,
        **optimizer_options,
    )
    toc = time.perf_counter()
    timing = toc - tic

    mpec_cost_parameters = {}
    mpec_cost_parameters["x"] = results_ipopt["x"]
    mpec_cost_parameters["fun"] = results_ipopt["fun"]
    if results_ipopt["success"] is True:
        mpec_cost_parameters["status"] = True
    else:
        mpec_cost_parameters["status"] = False
    mpec_cost_parameters["n_iterations"] = results_ipopt["nit"]
    mpec_cost_parameters["n_evaluations"] = results_ipopt["nfev"]
    mpec_cost_parameters["time"] = timing
    mpec_cost_parameters["n_evaluations_total"] = n_evaluations[0]

    return transition_results, mpec_cost_parameters
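
Reading off the keys that ``estimate_mpec_ipopt`` pops, a plausible ``optimizer_options`` dictionary looks like the sketch below. The values are purely illustrative, and any remaining entries are forwarded verbatim to ``minimize_ipopt``, so they have to match cyipopt's accepted options:

import numpy as np

num_states, num_params = 90, 2

optimizer_options = {
    "algorithm": "ipopt",        # removed by the function itself
    "derivative": "Yes",         # "No" switches to finite differences
    "get_expected_values": "No",
    # Starting values: num_states expected values followed by the cost params.
    "params": np.concatenate((np.zeros(num_states), np.array([10.0, 2.0]))),
    "set_lower_bounds": np.concatenate(
        (np.full(num_states, -np.inf), np.zeros(num_params))
    ),
    "set_upper_bounds": np.full(num_states + num_params, np.inf),
}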
Example No. 12
def estimate_mpec_nlopt(
    disc_fac,
    num_states,
    maint_func,
    maint_func_dev,
    num_params,
    scale,
    decision_mat,
    trans_mat,
    state_mat,
    optimizer_options,
    transition_results,
):
    """
    Estimation function of Mathematical Programming with Equilibrium Constraints
    (MPEC) in ruspy.


    Parameters
    ----------
    disc_fac : numpy.float
        see :ref:`disc_fac`
    num_states : int
        The size of the state space.
    maint_func: func
        see :ref: `maint_func`
    maint_func_dev: func
        see :ref: `maint_func_dev`
    num_params : int
        The number of parameters to be estimated.
    scale : numpy.float
        see :ref:`scale`
    decision_mat : numpy.array
        see :ref:`decision_mat`
    trans_mat : numpy.array
        see :ref:`trans_mat`
    state_mat : numpy.array
        see :ref:`state_mat`
    optimizer_options : dict
        The options chosen for the optimization algorithm in the initialization
        dictionary.
    transition_results : dict
        The results from ``estimate_transitions``.

    Returns
    -------
    transition_results : dictionary
        see :ref:`result_trans`
    mpec_cost_parameters : dictionary
        see :ref:`result_costs`


    """

    gradient = optimizer_options.pop("derivative")

    # Calculate partial functions needed for nlopt
    n_evaluations, partial_loglike_mpec = wrap_mpec_loglike(args=(
        maint_func,
        maint_func_dev,
        num_states,
        num_params,
        state_mat,
        decision_mat,
        disc_fac,
        scale,
        gradient,
    ))

    partial_constr_mpec = partial(
        mpec_constraint,
        maint_func,
        maint_func_dev,
        num_states,
        num_params,
        trans_mat,
        disc_fac,
        scale,
        gradient,
    )

    # set up nlopt
    opt = nlopt.opt(getattr(nlopt, optimizer_options.pop("algorithm")),
                    num_states + num_params)
    opt.set_min_objective(partial_loglike_mpec)
    opt.add_equality_mconstraint(
        partial_constr_mpec,
        np.full(num_states, 1e-6),
    )

    # supply user choices
    params = optimizer_options.pop("params")
    if "get_expected_values" in optimizer_options:
        get_expected_values = optimizer_options.pop("get_expected_values")
    else:
        get_expected_values = "No"

    if "set_local_optimizer" in optimizer_options:
        sub = nlopt.opt(  # noqa: F841
            eval("nlopt." + optimizer_options.pop("set_local_optimizer")),
            num_states + num_params,
        )
        exec("opt.set_local_optimizer(sub)")
        for key, _value in optimizer_options.items():
            exec("sub." + key + "(_value)")

    for key, _value in optimizer_options.items():
        exec("opt." + key + "(_value)")

    # Solving nlopt
    tic = time.perf_counter()
    if get_expected_values == "Yes":
        obs_costs = calc_obs_costs(num_states, maint_func, params, scale)
        ev = calc_fixp(trans_mat, obs_costs, disc_fac)[0]
        params = np.concatenate((ev, params))
    result = opt.optimize(params)
    toc = time.perf_counter()
    timing = toc - tic

    mpec_cost_parameters = {}
    mpec_cost_parameters["x"] = result
    mpec_cost_parameters["fun"] = opt.last_optimum_value()
    mpec_cost_parameters["status"] = opt.last_optimize_result() > 0
    mpec_cost_parameters["n_iterations"] = opt.get_numevals()
    mpec_cost_parameters["n_evaluations"] = n_evaluations[0]
    mpec_cost_parameters["reason"] = opt.last_optimize_result()
    mpec_cost_parameters["time"] = timing

    return transition_results, mpec_cost_parameters
Example No. 13
def get_demand(init_dict, demand_dict, demand_params):
    """
    Calculates the implied demand for a range of replacement costs
    for a certain number of buses over a certain time period.

    Parameters
    ----------
    init_dict : dict
        see :ref:`init_dict`.
    demand_dict : dict
        see :ref:`demand_dict`.
    demand_params : np.array
        see :ref:`demand_params`

    Returns
    -------
    demand_results : pd.DataFrame
        see :ref:`demand_results`

    """
    params = demand_params.copy()
    (
        disc_fac,
        num_states,
        maint_func,
        maint_func_dev,
        num_params,
        scale,
    ) = select_model_parameters(init_dict)

    # Initialize the loop over the replacement costs
    rc_range = np.linspace(
        demand_dict["RC_lower_bound"],
        demand_dict["RC_upper_bound"],
        demand_dict["demand_evaluations"],
    )
    demand_results = pd.DataFrame(index=rc_range, columns=["demand", "success"])
    demand_results.index.name = "RC"

    for rc in rc_range:
        params[-num_params] = rc
        demand_results.loc[(rc), "success"] = "No"

        # solve the model for the given parameters
        trans_mat = create_transition_matrix(num_states, params[:-num_params])

        obs_costs = calc_obs_costs(num_states, maint_func, params[-num_params:], scale)
        ev = calc_fixp(trans_mat, obs_costs, disc_fac)[0]
        p_choice = choice_prob_gumbel(ev, obs_costs, disc_fac)

        # calculate initial guess for pi and run contraction iterations
        pi_new = np.full((num_states, 2), 1 / (2 * num_states))
        tol = 1
        iteration = 1
        while tol >= demand_dict["tolerance"]:
            pi = pi_new
            pi_new = p_choice * (
                np.dot(trans_mat.T, pi[:, 0])
                + np.dot(np.tile(trans_mat[0, :], (num_states, 1)).T, pi[:, 1])
            ).reshape((num_states, 1))
            tol = np.max(np.abs(pi_new - pi))
            iteration += 1
            if iteration > 200:
                break
            if tol < demand_dict["tolerance"]:
                demand_results.loc[(rc), "success"] = "Yes"

        demand_results.loc[(rc), "demand"] = (
            demand_dict["num_buses"] * demand_dict["num_periods"] * pi_new[:, 1].sum()
        )

    return demand_results
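
The keys that ``get_demand`` reads from ``demand_dict`` can all be collected from the body above; a sketch with illustrative values:

demand_dict = {
    "RC_lower_bound": 2.0,
    "RC_upper_bound": 13.0,
    "demand_evaluations": 100,
    "tolerance": 1e-10,
    "num_buses": 50,
    "num_periods": 120,
}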
Example No. 14
def derivative_loglike_cost_params_individual(
    params,
    maint_func,
    maint_func_dev,
    num_states,
    trans_mat,
    state_mat,
    decision_mat,
    disc_fac,
    scale,
    alg_details,
):
    """
    This is the Jacobian of the individual log-likelihood function of the cost
    parameter estimation with respect to all cost parameters, as needed for the BHHH.


    Parameters
    ----------
    params : pandas.DataFrame
        see :ref:`params`
    maint_func: func
        see :ref:`maint_func`
    num_states : int
        The size of the state space.
    disc_fac : numpy.float
        see :ref:`disc_fac`
    trans_mat : numpy.array
        see :ref:`trans_mat`
    state_mat : numpy.array
        see :ref:`state_mat`
    decision_mat : numpy.array
        see :ref:`decision_mat`

    Returns
    -------
    dev : numpy.array
        A (num_buses * num_periods) x dim(params) matrix, in the form of a numpy
        array, containing the derivative of the individual log-likelihood
        function for every cost parameter.


    """
    params = params["value"].to_numpy()
    dev = np.zeros((decision_mat.shape[1], len(params)))
    obs_costs = calc_obs_costs(num_states, maint_func, params, scale)

    ev = get_ev(params, trans_mat, obs_costs, disc_fac, alg_details)[0]

    p_choice = choice_prob_gumbel(ev, obs_costs, disc_fac)
    maint_cost_dev = maint_func_dev(num_states, scale)

    lh_values_rc = like_hood_vaules_rc(ev, obs_costs, p_choice, trans_mat,
                                       disc_fac)
    like_dev_rc = like_hood_data_individual(lh_values_rc, decision_mat,
                                            state_mat)
    dev[:, 0] = like_dev_rc

    for i in range(len(params) - 1):
        if len(params) == 2:
            cost_dev_param = maint_cost_dev
        else:
            cost_dev_param = maint_cost_dev[:, i]

        log_like_values_params = log_like_values_param(ev, obs_costs, p_choice,
                                                       trans_mat,
                                                       cost_dev_param,
                                                       disc_fac)
        dev[:, i + 1] = like_hood_data_individual(log_like_values_params,
                                                  decision_mat, state_mat)

    return dev
Example No. 15
def loglike_cost_params_individual(
    params,
    maint_func,
    maint_func_dev,
    num_states,
    trans_mat,
    state_mat,
    decision_mat,
    disc_fac,
    scale,
    alg_details,
):
    """
    This is the individual log-likelihood function for the estimation of the cost
    parameters needed for the BHHH optimizer.


    Parameters
    ----------
    params : pandas.DataFrame
        see :ref:`params`
    maint_func: func
        see :ref:`maint_func`
    num_states : int
        The size of the state space.
    disc_fac : numpy.float
        see :ref:`disc_fac`
    trans_mat : numpy.array
        see :ref:`trans_mat`
    state_mat : numpy.array
        see :ref:`state_mat`
    decision_mat : numpy.array
        see :ref:`decision_mat`

    Returns
    -------
    log_like : numpy.array
        An array of length num_buses * num_periods containing the negative
        log-likelihood contributions of the individuals.


    """

    if "omega" in params.index:
        omega = params.loc["omega", "value"]
        cost_params = params.drop("omega")["value"].to_numpy()
        p_ml = trans_mat[0, 0:3]
        sample_size = 4292 / 78
        rho = chi2.ppf(omega, len(p_ml) - 1) / (2 * (sample_size))

        costs = calc_obs_costs(num_states, maint_func, cost_params, scale)
        v_start, _, _, _ = value_function_contraction(trans_mat,
                                                      costs,
                                                      disc_fac,
                                                      threshold=1e-12,
                                                      max_it=1000000)
        v_worst, worst_trans_mat, success, converge_crit_ev, num_eval = worst_value_fixp(
            v_start,
            trans_mat,
            costs,
            disc_fac,
            rho,
            threshold=1e-3,
            max_it=1000000)
        ev = np.dot(worst_trans_mat, v_worst)
        if not success:
            raise ValueError("Not converging.")
    else:
        cost_params = params["value"].to_numpy()
        costs = calc_obs_costs(num_states, maint_func, cost_params, scale)
        ev, contr_step_count, newt_kant_step_count = get_ev(
            cost_params, trans_mat, costs, disc_fac, alg_details)
        config.total_contr_count += contr_step_count
        config.total_newt_kant_count += newt_kant_step_count

    p_choice = choice_prob_gumbel(ev, costs, disc_fac)
    log_like = like_hood_data_individual(np.log(p_choice), decision_mat,
                                         state_mat)
    return log_like