def inputs():
    out = {}
    disc_fac = 0.9999
    num_states = 300
    num_buses = 200
    num_periods = 1000
    scale = 0.001
    init_dict = {
        "groups": "group_4",
        "binsize": 5000,
        "model_specifications": {
            "discount_factor": disc_fac,
            "number_states": num_states,
            "maint_cost_func": "linear",
            "cost_scale": scale,
        },
        "optimizer": {"approach": "NFXP", "algorithm": "scipy_L-BFGS-B"},
        "simulation": {
            "discount_factor": disc_fac,
            "seed": 123,
            "buses": num_buses,
            "periods": num_periods,
        },
    }
    out["trans_base"] = np.loadtxt(TEST_FOLDER + "repl_test_trans.txt")
    out["params_base"] = np.loadtxt(TEST_FOLDER + "repl_params_linear.txt")
    trans_mat = create_transition_matrix(num_states, out["trans_base"])
    costs = calc_obs_costs(num_states, lin_cost, out["params_base"], scale)
    ev_known = calc_fixp(trans_mat, costs, disc_fac)[0]
    df = simulate(init_dict["simulation"], ev_known, costs, trans_mat)
    result_trans, result_fixp = estimate(init_dict, df)
    out["trans_est"] = result_trans["x"]
    out["params_est"] = result_fixp["x"]
    out["status"] = result_fixp["status"]
    return out
def get_ev(params, trans_mat, obs_costs, disc_fac, alg_details):
    """
    An auxiliary function that allows the log-likelihood function and its
    derivative to share the same fixed point, avoiding a duplicate computation.

    Parameters
    ----------
    params : numpy.array
        see :ref:`params`
    trans_mat : numpy.array
        see :ref:`trans_mat`
    obs_costs : numpy.array
        see :ref:`costs`
    disc_fac : numpy.float
        see :ref:`disc_fac`
    alg_details : dict
        Keyword arguments passed on to ``calc_fixp``.

    Returns
    -------
    ev : numpy.array
        see :ref:`ev`

    """
    global ev_intermed
    global current_params
    if (ev_intermed is not None) and np.array_equal(current_params, params):
        ev = ev_intermed
        ev_intermed = None
    else:
        ev = calc_fixp(trans_mat, obs_costs, disc_fac, **alg_details)
        ev_intermed = ev
        current_params = params
    return ev
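# Minimal sketch of the caching pattern behind ``get_ev`` (assumption: the
# module initializes the two cache variables as below; per the docstring,
# the log-likelihood and its derivative call ``get_ev`` back to back with
# identical ``params``, so the second call reuses the cached fixed point
# instead of recomputing it).
ev_intermed = None
current_params = None

# First call at some ``params``: solves the fixed point and caches it.
#     ev = get_ev(params, trans_mat, obs_costs, disc_fac, alg_details)
# Second call at the same ``params``: returns the cached ``ev`` and clears
# the cache, so a later call with new ``params`` recomputes the fixed point.
#     ev = get_ev(params, trans_mat, obs_costs, disc_fac, alg_details)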
def test_regression_simulation(inputs):
    init_dict = random_init(inputs)

    # Draw parameter
    param_1 = np.random.normal(17.5, 2)
    param_2 = np.random.normal(21, 2)
    param_3 = np.random.normal(-2, 0.1)
    param_4 = np.random.normal(0.2, 0.1)
    params = np.array([param_1, param_2, param_3, param_4])

    disc_fac = init_dict["simulation"]["discount_factor"]
    probs = np.array(init_dict["simulation"]["known_trans"])
    num_states = 800

    trans_mat = create_transition_matrix(num_states, probs)
    costs = calc_obs_costs(num_states, cubic_costs, params, 0.00001)
    ev = calc_fixp(trans_mat, costs, disc_fac)[0]
    df = simulate(init_dict["simulation"], ev, costs, trans_mat)
    v_disc = discount_utility(df, disc_fac)
    assert_allclose(v_disc / ev[0], 1, rtol=1e-02)
def simulate_data(
    seed,
    disc_fac,
    num_buses,
    num_periods,
    num_states,
    cost_params,
    trans_params,
    cost_func,
    scale,
):
    """
    Simulates a single data set with a given specification using the
    ``simulate`` function of ruspy.

    Parameters
    ----------
    seed : int
        Seed for the simulation function.
    disc_fac : float
        The discount factor in the Rust model.
    num_buses : int
        The number of buses that should be simulated.
    num_periods : int
        The number of periods that should be simulated for each bus.
    num_states : int
        The number of states into which the mileage is discretized.
    cost_params : np.array
        The cost parameters for which the data is simulated.
    trans_params : np.array
        The transition parameters for which the data is simulated.
    cost_func : callable
        The cost function that underlies the data generating process.
    scale : float
        The scale of the cost function.

    Returns
    -------
    df : pd.DataFrame
        Simulated data set for the given data generating process.

    """
    init_dict = {
        "simulation": {
            "discount_factor": disc_fac,
            "periods": num_periods,
            "seed": seed,
            "buses": num_buses,
        },
    }
    costs = calc_obs_costs(num_states, cost_func, cost_params, scale)
    trans_mat = create_transition_matrix(num_states, trans_params)
    ev = calc_fixp(trans_mat, costs, disc_fac)[0]
    df = simulate(init_dict["simulation"], ev, costs, trans_mat)
    return df
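def _example_simulate_data():
    # Illustrative call of ``simulate_data`` (hypothetical helper; the
    # parameter values are arbitrary examples in the spirit of the linear-cost
    # fixtures above, not values taken from ruspy's test suite; ``lin_cost``
    # is the linear cost function already used in those fixtures).
    return simulate_data(
        seed=123,
        disc_fac=0.9999,
        num_buses=200,
        num_periods=1000,
        num_states=300,
        cost_params=np.array([10.0, 2.3]),
        trans_params=np.array([0.4, 0.5, 0.1]),
        cost_func=lin_cost,
        scale=0.001,
    )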
def inputs_sim(inputs):
    out = {}
    out["init_dict"] = init_dict = random_init(inputs)["simulation"]

    # Draw parameter
    param1 = np.random.normal(10.0, 2)
    param2 = np.random.normal(2.3, 0.5)
    params = np.array([param1, param2])

    disc_fac = init_dict["discount_factor"]
    probs = np.array(init_dict["known_trans"])
    num_states = 300

    out["trans_mat"] = trans_mat = create_transition_matrix(num_states, probs)
    out["costs"] = costs = calc_obs_costs(num_states, lin_cost, params, 0.001)
    out["ev"] = ev = calc_fixp(trans_mat, costs, disc_fac)[0]
    out["df"] = simulate(init_dict, ev, costs, trans_mat)
    return out
def test_fixp(inputs, outputs):
    assert_array_almost_equal(
        calc_fixp(outputs["trans_mat"], outputs["costs"], inputs["disc_fac"])[0],
        outputs["fixp"],
    )
def estimate_mpec_ipopt(
    disc_fac,
    num_states,
    maint_func,
    maint_func_dev,
    num_params,
    scale,
    decision_mat,
    trans_mat,
    state_mat,
    optimizer_options,
    transition_results,
):
    """
    Estimation function for Mathematical Programming with Equilibrium
    Constraints (MPEC) in ruspy, using the IPOPT solver.

    Parameters
    ----------
    disc_fac : numpy.float
        see :ref:`disc_fac`
    num_states : int
        The size of the state space.
    maint_func : func
        see :ref:`maint_func`
    maint_func_dev : func
        see :ref:`maint_func_dev`
    num_params : int
        The number of parameters to be estimated.
    scale : numpy.float
        see :ref:`scale`
    decision_mat : numpy.array
        see :ref:`decision_mat`
    trans_mat : numpy.array
        see :ref:`trans_mat`
    state_mat : numpy.array
        see :ref:`state_mat`
    optimizer_options : dict
        The options chosen for the optimization algorithm in the initialization
        dictionary.
    transition_results : dict
        The results from ``estimate_transitions``.

    Returns
    -------
    transition_results : dictionary
        see :ref:`result_trans`
    mpec_cost_parameters : dictionary
        see :ref:`result_costs`

    """
    if not optional_package_is_available:
        raise NotImplementedError(
            """To use this you need to install cyipopt. If you are a Mac or Linux
            user the command is $ conda install -c conda-forge cyipopt.
            If you use Windows you have to install from source. A description
            can be found here: https://github.com/matthias-k/cyipopt"""
        )

    # Pull out the options that are handled explicitly; everything left over
    # is forwarded to minimize_ipopt.
    del optimizer_options["algorithm"]
    gradient = optimizer_options.pop("derivative")
    params = optimizer_options.pop("params")
    lower_bounds = optimizer_options.pop("set_lower_bounds")
    upper_bounds = optimizer_options.pop("set_upper_bounds")
    bounds = np.vstack((lower_bounds, upper_bounds)).T
    bounds = list(map(tuple, bounds))
    if "get_expected_values" in optimizer_options:
        get_expected_values = optimizer_options.pop("get_expected_values")
    else:
        get_expected_values = "No"

    n_evaluations, neg_criterion = wrap_ipopt_likelihood(
        mpec_loglike_cost_params,
        args=(
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            state_mat,
            decision_mat,
            disc_fac,
            scale,
        ),
    )
    constraint_func = wrap_ipopt_constraint(
        mpec_constraint,
        args=(
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            trans_mat,
            disc_fac,
            scale,
        ),
    )

    if gradient == "No":
        # Fall back to finite-difference approximations of the gradient
        # and the constraint Jacobian.
        def approx_gradient(params):
            return approx_derivative(neg_criterion, params, method="2-point")

        gradient_func = approx_gradient

        def approx_jacobian(params):
            return approx_derivative(constraint_func, params, method="2-point")

        jacobian_func = approx_jacobian
    else:
        gradient_func = partial(
            mpec_loglike_cost_params_derivative,
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            disc_fac,
            scale,
            decision_mat,
            state_mat,
        )
        jacobian_func = partial(
            mpec_constraint_derivative,
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            disc_fac,
            scale,
            trans_mat,
        )

    constraints = {
        "type": "eq",
        "fun": constraint_func,
        "jac": jacobian_func,
    }

    tic = time.perf_counter()
    if get_expected_values == "Yes":
        obs_costs = calc_obs_costs(num_states, maint_func, params, scale)
        ev = calc_fixp(trans_mat, obs_costs, disc_fac)[0]
        params = np.concatenate((ev, params))
    results_ipopt = minimize_ipopt(
        neg_criterion,
        params,
        bounds=bounds,
        jac=gradient_func,
        constraints=constraints,
        **optimizer_options,
    )
    toc = time.perf_counter()
    timing = toc - tic

    mpec_cost_parameters = {}
    mpec_cost_parameters["x"] = results_ipopt["x"]
    mpec_cost_parameters["fun"] = results_ipopt["fun"]
    mpec_cost_parameters["status"] = results_ipopt["success"] is True
    mpec_cost_parameters["n_iterations"] = results_ipopt["nit"]
    mpec_cost_parameters["n_evaluations"] = results_ipopt["nfev"]
    mpec_cost_parameters["time"] = timing
    mpec_cost_parameters["n_evaluations_total"] = n_evaluations[0]

    return transition_results, mpec_cost_parameters
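# Illustrative ``optimizer_options`` for the IPOPT path above (assumption:
# the concrete numbers are examples only).  The keys "algorithm",
# "derivative", "params", "set_lower_bounds", "set_upper_bounds" and the
# optional "get_expected_values" are consumed by ``estimate_mpec_ipopt``;
# any remaining entries are forwarded to ``minimize_ipopt``.
_example_num_states, _example_num_params = 90, 2
example_ipopt_options = {
    "algorithm": "ipopt",  # removed before the call to minimize_ipopt
    "derivative": "Yes",  # "No" switches to the finite-difference fallback
    # start values: expected values first, then the cost parameters
    "params": np.concatenate(
        (np.zeros(_example_num_states), np.array([10.0, 2.0]))
    ),
    "set_lower_bounds": np.concatenate(
        (np.full(_example_num_states, -np.inf), np.zeros(_example_num_params))
    ),
    "set_upper_bounds": np.full(_example_num_states + _example_num_params, np.inf),
    "get_expected_values": "No",
}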
def estimate_mpec_nlopt(
    disc_fac,
    num_states,
    maint_func,
    maint_func_dev,
    num_params,
    scale,
    decision_mat,
    trans_mat,
    state_mat,
    optimizer_options,
    transition_results,
):
    """
    Estimation function for Mathematical Programming with Equilibrium
    Constraints (MPEC) in ruspy, using the NLOPT library.

    Parameters
    ----------
    disc_fac : numpy.float
        see :ref:`disc_fac`
    num_states : int
        The size of the state space.
    maint_func : func
        see :ref:`maint_func`
    maint_func_dev : func
        see :ref:`maint_func_dev`
    num_params : int
        The number of parameters to be estimated.
    scale : numpy.float
        see :ref:`scale`
    decision_mat : numpy.array
        see :ref:`decision_mat`
    trans_mat : numpy.array
        see :ref:`trans_mat`
    state_mat : numpy.array
        see :ref:`state_mat`
    optimizer_options : dict
        The options chosen for the optimization algorithm in the initialization
        dictionary.
    transition_results : dict
        The results from ``estimate_transitions``.

    Returns
    -------
    transition_results : dictionary
        see :ref:`result_trans`
    mpec_cost_parameters : dictionary
        see :ref:`result_costs`

    """
    gradient = optimizer_options.pop("derivative")

    # Calculate partial functions needed for nlopt
    n_evaluations, partial_loglike_mpec = wrap_mpec_loglike(
        args=(
            maint_func,
            maint_func_dev,
            num_states,
            num_params,
            state_mat,
            decision_mat,
            disc_fac,
            scale,
            gradient,
        )
    )
    partial_constr_mpec = partial(
        mpec_constraint,
        maint_func,
        maint_func_dev,
        num_states,
        num_params,
        trans_mat,
        disc_fac,
        scale,
        gradient,
    )

    # Set up nlopt
    opt = nlopt.opt(
        eval("nlopt." + optimizer_options.pop("algorithm")),
        num_states + num_params,
    )
    opt.set_min_objective(partial_loglike_mpec)
    opt.add_equality_mconstraint(
        partial_constr_mpec,
        np.full(num_states, 1e-6),
    )

    # Supply user choices
    params = optimizer_options.pop("params")
    if "get_expected_values" in optimizer_options:
        get_expected_values = optimizer_options.pop("get_expected_values")
    else:
        get_expected_values = "No"

    if "set_local_optimizer" in optimizer_options:
        sub = nlopt.opt(  # noqa: F841
            eval("nlopt." + optimizer_options.pop("set_local_optimizer")),
            num_states + num_params,
        )
        exec("opt.set_local_optimizer(sub)")
        for key, _value in optimizer_options.items():
            exec("sub." + key + "(_value)")

    for key, _value in optimizer_options.items():
        exec("opt." + key + "(_value)")

    # Solve with nlopt
    tic = time.perf_counter()
    if get_expected_values == "Yes":
        obs_costs = calc_obs_costs(num_states, maint_func, params, scale)
        ev = calc_fixp(trans_mat, obs_costs, disc_fac)[0]
        params = np.concatenate((ev, params))
    result = opt.optimize(params)
    toc = time.perf_counter()
    timing = toc - tic

    mpec_cost_parameters = {}
    mpec_cost_parameters["x"] = result
    mpec_cost_parameters["fun"] = opt.last_optimum_value()
    if opt.last_optimize_result() > 0:
        mpec_cost_parameters["status"] = True
    else:
        mpec_cost_parameters["status"] = False
    mpec_cost_parameters["n_iterations"] = opt.get_numevals()
    mpec_cost_parameters["n_evaluations"] = n_evaluations[0]
    mpec_cost_parameters["reason"] = opt.last_optimize_result()
    mpec_cost_parameters["time"] = timing

    return transition_results, mpec_cost_parameters
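# Illustrative ``optimizer_options`` for the NLOPT path above (assumption:
# the algorithm choice and tolerances are examples only).  The keys
# "algorithm", "derivative", "params" and the optional "get_expected_values"
# and "set_local_optimizer" are consumed explicitly; every remaining key is
# treated as the name of an ``nlopt.opt`` setter and applied via ``exec``.
example_nlopt_options = {
    "algorithm": "LD_SLSQP",  # must handle the equality constraints of MPEC
    "derivative": "Yes",
    # start values: expected values first, then the cost parameters
    "params": np.concatenate((np.zeros(90), np.array([10.0, 2.0]))),
    "get_expected_values": "No",
    "set_ftol_abs": 1e-8,  # applied as opt.set_ftol_abs(1e-8)
    "set_maxeval": 10000,  # applied as opt.set_maxeval(10000)
}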
def get_demand(init_dict, demand_dict, demand_params):
    """
    Calculates the implied demand for a range of replacement costs
    for a certain number of buses over a certain time period.

    Parameters
    ----------
    init_dict : dict
        see :ref:`init_dict`
    demand_dict : dict
        see :ref:`demand_dict`
    demand_params : np.array
        see :ref:`demand_params`

    Returns
    -------
    demand_results : pd.DataFrame
        see :ref:`demand_results`

    """
    params = demand_params.copy()
    (
        disc_fac,
        num_states,
        maint_func,
        maint_func_dev,
        num_params,
        scale,
    ) = select_model_parameters(init_dict)

    # Initialize the loop over the replacement costs
    rc_range = np.linspace(
        demand_dict["RC_lower_bound"],
        demand_dict["RC_upper_bound"],
        demand_dict["demand_evaluations"],
    )
    demand_results = pd.DataFrame(index=rc_range, columns=["demand", "success"])
    demand_results.index.name = "RC"

    for rc in rc_range:
        params[-num_params] = rc
        demand_results.loc[rc, "success"] = "No"

        # Solve the model for the given parameters
        trans_mat = create_transition_matrix(num_states, params[:-num_params])
        obs_costs = calc_obs_costs(num_states, maint_func, params[-num_params:], scale)
        ev = calc_fixp(trans_mat, obs_costs, disc_fac)[0]
        p_choice = choice_prob_gumbel(ev, obs_costs, disc_fac)

        # Calculate initial guess for pi and run contraction iterations
        pi_new = np.full((num_states, 2), 1 / (2 * num_states))
        tol = 1
        iteration = 1

        while tol >= demand_dict["tolerance"]:
            pi = pi_new
            pi_new = p_choice * (
                np.dot(trans_mat.T, pi[:, 0])
                + np.dot(np.tile(trans_mat[0, :], (num_states, 1)).T, pi[:, 1])
            ).reshape((num_states, 1))
            tol = np.max(np.abs(pi_new - pi))
            iteration += 1
            if iteration > 200:
                break

        if tol < demand_dict["tolerance"]:
            demand_results.loc[rc, "success"] = "Yes"
        demand_results.loc[rc, "demand"] = (
            demand_dict["num_buses"] * demand_dict["num_periods"] * pi_new[:, 1].sum()
        )

    return demand_results
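def _example_get_demand():
    # Illustrative demand computation (hypothetical helper; all numbers are
    # examples).  ``demand_params`` stacks the transition probabilities first
    # and the cost parameters last, as the slicing inside ``get_demand``
    # expects, and ``init_dict`` follows the layout used in the fixtures above.
    init_dict = {
        "model_specifications": {
            "discount_factor": 0.9999,
            "number_states": 90,
            "maint_cost_func": "linear",
            "cost_scale": 0.001,
        },
        "optimizer": {"approach": "NFXP", "algorithm": "scipy_L-BFGS-B"},
    }
    demand_dict = {
        "RC_lower_bound": 2.0,
        "RC_upper_bound": 13.0,
        "demand_evaluations": 100,
        "tolerance": 1e-10,
        "num_buses": 50,
        "num_periods": 120,
    }
    # Three transition probabilities followed by [RC, maintenance cost slope];
    # the RC entry is overwritten inside the loop over rc_range anyway.
    demand_params = np.array([0.39, 0.59, 0.02, 10.0, 2.3])
    return get_demand(init_dict, demand_dict, demand_params)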