Example #1
if solver == 'Extensive':  # enclosing branch inferred from the parallel SDDiP branch below
    HT_extensive.solve(outputFlag=0, TimeLimit=time_limit)
    print('lower bound', HT_extensive.objBound)
    print('upper bound', HT_extensive.objVal)
    print('total time', HT_extensive.total_time)
    print('gap', HT_extensive.MIPGap)
if solver == 'SDDiP':
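    # Binarize the state variables before running SDDiP (its cuts require binary states).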
    HydroThermal.binarize(bin_stage=T)
    HT_sddp = SDDiP(HydroThermal)
    HT_sddp.solve(
        logFile=0,
        logToConsole=0,
        cuts=[cut],
        n_processes=1,
        n_steps=1,
        max_iterations=n_iterations,
    )
    result = Evaluation(HydroThermal)
    if T in [2, 3]:
        result.run(random_state=666, n_simulations=-1)
    else:
        result.run(random_state=666, n_simulations=3000)
    resultTrue = EvaluationTrue(HydroThermal)
    resultTrue.run(random_state=666, n_simulations=3000)
    print('lower bound', result.db)
    if T in [2, 3]:
        print('upper bound', result.epv)
    else:
        print('CI appr.', result.CI)
    print('gap', result.gap)
    print('CI true', resultTrue.CI)
Example #2
AssetMgt.add_Markovian_uncertainty(g)
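# Stage models: two state variables (now/past) track the holdings in the two assets.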
for t in range(T):
    m = AssetMgt[t]
    now, past = m.addStateVars(2, lb=0, obj=0)
    if t == 0:
        m.addConstr(now[0] + now[1] == 55)
    if t in [1, 2]:
        m.addConstr(past[0] + past[1] == now[0] + now[1],
                    uncertainty_dependent={
                        past[0]: 0,
                        past[1]: 1
                    })
    if t == 3:
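        # Final stage: y (objective coefficient +1) measures the surplus and
        # w (coefficient -4) the shortfall relative to the target of 80.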
        y = m.addVar(obj=1)
        w = m.addVar(obj=-4)
        m.addConstr(past[0] + past[1] - y + w == 80,
                    uncertainty_dependent={
                        past[0]: 0,
                        past[1]: 1
                    })
AssetMgt.discretize(
    n_Markov_states=25,
    n_sample_paths=10000,
)
# Extensive(AssetMgt).solve()
SDDP(AssetMgt).solve(max_iterations=400)
result = Evaluation(AssetMgt)
result.run(n_simulations=1000)
resultTrue = EvaluationTrue(AssetMgt)
resultTrue.run(n_simulations=1000)
Example #3
    def solve(self,
              n_processes=1,
              n_steps=1,
              max_iterations=10000,
              max_stable_iterations=10000,
              max_time=1000000.0,
              tol=0.001,
              freq_evaluations=None,
              percentile=95,
              tol_diff=float("-inf"),
              random_state=None,
              freq_evaluations_true=None,
              freq_comparisons=None,
              n_simulations=3000,
              n_simulations_true=3000,
              freq_clean=None,
              logFile=1,
              logToConsole=1):
        """Solve approximation model.

        Parameters
        ----------

        n_processes: int, optional (default=1)
            The number of processes to run in parallel. Run serial SDDP if 1.
            If n_steps is 1, n_processes is coerced to be 1.

        n_steps: int, optional (default=1)
            The number of forward/backward steps to run in each cut iteration.
            It is coerced to be 1 if n_processes is 1.

        max_iterations: int, optional (default=10000)
            The maximum number of iterations to run SDDP.

        max_stable_iterations: int, optional (default=10000)
            The maximum number of consecutive iterations with an unchanged
            deterministic bound.

        max_time: float, optional (default=1000000.0)
            The maximum amount of time (in seconds) allowed for the SDDP loop.

        tol: float, optional (default=1e-3)
            Tolerance for convergence of the bounds; the algorithm stops once
            the evaluated gap falls below tol.

        freq_evaluations: int, optional (default=None)
            The frequency of evaluating the gap on the approximation model.
            It is ignored if the risk measure is not risk neutral.

        percentile: float, optional (default=95)
            The percentile used to compute confidence intervals.

        tol_diff: float, optional (default=-inf)
            The stabilization threshold; the algorithm stops once the right
            end of the confidence interval of successive policy differences
            falls below it.

        freq_comparisons: int, optional (default=None)
            The frequency of comparisons of policies

        n_simulations: int, optional (default=3000)
            The number of simulations to run when evaluating a policy
            on the approximation model.

        freq_clean: int/list, optional (default=None)
            The frequency of removing redundant cuts.
            If int, perform cleaning at the same frequency for all stages.
            If list, perform cleaning at a different frequency for each stage;
            must be of length T-1 (the last stage does not have any cuts).

        random_state: int, RandomState instance or None, optional (default=None)
            Used in evaluations and comparisons. (In the forward step, there is
            an internal random_state which is not supposed to be changed.)
            If int, random_state is the seed used by the random number
            generator;
            If RandomState instance, random_state is the random number
            generator;
            If None, the random number generator is the RandomState
            instance used by numpy.random.

        logFile: binary, optional (default=1)
            Whether to write the log to a file.

        logToConsole: binary, optional (default=1)
            Whether to write the log to the console.
        """
        MSP = self.MSP
        if freq_clean is not None:
            if isinstance(freq_clean, (numbers.Integral, numpy.integer)):
                freq_clean = [freq_clean] * (MSP.T - 1)
            if isinstance(freq_clean, (abc.Sequence, numpy.ndarray)):
                if len(freq_clean) != MSP.T - 1:
                    raise ValueError("freq_clean list must be of length T-1!")
            else:
                raise TypeError(
                    "freq_clean must be int/list instead of {}!".format(
                        type(freq_clean)))
        if not MSP._flag_update:
            MSP._update()
        stable_iterations = 0
        total_time = 0
        a = time.time()
        gap = 1.0
        right_end_of_CI = float("inf")
        db_past = MSP.bound
        self.percentile = percentile
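        # Gap evaluations on the approximation model are only meaningful for the
        # risk-neutral measure, so they are disabled otherwise.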
        if MSP.measure != "risk neutral":
            freq_evaluations = None
        # distinguish pv_sim from pv
        pv_sim_past = None

        if n_processes != 1:
            self.n_steps = n_steps
            self.n_processes = min(n_steps, n_processes)
            self.jobs = allocate_jobs(self.n_steps, self.n_processes)

        logger_sddp = LoggerSDDP(
            logFile=logFile,
            logToConsole=logToConsole,
            n_processes=self.n_processes,
            percentile=self.percentile,
        )
        logger_sddp.header()
        if freq_evaluations is not None or freq_comparisons is not None:
            logger_evaluation = LoggerEvaluation(
                n_simulations=n_simulations,
                percentile=percentile,
                logFile=logFile,
                logToConsole=logToConsole,
            )
            logger_evaluation.header()
        if freq_comparisons is not None:
            logger_comparison = LoggerComparison(
                n_simulations=n_simulations,
                percentile=percentile,
                logFile=logFile,
                logToConsole=logToConsole,
            )
            logger_comparison.header()
        try:
            while (self.iteration < max_iterations and total_time < max_time
                   and stable_iterations < max_stable_iterations and tol < gap
                   and tol_diff < right_end_of_CI):
                start = time.time()

                self._compute_cut_type()

                if self.n_processes == 1:
                    pv = self._SDDP_single()
                else:
                    pv = self._SDDP_multiprocessesing()

                m = (MSP.models[0]
                     if MSP.n_Markov_states == 1 else MSP.models[0][0])
                m.optimize()
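                # Gurobi status codes: 2 = OPTIMAL, 11 = INTERRUPTED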
                if m.status not in [2, 11]:
                    m.write_infeasible_model("backward_" +
                                             str(m._model.modelName) + ".lp")
                db = m.objBound
                self.db.append(db)
                MSP.db = db
                if self.n_processes != 1:
                    CI = compute_CI(pv, percentile)
                self.pv.append(pv)

                if self.iteration >= 1:
                    if db_past == db:
                        stable_iterations += 1
                    else:
                        stable_iterations = 0
                self.iteration += 1
                db_past = db

                end = time.time()
                elapsed_time = end - start
                total_time += elapsed_time

                if self.n_processes == 1:
                    logger_sddp.text(
                        iteration=self.iteration,
                        db=db,
                        pv=pv[0],
                        time=elapsed_time,
                    )
                else:
                    logger_sddp.text(
                        iteration=self.iteration,
                        db=db,
                        CI=CI,
                        time=elapsed_time,
                    )
                if ((freq_evaluations is not None
                        and self.iteration % freq_evaluations == 0)
                        or (freq_comparisons is not None
                            and self.iteration % freq_comparisons == 0)):
                    start = time.time()
                    evaluation = Evaluation(MSP)
                    evaluation.run(
                        n_simulations=n_simulations,
                        random_state=random_state,
                        query_stage_cost=False,
                        percentile=percentile,
                    )
                    pandas.DataFrame({
                        'pv': evaluation.pv
                    }).to_csv("evaluation.csv")
                    elapsed_time = time.time() - start
                    gap = evaluation.gap
                    if n_simulations == -1:
                        logger_evaluation.text(
                            iteration=self.iteration,
                            db=db,
                            pv=evaluation.epv,
                            gap=gap,
                            time=elapsed_time,
                        )
                    elif n_simulations == 1:
                        logger_evaluation.text(
                            iteration=self.iteration,
                            db=db,
                            pv=evaluation.pv,
                            gap=gap,
                            time=elapsed_time,
                        )
                    else:
                        logger_evaluation.text(
                            iteration=self.iteration,
                            db=db,
                            CI=evaluation.CI,
                            gap=gap,
                            time=elapsed_time,
                        )
                if (freq_comparisons is not None
                        and self.iteration % freq_comparisons == 0):
                    start = time.time()
                    pv_sim = evaluation.pv
                    if self.iteration / freq_comparisons >= 2:
                        diff = MSP.sense * (numpy.array(pv_sim_past) -
                                            numpy.array(pv_sim))
                        if n_simulations == -1:
                            diff_mean = numpy.mean(diff)
                            right_end_of_CI = diff_mean
                        else:
                            diff_CI = compute_CI(diff, self.percentile)
                            right_end_of_CI = diff_CI[1]
                        elapsed_time = time.time() - start
                        if n_simulations == -1:
                            logger_comparison.text(
                                iteration=self.iteration,
                                ref_iteration=self.iteration -
                                freq_comparisons,
                                diff=diff_mean,
                                time=elapsed_time,
                            )
                        else:
                            logger_comparison.text(
                                iteration=self.iteration,
                                ref_iteration=self.iteration -
                                freq_comparisons,
                                diff_CI=diff_CI,
                                time=elapsed_time,
                            )
                    pv_sim_past = pv_sim
                if freq_clean is not None:
                    clean_stages = [
                        t for t in range(1, MSP.T - 1)
                        if self.iteration % freq_clean[t] == 0
                    ]
                    if len(clean_stages) != 0:
                        self._remove_redundant_cut(clean_stages)
                # self._clean()
        except KeyboardInterrupt:
            stop_reason = "interruption by the user"
        # SDDP iteration stops
        MSP.db = self.db[-1]
        if self.iteration >= max_iterations:
            stop_reason = "the maximum number of iterations ({}) has been reached".format(
                max_iterations)
        if total_time >= max_time:
            stop_reason = "the time limit ({}) has been reached".format(max_time)
        if stable_iterations >= max_stable_iterations:
            stop_reason = "the maximum number of stable iterations ({}) has been reached".format(
                max_stable_iterations)
        if gap <= tol:
            stop_reason = "the convergence tolerance ({}) has been reached".format(tol)
        if right_end_of_CI <= tol_diff:
            stop_reason = "the stabilization threshold ({}) has been reached".format(
                tol_diff)

        b = time.time()
        logger_sddp.footer(reason=stop_reason)
        if freq_evaluations is not None or freq_comparisons is not None:
            logger_evaluation.footer()
        if freq_comparisons is not None:
            logger_comparison.footer()
        self.total_time = total_time
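A minimal usage sketch of the solve() signature documented above, assuming an
already-built multistage model object (named msp here purely for illustration);
the parameter values are examples, not library recommendations:

SDDP(msp).solve(
    max_iterations=100,     # run at most 100 forward/backward iterations
    freq_evaluations=10,    # evaluate the policy gap every 10 iterations
    n_simulations=3000,     # number of simulations per evaluation
    tol=1e-3,               # stop once the evaluated gap falls below tol
    logToConsole=1,         # print progress to the console
)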
Example #4
        TS = m.addConstrs(inflow_now[i] + inflow_past[i] == 0
                          for i in range(4))
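        # The sampler supplies the randomness attached below: the (constraint, variable)
        # locations target the coefficient on inflow_past in each TS constraint, and the
        # bare constraints target the right-hand sides.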
        m.add_continuous_uncertainty(
            uncertainty=sampler(t - 1),
            locations=([(TS[i], inflow_past[i])
                        for i in range(4)] + [TS[i] for i in range(4)]),
        )
HydroThermal.discretize(n_samples=n_samples, random_state=seed)
HT_sddp = SDDP(HydroThermal)
HT_sddp.solve(
    n_processes=6,
    n_steps=6,
    max_iterations=n_iterations,
)
result = Evaluation(HydroThermal)
result.run(random_state=666, n_simulations=n_simulations)
resultTrue = EvaluationTrue(HydroThermal)
resultTrue.run(random_state=666, n_simulations=n_simulations)
model = 'TS' + str(n_samples)
if model == 'TS100':
    fig = HT_sddp.plot_bounds(window=1, smooth=1)
    fig.tight_layout()
    fig.savefig("./result/{}_bounds.png".format(model), dpi=1200)
pandas.DataFrame({'pv': resultTrue.pv}).to_csv("./result/{}.csv".format(model))
print('model', model)
print('iter_SDDP', n_iterations)
print('time_SDDP', HT_sddp.total_time)
print('time_total', HT_sddp.total_time)
print('gap', result.gap)
print('bound', result.db)
print('CI for approx. model', result.CI)
Example #5
        for j in range(N):
            m.addConstr(past[j] == capm[j], uncertainty_dependent={past[j]:j})
            m.addConstr(past[j] == idio[j], uncertainty={past[j]:f(alpha[j],sigma[j])})
        m.addConstr(now[N] == (1+rf) * past[N])
AssetMgt.discretize(
    n_samples=100,
    method='input',
    Markov_states=Markov_states,
    transition_matrix=transition_matrix,
    random_state=888,
)
AssetMgt.set_AVaR(lambda_=lambda_, alpha_=0.25)
AssetMgt_SDDP = SDDP(AssetMgt)
AssetMgt_SDDP.solve(max_iterations=50, n_steps=3, n_processes=3)
evaluation = Evaluation(AssetMgt)
evaluation.run(n_simulations=1000, random_state=666)
evaluationTrue = EvaluationTrue(AssetMgt)
evaluationTrue.run(n_simulations=1000, random_state=666)
result = {
    'mean': numpy.mean(evaluation.pv)-100,
    'std': numpy.std(evaluation.pv),
    'VAR': numpy.quantile(evaluation.pv,0.05)-100,
    'skewness': stats.skew(evaluation.pv),
    'kurtosis': 3+stats.kurtosis(evaluation.pv),
    'Sharpe Ratio':
        (numpy.mean(evaluation.pv)-100-0.005513771)/numpy.std(evaluation.pv)
}
evaluation_tab.append(pandas.Series(result))
resultTrue = {
    'mean': numpy.mean(evaluationTrue.pv)-100,
    'std': numpy.std(evaluationTrue.pv),