Example #1
def get_capacities(option, nominal_capacity, timeline):
    """
    Build a capacity series over the timeline: nominal capacity between
    StartingDate (inclusive) and ClosingDate (exclusive), zero elsewhere.
    """
    starting_date = int(option["StartingDate"]) if Utils.is_numeric(
        option["StartingDate"]) else -float("Inf")
    closing_date = int(option["ClosingDate"]) if Utils.is_numeric(
        option["ClosingDate"]) else float("Inf")
    arr = nominal_capacity * (1 - np.heaviside([t - closing_date for t in timeline], 1)) * \
          np.heaviside([t - starting_date for t in timeline], 1)
    return pd.Series(arr, index=timeline)
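The two heaviside terms act as an on/off window. A minimal standalone sketch of that windowing logic, with hypothetical dates and capacity (assumes only numpy and pandas):

import numpy as np
import pandas as pd

# Capacity is nominal for start <= t < end, zero elsewhere.
timeline = list(range(2020, 2026))
start, end, nominal = 2021, 2024, 100.0
arr = nominal * (1 - np.heaviside([t - end for t in timeline], 1)) * \
      np.heaviside([t - start for t in timeline], 1)
print(pd.Series(arr, index=timeline))
# 2020: 0.0, 2021: 100.0, 2022: 100.0, 2023: 100.0, 2024: 0.0, 2025: 0.0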
Example #2
def set_opex(self, opex_df, raw_materials_df):
    """
    Setting opex from data dictionary
    :param opex_df: Dataframe
    :param raw_materials_df: Dataframe
    :return: None
    """
    self.main_input = "ALL"  # TODO: review this
    self.inputs = opex_df[
        opex_df.Product != "Total"].Product.unique().tolist()
    self.specific_consumptions = Utils.multidict(["MAP"], ["All"],
                                                 self.inputs, {})
    for item in self.inputs:
        # TODO: add warning if item already in
        self.specific_consumptions["MAP"]["All"][item] = opex_df[
            opex_df.Product == item]["sc/opex"]
    # membership must be checked against .values: `in` on a Series checks the index
    if "Total" in opex_df.Product.values:
        self.opex = opex_df[opex_df.Product == "Total"].rename(
            columns={"sc/opex": "opex"})
        self.opex = self.opex.opex
    else:
        self.opex = pd.merge(opex_df,
                             raw_materials_df,
                             left_on=["Product", "Tenor"],
                             right_on=["Item", "Tenor"],
                             how="left")
        self.opex["opex"] = self.opex["sc/opex"] * self.opex["price"]
        self.opex = self.opex.reset_index().set_index("Tenor")
        self.opex = self.opex.groupby("Tenor").sum()["opex"]
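Utils.multidict is project-internal and not shown here. A plausible stand-in, assuming it builds a nested dict from the given key lists (outermost first) with a fresh copy of the default leaf at the bottom, consistent with how it is indexed in the examples:

import copy

def multidict(*args):
    # Last argument is the default leaf; the rest are key lists, outermost first.
    *key_lists, default = args
    if not key_lists:
        return copy.deepcopy(default)
    keys, rest = key_lists[0], key_lists[1:]
    return {k: multidict(*rest, default) for k in keys}

d = multidict(["MAP"], ["All"], ["Sulphur", "Ammonia"], {})
d["MAP"]["All"]["Sulphur"] = 0.4  # mirrors the indexing pattern in set_opex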
Example #3
    def calculate_opex_per_ton(option,
                               spec_cons_df,
                               rm_prices,
                               outputs=None,
                               main_inputs=None):
        """Compute opex per ton for each (output, input) quality pair of the option."""
        opex = spec_cons_df[spec_cons_df.Moniker == option.Moniker]
        opex_per_input_per_output = Utils.multidict(outputs, main_inputs, {})
        for input_quality in main_inputs:
            for output in outputs:
                opex_input_output = opex[(opex.InputQuality == input_quality)
                                         & (opex.OutputQuality == output)]
                if 'Total' in opex_input_output.Item.values:
                    # A pre-aggregated 'Total' row wins over per-item pricing
                    opex_per_input_per_output[output][input_quality] = \
                        opex_input_output[opex_input_output.Item == 'Total']['sc/opex']
                else:
                    # Otherwise price each specific consumption and sum per Tenor
                    conso_with_prices = pd.merge(opex_input_output,
                                                 rm_prices,
                                                 on=['Item', 'Tenor'],
                                                 how='left')
                    conso_with_prices['opex'] = conso_with_prices[
                        'sc/opex'] * conso_with_prices['price']
                    opex_per_input_per_output[output][input_quality] = \
                        conso_with_prices.groupby('Tenor')['opex'].sum()

        return opex_per_input_per_output
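A minimal sketch of the merge-and-aggregate branch with hypothetical items and prices, assuming only pandas:

import pandas as pd

spec = pd.DataFrame({"Item": ["Sulphur", "Ammonia", "Sulphur", "Ammonia"],
                     "Tenor": [2020, 2020, 2021, 2021],
                     "sc/opex": [0.4, 0.2, 0.4, 0.2]})
prices = pd.DataFrame({"Item": ["Sulphur", "Ammonia", "Sulphur", "Ammonia"],
                       "Tenor": [2020, 2020, 2021, 2021],
                       "price": [100.0, 300.0, 110.0, 310.0]})
merged = pd.merge(spec, prices, on=["Item", "Tenor"], how="left")
merged["opex"] = merged["sc/opex"] * merged["price"]
print(merged.groupby("Tenor")["opex"].sum())
# Tenor 2020: 0.4*100 + 0.2*300 = 100.0; 2021: 0.4*110 + 0.2*310 = 106.0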
Example #4
def get_raw_material_sensitivity(scenario_id):
    # raw_materials_df and scenarios_df are module-level data loaded elsewhere;
    # raw_materials_df iterates as records (dicts), not as a DataFrame
    shocks = {}
    for raw_material in raw_materials_df:
        item = raw_material["Item"]
        shocks[item] = 1
    risk_engine = RiskEngine()
    scenario = scenarios_df[scenarios_df["Scenario"] == scenario_id]
    scenarios_dic = Utils.get_scenario_from_df(scenario)
    deltas = risk_engine.compute_delta(scenarios_dic[scenario_id], shocks)
    return deltas
Example #5
def get_yields(option, spec_prod, outputs, inputs):
    """Return the yield series for each (output, input) quality pair of the option."""
    yields = spec_prod[spec_prod.Moniker == option.Moniker]
    result = Utils.multidict(outputs, inputs, {})
    for product in outputs:
        for input_quality in inputs:
            match = yields[(yields.InputQuality == input_quality)
                           & (yields.OutputQuality == product)]
            result[product][input_quality] = match['yield']
    return result
Example #6
    def __init__(self, name, info):
        """
        Granulation entity class
        :param name: entity name
        :param info: Dataframe
        """
        super().__init__(name=name,
                         layer=env.PipelineLayer.GRANULATION,
                         id=info.Moniker)
        capacity = info.filter(regex="Capacity.*")
        unit = Utils.extract_unit(capacity.keys()[0])
        starting_date = int(info["StartingDate"]) if Utils.is_numeric(
            info["StartingDate"]) else -float("Inf")
        closing_date = int(info["ClosingDate"]) if Utils.is_numeric(
            info["ClosingDate"]) else float("Inf")
        self.capacity = Capacity(capacity.iloc[0],
                                 unit,
                                 start=starting_date,
                                 end=closing_date)
        self.main_input = None  # TODO: should come from canvas dictionary

        self.action = info["Action"]
        self.location = info["Location"]
Example #7
    def get_wp_specific_consumptions(mine, beneficiation, raw_rock_consumption):
        """Calculate specific consumptions related to the considered mine-p axis instead of per rock quality."""
        result = Utils.multidict(beneficiation.outputs, beneficiation.inputs, {})
        for product in beneficiation.outputs:
            for item in beneficiation.inputs:
                # Weight each rock quality's specific consumption by its yield
                s = 0.
                for quality in beneficiation.specific_consumptions[product].keys():
                    s = s + (beneficiation.yields[product][quality] * beneficiation.specific_consumptions[product][quality][item]).fillna(0)
                result[product][item] = raw_rock_consumption[product] * s

        # Compute equivalent opex per output product with the same formula
        opex = dict()
        for product in beneficiation.outputs:
            o = 0
            for quality in beneficiation.main_inputs:
                o = o + (mine.mine_composition[quality] * beneficiation.opex[product][quality]).fillna(0)
            opex[product] = o
        return result, opex
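A minimal sketch of the yield-weighted blend with two hypothetical rock qualities over two tenors, assuming only pandas: item consumption per ton of product = raw rock consumed per ton times the yield-weighted sum of per-quality consumptions.

import pandas as pd

yield_hq, yield_lq = pd.Series([0.6, 0.6]), pd.Series([0.4, 0.4])
sc_hq, sc_lq = pd.Series([2.0, 2.2]), pd.Series([3.0, 3.1])
raw_rock = pd.Series([1.5, 1.5])
s = (yield_hq * sc_hq).fillna(0) + (yield_lq * sc_lq).fillna(0)
print(raw_rock * s)  # first tenor: 1.5 * (0.6*2.0 + 0.4*3.0) = 3.6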
Example #8
def __init__(self, *args, **kwargs):
    super(PricingTestSuite, self).__init__(*args, **kwargs)
    scenarios_df = pd.read_csv(env.APP_FOLDER +
                               "tests/data/one_scenario.csv")
    self.scenarios_dic = Utils.get_scenario_from_df(scenarios_df)
    scenario_id = 1
    # sum(list_of_lists, []) flattens the scenario's moniker lists;
    # dm is the DataManager instance from module scope
    self.simulator = Simulator(dm=dm,
                               monikers_filter=sum(
                                   self.scenarios_dic[scenario_id], []))
    scenarios = [
        self.simulator.nodes[layer] for layer in [
            PipelineLayer.PAP, PipelineLayer.SAP,
            PipelineLayer.BENEFICIATION, PipelineLayer.MINE,
            PipelineLayer.MINE_BENEFICIATION
        ] if layer in self.simulator.nodes
    ]
    self.scenario_generator = SGF.create_scenario_generator(
        ScenarioGeneratorType.SPECIFIC_SCENARIOS, self.simulator,
        [scenarios])
Example #9
def get_data(self, randomize=False):
    if not randomize:
        return [{
            "Moniker": self.moniker,
            "Layer": str(self.layer).replace("PipelineLayer.", ""),
            "Name": self.name,
            "Location": self.get_location(),
            "Capacity": self.capacity,
            "Cost PV": self.cost_pv,
            "Opex": self.total_opex,
            "Capex": self.total_capex,
            "Consumption": self.consumption,
            "Production": self.production,
        }]
    # simulated data
    return [{
        "Moniker": self.moniker,
        "Layer": str(self.layer).replace("PipelineLayer.", ""),
        "Name": self.name,
        "Location": self.get_location(),
        "Capacity": Utils.simulate_series(self.capacity, 0, 1000),
        "Cost PV": Utils.simulate_range(100000, 1000000),
        "Opex": Utils.simulate_series(self.total_opex, 100, 10000),
        "Capex": Utils.simulate_series(self.total_capex, 1000, 100000),
        "Consumption": Utils.to_dict_and_simulate(self.consumption, "volume", 2, 100, 1000),
        "Production": Utils.to_dict_and_simulate(self.production, "volume", 2, 50, 500),
    }]
Example #10
# -*- coding: utf-8 -*-

import pandas as pd
import app.config.env as env
from app.config.env_func import reset_db_name
from app.tools import Utils
from app.data.DataManager import DataManager
from app.model.Simulator import Simulator
from app.model.ScenarioGenerator import ScenarioGeneratorFactory as SGF

if __name__ == "__main__":
    reset_db_name("mine2farm")
    scenarios_df = pd.read_csv(env.APP_FOLDER + "outputs/global.csv")
    scenarios_dic = Utils.get_scenario_from_df(scenarios_df)
    dm = DataManager()
    dm.load_data()
    for scenario_id in scenarios_dic:
        scenario = scenarios_dic[scenario_id]
        # sum(scenario, []) flattens the scenario's lists of monikers
        simulator = Simulator(dm=dm, monikers_filter=sum(scenario, []))
        scenarios = [
            simulator.nodes[layer] for layer in [
                env.PipelineLayer.PAP, env.PipelineLayer.SAP,
                env.PipelineLayer.BENEFICIATION, env.PipelineLayer.MINE,
                env.PipelineLayer.MINE_BENEFICIATION
            ] if layer in simulator.nodes
        ]
        scenario_generator = SGF.create_scenario_generator(
            env.ScenarioGeneratorType.SPECIFIC_SCENARIOS, simulator,
            [scenarios])
        result, _ = simulator.simulate(scenario_generator=scenario_generator)
        print("Cost PV: %f" % result[1]["Cost PV"])
Example #11
def get_best_scenarios(quantile_step, db_name="mine2farm"):
    update_cache(db_name, -1)
    try:
        time_start = datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S")

        # insert status of best scenarios "running"
        db_history = DBAccess(env.MONITORING_DB_NAME)
        query_insert = {
            'time_start': time_start,
            'db_name': db_name,
            'quantile_step': quantile_step,
            'status': -1
        }
        _id = db_history.save_to_db_no_check(
            env.MONITORING_COLLECTION_HISTORY_BEST_NAME, query_insert)

        # get best representative scenarios
        quantile_step = quantile_step / 100.
        reset_db_name(db_name)
        db = DBAccess(env.DB_RESULT_NAME)
        logger.info("Deleting best collections from DB")
        db.clear_collection(env.DB_GLOBAL_BEST_RESULT_COLLECTION_NAME)
        db.clear_collection(env.DB_DETAILED_BEST_RESULT_COLLECTION_NAME)
        scenarios = db.get_records(env.DB_GLOBAL_RESULT_COLLECTION_NAME,
                                   {}).sort([("Cost PV", DESCENDING)])

        scenarios_count = scenarios.count()
        step = int(quantile_step * scenarios_count)
        # save to db
        if step == 0:
            # all scenarios are included
            logger.info("Moving all scenarios to best collections")
            db.copy_to_collection(env.DB_GLOBAL_RESULT_COLLECTION_NAME,
                                  env.DB_GLOBAL_BEST_RESULT_COLLECTION_NAME)
            db.copy_to_collection(env.DB_DETAILED_RESULT_COLLECTION_NAME,
                                  env.DB_DETAILED_BEST_RESULT_COLLECTION_NAME)
            details_count = db.count(
                env.DB_DETAILED_BEST_RESULT_COLLECTION_NAME)
        else:
            # filter on specific scenarios
            representative_scenario_ids = [
                scenarios.skip(step * i)[0]["Scenario"]
                for i in range(0, int(scenarios_count / step))
            ]
            logger.info("List of selected best scenarios: %s" %
                        representative_scenario_ids)
            # simulate
            scenarios_global, scenarios_details = \
                Simulator().simulate(scenarios_filter=representative_scenario_ids, logistics_lp=env.LOGISTICS_LP)
            # save
            for scenario in scenarios_global:
                db.save_to_db_no_check(
                    env.DB_GLOBAL_BEST_RESULT_COLLECTION_NAME,
                    scenarios_global[scenario])
            for scenario in scenarios_details:
                # NodeJSONEncoder().encode returns a JSON string and json.dumps
                # wraps it again, hence the double json.loads to get a dict back
                json_data = json.dumps(NodeJSONEncoder().encode(
                    scenarios_details[scenario]))
                data = json.loads(json.loads(json_data))
                db.save_to_db_no_check(
                    env.DB_DETAILED_BEST_RESULT_COLLECTION_NAME, data)
            details_count = len(scenarios_details)

        # status update
        query_insert['global_count'] = scenarios_count
        query_insert['detailed_count'] = details_count
        filter_ = {'_id': ObjectId(_id)}
        db_history.update_record(
            collection=env.MONITORING_COLLECTION_HISTORY_BEST_NAME,
            filter_=filter_,
            data=query_insert)

        # raw materials sensitivities
        logger.info("Running sensitivity over raw materials")
        db.clear_collection(env.DB_SENSITIVITY_COLLECTION_NAME)
        raw_materials_df = Driver().get_data("raw_materials")
        shocks = {}
        for raw_material in raw_materials_df:
            item = raw_material["Item"]
            shocks[item] = 1
        scenarios_df = pd.DataFrame(Driver().get_results(
            env.DB_GLOBAL_BEST_RESULT_COLLECTION_NAME))
        scenarios_dic = Utils.get_scenario_from_df(scenarios_df)
        risk_engine = RiskEngine()

        for scenario_id in scenarios_dic:
            deltas = risk_engine.compute_delta(scenarios_dic[scenario_id],
                                               shocks,
                                               with_logistics=env.LOGISTICS_LP)

            deltas['Scenario'] = int(scenario_id)
            db.save_to_db_no_check(env.DB_SENSITIVITY_COLLECTION_NAME, deltas)

        # status update
        query_insert['time_end'] = datetime.datetime.now().strftime(
            "%d/%m/%y %H:%M:%S")
        query_insert['status'] = 0
        filter_ = {'_id': ObjectId(_id)}
        db_history.update_record(
            collection=env.MONITORING_COLLECTION_HISTORY_BEST_NAME,
            filter_=filter_,
            data=query_insert)
        update_cache(db_name, 0)

    except Exception as e:
        logger.error("Best scenarios failed: %s" % e)
        update_cache(db_name, 0)
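A minimal sketch of the quantile-based selection above, using a plain sorted list in place of the Mongo cursor (hypothetical values): one representative scenario is kept per quantile bucket.

scenarios = list(range(100))      # stand-in for 100 scenarios sorted by Cost PV
quantile_step = 0.10              # keep one representative per decile
step = int(quantile_step * len(scenarios))
representatives = [scenarios[step * i] for i in range(len(scenarios) // step)]
print(representatives)            # [0, 10, 20, 30, 40, 50, 60, 70, 80, 90]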
Example #12
def get_cost_pv(self, randomize=False):
    if not randomize:
        return self.cost_pv if self.cost_pv is not None else 0
    return Utils.simulate_range(100000, 1000000)