Example #1
    def add_node(self, node):
        """
        Add node to layer
        :param node: Node
        :return: None
        """
        if node not in self.nodes:
            self.nodes.append(node)
        else:
            logger.warning("Node %s already in layer %s" %
                           (node.moniker(), str(self.type)))
Example #2
    def simulate(self, ch, method, properties, body):
        """
        Callback function called for each task
        :param ch: pika channel the message was delivered on
        :param method: delivery metadata (provides delivery_tag for the ack)
        :param properties: message properties
        :param body: JSON-encoded task payload
        :return: None
        """
        logger.info(" [*] Running simulation %r" % body)

        data = json.loads(body)

        cycle = data["cycle"]
        phase = data["phase"]
        time_start = datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S")
        if "db_name" in data:
            reset_db_name(data['db_name'])
        if "logistics_lp" in data:
            env.LOGISTICS_LP = data["logistics_lp"]
        detailed_publisher = ResultSaver(
            env.RABBITMQ_DETAILED_RESULT_QUEUE_NAME, env.RESULT_BATCHES_SIZE)
        global_publisher = ResultSaver(env.RABBITMQ_GLOBAL_RESULT_QUEUE_NAME,
                                       env.RESULT_BATCHES_SIZE)

        try:
            s = Simulator()
            s.simulate(cycle,
                       phase, {
                           "details": detailed_publisher,
                           "global": global_publisher
                       },
                       monitor=True,
                       logistics_lp=env.LOGISTICS_LP)

            detailed_publisher.close()
            global_publisher.close()
            logger.info(" [x] Done")
        except Exception as e:
            task_to_save = {
                'db_name': env.DB_NAME,
                'time_start': time_start,
                'total_scenario': 0,
            }
            message = "Worker failed: %s" % str(e)
            logger.warning(message)
            insert_history(phase=phase,
                           task_to_save=task_to_save,
                           status=env.HTML_STATUS.ERROR.value,
                           message=message)
            global_publisher.close()
            detailed_publisher.close()
            logger.info(" [x] Done with error")
        ch.basic_ack(delivery_tag=method.delivery_tag)
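
Note: the (ch, method, properties, body) signature and the final basic_ack call match pika's consumer-callback interface. A minimal sketch of how such a callback is typically registered, assuming pika 1.x plus a hypothetical queue name and worker object:

    import pika

    # hypothetical wiring; the queue name and `worker` are assumptions
    connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
    channel = connection.channel()
    channel.queue_declare(queue="simulation_tasks", durable=True)
    channel.basic_qos(prefetch_count=1)  # hand each worker one unacked task at a time
    channel.basic_consume(queue="simulation_tasks", on_message_callback=worker.simulate)
    channel.start_consuming()

Acknowledging only at the end of the callback, as the example does, means a task is redelivered if a worker dies mid-simulation.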
Example #3
    def shuffle(self):
        if self.shuffle_level == env.ShuffleLevel.SHUFFLE_WITHOUT_PERM:
            return self.shuffling_without_perms_with_all_elements()
        elif self.shuffle_level == env.ShuffleLevel.SHUFFLE_WITH_PERMUTATIONS:
            return self.shuffle_with_permutations(
                self.shuffle_without_permutations())
        elif self.shuffle_level == env.ShuffleLevel.SHUFFLE_WITH_PERMUTATIONS_WITH_FILTERS:
            return self.shuffle_with_permutations_with_filters()
        elif self.shuffle_level == env.ShuffleLevel.SHUFFLE_WITH_UNNAMED:
            return self.shuffle_unnamed_layer(self.nodes, self.raw_data,
                                              self.global_sp)
        else:
            logger.warning("Undefined shuffling method")
Example #4
    def get_node(self, layer, id):
        """
        Get a node from the cached nodes
        :param layer: enumeration (MINE, ..)
        :param id: entity id (moniker)
        :return: Node or None
        """
        layers = [layer] if layer is not None else list(env.PipelineLayer)
        for layer in layers:
            if layer not in self.nodes:
                continue
            for node in self.nodes[layer]:
                if node.moniker() == id:
                    return node
        logger.warning("Node %s not found" % id)
        return None
Example #5
    def simulate(self,
                 cycle=1,
                 phase=0,
                 publishers=None,
                 scenario_generator=None,
                 monitor=False,
                 counter_limit=None,
                 logistics_lp=False,
                 scenarios_filter=None):
        """
        Simulate scenarios and compute CostPV of all possible scenarios
        :param cycle: int
        :param phase: int
        :param publishers: list
        :param scenario_generator: ScenarioGenerator object
        :param monitor: boolean
        :param counter_limit: int
        :param logistics_lp: boolean
        :return: couple(list,list)
        """

        # create scenario generator
        if scenario_generator is None:
            scenario_generator = SGF.create_scenario_generator(
                env.SCENARIO_GEN_TYPE, self)

        # get driver from sales plan
        sales_plan = self.data_manager.sales_plan

        # monitoring
        counter = 0
        save_counter = 0
        scenarios_details = {}
        scenarios_global = {}
        check_work_infos = {}
        scenarios = scenario_generator.generate()
        scenarios_len = scenario_generator.len()

        # Launch granulation PL
        granulation_solver = GranulationSolver(self.nodes, self.sales_plan)
        total_scenarios = len(granulation_solver.couples) * scenarios_len

        check_work_infos[phase] = {
            "total_scenario": str(total_scenarios),
            "maxWorker": env.RABBITMQ_MAX_WORKER,
            'db_name': env.DB_NAME,
            'time_start': datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S")
        }

        for tup in granulation_solver.couples:
            if counter_limit is not None and counter > counter_limit:
                break

            granulation_solved = granulation_solver.launch_granulation_solver(
                tup)
            if granulation_solved.status != 1:
                counter += scenarios_len
                if monitor and counter >= total_scenarios - phase:
                    progress = round(counter * 100 / total_scenarios, 2)
                    check_work_infos[phase]["progress"] = str(progress)
                    check_work_infos[phase]["counter"] = str(counter)
                    update_cache("workers_info_%i" % phase, check_work_infos)
                continue

            granulation_solver.write_optimization_results(granulation_solved)
            recalculated_sales_plan = tup[1]
            domestic_granulation_nodes = list(
                filter(lambda x: (x.entity.productionSite == 'Morocco'),
                       tup[0]))

            # Abroad nodes produce NPK at full capacity
            abroad_nodes = list(
                filter(lambda x: (x.entity.productionSite != 'Morocco'),
                       tup[0]))
            for node in abroad_nodes:
                node.entity.production["NPK"]["volume"] = node.entity.capacity

            tup_acid_needs_per_year = sum(
                node.entity.production[product]["volume"] *
                node.entity.specific_consumptions[product]["ACP 29"]["ACP 29"]
                for node in domestic_granulation_nodes
                for product in node.entity.production.keys())
            tup_rock_needs_per_year = sum(
                node.entity.production["TSP"]["volume"] *
                node.entity.specific_consumptions["TSP"]["ACP 29"]["Chimie"]
                for node in domestic_granulation_nodes
                if "TSP" in node.entity.production.keys())

            # Running calculation of metrics for granulation layer
            granulation_npv = 0
            granulation_scenario_results = []
            for granulation in tup[0]:
                granulation.entity.compute_metrics()
                granulation_npv += granulation.entity.get_cost_pv(
                    env.RANDOMIZE_RESULTS)
                for result in granulation.entity.get_data(
                        env.RANDOMIZE_RESULTS):
                    granulation_scenario_results.append(result)

            counter_step = 0
            scenario_counter = 0
            for scenario in tqdm(scenarios, total=scenarios_len):

                counter += 1
                if scenarios_filter is not None:
                    if counter in scenarios_filter:
                        scenarios_filter.remove(counter)
                    else:
                        continue
                scenario_counter += 1
                if counter_limit is not None and scenario_counter > counter_limit:
                    break

                if monitor:
                    if counter_step == env.MONITORING_STEP or counter >= total_scenarios - phase:
                        progress = round(counter * 100 / total_scenarios, 2)
                        check_work_infos[phase]["progress"] = str(progress)
                        check_work_infos[phase]["counter"] = str(counter)
                        update_cache("workers_info_%i" % phase,
                                     check_work_infos)
                        counter_step = 0
                    counter_step += 1

                # Work sharding: only simulate scenarios assigned to this worker's phase
                if scenario_counter % cycle != phase:
                    continue

                for product in recalculated_sales_plan.Product.unique():
                    product_needs = recalculated_sales_plan[
                        recalculated_sales_plan.Product == product]
                    if product_needs.Type.unique() == 'Fertilizer':
                        pass  # fertilizer demand is not flowed upstream here
                    elif product == 'ACP 29':
                        driver = sales_plan[sales_plan.Product == product][
                            "volume"] + tup_acid_needs_per_year
                        self.flow_upstream(product, driver, scenario)
                    else:
                        driver = sales_plan[sales_plan.Product ==
                                            product]["volume"]
                        self.flow_upstream(product, driver, scenario)
                # Flow upstream TSP needs separately
                self.flow_upstream("Chimie", tup_rock_needs_per_year, scenario)

                # Rebalance production in threads, and compute balances, opex
                Simulator.rebalance_thread_production(scenario)

                # Calculation for non-granulation entities that have non-zero production
                has_produced = list(
                    filter(
                        lambda x: x.layer != env.PipelineLayer.GRANULATION and
                        any(
                            any(x.production[product]["volume"] > 0)
                            for product in x.production.keys()),
                        Entity.ENTITIES.values()))

                scenario_results = []
                for gsr in granulation_scenario_results:
                    gsr_ = gsr.copy()
                    gsr_["Scenario"] = counter
                    scenario_results.append(gsr_)
                scenario_cost_pv = granulation_npv

                for entity in has_produced:
                    entity.compute_metrics()
                    scenario_cost_pv += entity.get_cost_pv(
                        env.RANDOMIZE_RESULTS)
                    for result in entity.get_data(env.RANDOMIZE_RESULTS):
                        result["Scenario"] = counter
                        scenario_results.append(result)

                logistic_model_status = -1
                if logistics_lp:
                    logistics_solver = LogisticsSolver(self.nodes, scenario,
                                                       sales_plan)
                    _, logistics_entities, logistic_model_status = \
                        logistics_solver.launch_logistics_solver()
                    if logistic_model_status != 1:
                        logger.warning(
                            "Logistics solver failed for scenario %d" %
                            counter)
                    else:
                        has_produced.extend(logistics_entities)
                    for entity in logistics_entities:
                        entity.compute_metrics()
                        scenario_cost_pv += entity.get_cost_pv(
                            env.RANDOMIZE_RESULTS)
                        for result in entity.get_data(env.RANDOMIZE_RESULTS):
                            result["Scenario"] = counter
                            scenario_results.append(result)

                # must reset before moving on
                for entity in has_produced:
                    entity.reset()

                if logistics_lp and logistic_model_status != 1:
                    continue

                if publishers is None:
                    scenarios_details[counter] = scenario_results
                    scenarios_global[counter] = {
                        "Scenario": counter,
                        "Cost PV": scenario_cost_pv,
                        "Unit": "$",  # TODO: check unit
                        "Moniker": json.dumps(
                            NodeJSONEncoder().encode([tup[0]] + scenario))
                    }
                else:
                    publishers["details"].save(scenario_results, counter)
                    publishers["global"].save(
                        {
                            "Scenario": counter,
                            "Cost PV": scenario_cost_pv,
                            "Unit": "$",  #TODO: check unit
                            "Moniker": [tup[0]] + scenario,
                        },
                        counter)
                save_counter += 1

            # Reset granulation entities
            for granulation in tup[0]:
                granulation.entity.reset()

        if monitor:
            if save_counter > 0:
                message = "Phase %s done successfully" % phase
                insert_history(phase=phase,
                               task_to_save=check_work_infos[phase],
                               status=env.HTML_STATUS.OK.value,
                               message=message)
            else:
                message = "All scenarios have failed for phase %s" % phase
                insert_history(phase=phase,
                               task_to_save=check_work_infos[phase],
                               status=env.HTML_STATUS.ERROR.value,
                               message=message)

        return scenarios_global, scenarios_details
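
The cycle/phase pair shards the scenario space: a worker simulates only the scenarios whose counter equals its phase modulo cycle. A hypothetical sketch of splitting one run four ways (in the real setup each call would live in its own RabbitMQ worker process, as in Example #2):

    simulator = Simulator()
    for phase in range(4):
        # each worker iterates all scenarios but simulates only its own share
        scenarios_global, scenarios_details = simulator.simulate(cycle=4,
                                                                 phase=phase)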
Example #6
    def close(self):
        self.connection.close()
        logger.warning(" [x] RabbitMQ Broker: connection closed")
Example #7
    def __init__(self,
                 option,
                 spec_cons_opex,
                 capex,
                 spec_prod,
                 product_type,
                 layer=env.PipelineLayer.UNDEFINED,
                 unnamed_in_layer=False):
        """
        :param option: option series for considered option
        :param layer: layer
        """
        moniker = option.Moniker
        if moniker in self.ENTITIES:
            logger.warning("Entity {0} already exists".format(moniker))
        Entity.ENTITIES[moniker] = self

        # Attributes available in option
        self.location = option.Location if 'Location' in option else None
        self.status = option.Status
        if self.status == 'New' and unnamed_in_layer:
            # If the layer contains unnamed units (PAP, GRA and SAP so far),
            # all new entities are considered unnamed. The attribute
            # "self.signature" will be initialized with an integer: every
            # entity with a non-null max_number attribute gets a unique
            # signature within its layer. These signatures are used to
            # generate the shuffles in a unique way. Example:
            #     - Entity1, max_number = 2, signature = 0
            #     - Entity2, max_number = 3, signature = 1
            #     a scenario would contain a permutation of [0, 0, 1, 1, 1]
            # The inverse mapping, from a given permutation back to its
            # corresponding entities, is based on a counter
            # (cf. static method Layer.mirror_counter).
            self.max_number = option.Name
            self.signature = None
            self.name = None
        else:
            self.max_number = None
            self.signature = None
            self.name = option.Name

        self.base_entity = None

        self.nominal_capacity = float(option.Capacity)
        self.layer = layer
        self.closingDate = option['ClosingDate']
        self.startingDate = option['StartingDate']

        self.moniker = option.Moniker

        # Attributes describing option, with information available in other sheets
        self.timeline = sorted(spec_cons_opex.index.unique().tolist())
        self.capacity = Entity.get_capacities(option, self.nominal_capacity,
                                              self.timeline)
        self.capex = Entity.get_capex(capex, self.moniker)
        self.inputs = Entity.get_consumed(option, spec_cons_opex)
        self.outputs = []
        self.main_input = None
        self.specific_consumptions = None
        self.opex = None
        # Attribute containing type of different outputs, as instructed in dictionary
        # (By-product, Waste, etc)
        if product_type is not None and spec_prod is not None:
            self.secondary_products = \
                [key for key in product_type.keys() if product_type[key] in ['Waste', 'Co-product', 'By-product']]
            self.secondary_products_spec_prod = self.get_specific_productions(
                spec_prod)

        else:
            self.secondary_products = None
            self.secondary_products_spec_prod = None

        # Constants used for reinitialization from one scenario to another
        def zero_volumes(products):
            # zero volume series (indexed by the timeline) per product
            return {
                k: {
                    "volume": pd.Series([0] * len(self.timeline),
                                        index=self.timeline),
                    "unit": ""
                }
                for k in products
            }

        self.zero_consumption = zero_volumes(self.inputs)
        self.zero_production = zero_volumes(self.outputs)
        self.zero_capacities = self.capacity
        self.zero_total_opex = pd.Series([0] * len(self.timeline),
                                         index=self.timeline)

        # Attributes to reset from one scenario to another (not exhaustive)
        self.total_capex = self.capex.copy()
        self.total_opex = self.zero_total_opex
        self.consumption = zero_volumes(self.inputs)
        self.production = zero_volumes(self.outputs)
        self.cost_pv = None
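
A minimal illustration (hypothetical, plain itertools) of the signature scheme described in the comment above: the distinct permutations of the signature multiset enumerate the candidate orderings of unnamed units.

    import itertools

    # Entity1: max_number = 2, signature = 0; Entity2: max_number = 3, signature = 1
    signatures = [0] * 2 + [1] * 3
    # set() collapses duplicate orderings caused by repeated signatures
    unique_perms = sorted(set(itertools.permutations(signatures)))
    print(len(unique_perms))  # 5! / (2! * 3!) = 10 distinct scenarios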
Example #8
    def shuffle(self):
        mine_locations = list(
            set(node.location() for node in self.up_layer.nodes))
        wp_locations = list(
            set(node.location() for node in self.down_layer.nodes))
        d = multidict(mine_locations, wp_locations, {})
        for thread in self.node_dico.values():
            mine_loc = thread.entity.mine.location
            wp_loc = thread.entity.beneficiation.location
            d[mine_loc][wp_loc].setdefault(thread.entity.mine.name,
                                           []).append(thread)

        dict_with_name_combinations = multidict(mine_locations, wp_locations,
                                                {})
        for mine in mine_locations:
            for wp in wp_locations:
                name_groups = []
                for name in d[mine][wp].keys():
                    if d[mine][wp][name]:
                        name_groups.append(d[mine][wp][name])
                if name_groups:
                    dict_with_name_combinations[mine][wp] = [
                        list(tup) for tup in itertools.product(*name_groups)
                    ]
                    # Keeping only scenarios for which mines belonging to a given location are connected to same WP
                    dict_with_name_combinations[mine][wp] = list(
                        filter(
                            lambda x: len(
                                list(
                                    set(thread.down_node.entity
                                        for thread in x))) == 1,
                            dict_with_name_combinations[mine][wp]))
                else:
                    dict_with_name_combinations[mine].pop(wp)

        # Construct dictionary of possible threads by mine
        dict_by_mine = {}
        for key in dict_with_name_combinations.keys():
            if len(dict_with_name_combinations[key].values()) > 0:
                dict_by_mine[key] = reduce(
                    lambda x, y: x + y,
                    dict_with_name_combinations[key].values())
            else:
                logger.warning("No values available for mine %s" % key)

        # Construct mine-benef sub scenarios, taking into account priority mines
        priority_mines = [
            dict_by_mine[key] for key in dict_by_mine.keys()
            if key in self.priority_mines
        ]
        priority_combs = reduce(ComboLayer.product_and_reduce, priority_mines)

        non_priority_mines = [
            dict_by_mine[key] for key in dict_by_mine.keys()
            if key not in self.priority_mines
        ]
        non_priority_combs = []
        if len(non_priority_mines) > 0:
            non_priority_combs = reduce(ComboLayer.product_and_reduce,
                                        non_priority_mines)

        return ComboLayer.product_and_reduce(priority_combs,
                                             non_priority_combs)
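
The multidict helper itself is not shown in this example. A sketch of the behavior the code above relies on, assuming each leaf must be an independent copy of the default (so per-pair mutation and pop work as used here):

    import copy

    def multidict(keys1, keys2, default):
        # result[k1][k2] is an independent deep copy of `default`
        return {k1: {k2: copy.deepcopy(default) for k2 in keys2}
                for k1 in keys1}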