Example No. 1
 def get_granulation_processes_data(raw_data):
     """
     :param raw_data: self.raw_data
     :return: dict(process: [products produced by process: granulation_ratio], idem for cs_acp
     """
     granulation_ratios_df = raw_data[
         env.PipelineLayer.GRANULATION]['SpecProd'].copy()
     specific_consumption = raw_data[
         env.PipelineLayer.GRANULATION]['SpecCons'].copy()
     granulation_ratios_df.drop(columns=['Moniker'], inplace=True)
     specific_consumption.drop(columns=['Moniker', 'Location', 'Capacity'],
                               inplace=True)
     granulation_ratios = multidict(
         list(granulation_ratios_df.Process.unique()), {}, {})
     specific_consumptions = multidict(
         list(specific_consumption.Process.unique()), {}, {})
     for process in granulation_ratios.keys():
         sub_gr_by_process = granulation_ratios_df[
             granulation_ratios_df.Process == process]
         sub_sc_by_process = specific_consumption[
             (specific_consumption.Process == process)
             & (specific_consumption.Item == 'ACP 29')]
         products = sub_sc_by_process.Product.unique()
         for product in products:
             # TODO: ATTENTION: granulation ratios must be unique over the
             #  years, per product per process; this must be ensured
             # TODO: eventually remove this input from the prod spec sheet
             # .item() enforces that the deduplicated value is unique
             granulation_ratios[process][product] = sub_gr_by_process[
                 sub_gr_by_process.Product == product].drop_duplicates()[
                     'ratio'].item()
             specific_consumptions[process][product] = sub_sc_by_process[
                 sub_sc_by_process.Product == product].drop_duplicates()[
                     'sc/opex'].item()
     return granulation_ratios, specific_consumptions
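
All of these examples lean on a multidict helper that is not shown. A minimal sketch consistent with the call sites — the semantics are inferred from usage, not taken from the source — treats every argument except the last as the keys of one nesting level and the last as a default leaf value:

    from copy import deepcopy

    def multidict(*args):
        # Every argument but the last is an iterable of keys for one nesting
        # level; the last is the default leaf, deep-copied so leaves never
        # share mutable state (call sites .append() to them independently).
        keys, leaf = args[:-1], args[-1]
        if not keys:
            return deepcopy(leaf)
        return {k: multidict(*keys[1:], leaf) for k in keys[0]}

For instance, multidict(['loc'], ['p1', 'p2'], []) yields {'loc': {'p1': [], 'p2': []}}, while an empty key iterable at a level (as in multidict(signatures, [], []) from Example No. 7) leaves that level as an empty dict ready for ad-hoc assignment.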
Example No. 2
 def get_specific_consumptions(self,
                               spec_cons_df,
                               option,
                               outputs,
                               main_inputs,
                               items,
                               inputs_type='uniform'):
     """ assume:
     :param outputs: to be self.outputs, list,
     :param main_inputs: to be self.main_input or self.main_inputs,
     depending on whether uniform or spec mode, as list if specific mode, str if uniform
     :param items: to be self.inputs, as list,
     :param inputs_type:
     expresses the fact that sc are specific to an input type, in which case the resulting dictionary is a three-level dict.
     :return: d={output:{input:{item:array for every item} for every input} for every output}
     """
     if inputs_type == 'uniform':
         d = multidict(outputs, [main_inputs], items, {})
         for output in outputs:
             for item in items:
                 d[output][main_inputs][
                     item] = self.get_specific_consumptions_in_out_item(
                         spec_cons_df, option, output, main_inputs, item)
     else:
         d = multidict(outputs, main_inputs, items, {})
         for output in outputs:
             for input_ in main_inputs:  # input_ avoids shadowing the builtin
                 for item in items:
                     d[output][input_][
                         item] = self.get_specific_consumptions_in_out_item(
                             spec_cons_df, option, output, input_, item)
     return d
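
A hypothetical call in uniform mode (attribute values and column names here are illustrative, not from the source) would look like:

    # Uniform mode: one main input shared by every output
    sc = unit.get_specific_consumptions(spec_cons_df, option,
                                        outputs=['DAP'],
                                        main_inputs='ACP 29',
                                        items=['Ammonia', 'Sulphur'])
    sc['DAP']['ACP 29']['Ammonia']  # array of specific consumptions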
Example No. 3
    def shuffle_unnamed_entities_within_unnamed_layer(layer):
        """This shuffle function generates shuffles of unnamed entities only within layer."""
        unnamed_nodes = [
            node for node in layer if node.entity.status == 'New'
        ]  # Assumed that all new are unnamed
        entities_signatures_link = Layer.reverse_signature(unnamed_nodes)
        locations = set(node.entity.get_location() for node in unnamed_nodes)
        processes = set(node.entity.process for node in unnamed_nodes)

        bijection_baskets = multidict(
            locations, processes, []
        )  # Dictionary containing list of signatures per location per process
        bijection_permutations = multidict(
            locations, processes, []
        )  # Dictionary containing list of permutations of previous signatures per location per process
        bijection_permutations_mirror = multidict(
            locations, processes, []
        )  # Dictionary containing list of mirrors of previous dict lists per location per process
        perms_mirrors_zipped = multidict(
            locations, processes, []
        )  # Dictionary containing zips of the two previous dicts per location per process
        entities_permutations = multidict(
            locations, processes, []
        )  # Dictionary containing list of permutations of entities per location per process

        for node in unnamed_nodes:
            bijection_baskets[node.entity.get_location()][
                node.entity.process].append(node.entity.signature)

        # Creating unique combinations using bijection_baskets
        for location in locations:
            for process in processes:
                bijection_permutations[location][process] = list(
                    Layer.permutate(bijection_baskets[location][process]))
                bijection_permutations_mirror[location][process] = [
                    Layer.mirror_counter(perm)
                    for perm in bijection_permutations[location][process]
                ]
                perms_mirrors_zipped[location][process] = [
                    zip(bijection_permutations[location][process][k],
                        bijection_permutations_mirror[location][process][k])
                    for k in range(
                        len(bijection_permutations[location][process]))
                ]
                entities_permutations[location][process] = [[
                    entities_signatures_link[t[0]][t[1]] for t in z
                ] for z in perms_mirrors_zipped[location][process]]

        return entities_permutations
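
Layer.permutate and Layer.mirror_counter are not shown here. Given how their outputs are consumed — (signature, mirror) pairs index entities_signatures_link, which Example No. 7 builds as signature -> id_number -> node — one plausible reading of mirror_counter is a running occurrence counter. This is an assumption inferred from the call sites, not the confirmed implementation:

    from collections import Counter

    def mirror_counter(signatures):
        # For each signature, emit how many times it has appeared so far:
        # ['A', 'B', 'A'] mirrors to [0, 0, 1], so (signature, count) pairs
        # can select distinct concrete entities sharing a signature.
        seen = Counter()
        mirror = []
        for s in signatures:
            mirror.append(seen[s])
            seen[s] += 1
        return mirror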
Example No. 4
    def shuffle(self):
        mine_locations = list(
            set(node.location() for node in self.up_layer.nodes))
        wp_locations = list(
            set(node.location() for node in self.down_layer.nodes))
        d = multidict(mine_locations, wp_locations, {})
        for thread in self.node_dico.values():
            if thread.entity.mine.name not in \
                    d[thread.entity.mine.location][thread.entity.beneficiation.location].keys():
                d[thread.entity.mine.location][
                    thread.entity.beneficiation.location][
                        thread.entity.mine.name] = []
            d[thread.entity.mine.location][
                thread.entity.beneficiation.location][
                    thread.entity.mine.name].append(thread)

        dict_with_name_combinations = multidict(mine_locations, wp_locations,
                                                {})
        for mine in mine_locations:
            for wp in wp_locations:
                baskets = [
                    threads for threads in d[mine][wp].values() if threads
                ]
                if baskets:
                    dict_with_name_combinations[mine][wp] = [
                        list(tup) for tup in itertools.product(*baskets)
                    ]
                    # Keep only scenarios in which all mines belonging to a
                    # given location are connected to the same WP
                    dict_with_name_combinations[mine][wp] = [
                        combo
                        for combo in dict_with_name_combinations[mine][wp]
                        if len(set(thread.down_node.entity
                                   for thread in combo)) == 1
                    ]
                else:
                    dict_with_name_combinations[mine].pop(wp)

        # Construct dictionary of possible threads by mine
        dict_by_mine = {}
        for key in dict_with_name_combinations.keys():
            if dict_with_name_combinations[key]:
                dict_by_mine[key] = reduce(
                    lambda x, y: x + y,
                    dict_with_name_combinations[key].values())
            else:
                logger.warning("No values available for mine %s" % key)

        # Construct mine-benef sub scenarios, taking into account priority mines
        priority_mines = [
            dict_by_mine[key] for key in dict_by_mine.keys()
            if key in self.priority_mines
        ]
        priority_combs = reduce(ComboLayer.product_and_reduce, priority_mines)

        non_priority_mines = [
            dict_by_mine[key] for key in dict_by_mine.keys()
            if key not in self.priority_mines
        ]
        non_priority_combs = []
        if len(non_priority_mines) > 0:
            non_priority_combs = reduce(ComboLayer.product_and_reduce,
                                        non_priority_mines)

        return ComboLayer.product_and_reduce(priority_combs,
                                             non_priority_combs)
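
ComboLayer.product_and_reduce is not shown. It is used both as a reduce operator over per-mine lists of sub-scenarios and directly on the final operands, one of which may be empty. A plausible reconstruction consistent with that usage — an assumption, not the source implementation — is:

    import itertools

    def product_and_reduce(combos_a, combos_b):
        # Cartesian-product two lists of sub-scenarios (each a list of
        # threads) and merge each pair into one flat scenario; an empty
        # operand (e.g. no non-priority mines) passes the other through.
        if not combos_a:
            return combos_b
        if not combos_b:
            return combos_a
        return [a + b for a, b in itertools.product(combos_a, combos_b)]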
Example No. 5
    def simple_allocator(permutations_dictionary,
                         needs_list,
                         driver_name='ACP 29',
                         max_permutations_to_keep=1):
        """
        :param permutations_dictionary: dictionary of potential permutations per location per process
        :param needs_list: list of pd.Series computed, representing the volume needed, for every set of choices
        (ex. granulation process for pap layer)
        :param max_permutations_to_keep: max number of permutations to keep per location per process
        :return: dictionary of best npv wise permutations per location per process
        """
        locations = list(permutations_dictionary.keys())
        output = multidict(locations, {}, [])

        # Compute the optimal NPV for every set of acp_needs
        for driver_ in needs_list:
            for location in locations:
                for process in permutations_dictionary[location].keys():
                    if process not in output[location].keys():
                        output[location][process] = {}
                    dict_key_permutation = {}  # maps key -> permutation
                    # (key, npv) records are collected in a list and framed
                    # once below; DataFrame.append was removed in pandas 2.0
                    npv_rows = []
                    key = 0
                    for permutation in permutations_dictionary[location][
                            process]:
                        npv = 0
                        key += 1
                        dict_key_permutation[key] = permutation
                        driver = driver_.copy()
                        for node in permutation:
                            produced = np.maximum(
                                0, np.minimum(driver, node.entity.capacity))
                            driver = driver - produced

                            # update total_opex of entities
                            if np.count_nonzero(produced) != 0:
                                total_opex = produced * node.entity.opex[
                                    driver_name]
                                prod_start = total_opex[
                                    total_opex > 0].index.min()
                                total_capex = node.entity.total_capex.copy()
                                total_capex.index = total_capex.index + prod_start
                                total_capex = total_capex.loc[[
                                    x in node.entity.timeline
                                    for x in total_capex.index
                                ]]
                                total_yearly_expenses = total_opex.add(
                                    total_capex, fill_value=0)
                                # np.npv was removed in NumPy 1.20; on modern
                                # NumPy use numpy_financial.npv instead
                                npv += np.npv(env.WACC, total_yearly_expenses)
                            if np.count_nonzero(driver) == 0: break
                        npv_rows.append({'key': key, 'npv': npv})
                    df_key_npv = pd.DataFrame(npv_rows, columns=['key', 'npv'])
                    permutations_kept = df_key_npv.sort_values(
                        by=['npv'],
                        ascending=False).head(max_permutations_to_keep)
                    for key in permutations_kept['key']:
                        output[location][process][key] = dict_key_permutation[
                            key]

        return [
            output[location][process][key] for location in output.keys()
            for process in output[location].keys()
            for key in output[location][process].keys()
        ]
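
Since np.npv is gone from NumPy 1.20+, the drop-in replacement lives in the numpy-financial package; a minimal illustration with made-up cash flows:

    import numpy_financial as npf

    # npf.npv discounts a cash-flow series at a fixed rate; the values are
    # illustrative, and env.WACC would supply the rate in the code above
    print(npf.npv(0.08, [-1000, 300, 300, 300, 300]))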
Example No. 6
    def shuffle_unnamed_layer(layer, raw_data, sp=None):
        """ Handles existing units for chemical units.
        Adds them to shuffle of unnamed
        Handles also closing date by generating different scenarios for each date"""
        permutations_of_unnamed = Layer.shuffle_unnamed_entities_within_unnamed_layer(
            layer)
        """ Section used for unification of new units permutation per basket and therefore reducing scenarios number
        Can be parametrized 
         assumptions: 
            - existing units are saturated, we allocate only v = needs - existing capacity
            -  optimal permutation doesn't depend on volume needed. This assumption will be checked by  computing needs 
            and optimal permutation for every choice of granulation process
            - Specialized units abroad are assumed to be saturated 
            - All new acp is supposed to be ACS-self-sufficient """
        # TODO: once the granulation PL is implemented, we could either:
        #   - replicate the exact same PL for the PAP and SAP layers, especially if execution time is short
        #   - keep the current section but remove production of existing units from the needs
        granulation_gr, acid_sc = Layer.get_granulation_processes_data(
            raw_data)
        granulation_acp_needs = Layer.calculate_acp_needs_for_ferts(
            sp, acid_sc)
        acp_drivers_list = Layer.calculate_total_acp_needs(
            sp, granulation_acp_needs)
        if layer[0].entity.layer == env.PipelineLayer.PAP:
            optimal_permutations = Layer.simple_allocator(
                permutations_of_unnamed, acp_drivers_list)

        elif layer[0].entity.layer == env.PipelineLayer.SAP:
            acs_needs = Layer.calculate_total_acs_needs(
                acp_drivers_list, raw_data)
            optimal_permutations = Layer.simple_allocator(
                permutations_of_unnamed, acs_needs, driver_name='ACS')

        else:
            # TODO: fix granulation; fail loudly rather than leaving
            # optimal_permutations unbound for the comprehension below
            raise NotImplementedError(
                'shuffle_unnamed_layer only handles PAP and SAP layers')

        existing_nodes = [
            node for node in layer if node.entity.status == 'Existing'
        ]
        names = list(set([node.entity.name for node in existing_nodes]))
        baskets_of_existing_by_name = multidict(names, [])
        for node in existing_nodes:
            baskets_of_existing_by_name[node.entity.name].append(node)

        unique_existing = [
            node for node in existing_nodes
            if len(baskets_of_existing_by_name[node.name()]) == 1
        ]
        existing_with_different_possibilities = list(
            set(node.name() for node in existing_nodes
                if len(baskets_of_existing_by_name[node.name()]) != 1))
        shuffles_of_existing_with_different_possibilities = list(
            itertools.product(*[
                baskets_of_existing_by_name[name]
                for name in existing_with_different_possibilities
            ]))
        scenarios_with_scenarized_existing = [
            list(e) + n
            for e in shuffles_of_existing_with_different_possibilities
            for n in optimal_permutations
        ]

        return [
            unique_existing + scenario
            for scenario in scenarios_with_scenarized_existing
        ]
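
To see the shape of the result on hypothetical data: with unique_existing = [u1], two closing-date variants of one existing unit giving shuffles_of_existing_with_different_possibilities = [(e1,), (e2,)], and optimal_permutations = [[n1, n2]], the function returns [[u1, e1, n1, n2], [u1, e2, n1, n2]] — one scenario per existing-unit variant, each carrying the same optimal new-unit permutation.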
Example No. 7
 def reverse_signature(layer_):
     """Maps each signature to the nodes carrying it, keyed by id_number."""
     signatures = list(set(node.entity.signature for node in layer_))
     d = multidict(signatures, [], [])
     for node in layer_:
         d[node.entity.signature][node.entity.id_number] = node
     return d
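
A quick orientation on the returned structure, using hypothetical nodes: if n0 and n1 share signature 'S' with id_numbers 0 and 1, reverse_signature yields {'S': {0: n0, 1: n1}}; Example No. 3 then resolves entities_signatures_link['S'][k] for each k in the mirror list — which assumes id_number enumerates same-signature entities from 0, an inference rather than a documented guarantee.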