Example #1
    def fragment_lcia(self,
                      fragment,
                      quantity_ref,
                      scenario=None,
                      refresh=False,
                      **kwargs):
        if scenario is None:
            scenario = '1'
        lcia_q = self.get_lcia_quantity(quantity_ref)
        endpoint = 'scenarios/%s/%s/%s/lciaresults' % (scenario, fragment,
                                                       lcia_q.external_ref)
        lcia_r = self._archive.get_endpoint(endpoint, cache=False)
        if lcia_r is None or (isinstance(lcia_r, list)
                              and all(i is None for i in lcia_r)):
            res = LciaResult(lcia_q, scenario=scenario)
            return res

        res = LciaResult(lcia_q, scenario=lcia_r.pop('scenarioID'))
        total = lcia_r.pop('total')

        for component in lcia_r['lciaScore']:
            self.add_lcia_component(res, component)

        self.check_total(res.total(), total)

        return res
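A minimal usage sketch of the method above, assuming a hypothetical Antelope v1 resource `av1` and hypothetical references `frag_ref` and `gwp_ref` (none of these names come from the example):

res = av1.fragment_lcia(frag_ref, gwp_ref, scenario='2')  # query scenario-specific fragment LCIA
print(res.total())  # cumulative score across the returned components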
Example #2
    def lcia(self, process, ref_flow, quantity_ref, **kwargs):
        """
        Antelope v1 has no concept of process reference flows, which is a significant design flaw. Each Antelope v1
        process must therefore represent an allocated single-operation process with an unambiguous reference flow.
        That is a problem to solve on the server side; for now we simply ignore the ref_flow argument.

        If the quantity ref is one natively known by the Antelope server-- i.e. a catalog ref whose origin matches
        the origin of the current archive-- then it is used directly.  Otherwise, the lcia call reduces to obtaining
        the inventory and computing LCIA locally.
        :param process:
        :param ref_flow:
        :param quantity_ref:
        :param kwargs:
        :return:
        """
        lcia_q = self.get_lcia_quantity(quantity_ref)
        endpoint = '%s/%s/lciaresults' % (process, lcia_q.external_ref)
        lcia_r = self._archive.get_endpoint(endpoint, cache=False)

        res = LciaResult(lcia_q, scenario=lcia_r.pop('scenarioID'))
        total = lcia_r.pop('total')

        if len(lcia_r['lciaScore']) > 1:
            raise AntelopeV1Error(
                'Process LCIA result contains too many components\n%s' %
                process)

        component = lcia_r['lciaScore'][0]
        cum = component['cumulativeResult']
        self.check_total(cum, total)

        if 'processes/%s' % component['processID'] != process:
            raise AntelopeV1Error('Reference mismatch: %s begat %s' %
                                  (process, component['processID']))

        self.add_lcia_component(res, component)

        self.check_total(res.total(), total)
        return res
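A hedged usage sketch; `av1`, `proc_ref`, and `gwp_ref` are hypothetical stand-ins, and per the docstring the ref_flow argument is accepted but ignored:

res = av1.lcia(proc_ref, None, gwp_ref)  # ref_flow=None is ignored by the v1 implementation
print(res.total())  # single-component score, already checked against the server-reported total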
Example #3
    def score_cache(self,
                    quantity=None,
                    ignore_uncached=False,
                    refresh=False,
                    **kwargs):
        """
        only process-terminations are cached

        :param quantity:
        :param ignore_uncached:
        :param refresh: If True, re-compute unit score even if it is already present in the cache. This fails on
        multi-instance fragments by causing the
        :param kwargs:
        :return:
        """
        if quantity is None:
            return self._score_cache

        if self.is_frag:
            if self.is_subfrag:
                if not self.descend:
                    raise SubFragmentAggregation  # to be caught- subfrag needs to be queried w/scenario
            return LciaResult(
                quantity
            )  # otherwise, subfragment terminations have no impacts

        if quantity in self._score_cache and refresh is False:
            return self._score_cache[quantity]
        else:
            try:
                res = self.compute_unit_score(quantity, **kwargs)
            except UnCachedScore:
                if ignore_uncached:
                    res = LciaResult(quantity)
                else:
                    raise
            self._score_cache[quantity] = res
            return res
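A sketch of the caching behaviour described above, using a hypothetical process termination `term` and quantity ref `q`:

cache = term.score_cache()                         # no quantity: returns the whole score cache
first = term.score_cache(q)                        # computes the unit score and caches it
again = term.score_cache(q)                        # served from self._score_cache (refresh=False)
fresh = term.score_cache(q, refresh=True)          # recomputes even if a cached value exists
safe = term.score_cache(q, ignore_uncached=True)   # falls back to an empty LciaResult if UnCachedScore is raised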
Example #4
    def __getitem__(self, item):
        """
        __getitem__ accepts a dict key, a numerical index (resolved through
        self._indices), or a string prefix of an existing key.
        :param item:
        :return:
        """
        try:
            return super(LciaResults, self).__getitem__(item)
        except KeyError:
            try:
                int(item)
                return super(LciaResults,
                             self).__getitem__(self._indices[item])
            except (ValueError, TypeError):
                try:
                    return super(LciaResults, self).__getitem__(
                        next(k for k in self.keys() if k.startswith(item)))
                except StopIteration:
                    return LciaResult(None)  # no match found: return an empty result
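The three lookup paths above, sketched with a hypothetical LciaResults instance `results` keyed by quantity UUIDs:

r1 = results[gwp_uuid]         # plain dict-key lookup
r2 = results[0]                # numeric index, resolved through self._indices
r3 = results[gwp_uuid[:8]]     # prefix match against existing keys; no match yields LciaResult(None)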
Example #5
    def apply_weighting(self, weights, quantity, **kwargs):
        """
        Create a new LciaResult object containing the weighted sum of entries in the current object.

        We want the resulting LciaResult to still be aggregatable. In order to accomplish this, we need to maintain
        all the individual _LciaScores entities in the weighting inputs, and compute their weighted scores here. Then
        we need to log the weighted scores as the *node weights* and use *unit* values of unit scores, because
        SummaryLciaResults are only allowed to be further aggregated if they have the same unit score.

        This feels a bit hacky and may turn out to be a terrible idea.  But there is a certain harmony in making the
        quantity's unit THE unit for a weighting computation. So I think it will work for now.

        :param weights: a dict mapping quantity UUIDs to numerical weights
        :param quantity: EITHER an LcQuantity OR a string to use as the name in LcQuantity.new()
        :param kwargs: passed to LciaResult
        :return:
        """
        weighted_result = LciaResult(quantity, **kwargs)

        component_list = dict()  # maps keys to entities
        component_score = defaultdict(float)  # maps keys to weighted scores
        for method, weight in weights.items():
            try:
                result = self.__getitem__(method)  # an LciaResult
            except KeyError:
                continue
            for comp in result.keys():
                if comp in component_list.keys():
                    if result[comp].entity != component_list[comp]:
                        raise DuplicateResult(
                            'Key %s matches different entities:\n%s\n%s' %
                            (comp, result[comp], component_list[comp]))
                else:
                    component_list[comp] = result[comp].entity
                component_score[comp] += (weight *
                                          result[comp].cumulative_result)

        for comp, ent in component_list.items():
            weighted_result.add_component(comp, entity=ent)
            weighted_result.add_summary(comp, ent, component_score[comp], 1.0)

        return weighted_result
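A usage sketch under the assumptions stated in the docstring; the UUID variables and weight values are hypothetical:

weights = {gwp_uuid: 1.0, acidification_uuid: 0.25}  # quantity UUIDs -> numerical weights
single_score = results.apply_weighting(weights, 'Hypothetical single score')
print(single_score.total())  # weighted component scores are stored as node weights with unit scores of 1.0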
Example #6
    def add_lcia_score(self, quantity, score, scenario=None):
        res = LciaResult(quantity, scenario=scenario)
        res.add_summary(self._parent.uuid, self._parent, 1.0, score)
        self._score_cache.add(res)
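A brief sketch of caching an externally supplied score against the parent node; `term`, `gwp_ref`, and the value are hypothetical:

term.add_lcia_score(gwp_ref, 42.0, scenario='base')  # stored as a one-entry summary with node weight 1.0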
Example #7
    def compute_unit_score(self, quantity_ref, **kwargs):
        """
        four different ways to do this.
        0- we are a subfragment-- throw exception: use subfragment traversal results contained in the FragmentFlow
        1- parent is bg: ask catalog to give us bg_lcia (process or fragment)
        2- get fg lcia for unobserved exchanges

        If
        :param quantity_ref:
        :return:
        """
        if self.is_frag:
            '''
            if self.is_subfrag:
                if not self.descend:
                    raise SubFragmentAggregation  # to be caught

            #

            # either is_fg (no impact) or is_bg or term_is_bg (both equiv)

            elif self.is_bg:
                # need bg_lcia method for FragmentRefs
                # this is probably not currently supported
                # return self.term_node.bg_lcia(lcia_qty=quantity_ref, ref_flow=self.term_flow.external_ref, **kwargs)
                # instead- just do fragment_lcia
                print('Warning: ignoring spurious background setting for subfrag:\n%s\n%s' % (self._parent, self.term_node))
                return LciaResult(quantity_ref)

            else:
                assert self.is_fg

                # in the current pre-ContextRefactor world, this is how we are handling
                # cached-LCIA-score nodes
                # in the post-Context-Refactor world, foreground frags have no impact
                #raise UnCachedScore('fragment: %s\nquantity: %s' % (self._parent, quantity_ref))
                return LciaResult(quantity_ref)
            '''
            return LciaResult(quantity_ref)

        try:
            if self.is_context:
                locale = self._parent['SpatialScope']
            else:
                locale = self.term_node['SpatialScope']
        except KeyError:
            locale = 'GLO'
        try:
            res = quantity_ref.do_lcia(self._unobserved_exchanges(),
                                       locale=locale,
                                       **kwargs)
        except PrivateArchive:
            if self.is_bg:
                print(
                    'terminations.compute_unit_score UNTESTED for private bg archives!'
                )
                res = self.term_node.bg_lcia(
                    lcia_qty=quantity_ref,
                    ref_flow=self.term_flow.external_ref,
                    **kwargs)
            else:
                res = self.term_node.fg_lcia(
                    quantity_ref,
                    ref_flow=self.term_flow.external_ref,
                    **kwargs)
                print(
                    'terminations.compute_unit_score UNTESTED for private fg archives!'
                )

        res.scale_result(self.inbound_exchange_value)
        return res
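A hedged sketch of the normal (process-termination) path; `term` and `gwp_ref` are hypothetical, and subfragment terminations simply return an empty LciaResult as shown above:

unit = term.compute_unit_score(gwp_ref)  # LCIA of unobserved exchanges, scaled by inbound_exchange_value
print(unit.total())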