Example #1
    def test_basic(self):
        """
        Run a very basic workflow through all calculations up to the temperature score.
        """

        # Setup test provider
        company = copy.deepcopy(self.company_base)
        target = copy.deepcopy(self.target_base)
        data_provider = TestDataProvider(companies=[company], targets=[target])

        # Calculate temperature scores
        temp_score = TemperatureScore(
            time_frames=[ETimeFrames.MID, ETimeFrames.SHORT, ETimeFrames.LONG],
            scopes=[EScope.S1S2],
            aggregation_method=PortfolioAggregationMethod.WATS,
        )

        # portfolio data
        pf_company = copy.deepcopy(self.pf_base)
        portfolio_data = SBTi.utils.get_data([data_provider], [pf_company])

        # Verify data
        scores = temp_score.calculate(portfolio_data)
        self.assertIsNotNone(scores)
        self.assertEqual(len(scores.index), 3)
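
A quick way to eyeball the result while iterating on this test is to print the per-row output of calculate(). The column names used below (company_name, time_frame, scope, temperature_score) mirror the ones asserted later in this suite; treat them as assumptions about the returned DataFrame:

        # Sketch only, continuing from the test above: inspect the three rows
        # (one per requested time frame) that calculate() returned.
        print(scores[["company_name", "time_frame", "scope", "temperature_score"]])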
Example #2
    def test_regression_companies(self):

        nr_companies = 1000

        # Build nr_companies (1000) companies, targets and portfolio entries
        companies: List[IDataProviderCompany] = []
        targets: List[IDataProviderTarget] = []
        pf_companies: List[PortfolioCompany] = []

        for i in range(nr_companies):
            company_id = f"Company {i}"
            # company
            company = copy.deepcopy(self.company_base)
            company.company_id = company_id
            companies.append(company)

            # target
            target = copy.deepcopy(self.target_base)
            target.company_id = company_id
            targets.append(target)

            # pf company
            pf_company = PortfolioCompany(
                company_name=company_id,
                company_id=company_id,
                investment_value=100,
                company_isin=company_id,
            )
            pf_companies.append(pf_company)

        data_provider = TestDataProvider(companies=companies, targets=targets)

        # Calculate scores & Aggregated values
        temp_score = TemperatureScore(
            time_frames=[ETimeFrames.MID],
            scopes=[EScope.S1S2],
            aggregation_method=PortfolioAggregationMethod.WATS,
        )

        portfolio_data = SBTi.utils.get_data([data_provider], pf_companies)
        scores = temp_score.calculate(portfolio_data)
        agg_scores = temp_score.aggregate_scores(scores)

        self.assertAlmostEqual(agg_scores.mid.S1S2.all.score,
                               self.BASE_COMP_SCORE)
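
Since this test scores a large synthetic portfolio, it can double as a rough performance guard. A minimal sketch, assuming a wall-clock budget that you would tune to your own CI hardware:

        # Sketch: time the scoring step for the 1000 synthetic companies.
        # The 60-second budget is an arbitrary placeholder, not a library guarantee.
        import time

        start = time.perf_counter()
        temp_score.calculate(portfolio_data)
        elapsed = time.perf_counter() - start
        self.assertLess(elapsed, 60.0,
                        f"calculate() took {elapsed:.1f}s for {nr_companies} companies")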
Example #3
    def test_score_cap(self):

        companies, targets, pf_companies = self.create_base_companies(["A"])
        data_provider = TestDataProvider(companies=companies, targets=targets)

        # add a Scenario that will trigger the score cap function
        scenario = Scenario()
        scenario.engagement_type = EngagementType.SET_TARGETS
        scenario.scenario_type = ScenarioType.APPROVED_TARGETS

        temp_score = TemperatureScore(
            time_frames=[ETimeFrames.MID],
            scopes=[EScope.S1S2],
            aggregation_method=PortfolioAggregationMethod.WATS,
            scenario=scenario)

        portfolio_data = SBTi.utils.get_data([data_provider], pf_companies)
        scores = temp_score.calculate(portfolio_data)
        agg_scores = temp_score.aggregate_scores(scores)
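
As written, this test builds the capped aggregation but asserts nothing about it. A hedged sketch of a follow-up check; the idea that the capped aggregate should not exceed the uncapped base score is an assumption about the scenario, not something documented here:

        # Sketch: the approved-targets scenario caps individual scores, so the
        # aggregate is assumed to stay at or below the base score.
        self.assertIsNotNone(agg_scores.mid.S1S2)
        self.assertLessEqual(agg_scores.mid.S1S2.all.score, self.BASE_COMP_SCORE)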
Example #4
    def test_grouping(self):
        """
        Test the grouping feature with two different industry levels and make sure results are present for each group.
        """
        # make 2+ companies and group them together
        industry_levels = ["Manufacturer", "Energy"]
        company_ids = ["A", "B"]
        companies_all: List[IDataProviderCompany] = []
        targets_all: List[IDataProviderTarget] = []
        pf_companies_all: List[PortfolioCompany] = []

        for ind_level in industry_levels:

            company_ids_with_level = [
                f"{ind_level}_{company_id}" for company_id in company_ids
            ]

            companies, targets, pf_companies = self.create_base_companies(
                company_ids_with_level)
            for company in companies:
                company.industry_level_1 = ind_level

            companies_all.extend(companies)
            targets_all.extend(targets)
            pf_companies_all.extend(pf_companies)

        data_provider = TestDataProvider(companies=companies_all,
                                         targets=targets_all)

        temp_score = TemperatureScore(
            time_frames=[ETimeFrames.MID],
            scopes=[EScope.S1S2],
            aggregation_method=PortfolioAggregationMethod.WATS,
            grouping=["industry_level_1"])

        portfolio_data = SBTi.utils.get_data([data_provider], pf_companies_all)
        scores = temp_score.calculate(portfolio_data)
        agg_scores = temp_score.aggregate_scores(scores)

        for ind_level in industry_levels:
            self.assertAlmostEqual(
                agg_scores.mid.S1S2.grouped[ind_level].score,
                self.BASE_COMP_SCORE)
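
The grouped aggregates can also be walked generically. A small sketch, assuming grouped behaves like a mapping from group value to an aggregation object (consistent with the subscript access used in the assertion above):

        # Sketch: iterate over the per-industry aggregations.
        for group_value, aggregation in agg_scores.mid.S1S2.grouped.items():
            print(group_value, aggregation.score)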
Example #5
    def test_basic_flow(self):
        """
        Run the full flow all the way through to the aggregated calculations.
        """

        companies, targets, pf_companies = self.create_base_companies(
            ["A", "B"])

        data_provider = TestDataProvider(companies=companies, targets=targets)

        # Calculate scores & Aggregated values
        temp_score = TemperatureScore(
            time_frames=[ETimeFrames.MID],
            scopes=[EScope.S1S2, EScope.S1S2S3],
            aggregation_method=PortfolioAggregationMethod.WATS,
        )

        portfolio_data = SBTi.utils.get_data([data_provider], pf_companies)
        scores = temp_score.calculate(portfolio_data)
        agg_scores = temp_score.aggregate_scores(scores)

        # verify that results exist
        self.assertEqual(agg_scores.mid.S1S2.all.score, self.BASE_COMP_SCORE)
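
Because this test also requests EScope.S1S2S3, the combined-scope aggregate is available on the same result object. Its exact value depends on the S3 fallback handling, so the sketch below only checks that it is present rather than pinning a number:

        # Sketch: the S1S2S3 aggregate exists alongside the asserted S1S2 one.
        self.assertIsNotNone(agg_scores.mid.S1S2S3)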
Example #6
    def setUp(self) -> None:
        """
        Create the provider and reporting instance which we'll use later on.
        :return:
        """
        self.temperature_score = TemperatureScore(
            time_frames=list(ETimeFrames), scopes=EScope.get_result_scopes())
        self.data = pd.read_csv(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "inputs",
                         "data_test_temperature_score.csv"))
        scope_map = {
            "S1+S2": EScope.S1S2,
            "S3": EScope.S3,
            "S1+S2+S3": EScope.S1S2S3
        }
        self.data[ColumnsConfig.SCOPE] = self.data[ColumnsConfig.SCOPE].map(
            scope_map)
        time_frame_map = {
            "short": ETimeFrames.SHORT,
            "mid": ETimeFrames.MID,
            "long": ETimeFrames.LONG
        }
        self.data[ColumnsConfig.TIME_FRAME] = self.data[
            ColumnsConfig.TIME_FRAME].map(time_frame_map)
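
The two map() calls above convert the string labels read from the CSV into the library's enum members. A standalone pandas illustration of that step (the demo series values are taken from the mapping keys above):

# Standalone sketch of the Series.map() conversion used in setUp().
import pandas as pd

demo = pd.Series(["S1+S2", "S3", "S1+S2+S3"])
print(demo.map({"S1+S2": EScope.S1S2, "S3": EScope.S3, "S1+S2+S3": EScope.S1S2S3}))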
Example #7
    def test_target_grouping(self):
        """
        Check the target grouping in the target validation from beginning to end.
        """

        companies, targets, pf_companies = self.create_base_companies(
            ["A", "B", "C", "D"])
        target = copy.deepcopy(self.target_base)
        target.company_id = 'A'
        target.coverage_s1 = 0.75
        target.coverage_s2 = 0.75
        target.coverage_s3 = 0.75
        targets.append(target)

        target = copy.deepcopy(self.target_base)
        target.company_id = 'A'
        target.coverage_s1 = 0.99
        target.coverage_s2 = 0.99
        target.coverage_s3 = 0.99
        targets.append(target)

        target = copy.deepcopy(self.target_base)
        target.company_id = 'B'
        target.scope = EScope.S3
        target.coverage_s1 = 0.75
        target.coverage_s2 = 0.75
        target.coverage_s3 = 0.49
        targets.append(target)

        target = copy.deepcopy(self.target_base)
        target.company_id = 'B'
        target.scope = EScope.S3
        target.coverage_s1 = 0.99
        target.coverage_s2 = 0.99
        target.coverage_s3 = 0.49
        target.end_year = 2035
        targets.append(target)

        target = copy.deepcopy(self.target_base)
        target.company_id = 'D'
        target.coverage_s1 = 0.95
        target.coverage_s2 = 0.95
        target.target_type = 'int'
        target.intensity_metric = 'Revenue'
        targets.append(target)

        data_provider = TestDataProvider(companies=companies, targets=targets)

        # Calculate scores & Aggregated values
        temp_score = TemperatureScore(
            time_frames=[ETimeFrames.MID],
            scopes=[EScope.S1S2, EScope.S1S2S3],
            aggregation_method=PortfolioAggregationMethod.WATS,
        )

        portfolio_data = SBTi.utils.get_data([data_provider], pf_companies)
        scores = temp_score.calculate(portfolio_data)
        agg_scores = temp_score.aggregate_scores(scores)

        # verify that results exist
        self.assertAlmostEqual(agg_scores.mid.S1S2.all.score,
                               self.BASE_COMP_SCORE,
                               places=4)
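
Before target validation runs, companies 'A' and 'B' each carry several candidate targets. A quick sanity-check sketch; the exact totals depend on how many targets create_base_companies adds per id, so only lower bounds are asserted:

        # Sketch: count candidate targets per company before validation.
        from collections import Counter

        counts = Counter(t.company_id for t in targets)
        self.assertGreaterEqual(counts["A"], 2)  # two extra S1/S2 targets appended above
        self.assertGreaterEqual(counts["B"], 2)  # two extra S3 targets appended above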
Example #8
class TestTemperatureScore(unittest.TestCase):
    """
    Test the reporting functionality. We use the example data provider because its output is known in advance.
    """
    def setUp(self) -> None:
        """
        Create the provider and reporting instance which we'll use later on.
        :return:
        """
        self.temperature_score = TemperatureScore(
            time_frames=list(ETimeFrames), scopes=EScope.get_result_scopes())
        self.data = pd.read_csv(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "inputs",
                         "data_test_temperature_score.csv"))
        scope_map = {
            "S1+S2": EScope.S1S2,
            "S3": EScope.S3,
            "S1+S2+S3": EScope.S1S2S3
        }
        self.data[ColumnsConfig.SCOPE] = self.data[ColumnsConfig.SCOPE].map(
            scope_map)
        time_frame_map = {
            "short": ETimeFrames.SHORT,
            "mid": ETimeFrames.MID,
            "long": ETimeFrames.LONG
        }
        self.data[ColumnsConfig.TIME_FRAME] = self.data[
            ColumnsConfig.TIME_FRAME].map(time_frame_map)

    def test_temp_score(self) -> None:
        """
        Test whether the temperature score is calculated as expected.

        :return:
        """
        scores = self.temperature_score.calculate(self.data)
        self.assertAlmostEqual(scores[(scores["company_name"] == "Company T")
                                      & (scores["scope"] == EScope.S1S2)]
                               ["temperature_score"].iloc[0],
                               1.77,
                               places=2,
                               msg="The temp score was incorrect")
        self.assertAlmostEqual(scores[(scores["company_name"] == "Company E")
                                      & (scores["scope"] == EScope.S1S2)]
                               ["temperature_score"].iloc[0],
                               3.2,
                               places=2,
                               msg="The fallback temp score was incorrect")
        self.assertAlmostEqual(
            scores[(scores["company_name"] == "Company AA")
                   & (scores["time_frame"] == ETimeFrames.MID) &
                   (scores["scope"]
                    == EScope.S1S2S3)]["temperature_score"].iloc[0],
            1.97,
            places=2,
            msg="The aggregated temp score was incorrect")
        self.assertAlmostEqual(
            scores[(scores["company_name"] == "Company AA")
                   & (scores["time_frame"] == ETimeFrames.LONG) &
                   (scores["scope"]
                    == EScope.S1S2S3)]["temperature_score"].iloc[0],
            3.2,
            places=5,
            msg="The aggregated fallback temp score was incorrect")

    def test_portfolio_aggregations(self):
        scores = self.temperature_score.calculate(self.data)
        aggregations = self.temperature_score.aggregate_scores(scores)
        self.assertAlmostEqual(aggregations.short.S1S2.all.score,
                               2.73,
                               places=2,
                               msg="Short WATS aggregation failed")
        self.assertAlmostEqual(aggregations.mid.S1S2.all.score,
                               2.89,
                               places=2,
                               msg="Mid WATS aggregation failed")
        self.assertAlmostEqual(aggregations.long.S1S2.all.score,
                               3.2,
                               places=2,
                               msg="Long WATS aggregation failed")
        self.temperature_score.aggregation_method = PortfolioAggregationMethod.TETS
        aggregations = self.temperature_score.aggregate_scores(scores)
        self.assertAlmostEqual(aggregations.short.S1S2.all.score,
                               2.86,
                               places=2,
                               msg="Short TETS aggregation failed")
        self.assertAlmostEqual(aggregations.mid.S1S2.all.score,
                               3.41,
                               places=2,
                               msg="Mid TETS aggregation failed")
        self.assertAlmostEqual(aggregations.long.S1S2.all.score,
                               3.2,
                               places=2,
                               msg="Long TETS aggregation failed")
        self.temperature_score.aggregation_method = PortfolioAggregationMethod.MOTS
        aggregations = self.temperature_score.aggregate_scores(scores)
        self.assertAlmostEqual(aggregations.short.S1S2.all.score,
                               2.88,
                               places=2,
                               msg="Short MOTS aggregation failed")
        self.assertAlmostEqual(aggregations.mid.S1S2.all.score,
                               3.43,
                               places=2,
                               msg="Mid MOTS aggregation failed")
        self.assertAlmostEqual(aggregations.long.S1S2.all.score,
                               3.2,
                               places=2,
                               msg="Long MOTS aggregation failed")
        self.temperature_score.aggregation_method = PortfolioAggregationMethod.EOTS
        aggregations = self.temperature_score.aggregate_scores(scores)
        self.assertAlmostEqual(aggregations.short.S1S2.all.score,
                               2.93,
                               places=2,
                               msg="Short EOTS aggregation failed")
        self.assertAlmostEqual(aggregations.mid.S1S2.all.score,
                               3.48,
                               places=2,
                               msg="Mid EOTS aggregation failed")
        self.assertAlmostEqual(aggregations.long.S1S2.all.score,
                               3.2,
                               places=2,
                               msg="Long EOTS aggregation failed")
        self.temperature_score.aggregation_method = PortfolioAggregationMethod.ECOTS
        aggregations = self.temperature_score.aggregate_scores(scores)
        self.assertAlmostEqual(aggregations.short.S1S2.all.score,
                               2.93,
                               places=2,
                               msg="Short ECOTS aggregation failed")
        self.assertAlmostEqual(aggregations.mid.S1S2.all.score,
                               3.48,
                               places=2,
                               msg="Mid ECOTS aggregation failed")
        self.assertAlmostEqual(aggregations.long.S1S2.all.score,
                               3.2,
                               places=2,
                               msg="Long ECOTS aggregation failed")
        self.temperature_score.aggregation_method = PortfolioAggregationMethod.AOTS
        aggregations = self.temperature_score.aggregate_scores(scores)
        self.assertAlmostEqual(aggregations.short.S1S2.all.score,
                               2.88,
                               places=2,
                               msg="Short AOTS aggregation failed")
        self.assertAlmostEqual(aggregations.mid.S1S2.all.score,
                               3.43,
                               places=2,
                               msg="Mid AOTS aggregation failed")
        self.assertAlmostEqual(aggregations.long.S1S2.all.score,
                               3.2,
                               places=2,
                               msg="Long AOTS aggregation failed")
Example #9
    def post(self):
        json_data = request.get_json(force=True)
        data_providers = self._get_data_providers(json_data)
        default_score = json_data.get("default_score",
                                      self.config["default_score"])
        temperature_score = TemperatureScore(fallback_score=default_score)

        input_data = pd.DataFrame(json_data["companies"])
        company_data = SBTi.data.get_company_data(data_providers,
                                                  json_data["companies"])
        target_data = SBTi.data.get_targets(data_providers,
                                            json_data["companies"])
        company_data = pd.merge(left=company_data,
                                right=input_data[[
                                    column for column in input_data.columns
                                    if column not in ["company_name"]
                                ]],
                                how="left",
                                on=["company_id"])

        aggregation_method = self.aggregation_map[
            self.config["aggregation_method"]]
        # TODO: Write this as a shorthand and raise an exception if the user specified a non-existent aggregation method
        if "aggregation_method" in json_data and json_data[
                "aggregation_method"] in self.aggregation_map:
            aggregation_method = self.aggregation_map[
                json_data["aggregation_method"]]

        # Group aggregates by certain column names
        grouping = json_data.get("grouping_columns", None)

        scenario = json_data.get('scenario', None)
        if scenario is not None and scenario["number"] > 0:
            scenario['aggregation_method'] = aggregation_method
            scenario['grouping'] = grouping
            scenario = Scenario.from_dict(scenario)
            temperature_score.set_scenario(scenario)

        if len(company_data) == 0:
            return {
                "success": False,
                "message": "None of the companies in your portfolio could be found by the data provider",
            }, 400

        # Target validation
        target_validation = TargetValidation(target_data, company_data)
        portfolio_data = target_validation.target_validation()

        scores = temperature_score.calculate(portfolio_data)

        # Temperature score percentage breakdown by default score and target score
        temperature_percentage_coverage = temperature_score.temperature_score_influence_percentage(
            scores.copy(), aggregation_method)

        # After calculation we'll re-add the extra columns from the input
        for company in json_data["companies"]:
            for key, value in company.items():
                if key not in ["company_name", "company_id"]:
                    portfolio_data.loc[portfolio_data['company_name'] ==
                                       company["company_name"], key] = value

        # Filter scope (s1s2, s3 or s1s2s3)
        if len(json_data.get("filter_scope_category", [])) > 0:
            scores = scores[scores["scope_category"].isin(
                json_data["filter_scope_category"])]

        # Filter timeframe (short, mid, long)
        if len(json_data.get("filter_time_frame", [])) > 0:
            scores = scores[scores["time_frame"].isin(
                json_data["filter_time_frame"])]

        scores = scores.copy()
        # TODO: Why are the scores rounded here, even though there are still calculations left to do?
        scores = scores.round(2)

        aggregations = temperature_score.aggregate_scores(
            scores, aggregation_method, grouping)

        # Include columns
        include_columns = ["company_name", "scope_category", "time_frame", "temperature_score"] + \
                          [column for column in json_data.get("include_columns", []) if column in scores.columns]

        portfolio_coverage_tvp = PortfolioCoverageTVP()
        coverage = portfolio_coverage_tvp.get_portfolio_coverage(
            portfolio_data, aggregation_method)

        if grouping:
            column_distribution = temperature_score.columns_percentage_distribution(
                portfolio_data, json_data['grouping_columns'])
        else:
            column_distribution = None

        temperature_percentage_coverage = pd.DataFrame.from_dict(
            temperature_percentage_coverage).replace({
                np.nan: None
            }).to_dict()
        aggregations = temperature_score.merge_percentage_coverage_to_aggregations(
            aggregations, temperature_percentage_coverage)

        # Dump raw data to compute the scores
        anonymize_data_dump = json_data.get("anonymize_data_dump", False)
        if anonymize_data_dump:
            scores = temperature_score.anonymize_data_dump(scores)

        return_dic = {
            "aggregated_scores": aggregations,
            # TODO: The scores are included twice now, once with all columns, and once with only a subset of the columns
            "scores": scores.where(pd.notnull(scores), None).to_dict(orient="records"),
            "coverage": coverage,
            "companies": scores[include_columns].replace({np.nan: None}).to_dict(orient="records"),
            "feature_distribution": column_distribution,
        }

        return return_dic
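
For reference, a hedged client-side sketch of calling this endpoint. The URL and route are assumptions (they depend on how the resource is registered), while the payload keys mirror the fields post() reads above; depending on the deployment, _get_data_providers may also expect a data_providers key in the payload.

# Client sketch; endpoint URL/route are assumed, payload keys follow post() above.
import requests

payload = {
    "companies": [
        {"company_name": "Company A", "company_id": "A", "investment_value": 100},
    ],
    "default_score": 3.2,                # optional, falls back to the server config
    "aggregation_method": "WATS",        # must be a key in aggregation_map
    "grouping_columns": ["industry_level_1"],
    "filter_scope_category": ["s1s2"],   # values depend on how scope_category is encoded
    "filter_time_frame": ["mid"],
    "include_columns": ["industry_level_1"],
    "anonymize_data_dump": False,
}
response = requests.post("http://localhost:5000/temperature_score", json=payload)
print(response.json()["aggregated_scores"])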