Пример #1
0
    def test_aws_true(self):
        """
        Tests a situation when benchmark_runner is run on AWS.

        Uses mock to simulate the situation where aws = True: the AWS
        metadata lookup is patched so the test does not need a real EC2
        instance, then the scorecard CSV written by run_benchmark is read
        back and its instance metadata compared to the mocked values.
        """
        mock_instancetype_region = {
            'instancetype': 'someinstancetype', 'region': 'someregion'
        }
        # Use the patch as a context manager so it is always undone, even
        # if run_benchmark or the CSV read raises.  The original manual
        # start()/stop() pair leaked the patch on failure, and it bound
        # run_benchmark's return value to an unused local.
        with mock.patch('awsforyou.aws_metadata.get_instance',
                        return_value=mock_instancetype_region):
            bench.run_benchmark(aws=True)
            scorecard = pd.read_csv('./aws-scorecard.csv')
        dict_scorecard = scorecard.to_dict(orient='records')[0]
        dict_scorecard = {
            'instancetype': dict_scorecard['instancetype'],
            'region': dict_scorecard['region']
        }
        self.assertEqual(mock_instancetype_region, dict_scorecard)
Пример #2
0
 def test_for_complete_df_empty(self):
     """
     Verify the benchmark dataframe still contains rows after the
     estimated AWS runtimes column has been attached.
     """
     bench_data = rc.get_benchmark_data()
     sample_times = [2, 4, 6]
     sample_percents = [1, 5, 10]
     total_time_est = tt.find_total_time(sample_times, sample_percents)
     local_benchmark = br.run_benchmark()
     # Scale each instance's benchmark runtime by the user's estimate.
     scaled_times = (bench_data[['runtime']]
                     / local_benchmark * total_time_est[0])
     bench_data["estimated_time_aws"] = scaled_times
     self.assertGreater(bench_data.shape[0], 0)
Пример #3
0
def add_estimated_time_aws(dataframe, python_call, module_name):
    """
    This function estimates the time required to run the users algorithim on
    each instance and adds it to the dataframe.

    :param dataframe: the benchmark dataframe output from get_benchmark_data()
    :param python_call: str python string calling the algorithm to be timed
    :param module_name: str name of module from which function is called
    :return: dataframe with added "estimated_time_aws" column
    """
    times, percents = ar.run_algo(python_call, module_name)
    est_time_user = tt.find_total_time(times, percents)
    user_benchmark = br.run_benchmark()
    # BUG FIX: the original read from the undefined name "dataframef",
    # which raised NameError on every call; the parameter is "dataframe".
    # Scale each instance's benchmark runtime by the ratio of the user's
    # estimated total time to the user's local benchmark time.
    est_time_aws = dataframe[['runtime']] / user_benchmark * est_time_user[0]
    dataframe["estimated_time_aws"] = est_time_aws
    return dataframe
Пример #4
0
 def test_add_estimated_price(self):
     """
     This function tests adding the spot and on-demand pricing
     to the dataframe, checking the merged result is non-empty.
     """
     bench_data = rc.get_benchmark_data()
     sample_times = [2, 4, 6]
     sample_percents = [1, 5, 10]
     total_time_est = tt.find_total_time(sample_times, sample_percents)
     local_benchmark = br.run_benchmark()
     # Attach estimated per-instance runtimes scaled from the user's run.
     bench_data["estimated_time_aws"] = (
         bench_data[['runtime']] / local_benchmark * total_time_est[0]
     )
     pricing = ap.get_instance_pricing(
         bench_data["instance_type"].tolist()
     )
     merged = pd.merge(bench_data, pricing, on="instance_type")
     # Prices are per hour; estimated times are in seconds.
     merged["est_cost_spot_price"] = (
         merged["estimated_time_aws"] * merged["spot_price"] / 3600
     )
     merged["est_cost_on_demand_price"] = (
         merged["estimated_time_aws"] * merged["on_demand_price"] / 3600
     )
     self.assertGreater(merged.shape[0], 0)
Пример #5
0
 def test_runtime(self):
     """
     Check that the local (non-AWS) benchmark reports its duration
     as a float number of seconds.
     """
     elapsed = bench.run_benchmark(aws=False)
     self.assertIsInstance(elapsed, float)