def query_graphite_targets(self):
    """Fetch arrival-rate and service-time data for every configured target.

    Queries Graphite once per target group stored in ``self.targets``
    (attached to LeptoidScaler), unpacks each raw response into
    time-series form, and combines the two series sets.

    Returns:
        list of leptoid.ServiceQueues with utilization populated.
    """
    # Pull both metric families from Graphite with the shared API params.
    rate_payload = graphite.call_graphite(
        self.targets['arrival_rates'], self.api_params)
    time_payload = graphite.call_graphite(
        self.targets['service_times'], self.api_params)

    # Unpack the raw responses into per-namespace time series.
    rates = graphite.extract_time_series(rate_payload)
    times = graphite.extract_time_series(time_payload)

    return generate_service_queues(rates, times)
def test_data_extraction(self):
    """Verify g.extract_time_series() unpacks a raw Graphite pickle.

    Feeds a minimal fake Graphite pickle (one metric with ten samples)
    through the extractor and checks the nested namespace layout plus
    the length of the resulting series.
    """
    fake_pickle = [{
        'start': 1000,
        'values': arange(10),
        'name': "Knewton.Staging.Webservice-KRS.i-deadbeef"}]
    namespace_data = g.extract_time_series(fake_pickle)
    tser = namespace_data['staging']['kbs.KRS']['i-deadbeef']

    # Data should be in a dict, with a pandas.TimeSeries containing
    # the ten samples. assertIsInstance/assertEqual report the actual
    # value on failure, unlike assertTrue on an opaque boolean.
    self.assertIsInstance(namespace_data, dict)
    self.assertIsInstance(tser, TimeSeries)
    self.assertEqual(len(tser), 10)
def test_data_extraction(self):
    """Verify g.extract_time_series() unpacks a raw Graphite pickle.

    Builds a minimal fake Graphite pickle (one metric, ten samples),
    runs it through the extractor, and asserts on the nested namespace
    structure and the extracted series length.
    """
    fake_pickle = [{
        'start': 1000,
        'values': arange(10),
        'name': "Knewton.Staging.Webservice-KRS.i-deadbeef"
    }]
    namespace_data = g.extract_time_series(fake_pickle)
    tser = namespace_data['staging']['kbs.KRS']['i-deadbeef']

    # Data should be in a dict, with a pandas.TimeSeries containing
    # the ten samples. assertIsInstance/assertEqual report the actual
    # value on failure, unlike assertTrue on an opaque boolean.
    self.assertIsInstance(namespace_data, dict)
    self.assertIsInstance(tser, TimeSeries)
    self.assertEqual(len(tser), 10)