def test_append_cluster_stats_corrupt(self):
  """Appending over a corrupt stats file discards the unreadable data.

  After the on-disk protobuf file is overwritten with garbage, a subsequent
  append must replace the corrupt contents so that only the newly appended
  samples survive a read.
  """
  output_dir = os.path.join(environment.test_output_dir(self),
                            "test_append_cluster_stats_corrupt")
  if os.path.isdir(output_dir):
    shutil.rmtree(output_dir)
  metric_to_append = CurieMetric()
  metric_to_append.CopyFrom(self.counter_template)
  metric_to_append.timestamps.extend([1454092320, 1454092321])
  metric_to_append.values.extend([1, 2])
  new_results = {"node_0": [metric_to_append]}
  ScenarioUtil.append_cluster_stats(new_results, output_dir)
  # Corrupt the file.
  filename = ("%s_%s" % (self._counter_template_name(),
                         self.counter_template.instance)).replace(".", "_")
  bin_path = os.path.join(output_dir, "node_0", filename + ".bin")
  # Use a real unittest assertion: a bare `assert` is stripped under -O.
  self.assertTrue(os.path.isfile(bin_path))
  with open(bin_path, "w") as f:
    f.write("Cela ne veut pas un protobuf.")
  metric_to_append = CurieMetric()
  metric_to_append.CopyFrom(self.counter_template)
  metric_to_append.timestamps.extend([1454092322, 1454092323])
  metric_to_append.values.extend([3, 4])
  new_results = {"node_0": [metric_to_append]}
  ScenarioUtil.append_cluster_stats(new_results, output_dir)
  # Only the post-corruption samples should be readable.
  expected_metric = CurieMetric()
  expected_metric.CopyFrom(self.counter_template)
  expected_metric.timestamps.extend([1454092322, 1454092323])
  expected_metric.values.extend([3, 4])
  results = ScenarioUtil.read_cluster_stats(output_dir)
  # list() keeps this comparison valid on both Python 2 (list) and
  # Python 3 (dict view).
  self.assertEqual(list(results.keys()), ["node_0"])
  self.assertEqual(len(results["node_0"]), 1)
  self.assertEqual(results["node_0"][0], expected_metric)
def test_append_cluster_stats_duplicates(self):
  """Appending samples that overlap existing timestamps de-duplicates them.

  The second append repeats timestamp 1454092322; the stored series must
  contain that sample exactly once.
  """
  output_dir = os.path.join(environment.test_output_dir(self),
                            "test_append_cluster_stats_duplicates")
  if os.path.isdir(output_dir):
    shutil.rmtree(output_dir)
  metric_to_append = CurieMetric()
  metric_to_append.CopyFrom(self.counter_template)
  metric_to_append.timestamps.extend([1454092320, 1454092321, 1454092322])
  metric_to_append.values.extend([1, 2, 3])
  new_results = {"node_0": [metric_to_append]}
  ScenarioUtil.append_cluster_stats(new_results, output_dir)
  # Second append overlaps the first at timestamp 1454092322.
  metric_to_append = CurieMetric()
  metric_to_append.CopyFrom(self.counter_template)
  metric_to_append.timestamps.extend([1454092322, 1454092323])
  metric_to_append.values.extend([3, 4])
  new_results = {"node_0": [metric_to_append]}
  ScenarioUtil.append_cluster_stats(new_results, output_dir)
  expected_metric = CurieMetric()
  expected_metric.CopyFrom(self.counter_template)
  expected_metric.timestamps.extend(
      [1454092320, 1454092321, 1454092322, 1454092323])
  expected_metric.values.extend([1, 2, 3, 4])
  results = ScenarioUtil.read_cluster_stats(output_dir)
  # list() keeps this comparison valid on both Python 2 (list) and
  # Python 3 (dict view).
  self.assertEqual(list(results.keys()), ["node_0"])
  self.assertEqual(len(results["node_0"]), 1)
  self.assertEqual(results["node_0"][0], expected_metric)
def test_collect_dump_read_stats(self):
  """Round-trip: collected stats survive append + read unchanged."""
  results_map = self.cluster.collect_performance_stats()
  dir_name = environment.test_output_dir(self)
  ScenarioUtil.append_cluster_stats(results_map, dir_name)
  read_results_map = ScenarioUtil.read_cluster_stats(dir_name)
  # Compare sorted key lists so the check is order-independent and works
  # with both Python 2 key lists and Python 3 view objects.
  self.assertEqual(sorted(results_map.keys()),
                   sorted(read_results_map.keys()))
  for node_id in results_map:
    self.assertEqual(len(results_map[node_id]),
                     len(read_results_map[node_id]))
    # Each metric read back must match the one collected, field by field.
    for expected_metric, metric in zip(results_map[node_id],
                                       read_results_map[node_id]):
      self.assertEqual(MetricsUtil.metric_name(metric),
                       MetricsUtil.metric_name(expected_metric))
      self.assertEqual(metric.instance, expected_metric.instance)
      self.assertEqual(metric.timestamps, expected_metric.timestamps)
      self.assertEqual(metric.values, expected_metric.values)
def test_get_result_pbs_partial(self):
  """Result PBs are produced only for nodes that reported samples."""
  result = ClusterResult(self.scenario, "fake_result",
                         "NetReceived.Avg.KilobytesPerSecond")
  metric = CurieMetric(
      name=CurieMetric.kNetReceived,
      description="Average network data received "
                  "across all interfaces.",
      instance="Aggregated",
      type=CurieMetric.kGauge,
      consolidation=CurieMetric.kAvg,
      unit=CurieMetric.kKilobytes,
      rate=CurieMetric.kPerSecond,
      experimental=True)
  # Two nodes have data; two have empty metric lists.
  stats_by_node = {
      "169.254.0.0": [metric],
      "169.254.0.1": [metric],
      "169.254.0.2": [],
      "169.254.0.3": [],
  }
  ScenarioUtil.append_cluster_stats(stats_by_node,
                                    self.scenario.cluster_stats_dir())
  pbs = result.get_result_pbs()
  # Only the two nodes with samples should yield result protobufs.
  self.assertEqual(2, len(pbs))
  for pb in pbs:
    self.assertIsInstance(pb, CurieTestResult)
def test_append_read_cluster_stats_empty(self):
  """Empty metrics append cleanly and never clobber existing samples."""
  output_dir = os.path.join(environment.test_output_dir(self),
                            "test_append_read_cluster_stats_empty")
  if os.path.isdir(output_dir):
    shutil.rmtree(output_dir)
  empty_metric = CurieMetric()
  empty_metric.CopyFrom(self.counter_template)
  del empty_metric.timestamps[:]
  del empty_metric.values[:]
  self.assertEqual(empty_metric.timestamps, [])
  self.assertEqual(empty_metric.values, [])

  def _assert_single_result(expected):
    # Read the stats back and verify node_0 holds exactly one metric equal
    # to `expected`.
    results = ScenarioUtil.read_cluster_stats(output_dir)
    # list() keeps this comparison valid on both Python 2 (list) and
    # Python 3 (dict view).
    self.assertEqual(list(results.keys()), ["node_0"])
    self.assertEqual(len(results["node_0"]), 1)
    self.assertEqual(results["node_0"][0], expected)

  # Write empty.
  ScenarioUtil.append_cluster_stats({"node_0": [empty_metric]}, output_dir)
  _assert_single_result(empty_metric)
  # Append empty.
  ScenarioUtil.append_cluster_stats({"node_0": [empty_metric]}, output_dir)
  _assert_single_result(empty_metric)
  # Append non-empty.
  non_empty_metric = CurieMetric()
  non_empty_metric.CopyFrom(self.counter_template)
  non_empty_metric.timestamps.extend([1454092320, 1454092321])
  non_empty_metric.values.extend([1, 2])
  ScenarioUtil.append_cluster_stats({"node_0": [non_empty_metric]},
                                    output_dir)
  _assert_single_result(non_empty_metric)
  # Append empty again: the previously appended samples must survive.
  ScenarioUtil.append_cluster_stats({"node_0": [empty_metric]}, output_dir)
  _assert_single_result(non_empty_metric)
def _cluster_results_update(self, start_time_secs=None):
  """Update the in-memory and on-disk cluster results.

  Args:
    start_time_secs (int or None): If set, collect samples newer than this
      epoch time; otherwise the cluster's default number of latest samples
      is collected.

  Returns:
    int or None: Epoch time passed through from
      ScenarioUtil.append_cluster_stats, or None if no data was collected.
  """
  cluster_stats_dir = self.cluster_stats_dir()
  if cluster_stats_dir is None:
    log.warning("Cluster results not collected because the output directory "
                "is not set")
    return None
  try:
    # If data has been collected before, get the data after the previous
    # collection. Otherwise, start_time_secs is None, and the default
    # number of the latest samples will be collected.
    results_map = self.cluster.collect_performance_stats(
        start_time_secs=start_time_secs)
  except Exception:
    # Best-effort: collection failures are logged and left for the caller
    # to retry rather than aborting the scenario.
    log.exception("An exception occurred while updating cluster results. "
                  "This operation will be retried.")
    return None
  else:
    # Reuse the validated local path instead of calling
    # self.cluster_stats_dir() a second time.
    appended_time_secs = ScenarioUtil.append_cluster_stats(
        results_map, cluster_stats_dir)
    # Write the results in CSV format for easier analysis: append when the
    # file exists, otherwise create it with a header row.
    csv_path = os.path.join(cluster_stats_dir, "cluster_stats.csv")
    if os.path.isfile(csv_path):
      mode = "a"
      header = False
    else:
      mode = "w"
      header = True
    with open(csv_path, mode) as csv_file:
      csv_file.write(
          ScenarioUtil.results_map_to_csv(results_map, header=header))
    return appended_time_secs