Example #1
 def test_append_cluster_stats_duplicates(self):
     output_dir = os.path.join(environment.test_output_dir(self),
                               "test_append_cluster_stats_duplicates")
     if os.path.isdir(output_dir):
         shutil.rmtree(output_dir)
     metric_to_append = CurieMetric()
     metric_to_append.CopyFrom(self.counter_template)
     metric_to_append.timestamps.extend(
         [1454092320, 1454092321, 1454092322])
     metric_to_append.values.extend([1, 2, 3])
     new_results = {"node_0": [metric_to_append]}
     ScenarioUtil.append_cluster_stats(new_results, output_dir)
     metric_to_append = CurieMetric()
     metric_to_append.CopyFrom(self.counter_template)
     metric_to_append.timestamps.extend([1454092322, 1454092323])
     metric_to_append.values.extend([3, 4])
     new_results = {"node_0": [metric_to_append]}
     ScenarioUtil.append_cluster_stats(new_results, output_dir)
     expected_metric = CurieMetric()
     expected_metric.CopyFrom(self.counter_template)
     expected_metric.timestamps.extend(
         [1454092320, 1454092321, 1454092322, 1454092323])
     expected_metric.values.extend([1, 2, 3, 4])
     results = ScenarioUtil.read_cluster_stats(output_dir)
     self.assertEqual(results.keys(), ["node_0"])
     self.assertEqual(len(results["node_0"]), 1)
     self.assertEqual(results["node_0"][0], expected_metric)
Example #2
    def start(self, vms, runtime_secs=None, stagger_secs=None, seed=None):
        """
    Starts the workload generator on the VM for a given 'runtime_secs' if
    specified.

    Args:
      vms: (list of curie.vm.Vm) VMs to start workload on.
      runtime_secs: (int) how long to run the workload.
      stagger_secs: (int) Maximum amount of time in which to evenly space the
        start of the workloads.
      seed: (str) seed value to be used by the workload generator.

    Raises:
      Will raise CurieException from execute_async()
    """
        self.__workload_start_secs = int(time.time())
        self._workload_duration_secs = runtime_secs
        if stagger_secs is None:
            stagger_secs = 0
        if runtime_secs is not None:
            self._expected_workload_finish_secs = (
                self.__workload_start_secs + self._workload_duration_secs +
                stagger_secs)
        next_deadline = self.__workload_start_secs
        last_deadline = self.__workload_start_secs + stagger_secs
        if len(vms) < 2:
            interval_secs = 0
        else:
            interval_secs = stagger_secs / (len(vms) - 1)
        log.info("Starting '%s' on a total of %d VM(s)", self._name, len(vms))
        if self._expected_workload_finish_secs is not None:
            log.debug("Expecting '%s' to finish at epoch %d", self._name,
                      self._expected_workload_finish_secs)
        else:
            log.debug("Expecting '%s' to continue running until it is stopped",
                      self._name)
        if interval_secs > 0:
            log.info("Workload start times will be offset by %d seconds",
                     interval_secs)
        for vm_index, vm in enumerate(vms):
            if self._scenario.should_stop():
                log.info(
                    "Test marked to stop; not all workloads will be started")
                return
            log.info("Starting workload on %s with command_id %s",
                     vm.vm_name(), self.get_cmd_id())
            if seed is None:
                seed = self._name
            seed_hash = hash("%s_%d" % (seed, vm_index))
            vm.execute_async(self.get_cmd_id(),
                             self._cmd(randseed=seed_hash),
                             user="******")
            next_deadline += interval_secs
            if interval_secs > 0 and next_deadline <= last_deadline:
                log.info(
                    "Waiting %d seconds before starting the next workload",
                    next_deadline - time.time())
                ScenarioUtil.wait_for_deadline(
                    self._scenario, next_deadline,
                    "waiting to start next workload")
Example #3
 def test_append_cluster_stats_corrupt(self):
     output_dir = os.path.join(environment.test_output_dir(self),
                               "test_append_cluster_stats_corrupt")
     if os.path.isdir(output_dir):
         shutil.rmtree(output_dir)
     metric_to_append = CurieMetric()
     metric_to_append.CopyFrom(self.counter_template)
     metric_to_append.timestamps.extend([1454092320, 1454092321])
     metric_to_append.values.extend([1, 2])
     new_results = {"node_0": [metric_to_append]}
     ScenarioUtil.append_cluster_stats(new_results, output_dir)
     # Corrupt the file.
     filename = ("%s_%s" % (self._counter_template_name(),
                            self.counter_template.instance)).replace(
                                ".", "_")
     bin_path = os.path.join(output_dir, "node_0", filename + ".bin")
     self.assertTrue(os.path.isfile(bin_path))
     with open(bin_path, "w") as f:
         f.write("Cela ne veut pas un protobuf.")
     metric_to_append = CurieMetric()
     metric_to_append.CopyFrom(self.counter_template)
     metric_to_append.timestamps.extend([1454092322, 1454092323])
     metric_to_append.values.extend([3, 4])
     new_results = {"node_0": [metric_to_append]}
     ScenarioUtil.append_cluster_stats(new_results, output_dir)
     expected_metric = CurieMetric()
     expected_metric.CopyFrom(self.counter_template)
     expected_metric.timestamps.extend([1454092322, 1454092323])
     expected_metric.values.extend([3, 4])
     results = ScenarioUtil.read_cluster_stats(output_dir)
     self.assertEqual(results.keys(), ["node_0"])
     self.assertEqual(len(results["node_0"]), 1)
     self.assertEqual(results["node_0"][0], expected_metric)
Example #4
 def test_wait_for_deadline_already_passed(self, time_mock):
     start = time.time()
     time_mock.time.return_value = start
     with self.assertRaises(CurieTestException) as ar:
         ScenarioUtil.wait_for_deadline(self.scenario, start - 5,
                                        "exception to happen")
     self.assertEqual(
         str(ar.exception),
         "Cannot wait for a deadline that has already passed "
         "deadline_secs: %d; now_secs: %d" % (start - 5, start))
     time_mock.sleep.assert_not_called()
Example #5
 def test_wait_for_deadline_missed_grace_period(self, time_mock):
     start = time.time()
     time_mock.time.side_effect = (
         lambda: start + (time_mock.time.call_count - 1) * 60)
     time_mock.sleep.return_value = 0
     with self.assertRaises(CurieTestException) as ar:
         ScenarioUtil.wait_for_deadline(self.scenario, start + 5,
                                        "exception to happen")
     self.assertEqual(
         str(ar.exception),
         "Missed exception to happen deadline by 54s with grace "
         "of 30s")
     time_mock.sleep.assert_has_calls([mock.call(1)])
Example #6
    def get_result_pbs(self):
        """Produces the results for a given workload with the specified design.

    Depending on what the result_type is, generate the appropriate result
    from the iogen. Then, apply various items to the resulting protobuf
    result. Additional parameters may be provided as kwargs in configuration.
    """
        cluster_stats_dir = self.scenario.cluster_stats_dir()
        if cluster_stats_dir is None:
            log.debug(
                "Cluster results not read because the output directory is not "
                "set")
            return []
        try:
            results_map = ScenarioUtil.read_cluster_stats(cluster_stats_dir)
        except (IOError, OSError):
            log.debug(
                "Skipping reporting cluster stats to GUI since they have not "
                "been created yet")
            return []
        if not results_map:
            return []
        results_map = MetricsUtil.filter_results_map(results_map,
                                                     self.metric_name,
                                                     "Aggregated")
        series_list = []
        for node in self.scenario.cluster.nodes():
            for metric in results_map.get(str(node.node_id()), []):
                series_list.append(MetricsUtil.get_series(metric))
        return self.series_list_to_result_pbs(series_list)
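
filter_results_map is called with a metric name and an instance; judging from test_get_result_pbs_partial (Example #13 below), it keeps only metrics whose composed name and instance match. A hedged sketch of that filtering, hypothetical rather than Curie's implementation:

def filter_results_map(results_map, metric_name, instance):
    # Keep, per node, only the metrics matching the requested name/instance.
    filtered = {}
    for node_id, metrics in results_map.items():
        kept = [m for m in metrics
                if MetricsUtil.metric_name(m) == metric_name
                and m.instance == instance]
        if kept:
            filtered[node_id] = kept
    return filtered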
Example #7
 def test_collect_dump_read_stats(self):
     results_map = self.cluster.collect_performance_stats()
     dir_name = environment.test_output_dir(self)
     ScenarioUtil.append_cluster_stats(results_map, dir_name)
     read_results_map = ScenarioUtil.read_cluster_stats(dir_name)
     self.assertEqual(results_map.keys(), read_results_map.keys())
     for node_id in results_map:
         self.assertEqual(len(results_map[node_id]),
                          len(read_results_map[node_id]))
         for expected_metric, metric in zip(results_map[node_id],
                                            read_results_map[node_id]):
             self.assertEqual(MetricsUtil.metric_name(metric),
                              MetricsUtil.metric_name(expected_metric))
             self.assertEqual(metric.instance, expected_metric.instance)
             self.assertEqual(metric.timestamps, expected_metric.timestamps)
             self.assertEqual(metric.values, expected_metric.values)
Example #8
 def test_wait_for_deadline(self, time_mock):
     start = time.time()
     time_mock.time.side_effect = lambda: start + time_mock.time.call_count
     time_mock.sleep.return_value = 0
     ret = ScenarioUtil.wait_for_deadline(self.scenario, start + 5,
                                          "5 iterations to pass")
     self.assertIsNone(ret)
     self.assertEqual(6, time_mock.time.call_count)
     time_mock.sleep.assert_has_calls([mock.call(1)] * 5)
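
The wait_for_deadline tests here all use the same fake-clock trick: the side_effect lambda reads the mock's call_count, so each call to the patched time.time() appears one second (or one minute) later and the poll loop terminates without any real sleeping. The pattern in isolation:

import time
try:
    from unittest import mock  # Python 3
except ImportError:
    import mock  # Python 2 backport

start = 1454092320
with mock.patch("time.time") as fake_time:
    # call_count is incremented before side_effect runs, so the first
    # call already returns start + 1.
    fake_time.side_effect = lambda: start + fake_time.call_count
    assert time.time() == start + 1
    assert time.time() == start + 2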
Example #9
 def test_to_csv_no_header(self):
     metric = CurieMetric()
     metric.CopyFrom(self.counter_template)
     metric.timestamps.extend([1454092320, 1454092321])
     metric.values.extend([1, 2])
     new_results = {"node_0": [metric]}
     csv = ScenarioUtil.results_map_to_csv(new_results, header=False)
     self.assertEqual(
         csv, "1454092320,node_0,CpuUsage.Avg.Percent,Aggregated,1\n" +
         "1454092321,node_0,CpuUsage.Avg.Percent,Aggregated,2\n")
Example #10
 def test_to_csv_newline(self):
     metric = CurieMetric()
     metric.CopyFrom(self.counter_template)
     metric.timestamps.extend([1454092320, 1454092321])
     metric.values.extend([1, 2])
     new_results = {"node_0": [metric]}
     csv = ScenarioUtil.results_map_to_csv(new_results, newline="\r\n")
     self.assertEqual(
         csv, "timestamp,node_id,metric_name,instance,value\r\n" +
         "1454092320,node_0,CpuUsage.Avg.Percent,Aggregated,1\r\n" +
         "1454092321,node_0,CpuUsage.Avg.Percent,Aggregated,2\r\n")
Example #11
 def test_wait_for_deadline_should_stop(self, time_mock):
     start = time.time()
     time_mock.time.side_effect = lambda: start + time_mock.time.call_count
     time_mock.sleep.return_value = 0
     with mock.patch.object(self.scenario,
                            "should_stop") as mock_should_stop:
         mock_should_stop.side_effect = [False, False, True]
         ret = ScenarioUtil.wait_for_deadline(
             self.scenario, start + 5, "5 iterations to pass, only to be "
             "interrupted after 2")
     self.assertIsNone(ret)
     self.assertEqual(3, time_mock.time.call_count)
     time_mock.sleep.assert_has_calls([mock.call(1)] * 2)
Example #12
  def _cluster_results_update(self, start_time_secs=None):
    """
    Update the in-memory and on-disk cluster results.

    Returns:
      int or None: Epoch time passed through from
        ScenarioUtil.append_cluster_stats, or None if no data was collected.
    """
    cluster_stats_dir = self.cluster_stats_dir()
    if cluster_stats_dir is None:
      log.warning("Cluster results not collected because the output directory "
                  "is not set")
      return None
    try:
      # If data has been collected before, start_time_secs marks the previous
      # collection time, and only data after that point is gathered.
      # Otherwise, it is None, and the default number of the latest samples
      # will be collected.
      results_map = self.cluster.collect_performance_stats(
        start_time_secs=start_time_secs)
    except Exception:
      log.exception("An exception occurred while updating cluster results. "
                    "This operation will be retried.")
      return None
    else:
      csv_path = os.path.join(cluster_stats_dir, "cluster_stats.csv")
      appended_time_secs = ScenarioUtil.append_cluster_stats(
        results_map, cluster_stats_dir)
      # Write the results in CSV format for easier analysis.
      if os.path.isfile(csv_path):
        mode = "a"
        header = False
      else:
        mode = "w"
        header = True
      with open(csv_path, mode) as csv_file:
        csv_file.write(
          ScenarioUtil.results_map_to_csv(results_map, header=header))
      return appended_time_secs
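
The CSV handling above is an append-or-create pattern: the header is written only when the file is new, and later collections append header-less rows so repeated updates build one continuous CSV. Reduced to a standalone sketch (hypothetical helper):

import os

def append_stats_csv(csv_path, header_line, rows):
    # Write the header only when creating the file; otherwise just append.
    write_header = not os.path.isfile(csv_path)
    with open(csv_path, "a") as csv_file:
        if write_header:
            csv_file.write(header_line + "\n")
        for row in rows:
            csv_file.write(",".join(str(field) for field in row) + "\n")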
Example #13
 def test_get_result_pbs_partial(self):
     result = ClusterResult(self.scenario, "fake_result",
                            "NetReceived.Avg.KilobytesPerSecond")
     metric = CurieMetric(
         name=CurieMetric.kNetReceived,
         description="Average network data received across all "
         "interfaces.",
         instance="Aggregated",
         type=CurieMetric.kGauge,
         consolidation=CurieMetric.kAvg,
         unit=CurieMetric.kKilobytes,
         rate=CurieMetric.kPerSecond,
         experimental=True)
     ScenarioUtil.append_cluster_stats(
         {
             "169.254.0.0": [metric],
             "169.254.0.1": [metric],
             "169.254.0.2": [],
             "169.254.0.3": [],
         }, self.scenario.cluster_stats_dir())
     pbs = result.get_result_pbs()
     self.assertEqual(2, len(pbs))
     self.assertIsInstance(pbs[0], CurieTestResult)
     self.assertIsInstance(pbs[1], CurieTestResult)
Example #14
 def test_to_csv_rate(self):
     metric = CurieMetric()
     metric.CopyFrom(self.counter_template)
     metric.timestamps.extend([1454092320, 1454092321])
     metric.values.extend([1, 2])
     metric.name = CurieMetric.kNetTransmitted
     metric.unit = CurieMetric.kKilobytes
     metric.rate = CurieMetric.kPerSecond
     new_results = {"node_0": [metric]}
     csv = ScenarioUtil.results_map_to_csv(new_results, newline="\r\n")
     self.assertEqual(
         csv,
         "timestamp,node_id,metric_name,instance,value\r\n"
         "1454092320,node_0,NetTransmitted.Avg.KilobytesPerSecond,Aggregated,1\r\n"
         "1454092321,node_0,NetTransmitted.Avg.KilobytesPerSecond,Aggregated,2\r\n")
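
Judging by the expected strings in these CSV tests, the metric_name column is composed from the metric's name, consolidation, unit, and rate fields: kNetTransmitted with kAvg, kKilobytes, and kPerSecond becomes "NetTransmitted.Avg.KilobytesPerSecond". A hypothetical helper showing that composition (MetricsUtil.metric_name is the real source):

def metric_name(name, consolidation, unit, rate=""):
    # "<Name>.<Consolidation>.<Unit><Rate>", as seen in the CSV output.
    return "%s.%s.%s%s" % (name, consolidation, unit, rate)

assert (metric_name("NetTransmitted", "Avg", "Kilobytes", "PerSecond")
        == "NetTransmitted.Avg.KilobytesPerSecond")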
Example #15
 def _run(self):
   ScenarioUtil.prereq_metadata_can_run_failure_scenario(
     self.scenario.cluster.metadata())
Example #16
 def _run(self):
   if not ScenarioUtil.prereq_runtime_cluster_is_ready(self.scenario.cluster):
     ScenarioUtil.prereq_runtime_cluster_is_ready_fix(self.scenario.cluster)
Example #17
 def _run(self):
   ScenarioUtil.prereq_runtime_storage_cluster_mgmt_cluster_match(
     self.scenario.cluster)
Example #18
 def test_append_read_cluster_stats_empty(self):
     output_dir = os.path.join(environment.test_output_dir(self),
                               "test_append_read_cluster_stats_empty")
     if os.path.isdir(output_dir):
         shutil.rmtree(output_dir)
     empty_metric = CurieMetric()
     empty_metric.CopyFrom(self.counter_template)
     del empty_metric.timestamps[:]
     del empty_metric.values[:]
     self.assertEqual(empty_metric.timestamps, [])
     self.assertEqual(empty_metric.values, [])
     # Write empty.
     new_results = {"node_0": [empty_metric]}
     ScenarioUtil.append_cluster_stats(new_results, output_dir)
     results = ScenarioUtil.read_cluster_stats(output_dir)
     self.assertEqual(results.keys(), ["node_0"])
     self.assertEqual(len(results["node_0"]), 1)
     self.assertEqual(results["node_0"][0], empty_metric)
     # Append empty.
     new_results = {"node_0": [empty_metric]}
     ScenarioUtil.append_cluster_stats(new_results, output_dir)
     results = ScenarioUtil.read_cluster_stats(output_dir)
     self.assertEqual(results.keys(), ["node_0"])
     self.assertEqual(len(results["node_0"]), 1)
     self.assertEqual(results["node_0"][0], empty_metric)
     # Append non-empty.
     non_empty_metric = CurieMetric()
     non_empty_metric.CopyFrom(self.counter_template)
     non_empty_metric.timestamps.extend([1454092320, 1454092321])
     non_empty_metric.values.extend([1, 2])
     new_results = {"node_0": [non_empty_metric]}
     ScenarioUtil.append_cluster_stats(new_results, output_dir)
     results = ScenarioUtil.read_cluster_stats(output_dir)
     self.assertEqual(results.keys(), ["node_0"])
     self.assertEqual(len(results["node_0"]), 1)
     self.assertEqual(results["node_0"][0], non_empty_metric)
     # Append empty again.
     new_results = {"node_0": [empty_metric]}
     ScenarioUtil.append_cluster_stats(new_results, output_dir)
     results = ScenarioUtil.read_cluster_stats(output_dir)
     self.assertEqual(results.keys(), ["node_0"])
     self.assertEqual(len(results["node_0"]), 1)
     self.assertEqual(results["node_0"][0], non_empty_metric)