Ejemplo n.º 1
0
    def run_experiment(self, db_options, db_path):
        # type: (DatabaseOptions, str) -> Tuple[Dict, str]
        """Run the benchmark experiment and assemble its data sources.

        Sets up the database, runs the experiment command, parses its
        output, and returns a tuple (data_sources, throughput) where
        data_sources maps each DataSource.Type to the list of source
        objects built from this run.

        NOTE(review): the original type comment declared
        `(List[str], str) -> str`, but db_options must expose
        get_column_families() and the method returns a 2-tuple.
        """
        self._setup_db_before_experiment(db_options, db_path)
        command = self._build_experiment_command(db_options, db_path)
        self._run_command(command)

        parsed_output = self._parse_output(get_perf_context=True)

        # Get the log file prefix and the frequency at which stats are
        # dumped into the logs from the OPTIONS file, so the LOG-based
        # data sources read the right files.
        logs_file_prefix, stats_freq_sec = self.get_log_options(
            db_options, parsed_output[self.DB_PATH])
        # Create the LOGS object
        db_logs = DatabaseLogs(logs_file_prefix,
                               db_options.get_column_families())
        # Create the Log STATS object
        db_log_stats = LogStatsParser(logs_file_prefix, stats_freq_sec)
        # Create the PerfContext STATS object
        db_perf_context = DatabasePerfContext(parsed_output[self.PERF_CON], 0,
                                              False)
        data_sources = {
            DataSource.Type.DB_OPTIONS: [db_options],
            DataSource.Type.LOG: [db_logs],
            DataSource.Type.TIME_SERIES: [db_log_stats, db_perf_context]
        }
        # Create the ODS STATS object; 'key_prefix' is optional in
        # ods_args (the sibling implementation guards for its absence),
        # so use dict.get to avoid a KeyError when it is missing.
        if self.ods_args:
            data_sources[DataSource.Type.TIME_SERIES].append(
                OdsStatsFetcher(self.ods_args['client_script'],
                                self.ods_args['entity'],
                                self.ods_args.get('key_prefix', '')))
        return data_sources, parsed_output[self.THROUGHPUT]
Ejemplo n.º 2
0
def main(args):
    """Load the rules spec, build the data sources, and print rules that fire."""
    # Parse and sanity-check the rules specification file.
    spec_parser = RulesSpec(args.rules_spec)
    spec_parser.load_rules_from_spec()
    spec_parser.perform_section_checks()
    # Build each data source the rules will be evaluated against.
    db_options = DatabaseOptions(args.rocksdb_options)
    column_families = db_options.get_column_families()
    db_logs = DatabaseLogs(args.log_files_path_prefix, column_families)
    db_log_stats = LogStatsParser(
        args.log_files_path_prefix, args.stats_dump_period_sec)
    data_sources = {
        DataSource.Type.DB_OPTIONS: [db_options],
        DataSource.Type.LOG: [db_logs],
        DataSource.Type.TIME_SERIES: [db_log_stats],
    }
    # ODS stats are only available when a client script was supplied.
    if args.ods_client:
        ods_fetcher = OdsStatsFetcher(
            args.ods_client, args.ods_entity, args.ods_tstart,
            args.ods_tend, args.ods_key_prefix)
        data_sources[DataSource.Type.TIME_SERIES].append(ods_fetcher)
    # Evaluate the rules and report the ones that triggered.
    triggered_rules = spec_parser.get_triggered_rules(
        data_sources, column_families)
    spec_parser.print_rules(triggered_rules)
Ejemplo n.º 3
0
 def test_check_and_trigger_conditions(self):
     """Trigger log conditions against the LOG-0 fixture and check results."""
     base_dir = os.path.abspath(os.path.dirname(__file__))
     log_prefix = os.path.join(base_dir, 'input_files/LOG-0')
     db_logs = DatabaseLogs(
         log_prefix, ['default', 'col-fam-A', 'col-fam-B'])
     # Three conditions: one matching in two column families, one matching
     # a multi-line message, and one matching nothing.
     two_cf_cond = LogCondition.create(Condition('cond-A'))
     two_cf_cond.set_parameter('regex', 'random log message')
     multiline_cond = LogCondition.create(Condition('cond-B'))
     multiline_cond.set_parameter('regex', 'continuing on next line')
     unmatched_cond = LogCondition.create(Condition('cond-C'))
     unmatched_cond.set_parameter('regex', 'this should match no log')
     db_logs.check_and_trigger_conditions(
         [two_cf_cond, multiline_cond, unmatched_cond])
     # cond-A fires for col-fam-A and for lines with no column family.
     trigger_a = two_cf_cond.get_trigger()
     self.assertEqual(2, len(trigger_a.keys()))
     self.assertSetEqual(
         {'col-fam-A', NO_COL_FAMILY}, set(trigger_a.keys())
     )
     self.assertEqual(2, len(trigger_a['col-fam-A']))
     expected_msgs = [
         "[db/db_impl.cc:563] [col-fam-A] random log message for testing",
         "[db/db_impl.cc:653] [col-fam-A] another random log message"
     ]
     for log_entry in trigger_a['col-fam-A']:
         self.assertIn(log_entry.get_message(), expected_msgs)
     self.assertEqual(1, len(trigger_a[NO_COL_FAMILY]))
     self.assertEqual(
         trigger_a[NO_COL_FAMILY][0].get_message(),
         "[db/db_impl.cc:331] [unknown] random log message no column family"
     )
     # cond-B fires once, for a multi-line message in col-fam-B only.
     trigger_b = multiline_cond.get_trigger()
     self.assertEqual(['col-fam-B'], list(trigger_b.keys()))
     self.assertEqual(1, len(trigger_b['col-fam-B']))
     self.assertEqual(
         trigger_b['col-fam-B'][0].get_message(),
         "[db/db_impl.cc:234] [col-fam-B] log continuing on next line\n" +
         "remaining part of the log"
     )
     # cond-C never fires.
     self.assertIsNone(unmatched_cond.get_trigger())
Ejemplo n.º 4
0
 def test_check_and_trigger_conditions(self):
     """Verify which log conditions trigger, and for which column families."""
     here = os.path.abspath(os.path.dirname(__file__))
     prefix = os.path.join(here, 'input_files/LOG-0')
     cf_names = ['default', 'col-fam-A', 'col-fam-B']
     db_logs = DatabaseLogs(prefix, cf_names)
     # Build the conditions table-style: cond-A matches in two column
     # families, cond-B matches a multi-line message, cond-C matches
     # no log line at all.
     specs = [
         ('cond-A', 'random log message'),
         ('cond-B', 'continuing on next line'),
         ('cond-C', 'this should match no log'),
     ]
     conds = []
     for cond_name, regex in specs:
         cond = LogCondition.create(Condition(cond_name))
         cond.set_parameter('regex', regex)
         conds.append(cond)
     db_logs.check_and_trigger_conditions(conds)
     # cond-A: two keys — col-fam-A plus the no-column-family bucket.
     trig_a = conds[0].get_trigger()
     self.assertEqual(2, len(trig_a.keys()))
     self.assertSetEqual({'col-fam-A', NO_COL_FAMILY},
                         set(trig_a.keys()))
     self.assertEqual(2, len(trig_a['col-fam-A']))
     allowed = [
         "[db/db_impl.cc:563] [col-fam-A] random log message for testing",
         "[db/db_impl.cc:653] [col-fam-A] another random log message"
     ]
     self.assertIn(trig_a['col-fam-A'][0].get_message(), allowed)
     self.assertIn(trig_a['col-fam-A'][1].get_message(), allowed)
     self.assertEqual(1, len(trig_a[NO_COL_FAMILY]))
     self.assertEqual(
         trig_a[NO_COL_FAMILY][0].get_message(),
         "[db/db_impl.cc:331] [unknown] random log message no column family"
     )
     # cond-B: a single multi-line match in col-fam-B.
     trig_b = conds[1].get_trigger()
     self.assertEqual(['col-fam-B'], list(trig_b.keys()))
     self.assertEqual(1, len(trig_b['col-fam-B']))
     self.assertEqual(
         trig_b['col-fam-B'][0].get_message(),
         "[db/db_impl.cc:234] [col-fam-B] log continuing on next line\n" +
         "remaining part of the log")
     # cond-C: no trigger.
     self.assertIsNone(conds[2].get_trigger())
Ejemplo n.º 5
0
 def setUp(self):
     """Load the test rules spec and the LOG/OPTIONS data sources."""
     base = os.path.abspath(os.path.dirname(__file__))
     # Rules: parse the spec file and run its sanity checks.
     self.db_rules = RulesSpec(
         os.path.join(base, 'input_files/test_rules.ini'))
     self.db_rules.load_rules_from_spec()
     self.db_rules.perform_section_checks()
     # Data sources: OPTIONS first (it also supplies the column families
     # the LOG parser needs), then the LOG parser.
     options_source = DatabaseOptions(
         os.path.join(base, 'input_files/OPTIONS-000005'))
     self.column_families = options_source.get_column_families()
     logs_source = DatabaseLogs(
         os.path.join(base, 'input_files/LOG-1'), self.column_families)
     self.data_sources = [options_source, logs_source]
Ejemplo n.º 6
0
    def run_experiment(self, db_options, db_path):
        """Run the db_bench experiment and assemble its data sources.

        Sets up the database, times the benchmark run, parses its output,
        and returns a tuple (data_sources, throughput). The recorded
        start/end timestamps bound the time window used by the optional
        ODS stats fetcher.
        """
        # setup the Rocksdb database before running experiment
        self._setup_db_before_experiment(db_options, db_path)
        # get the command to run the experiment
        command = self._build_experiment_command(db_options, db_path)
        experiment_start_time = int(time.time())
        # run experiment
        self._run_command(command)
        experiment_end_time = int(time.time())
        # parse the db_bench experiment output
        parsed_output = self._parse_output(get_perf_context=True)

        # get the log files path prefix and frequency at which Rocksdb stats
        # are dumped in the logs
        logs_file_prefix, stats_freq_sec = self.get_log_options(
            db_options, parsed_output[self.DB_PATH])
        # create the RocksDB LOGS object
        db_logs = DatabaseLogs(logs_file_prefix,
                               db_options.get_column_families())
        # Create the Log STATS object
        db_log_stats = LogStatsParser(logs_file_prefix, stats_freq_sec)
        # Create the PerfContext STATS object
        db_perf_context = DatabasePerfContext(parsed_output[self.PERF_CON], 0,
                                              False)
        # create the data-sources dictionary
        data_sources = {
            DataSource.Type.DB_OPTIONS: [db_options],
            DataSource.Type.LOG: [db_logs],
            DataSource.Type.TIME_SERIES: [db_log_stats, db_perf_context]
        }
        # Create the ODS STATS object; 'key_prefix' is optional, so fetch
        # it with dict.get (single lookup, same '' default as the original
        # membership-check-then-index sequence).
        if self.ods_args:
            data_sources[DataSource.Type.TIME_SERIES].append(
                OdsStatsFetcher(self.ods_args['client_script'],
                                self.ods_args['entity'], experiment_start_time,
                                experiment_end_time,
                                self.ods_args.get('key_prefix', '')))
        # return the experiment's data-sources and throughput
        return data_sources, parsed_output[self.THROUGHPUT]
Ejemplo n.º 7
0
def main(args):
    """Load the rules spec, dump its contents, and print triggered rules."""
    # Load the rules with their conditions and suggestions, then run the
    # per-section sanity checks.
    db_rules = RulesSpec(args.rules_spec)
    db_rules.load_rules_from_spec()
    db_rules.perform_section_checks()
    rules_dict = db_rules.get_rules_dict()
    conditions_dict = db_rules.get_conditions_dict()
    suggestions_dict = db_rules.get_suggestions_dict()
    # Dump everything that was parsed, one section at a time.
    for heading, entries in (('RULES', rules_dict),
                             ('CONDITIONS', conditions_dict),
                             ('SUGGESTIONS', suggestions_dict)):
        print()
        print(heading)
        for entry in entries.values():
            print(repr(entry))

    # Initialise the data sources.
    data_sources = [
        DatabaseOptions(args.rocksdb_options),
        DatabaseLogs(args.rocksdb_log_prefix),
    ]

    # Evaluate every condition against the provided data sources.
    trigger_conditions(data_sources, conditions_dict)

    # Report each rule whose conditions fired, with its suggestions.
    print()
    for rule in get_triggered_rules(rules_dict, conditions_dict):
        print('Rule: ' + rule.name + ' has been triggered and:')
        for sugg_name in rule.get_suggestions():
            print(suggestions_dict[sugg_name])