def test_runlog_integration(tmpdir):
    """RunLog system works as intended"""

    db = rl.RunLogDB(ArgsMock(workdir=tmpdir))
    log_handles_before = db.list()

    start_meta = {"begin": True}
    stop_meta = {"end": True}
    combined_meta = dict(start_meta)  # copy so start_meta is not polluted by the stop keys
    combined_meta.update(stop_meta)
    log_lines = [{"foo": 1}, {"foo": 2}, {"foo": 3, "nolog": True}]
    log = db.new_log()
    log.start(meta=start_meta,
              log_filter=lambda x: None if "nolog" in x else x)
    log.log(log_lines)
    log.stop(meta=stop_meta)

    log_handles_after = db.list()
    assert len(
        log_handles_after) == len(log_handles_before) + 1, "new log is listed"
    log = db.read_log(log_handles_after[-1])
    read_meta = log.get_meta()
    assert "log_lines" in read_meta and read_meta[
        "log_lines"] == 2, "has right number of lines in metadata"
    assert all(k in read_meta and read_meta[k] == combined_meta[k]
               for k in combined_meta), "log metadata is correct"
    read_lines = [line for line in log]
    assert len(read_lines) == 2, "log has correct number of lines"
    assert read_lines == log_lines[:2], "log lines have correct content"
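# The tests in this listing construct rl.RunLogDB from an ArgsMock(workdir=tmpdir)
# helper that is not shown here. A minimal sketch of such a mock, assuming that
# RunLogDB only reads the `workdir` attribute of the parsed-arguments object
# (hypothetical stand-in, not the project's actual test helper):
class ArgsMock:
    def __init__(self, workdir):
        # only attribute these RunLogDB tests rely on
        self.workdir = str(workdir)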
    def __init__(self, args, module_dir, tmp_dir):
        super(LogMode, self).__init__(args, module_dir, tmp_dir)
        self.log_db = rl.RunLogDB(self.args)
        self.tag_db = tdb.TagsDB(self.args)
        self.tag_db.remove_dangling(self.log_db.list(), save=True)

        # Check arguments
        if len(self.args.include) == 0:
            logger.debug("No --include argument given, defaulting to `all`")
            self.args.include.append("all")
def test_runlog_db_instance(tmpdir):
    """RunLogDB can list log handles"""

    db = rl.RunLogDB(ArgsMock(workdir=tmpdir))
    handle_list = db.list()
    dir_list = db.list_logs()
    assert type(handle_list) is list, "handle listing is a python list"
    assert type(dir_list) is list, "dir listing is a python list"
    assert len(handle_list) == 0, "empty db yields empty handle list"
    assert len(dir_list) == 0, "empty db yields empty dir list"
def test_runlog_db_file_handling(tmpdir):
    """RunLogDB associates handles and directories"""

    db = rl.RunLogDB(ArgsMock(workdir=tmpdir))

    now = datetime.datetime.utcnow().strftime("%Y-%m-%dZ%H-%M-%S")
    dir_name = os.path.abspath(db.handle_to_dir_name(now))
    assert dir_name.startswith(
        str(tmpdir)), "log files are stored in the right location"
    handle_name = db.dir_name_to_handle(dir_name)
    assert now == handle_name, "converting between handles and file names works"
def test_runlog_db_rw(tmpdir):
    """RunLogDB can read and write"""

    db = rl.RunLogDB(ArgsMock(workdir=tmpdir))

    in_db_before = db.list()
    now = datetime.datetime.utcnow().strftime("%Y-%m-%dZ%H-%M-%S")
    assert now not in in_db_before, "database is not cluttered"
    test_data = "hello,test"
    db.write(now, "test", test_data)
    in_db_after = db.list()
    assert now in in_db_after, "written log appears in db listing"
    read_test_data = db.read(now, "test")
    assert test_data == read_test_data, "read data matches written data"
def test_runlog_rw(tmpdir):
    """RunLog objects can write and read"""

    db = rl.RunLogDB(ArgsMock(workdir=tmpdir))
    now = datetime.datetime.utcnow().strftime("%Y-%m-%dZ%H-%M-%S")
    log = rl.RunLog(now, "w", db)

    # Write something to log using the with statement
    with log:
        log.log({"foo": 5})
        log.update_meta({"just": "testing"})
        assert log.is_running, "log is running in with"
    assert not log.is_running, "log is not running after with"

    # Write something to log. Previous content will be overwritten
    log.start(meta={"first_meta": "one"})
    log.log([{"foo": 1}, {"foo": 2}])
    log.log({"foo": 3})
    assert not log.has_finished(), "log is not marked `finished` while logging"
    log.stop(meta={"last_meta": "two"})
    assert log.has_finished(), "log is marked `finished` after logging"

    test_log_dir = db.handle_to_dir_name(now)
    assert os.path.isdir(test_log_dir), "log directory is created"

    assert len(os.listdir(
        test_log_dir)) == 2, "log and meta files are written to disk"

    # Read from log
    log = rl.RunLog(now, "r", db)
    assert log.has_finished(), "completed log is marked as `finished`"
    meta = log.get_meta()
    # Metadata always carries "format_revision", "log_lines" and "run_completed";
    # together with "first_meta" and "last_meta" from the run above that makes 5 keys
    assert len(meta) == 5, "log has correct number of meta data keys"
    log_lines = [line for line in log]
    assert len(log_lines) == 3, "log has correct number of lines"
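# Putting the read path together: a small sketch that summarizes the newest log,
# using only the rl.RunLogDB / rl.RunLog calls exercised above (list(), read_log(),
# get_meta() and line iteration) and assuming list() returns handles in
# chronological order, as test_runlog_integration does.
def summarize_latest_log(db):
    handles = db.list()
    if not handles:
        return None, 0
    log = db.read_log(handles[-1])      # newest handle, assuming a sorted listing
    meta = log.get_meta()               # includes "log_lines", "run_completed", ...
    line_count = sum(1 for _ in log)    # iterate the logged lines
    return meta, line_count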
    def run(self):
        # Perform the scan
        self.start_time = datetime.datetime.now()

        meta = {
            "tlscanary_version": pkgr.require("tlscanary")[0].version,
            "mode": self.name,
            "args": vars(self.args),
            "argv": sys.argv,
            "sources_size": len(self.sources),
            "test_metadata": self.test_metadata,
            "base_metadata": self.base_metadata,
            "run_start_time": datetime.datetime.utcnow().isoformat()
        }

        rldb = rl.RunLogDB(self.args)
        log = rldb.new_log()
        log.start(meta=meta)

        test_uri_sets = []
        base_uri_sets = []

        self.total_change = 0
        test_speed_aggregate = 0
        base_speed_aggregate = 0

        for i in range(0, self.args.scans):
            test_uri_sets.append(
                self.run_test(self.test_app,
                              self.sources,
                              profile=self.test_profile,
                              prefs=self.args.prefs_test,
                              get_info=True,
                              get_certs=not self.args.remove_certs,
                              return_only_errors=False))

            base_uri_sets.append(
                self.run_test(self.base_app,
                              self.sources,
                              profile=self.base_profile,
                              prefs=self.args.prefs_base,
                              get_info=True,
                              get_certs=not self.args.remove_certs,
                              return_only_errors=False))

        # extract connection speed from all scans
        test_connections_all = []
        for uri_set in test_uri_sets:
            test_connections_all.append(self.extract_connection_speed(uri_set))

        base_connections_all = []
        for uri_set in base_uri_sets:
            base_connections_all.append(self.extract_connection_speed(uri_set))

        # collapse all scan data into one URI set
        self.consolidate_connection_speed_info(test_uri_sets[0],
                                               test_connections_all)
        self.consolidate_connection_speed_info(base_uri_sets[0],
                                               base_connections_all)

        # the first URI set becomes our primary set
        self.test_uri_set = test_uri_sets[0]
        self.base_uri_set = base_uri_sets[0]

        # new values to be inserted into response
        for test_record in self.test_uri_set:
            # find the base record for the same host (index 1 holds the host name)
            base_record = next(d for d in self.base_uri_set
                               if d[1] == test_record[1])
            test_response_time = float(
                test_record[2].response.connection_speed_average)
            base_response_time = float(
                base_record[2].response.connection_speed_average)
            test_speed_aggregate += test_response_time
            base_speed_aggregate += base_response_time
            pct_change = float((test_response_time - base_response_time) /
                               base_response_time) * 100
            test_record[2].response.connection_speed_change = int(pct_change)
            # save the speed samples of the base record to the test record for now,
            # in case we decide we want to include this in the report later
            test_record[2].response.connection_speed_base_samples = \
                base_record[2].response.connection_speed_samples

        self.total_change = float(
            (test_speed_aggregate - base_speed_aggregate) /
            base_speed_aggregate) * 100

        for rank, host, result in self.test_uri_set:
            log.log(result.as_dict())

        meta["run_finish_time"] = datetime.datetime.utcnow().isoformat()
        meta["total_change"] = self.total_change
        self.save_profile(self.test_profile, "test_profile", log)
        self.save_profile(self.base_profile, "base_profile", log)
        log.stop(meta=meta)
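# The speed comparison in run() above reduces to one relative-change formula,
# applied per host (pct_change) and in aggregate (total_change). A hypothetical
# helper restating that arithmetic, not part of the mode class itself:
def percent_change(test_value, base_value):
    # e.g. a 120 ms test average against a 100 ms base -> 20.0, i.e. 20% slower
    return float(test_value - base_value) / base_value * 100.0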
    def run(self):
        global logger

        logger.info(
            "Testing Firefox %s %s against Firefox %s %s" %
            (self.test_metadata["app_version"], self.test_metadata["branch"],
             self.base_metadata["app_version"], self.base_metadata["branch"]))

        self.start_time = datetime.datetime.now()

        meta = {
            "tlscanary_version": pkgr.require("tlscanary")[0].version,
            "mode": self.name,
            "args": vars(self.args),
            "argv": sys.argv,
            "sources_size": len(self.sources),
            "test_metadata": self.test_metadata,
            "base_metadata": self.base_metadata,
            "run_start_time": datetime.datetime.utcnow().isoformat()
        }

        rldb = rl.RunLogDB(self.args)
        log = rldb.new_log()
        log.start(meta=meta)
        progress = pr.ProgressTracker(total=len(self.sources),
                                      unit="hosts",
                                      average=30 * 60.0)

        limit = len(
            self.sources) if self.args.limit is None else self.args.limit

        # Split work into 50 chunks to conserve memory, but make no chunk smaller than 1000 hosts
        next_chunk = self.sources.iter_chunks(chunk_size=int(limit / 50),
                                              min_chunk_size=1000)

        try:
            while True:
                host_set_chunk = next_chunk(as_set=True)
                if host_set_chunk is None:
                    break

                logger.info("Starting regression run on chunk of %d hosts" %
                            len(host_set_chunk))

                error_set = self.run_regression_passes(
                    host_set_chunk,
                    report_completed=progress.log_completed,
                    report_overhead=progress.log_overhead)
                # Log progress per chunk
                logger.info("Progress: %s" % str(progress))

                # Commit results to log
                for rank, host, result in error_set:
                    log.log(result.as_dict())

        except KeyboardInterrupt:
            logger.critical("Ctrl-C received")
            progress.stop_reporting()
            raise  # re-raise with the original traceback

        finally:
            progress.stop_reporting()

        meta["run_finish_time"] = datetime.datetime.utcnow().isoformat()
        self.save_profile(self.test_profile, "test_profile", log)
        self.save_profile(self.base_profile, "base_profile", log)
        self.save_profile(self.altered_profile, "altered_profile", log)
        log.stop(meta=meta)
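# The chunking comment above ("50 chunks, but no chunk smaller than 1000 hosts")
# implies a simple effective chunk size. Hypothetical helper, assuming iter_chunks()
# honours chunk_size and min_chunk_size exactly as that comment describes:
def effective_chunk_size(limit, target_chunks=50, min_chunk_size=1000):
    return max(int(limit / target_chunks), min_chunk_size)

# 200,000 hosts -> 4,000 per chunk (50 chunks); 20,000 hosts -> 1,000 per chunk (20 chunks)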