def test_set_under_min_report(mock_consensus, conf, datadir):
    # The number of relays (1) is the same as the ones in the consensus,
    # therefore no relay is excluded and under_min_report is not set.
    mock_consensus.return_value = 1
    state_fpath = conf['paths']['state_fpath']
    results = load_result_file(str(datadir.join("results.txt")))
    v3bwfile = V3BWFile.from_results(results, '', '', state_fpath)
    bwl = v3bwfile.bw_lines[0]
    assert not hasattr(bwl, "vote")
    assert not hasattr(bwl, "under_min_report")
    assert bwl.bw != 1

    # The number of relays is the same as the ones in the consensus,
    # but after filtering none remain, so under_min_report is set to 1
    # and unmeasured is also set to 1.
    # After filtering, the relay is excluded because it has only 1 successful
    # result and it should have at least 2 (min_num).
    v3bwfile = V3BWFile.from_results(results, '', '', state_fpath, min_num=2)
    bwl = v3bwfile.bw_lines[0]
    assert bwl.vote == 0
    assert bwl.under_min_report == 1
    assert bwl.unmeasured == 1
    assert bwl.bw == 1

    # The number of relays after scaling is less than 60% of the network,
    # therefore the relays are excluded and under_min_report is set to 1.
    mock_consensus.return_value = 3
    v3bwfile = V3BWFile.from_results(results, '', '', state_fpath)
    bwl = v3bwfile.bw_lines[0]
    assert bwl.vote == 0
    assert bwl.under_min_report == 1
    assert bwl.bw != 1
def test_torflow_scale(datadir):
    results = load_result_file(str(datadir.join("results.txt")))
    v3bwfile = V3BWFile.from_results(results, scaling_method=TORFLOW_SCALING)
    assert v3bwfile.bw_lines[0].bw == 1000
    v3bwfile = V3BWFile.from_results(results, scaling_method=TORFLOW_SCALING,
                                     torflow_cap=0.0001)
    assert v3bwfile.bw_lines[0].bw == 1000
    v3bwfile = V3BWFile.from_results(results, scaling_method=TORFLOW_SCALING,
                                     torflow_cap=1, torflow_round_digs=0)
    assert v3bwfile.bw_lines[0].bw == 524
def test_from_results_read(datadir, tmpdir, conf, args):
    results = load_result_file(str(datadir.join("results.txt")))
    expected_header = V3BWHeader(timestamp_l,
                                 earliest_bandwidth=earliest_bandwidth,
                                 latest_bandwidth=latest_bandwidth)
    expected_bwls = [V3BWLine.from_results(results[fp]) for fp in results]
    # bw is now stored in B, not KB
    expected_bwls[0].bw = round(expected_bwls[0].bw / 1000)
    expected_f = V3BWFile(expected_header, expected_bwls)
    # This call converts bw to KB
    v3bwfile = V3BWFile.from_results(results)
    assert str(expected_f)[1:] == str(v3bwfile)[1:]
    output = os.path.join(args.output, now_fname())
    v3bwfile.write(output)
def test_update_progress(datadir, tmpdir):
    bw_lines_raw = []
    number_consensus_relays = 6
    state = {}
    header = V3BWHeader(str(now_unixts()))
    results = load_result_file(str(datadir.join("results_away.txt")))
    for fp, values in results.items():
        # log.debug("Relay fp %s", fp)
        line = V3BWLine.from_results(values)
        if line is not None:
            bw_lines_raw.append(line)
    bwfile = V3BWFile(header, [])
    bwfile.update_progress(len(bw_lines_raw), header, number_consensus_relays,
                           state)
    assert header.percent_eligible_relays == '50'
    assert state.get('min_perc_reached') is None
    # Test that the headers are also included when there are enough eligible
    # relays.
    number_consensus_relays = 3
    header = V3BWHeader(str(now_unixts()))
    bwfile.update_progress(len(bw_lines_raw), header, number_consensus_relays,
                           state)
    assert state.get('min_perc_reached') == now_isodt_str()
    assert header.minimum_number_eligible_relays == '2'
    assert header.minimum_percent_eligible_relays == str(MIN_REPORT)
    assert header.number_consensus_relays == '3'
    assert header.number_eligible_relays == '3'
    assert header.percent_eligible_relays == '100'
def test_torflow_scale_no_consensus_bw(datadir, conf, caplog):
    state_fpath = conf['paths']['state_fpath']
    results = load_result_file(str(
        datadir.join("results_no_consensus_bw.txt")))
    caplog.set_level(logging.DEBUG)
    v3bwfile = V3BWFile.from_results(results, '', '', state_fpath)
    assert v3bwfile.bw_lines[0].bw == 26
def test_from_arg_results_write_read(datadir, tmpdir, conf, args):
    results = load_result_file(str(datadir.join("results.txt")))
    v3bwfile = V3BWFile.from_results(results)
    output = os.path.join(args.output, now_fname())
    v3bwfile.write(output)
    with open(output) as fd:
        v3bw = fd.read()
    assert v3bw == str(v3bwfile)
def main(args, conf):
    os.makedirs(conf.getpath('paths', 'v3bw_dname'), exist_ok=True)
    datadir = conf.getpath('paths', 'datadir')
    if not os.path.isdir(datadir):
        fail_hard('%s does not exist', datadir)
    if args.scale_constant < 1:
        fail_hard('--scale-constant must be positive')
    if args.torflow_bw_margin < 0:
        fail_hard('--torflow-bw-margin must not be negative.')
    if args.scale_sbws:
        scaling_method = SBWS_SCALING
    elif args.raw:
        scaling_method = None
    else:
        # sbws will scale as torflow until we have a better algorithm for
        # scaling (#XXX)
        scaling_method = TORFLOW_SCALING
    if args.secs_recent:
        fresh_days = ceil(args.secs_recent / 24 / 60 / 60)
    else:
        fresh_days = conf.getint('general', 'data_period')
    reset_bw_ipv4_changes = conf.getboolean('general', 'reset_bw_ipv4_changes')
    reset_bw_ipv6_changes = conf.getboolean('general', 'reset_bw_ipv6_changes')
    results = load_recent_results_in_datadir(
        fresh_days, datadir,
        on_changed_ipv4=reset_bw_ipv4_changes,
        on_changed_ipv6=reset_bw_ipv6_changes)
    if len(results) < 1:
        log.warning('No recent results, so not generating anything. (Have you '
                    'run sbws scanner recently?)')
        return
    state_fpath = conf.getpath('paths', 'state_fname')
    consensus_path = os.path.join(conf.getpath('tor', 'datadir'),
                                  "cached-consensus")
    # Accept None as scanner_country to be compatible with older versions.
    scanner_country = conf['scanner'].get('country')
    destinations_countries = destination.parse_destinations_countries(conf)
    bw_file = V3BWFile.from_results(results, scanner_country,
                                    destinations_countries, state_fpath,
                                    args.scale_constant, scaling_method,
                                    torflow_cap=args.torflow_bw_margin,
                                    round_digs=args.round_digs,
                                    secs_recent=args.secs_recent,
                                    secs_away=args.secs_away,
                                    min_num=args.min_num,
                                    consensus_path=consensus_path)
    output = args.output or \
        conf.getpath('paths', 'v3bw_fname').format(now_fname())
    bw_file.write(output)
    bw_file.info_stats
def test_from_results_read(datadir, tmpdir, conf, args):
    results = load_result_file(str(datadir.join("results.txt")))
    expected_header = V3BWHeader(timestamp_l,
                                 earliest_bandwidth=earliest_bandwidth,
                                 latest_bandwidth=latest_bandwidth)
    exclusion_dict = dict([
        (k, 0) for k in BW_HEADER_KEYVALUES_RECENT_MEASUREMENTS_EXCLUDED
    ])
    expected_header.add_relays_excluded_counters(exclusion_dict)
    raw_bwls = [V3BWLine.from_results(results[fp])[0] for fp in results]
    # Scale the BWLines using the torflow method, since it is the default and
    # the BWLines' bandwidth is the raw bandwidth.
    expected_bwls = V3BWFile.bw_torflow_scale(raw_bwls)
    expected_f = V3BWFile(expected_header, expected_bwls)
    # This call converts bw to KB.
    v3bwfile = V3BWFile.from_results(results)
    assert str(expected_f)[1:] == str(v3bwfile)[1:]
    output = os.path.join(args.output, now_fname())
    v3bwfile.write(output)
def main():
    p = ArgumentParser(description='Bw stats.')
    p.add_argument('-t', '--torflow')
    p.add_argument('-u', '--torflow2')
    p.add_argument('-s', '--sbws')
    p.add_argument('-w', '--sbws2')
    p.add_argument('-p', '--torflow-compare')
    p.add_argument('-o', '--output-dir')
    args = p.parse_args()
    output_dir = args.output_dir if args.output_dir else OUT_DPATH
    os.makedirs(output_dir, exist_ok=True)
    if args.torflow:
        log.debug('torflow')
        v100 = V3BWFile.from_v100_fpath(args.torflow)
        x, ys, labels = v100.to_plt()
        plot_xys(x, ys, labels, output_dir)
        if args.torflow2:
            v110 = V3BWFile.from_v100_fpath(args.torflow2)
            x, ys, labels = combine_bwfiles([v100, v110])
            plot_xys(x, ys, labels, output_dir)
            x, ys, labels = combine_bwfiles([v110, v100], inverse=True)
            plot_xys(x, ys, labels, output_dir, inverse=True)
    elif args.sbws:
        log.debug('sbws')
        v110 = V3BWFile.from_v110_fpath(args.sbws)
        x, ys, labels = v110.to_plt()
        plot_xys(x, ys, labels, output_dir)
        if args.sbws2:
            v1102 = V3BWFile.from_v110_fpath(args.sbws2)
            # v1102.info_stats
            x, ys = combine_bwfiles([v1102, v110])
            plot_xys(x, ys, ['2nd sbws', 'sbws'], output_dir)
            x, ys = combine_bwfiles([v110, v1102], 'bw')
            plot_xys(x, ys, ['sbws', '2nd sbws'], output_dir)
        if args.torflow_compare:
            log.debug('torflow compare')
            v100 = V3BWFile.from_v100_fpath(args.torflow_compare)
            # v100.info_stats
            x, ys = combine_bwfiles([v100, v110])
            plot_xys(x, ys, ['torflow', 'sbws'], output_dir)
            plot_xys(x, reversed(ys), ['sbws', 'torflow'], output_dir)
def test_torflow_scale(mock_consensus, datadir, tmpdir, conf):
    mock_consensus.return_value = 1
    # state_fpath = str(tmpdir.join('.sbws', 'state.dat'))
    state_fpath = conf['paths']['state_fpath']
    results = load_result_file(str(datadir.join("results.txt")))
    # Since v1.1.0, bw=1 is written when the minimum percent of measured
    # relays was not reached. Therefore mock the consensus number.
    # Because the consensus number is mocked, the state path will be read.
    # Obtain it from conf, so that the root directory exists.
    v3bwfile = V3BWFile.from_results(results, '', '', state_fpath,
                                     scaling_method=TORFLOW_SCALING,
                                     round_digs=TORFLOW_ROUND_DIG)
    assert v3bwfile.bw_lines[0].bw == 6
    v3bwfile = V3BWFile.from_results(results, '', '', state_fpath,
                                     scaling_method=TORFLOW_SCALING,
                                     torflow_cap=0.0001,
                                     round_digs=TORFLOW_ROUND_DIG)
    # Wrong because it should be rounded after clipping.
    assert v3bwfile.bw_lines[0].bw == 1
    v3bwfile = V3BWFile.from_results(results, '', '', state_fpath,
                                     scaling_method=TORFLOW_SCALING,
                                     torflow_cap=1,
                                     round_digs=TORFLOW_ROUND_DIG)
    assert v3bwfile.bw_lines[0].bw == 123
    v3bwfile = V3BWFile.from_results(results, '', '', state_fpath,
                                     scaling_method=TORFLOW_SCALING,
                                     torflow_cap=1,
                                     round_digs=PROP276_ROUND_DIG)
    assert v3bwfile.bw_lines[0].bw == 120
def test_v3bwfile(conf):
    bwfile = V3BWFile.from_v1_fpath(
        conf["paths"]["v3bw_fname"].format("latest")
    )
    assert "1" == bwfile.header.recent_consensus_count
    assert "1" == bwfile.header.recent_priority_list_count
    assert "15" == bwfile.header.recent_priority_relay_count
    assert "15" == bwfile.header.recent_measurement_attempt_count
    for bwline in bwfile.bw_lines:
        assert 1 == bwline.relay_in_recent_consensus_count
        assert 1 == bwline.relay_recent_priority_list_count
        assert 1 == bwline.relay_recent_measurement_attempt_count
def test_torflow_scale(datadir):
    results = load_result_file(str(datadir.join("results.txt")))
    v3bwfile = V3BWFile.from_results(results, scaling_method=TORFLOW_SCALING,
                                     round_digs=TORFLOW_ROUND_DIG)
    assert v3bwfile.bw_lines[0].bw == 123
    v3bwfile = V3BWFile.from_results(results, scaling_method=TORFLOW_SCALING,
                                     torflow_cap=0.0001,
                                     round_digs=TORFLOW_ROUND_DIG)
    assert v3bwfile.bw_lines[0].bw == 123
    v3bwfile = V3BWFile.from_results(results, scaling_method=TORFLOW_SCALING,
                                     torflow_cap=1,
                                     round_digs=TORFLOW_ROUND_DIG)
    assert v3bwfile.bw_lines[0].bw == 123
    v3bwfile = V3BWFile.from_results(results, scaling_method=TORFLOW_SCALING,
                                     torflow_cap=1,
                                     round_digs=PROP276_ROUND_DIG)
    assert v3bwfile.bw_lines[0].bw == 120
def test_measured_progress_stats(datadir):
    number_consensus_relays = 3
    bw_lines_raw = []
    statsd_exp = {
        'percent_eligible_relays': 100,
        'minimum_percent_eligible_relays': 60,
        'number_consensus_relays': 3,
        'minimum_number_eligible_relays': 2,
        'number_eligible_relays': 3
    }
    min_perc_reached_before = None
    results = load_result_file(str(datadir.join("results_away.txt")))
    for fp, values in results.items():
        # log.debug("Relay fp %s", fp)
        line, _ = V3BWLine.from_results(values)
        if line is not None:
            bw_lines_raw.append(line)
    assert len(bw_lines_raw) == 3
    bw_lines = V3BWFile.bw_torflow_scale(bw_lines_raw)
    assert len(bw_lines) == 3
    statsd, success = V3BWFile.measured_progress_stats(
        len(bw_lines), number_consensus_relays, min_perc_reached_before)
    assert success
    assert statsd == statsd_exp
    number_consensus_relays = 6
    statsd, success = V3BWFile.measured_progress_stats(
        len(bw_lines), number_consensus_relays, min_perc_reached_before)
    assert not success
    statsd_exp = {
        'percent_eligible_relays': 50,
        'minimum_percent_eligible_relays': 60,
        'number_consensus_relays': 6,
        'minimum_number_eligible_relays': 4,
        'number_eligible_relays': 3
    }
    assert statsd_exp == statsd
def main(args, conf):
    os.makedirs(conf.getpath('paths', 'v3bw_dname'), exist_ok=True)
    datadir = conf.getpath('paths', 'datadir')
    if not os.path.isdir(datadir):
        fail_hard('%s does not exist', datadir)
    if args.scale_constant < 1:
        fail_hard('--scale-constant must be positive')
    if args.torflow_bw_margin < 0:
        fail_hard('--torflow-bw-margin must not be negative.')
    if args.scale_sbws:
        scaling_method = SBWS_SCALING
    elif args.raw:
        scaling_method = None
    else:
        scaling_method = TORFLOW_SCALING
    fresh_days = conf.getint('general', 'data_period')
    reset_bw_ipv4_changes = conf.getboolean('general', 'reset_bw_ipv4_changes')
    reset_bw_ipv6_changes = conf.getboolean('general', 'reset_bw_ipv6_changes')
    results = load_recent_results_in_datadir(
        fresh_days, datadir, success_only=True,
        on_changed_ipv4=reset_bw_ipv4_changes,
        on_changed_ipv6=reset_bw_ipv6_changes)
    if len(results) < 1:
        log.warning('No recent results, so not generating anything. (Have you '
                    'run sbws scanner recently?)')
        return
    state_fpath = conf.getpath('paths', 'state_fname')
    bw_file = V3BWFile.from_results(results, state_fpath, args.scale_constant,
                                    scaling_method,
                                    torflow_cap=args.torflow_bw_margin,
                                    torflow_round_digs=args.torflow_round_digs,
                                    secs_recent=args.secs_recent,
                                    secs_away=args.secs_away,
                                    min_num=args.min_num)
    output = args.output or \
        conf.getpath('paths', 'v3bw_fname').format(now_fname())
    bw_file.write(output)
    bw_file.info_stats
def test_sbws_scale(datadir):
    results = load_result_file(str(datadir.join("results.txt")))
    v3bwfile = V3BWFile.from_results(results, scaling_method=SBWS_SCALING)
    assert v3bwfile.bw_lines[0].bw == 8
def test_from_arg_results_write(datadir, tmpdir, conf, args):
    results = load_result_file(str(datadir.join("results.txt")))
    v3bwfile = V3BWFile.from_results(results)
    output = os.path.join(args.output, now_fname())
    v3bwfile.write(output)
    assert os.path.isfile(output)