Example #1
def main(argv):
    TorUtil.read_config(argv[1])
    (
        start_pct,
        stop_pct,
        nodes_per_slice,
        save_every,
        circs_per_node,
        out_dir,
        max_fetch_time,
        tor_dir,
        sleep_start,
        sleep_stop,
        min_streams,
        pid_file_name,
    ) = read_config(argv[1])

    if pid_file_name:
        pidfd = file(pid_file_name, "w")
        pidfd.write("%d\n" % os.getpid())
        pidfd.close()

    try:
        (c, hdlr) = setup_handler(out_dir, tor_dir + "/control_auth_cookie")
    except Exception, e:
        traceback.print_exc()
        plog("WARN", "Can't connect to Tor: " + str(e))
Example #2
def main(argv):
  plog("DEBUG", "Child Process Spawning...")
  TorUtil.read_config(argv[1])
  (start_pct, stop_pct, nodes_per_slice, save_every, circs_per_node, out_dir,
   max_fetch_time, tor_dir, sleep_start, sleep_stop, min_streams,
   pid_file_name, db_url) = read_config(argv[1])
 
  if pid_file_name:
    pidfd = file(pid_file_name, 'w')
    pidfd.write('%d\n' % os.getpid())
    pidfd.close()

  slice_num = int(argv[2])

  try:
    (c,hdlr) = setup_handler(out_dir, tor_dir+"/control_auth_cookie")
  except Exception, e:
    traceback.print_exc()
    plog("WARN", "Can't connect to Tor: "+str(e))

  if db_url:
    hdlr.attach_sql_listener(db_url)
    sql_file = None
  else:
    plog("INFO", "db_url not found in config. Defaulting to sqlite")
    sql_file = os.getcwd()+'/'+out_dir+'/bwauthority.sqlite'
    hdlr.attach_sql_listener('sqlite:///'+sql_file)

  # set SOCKS proxy
  socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, TorUtil.tor_host, TorUtil.tor_port)
  socket.socket = socks.socksocket
  plog("INFO", "Set socks proxy to "+TorUtil.tor_host+":"+str(TorUtil.tor_port))

  hdlr.wait_for_consensus()
  pct_step = hdlr.rank_to_percent(nodes_per_slice)

  # check to see if we are done
  if (slice_num * pct_step + start_pct > stop_pct):
    plog('INFO', 'stop_pct: %s reached. Exiting with %s' % (stop_pct, STOP_PCT_REACHED))
    sys.exit(STOP_PCT_REACHED)

  plog("DEBUG", "Starting slice number %s" % slice_num)
  speedrace(hdlr, slice_num*pct_step + start_pct,
            (slice_num + 1)*pct_step + start_pct, circs_per_node,
            save_every, out_dir, max_fetch_time, sleep_start, sleep_stop,
            slice_num, min_streams, sql_file)

  # For debugging memory leak..
  #TorUtil.dump_class_ref_counts(referrer_depth=1)

  # TODO: Change pathlen to 3 and kill exit+ConserveExit restrictions
  # And record circ failure rates..

  #circ_measure(hdlr, pct, pct+pct_step, circs_per_node, save_every,
  #  out_dir, max_fetch_time, sleep_start, sleep_stop, slice_num, sql_file)
  sys.exit(0)
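
The two lines under "# set SOCKS proxy" are the classic SocksiPy monkeypatch: setdefaultproxy() records the proxy settings and rebinding socket.socket makes every socket created afterwards a socksocket, so all of the scanner's fetches are tunneled through Tor. A minimal sketch of the same pattern, assuming PySocks (the maintained SocksiPy fork, where the call is spelled set_default_proxy) and placeholder host/port defaults:

import socket

import socks  # PySocks

def route_through_tor(tor_host="127.0.0.1", tor_port=9050):
    # Every socket created after this call goes through Tor's SOCKS5
    # port; the legacy alias setdefaultproxy does the same thing.
    socks.set_default_proxy(socks.SOCKS5, tor_host, tor_port)
    socket.socket = socks.socksocket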
Example #3
def main():
    #guard_slices,ncircuits,max_circuits,begin,end,pct,dirname,use_sql = getargs()
    TorUtil.read_config('cbt.cfg')

    try:
        getargs()
        atexit.register(cleanup)
        return open_controller("cbtest")
    except PathSupport.NoNodesRemain:
        print 'No nodes remain at this percentile range.'
        return 1
    except Exception, e:
        plog("ERROR", "Misc exception: " + str(e))
        traceback.print_exc()
        return 23
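
Registering cleanup with atexit before opening the controller means the handler runs on every exit path, including the sys.exit() calls in the other examples, while the distinct return codes (1, 23) let the calling process tell failure modes apart. A small sketch of the pattern, with hypothetical names:

import atexit
import sys

def cleanup():
    # Runs on normal return and on sys.exit(), but not on os._exit()
    # or an unhandled fatal signal.
    print("closing controller, flushing state")

def main():
    atexit.register(cleanup)
    return 1  # a distinct exit code, as with NoNodesRemain above

if __name__ == "__main__":
    sys.exit(main())  # cleanup() still fires before the interpreter exits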
Example #4
def main(argv):
  TorUtil.read_config(argv[1])
  (start_pct, stop_pct, nodes_per_slice, save_every, circs_per_node, out_dir,
   max_fetch_time, tor_dir, sleep_start, sleep_stop, min_streams,
   pid_file_name, db_url, only_unmeasured,
   min_unmeasured) = read_config(argv[1])
  plog("NOTICE", "Child Process Spawned...")

  # make sure necessary out_dir directory exists
  path = os.getcwd()+'/'+out_dir
  if not os.path.exists(path):
    os.makedirs(path)
 
  if pid_file_name:
    pidfd = file(pid_file_name, 'w')
    pidfd.write('%d\n' % os.getpid())
    pidfd.close()

  slice_num = int(argv[2])

  try:
    (c,hdlr) = setup_handler(out_dir, tor_dir+"/control_auth_cookie")
  except Exception, e:
    traceback.print_exc()
    plog("WARN", "Can't connect to Tor: "+str(e))
    sys.exit(STOP_PCT_REACHED)

  if db_url:
    hdlr.attach_sql_listener(db_url)
    sql_file = None
  else:
    plog("INFO", "db_url not found in config. Defaulting to sqlite")
    sql_file = os.getcwd()+'/'+out_dir+'/bwauthority.sqlite'
    #hdlr.attach_sql_listener('sqlite:///'+sql_file)
    hdlr.attach_sql_listener('sqlite://')

  # set SOCKS proxy
  socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, TorUtil.tor_host, TorUtil.tor_port)
  socket.socket = socks.socksocket
  plog("INFO", "Set socks proxy to "+TorUtil.tor_host+":"+str(TorUtil.tor_port))

  hdlr.schedule_selmgr(lambda s: setattr(s, "only_unmeasured", only_unmeasured))

  hdlr.wait_for_consensus()

  # Now that we have the consensus, we shouldn't need to listen
  # for new consensus events.
  c.set_events([TorCtl.EVENT_TYPE.STREAM,
        TorCtl.EVENT_TYPE.BW,
        TorCtl.EVENT_TYPE.CIRC,
        TorCtl.EVENT_TYPE.STREAM_BW], True)

  # Go back to sleep if fewer than min_unmeasured unmeasured nodes
  # remain after the consensus update
  if min_unmeasured and hdlr.get_unmeasured() < min_unmeasured:
    plog("NOTICE", "Less than "+str(min_unmeasured)+" unmeasured nodes ("+str(hdlr.get_unmeasured())+"). Sleeping for a bit")
    time.sleep(3600) # Until next consensus arrives
    plog("NOTICE", "Woke up from waiting for more unmeasured nodes.  Requesting slice restart.")
    sys.exit(RESTART_SLICE)

  pct_step = hdlr.rank_to_percent(nodes_per_slice)
  plog("INFO", "Percent per slice is: "+str(pct_step))
  if pct_step > 100: pct_step = 100

  # check to see if we are done
  if (slice_num * pct_step + start_pct > stop_pct):
    plog('NOTICE', 'Child stop point %s reached. Exiting with %s' % (stop_pct, STOP_PCT_REACHED))
    sys.exit(STOP_PCT_REACHED)

  successful = speedrace(hdlr, slice_num*pct_step + start_pct,
            (slice_num + 1)*pct_step + start_pct, circs_per_node,
            save_every, out_dir, max_fetch_time, sleep_start, sleep_stop,
            slice_num, min_streams, sql_file, only_unmeasured)

  # For debugging memory leak..
  #TorUtil.dump_class_ref_counts(referrer_depth=1)

  # TODO: Change pathlen to 3 and kill exit+ConserveExit restrictions
  # And record circ failure rates..

  #circ_measure(hdlr, pct, pct+pct_step, circs_per_node, save_every,
  #  out_dir, max_fetch_time, sleep_start, sleep_stop, slice_num, sql_file)

  # XXX: Hack this to return a codelen double the slice size on failure?
  plog("INFO", "Slice success count: "+str(successful))
  if successful == 0:
    plog("WARN", "Slice success count was ZERO!")

  sys.exit(0)
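
The slice arithmetic works in percentiles of the bandwidth-ranked relay list: rank_to_percent() converts nodes_per_slice into a percentile width pct_step, child number slice_num measures the band from slice_num*pct_step + start_pct up to (slice_num + 1)*pct_step + start_pct, and a child exits with STOP_PCT_REACHED once its band's lower edge passes stop_pct. A worked sketch with an assumed pct_step:

start_pct, stop_pct = 0.0, 100.0
pct_step = 3.0  # hypothetical rank_to_percent(nodes_per_slice) result

def slice_bounds(slice_num):
    # Percentile band measured by child number slice_num.
    return (slice_num * pct_step + start_pct,
            (slice_num + 1) * pct_step + start_pct)

for slice_num in (0, 33, 34):
    lo, hi = slice_bounds(slice_num)
    print("slice %2d: %5.1f%% .. %5.1f%%  done=%s"
          % (slice_num, lo, hi, lo > stop_pct))
# slice 34 starts at 102.0 > 100.0, so that child exits with STOP_PCT_REACHED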
Example #5
def main(argv):
    plog("DEBUG", "Child Process Spawning...")
    TorUtil.read_config(argv[1])
    (start_pct, stop_pct, nodes_per_slice, save_every, circs_per_node, out_dir,
     max_fetch_time, tor_dir, sleep_start, sleep_stop, min_streams,
     pid_file_name, db_url) = read_config(argv[1])

    # make sure necessary out_dir directory exists
    path = os.getcwd() + '/' + out_dir
    if not os.path.exists(path):
        os.makedirs(path)

    if pid_file_name:
        pidfd = file(pid_file_name, 'w')
        pidfd.write('%d\n' % os.getpid())
        pidfd.close()

    slice_num = int(argv[2])

    try:
        (c, hdlr) = setup_handler(out_dir,
                                  tor_dir + "/control_auth_cookie")
    except Exception, e:
        traceback.print_exc()
        plog("WARN", "Can't connect to Tor: " + str(e))
        sys.exit(STOP_PCT_REACHED)

    if db_url:
        hdlr.attach_sql_listener(db_url)
        sql_file = None
    else:
        plog("INFO", "db_url not found in config. Defaulting to sqlite")
        sql_file = os.getcwd() + '/' + out_dir + '/bwauthority.sqlite'
        #hdlr.attach_sql_listener('sqlite:///'+sql_file)
        hdlr.attach_sql_listener('sqlite://')

    # set SOCKS proxy
    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, TorUtil.tor_host,
                          TorUtil.tor_port)
    socket.socket = socks.socksocket
    plog("INFO", "Set socks proxy to " + TorUtil.tor_host + ":" +
         str(TorUtil.tor_port))

    hdlr.wait_for_consensus()

    pct_step = hdlr.rank_to_percent(nodes_per_slice)

    # check to see if we are done
    if (slice_num * pct_step + start_pct > stop_pct):
        plog('INFO', 'stop_pct: %s reached. Exiting with %s' %
             (stop_pct, STOP_PCT_REACHED))
        sys.exit(STOP_PCT_REACHED)

    plog("DEBUG", "Starting slice number %s" % slice_num)
    speedrace(hdlr, slice_num * pct_step + start_pct,
              (slice_num + 1) * pct_step + start_pct, circs_per_node,
              save_every, out_dir, max_fetch_time, sleep_start, sleep_stop,
              slice_num, min_streams, sql_file)

    # For debugging memory leak..
    #TorUtil.dump_class_ref_counts(referrer_depth=1)

    # TODO: Change pathlen to 3 and kill exit+ConserveExit restrictions
    # And record circ failure rates..

    #circ_measure(hdlr, pct, pct+pct_step, circs_per_node, save_every,
    #  out_dir, max_fetch_time, sleep_start, sleep_stop, slice_num, sql_file)
    sys.exit(0)
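
Note the sqlite switch in the db_url fallback of Examples #4 and #5: the commented-out 'sqlite:///'+sql_file is a file-backed SQLAlchemy URL, while the bare 'sqlite://' that replaced it is SQLAlchemy's in-memory database, so sql_file is still computed and passed along but no database file is written. The URL forms, which are standard SQLAlchemy behavior:

from sqlalchemy import create_engine

# 'sqlite://' is an in-memory database, lost when the process exits;
# 'sqlite:///path.db' opens a file relative to the working directory;
# 'sqlite:////abs/path.db' is an absolute path.
mem_engine = create_engine('sqlite://')
file_engine = create_engine('sqlite:///bwauthority.sqlite')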
Example #6
def main(argv):
  TorUtil.read_config(argv[1]+"/scanner.1/bwauthority.cfg")
  TorUtil.loglevel = "NOTICE"

  (branch, head) = TorUtil.get_git_version(PATH_TO_TORFLOW_REPO)
  plog('INFO', 'TorFlow Version: %s %s' % (branch, head))
  (branch, head) = TorUtil.get_git_version(PATH_TO_TORCTL_REPO)
  plog('INFO', 'TorCtl Version: %s %s' % (branch, head))

  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  s.connect((TorUtil.control_host,TorUtil.control_port))
  c = TorCtl.Connection(s)
  c.debug(file(argv[1]+"/aggregate-control.log", "w", buffering=0))
  c.authenticate_cookie(file(argv[1]+"/tor/control_auth_cookie",
                         "r"))

  ns_list = c.get_network_status()
  for n in ns_list:
    if n.bandwidth == None: n.bandwidth = -1
  ns_list.sort(lambda x, y: int(y.bandwidth/10000.0 - x.bandwidth/10000.0))
  for n in ns_list:
    if n.bandwidth == -1: n.bandwidth = None
  got_ns_bw = False
  max_rank = len(ns_list)

  cs_junk = ConsensusJunk(c)

  # TODO: This is poor form.. We should subclass the Networkstatus class
  # instead of just adding members
  for i in xrange(max_rank):
    n = ns_list[i]
    n.list_rank = i
    if n.bandwidth == None:
      plog("NOTICE", "Your Tor is not providing NS w bandwidths for "+n.idhex)
    else:
      got_ns_bw = True
    n.measured = False
    prev_consensus["$"+n.idhex] = n

  if not got_ns_bw:
    # Sometimes the consensus lacks a descriptor. In that case,
    # it will skip outputting 
    plog("ERROR", "Your Tor is not providing NS w bandwidths!")
    sys.exit(0)

  # Take the most recent timestamp from each scanner 
  # and use the oldest for the timestamp of the result.
  # That way we can ensure all the scanners continue running.
  scanner_timestamps = {}
  for da in argv[1:-1]:
    # First, create a list of the most recent files in the
    # scan dirs that are recent enough
    for root, dirs, f in os.walk(da):
      for ds in dirs:
        # \d+ (not [\d+]) so multi-digit scanner ids also match
        if re.match(r"^scanner\.\d+$", ds):
          newest_timestamp = 0
          for sr, sd, files in os.walk(da+"/"+ds+"/scan-data"):
            for f in files:
              if re.search(r"^bws-\S+-done-", f):
                fp = file(sr+"/"+f, "r")
                slicenum = sr+"/"+fp.readline()
                timestamp = float(fp.readline())
                fp.close()
                # old measurements are probably
                # better than no measurements. We may not
                # measure hibernating routers for days.
                # This filter is just to remove REALLY old files
                if time.time() - timestamp > MAX_AGE:
                  sqlf = f.replace("bws-", "sql-")
                  plog("INFO", "Removing old file "+f+" and "+sqlf)
                  os.remove(sr+"/"+f)
                  try:
                    os.remove(sr+"/"+sqlf)
                  except:
                    pass # In some cases the sql file may not exist
                  continue
                if timestamp > newest_timestamp:
                  newest_timestamp = timestamp
                bw_files.append((slicenum, timestamp, sr+"/"+f))
          scanner_timestamps[ds] = newest_timestamp

  # Need to only use most recent slice-file for each node..
  for (s,t,f) in bw_files:
    fp = file(f, "r")
    fp.readline() # slicenum
    fp.readline() # timestamp
    for l in fp.readlines():
      try:
        line = Line(l,s,t)
        if line.idhex not in nodes:
          n = Node()
          nodes[line.idhex] = n
        else:
          n = nodes[line.idhex]
        n.add_line(line)
      except ValueError,e:
        plog("NOTICE", "Conversion error "+str(e)+" at "+l)
      except AttributeError, e:
        plog("NOTICE", "Slice file format error "+str(e)+" at "+l)
      except Exception, e:
        plog("WARN", "Unknown slice parse error "+str(e)+" at "+l)
        traceback.print_exc()
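
The bandwidth sort above uses a Python 2 cmp-style argument, and the division by 10000.0 before int() makes relays within 10 kB/s of each other compare equal, so the stable sort keeps their existing relative order while still ordering the list by descending bandwidth. Python 3 removed cmp-style sorting; functools.cmp_to_key is a direct port (the NS class below is an illustrative stand-in for TorCtl's network status entries):

import functools

class NS(object):
    # Stand-in for a TorCtl NetworkStatus entry (illustrative only).
    def __init__(self, bandwidth):
        self.bandwidth = bandwidth

ns_list = [NS(5000), NS(250000), NS(91000), NS(97000)]
ns_list.sort(key=functools.cmp_to_key(
    lambda x, y: int(y.bandwidth / 10000.0 - x.bandwidth / 10000.0)))
print([n.bandwidth for n in ns_list])
# [250000, 91000, 97000, 5000]: 91000 stays ahead of 97000 because the
# two compare equal at 10 kB/s granularity and the sort is stable.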
Example #7
def main(argv):
  TorUtil.read_config(argv[1]+"/scanner.1/bwauthority.cfg")
  TorUtil.logfile = "data/aggregate-debug.log"

  (branch, head) = TorUtil.get_git_version(PATH_TO_TORFLOW_REPO)
  plog('NOTICE', 'TorFlow Version: %s %s' % (branch, head))
  (branch, head) = TorUtil.get_git_version(PATH_TO_TORCTL_REPO)
  plog('NOTICE', 'TorCtl Version: %s %s' % (branch, head))

  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  s.connect((TorUtil.control_host,TorUtil.control_port))
  c = TorCtl.Connection(s)
  c.debug(file(argv[1]+"/aggregate-control.log", "w", buffering=0))
  c.authenticate_cookie(file(argv[1]+"/tor.1/control_auth_cookie",
                         "r"))

  ns_list = c.get_network_status()
  for n in ns_list:
    if n.bandwidth == None: n.bandwidth = -1
  ns_list.sort(lambda x, y: int(y.bandwidth/10000.0 - x.bandwidth/10000.0))
  for n in ns_list:
    if n.bandwidth == -1: n.bandwidth = None
  got_ns_bw = False
  max_rank = len(ns_list)

  cs_junk = ConsensusJunk(c)

  # TODO: This is poor form.. We should subclass the Networkstatus class
  # instead of just adding members
  for i in xrange(max_rank):
    n = ns_list[i]
    n.list_rank = i
    if n.bandwidth == None:
      plog("NOTICE", "Your Tor is not providing NS w bandwidths for "+n.idhex)
    else:
      got_ns_bw = True
    n.measured = False
    prev_consensus["$"+n.idhex] = n

  if not got_ns_bw:
    # Sometimes the consensus lacks a descriptor. In that case,
    # it will skip outputting 
    plog("ERROR", "Your Tor is not providing NS w bandwidths!")
    sys.exit(0)

  # Take the most recent timestamp from each scanner 
  # and use the oldest for the timestamp of the result.
  # That way we can ensure all the scanners continue running.
  scanner_timestamps = {}
  for da in argv[1:-1]:
    # First, create a list of the most recent files in the
    # scan dirs that are recent enough
    for root, dirs, f in os.walk(da):
      for ds in dirs:
        # \d+ (not [\d+]) so multi-digit scanner ids also match
        if re.match(r"^scanner\.\d+$", ds):
          newest_timestamp = 0
          for sr, sd, files in os.walk(da+"/"+ds+"/scan-data"):
            for f in files:
              if re.search(r"^bws-\S+-done-", f):
                fp = file(sr+"/"+f, "r")
                slicenum = sr+"/"+fp.readline()
                timestamp = float(fp.readline())
                fp.close()
                # old measurements are probably
                # better than no measurements. We may not
                # measure hibernating routers for days.
                # This filter is just to remove REALLY old files
                if time.time() - timestamp > MAX_AGE:
                  sqlf = f.replace("bws-", "sql-")
                  plog("INFO", "Removing old file "+f+" and "+sqlf)
                  os.remove(sr+"/"+f)
                  try:
                    os.remove(sr+"/"+sqlf)
                  except:
                    pass # In some cases the sql file may not exist
                  continue
                if timestamp > newest_timestamp:
                  newest_timestamp = timestamp
                bw_files.append((slicenum, timestamp, sr+"/"+f))
          scanner_timestamps[ds] = newest_timestamp

  # Need to only use most recent slice-file for each node..
  for (s,t,f) in bw_files:
    fp = file(f, "r")
    fp.readline() # slicenum
    fp.readline() # timestamp
    for l in fp.readlines():
      try:
        line = Line(l,s,t)
        if line.idhex not in nodes:
          n = Node()
          nodes[line.idhex] = n
        else:
          n = nodes[line.idhex]
        n.add_line(line)
      except ValueError,e:
        plog("NOTICE", "Conversion error "+str(e)+" at "+l)
      except AttributeError, e:
        plog("NOTICE", "Slice file format error "+str(e)+" at "+l)
      except Exception, e:
        plog("WARN", "Unknown slice parse error "+str(e)+" at "+l)
        traceback.print_exc()
Example #8
def main(argv):
  TorUtil.read_config(argv[1]+"/scanner.1/bwauthority.cfg")
  TorUtil.loglevel = "NOTICE"
 
  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  s.connect((TorUtil.control_host,TorUtil.control_port))
  c = TorCtl.Connection(s)
  c.debug(file(argv[1]+"/aggregate-control.log", "w", buffering=0))
  c.authenticate_cookie(file(argv[1]+"/tor/control_auth_cookie",
                         "r"))

  ns_list = c.get_network_status()
  for n in ns_list:
    if n.bandwidth == None: n.bandwidth = -1
  ns_list.sort(lambda x, y: y.bandwidth - x.bandwidth)
  for n in ns_list:
    if n.bandwidth == -1: n.bandwidth = None
  got_ns_bw = False
  max_rank = len(ns_list)

  global BETA
  sorted_rlist = None
  if BETA == -1:
    # Compute beta based on the upgrade rate for nsbw obeying routers
    # (karsten's data show this slightly underestimates client upgrade rate)
    nsbw_yes = VersionRangeRestriction("0.2.1.17")
    sorted_rlist = c.read_routers(ns_list)

    nsbw_cnt = 0
    non_nsbw_cnt = 0
    for r in sorted_rlist:
      if nsbw_yes.r_is_ok(r): nsbw_cnt += 1
      else: non_nsbw_cnt += 1
    BETA = float(nsbw_cnt)/(nsbw_cnt+non_nsbw_cnt)

  global GUARD_BETA
  if GUARD_BETA == -1:
    # Compute GUARD_BETA based on the upgrade rate for nsbw obeying routers
    # (karsten's data show this slightly underestimates client upgrade rate)
    guardbw_yes = NodeRestrictionList([VersionRangeRestriction("0.2.1.23"),
       NotNodeRestriction(VersionRangeRestriction("0.2.2.0", "0.2.2.6"))])

    if not sorted_rlist:
      sorted_rlist = c.read_routers(ns_list)

    guardbw_cnt = 0
    non_guardbw_cnt = 0
    for r in sorted_rlist:
      if guardbw_yes.r_is_ok(r): guardbw_cnt += 1
      else: non_guardbw_cnt += 1
    GUARD_BETA = float(guardbw_cnt)/(guardbw_cnt+non_guardbw_cnt)


  # FIXME: This is poor form.. We should subclass the Networkstatus class
  # instead of just adding members
  for i in xrange(max_rank):
    n = ns_list[i]
    n.list_rank = i
    if n.bandwidth == None:
      plog("NOTICE", "Your Tor is not providing NS w bandwidths for "+n.idhex)
    else:
      got_ns_bw = True
    n.measured = False
    prev_consensus["$"+n.idhex] = n

  if not got_ns_bw:
    # Sometimes the consensus lacks a descriptor. In that case,
    # it will skip outputting 
    plog("ERROR", "Your Tor is not providing NS w bandwidths!")
    sys.exit(0)

  # Take the most recent timestamp from each scanner 
  # and use the oldest for the timestamp of the result.
  # That way we can ensure all the scanners continue running.
  scanner_timestamps = {}
  for da in argv[1:-1]:
    # First, create a list of the most recent files in the
    # scan dirs that are recent enough
    for root, dirs, f in os.walk(da):
      for ds in dirs:
        # \d+ (not [\d+]) so multi-digit scanner ids also match
        if re.match(r"^scanner\.\d+$", ds):
          newest_timestamp = 0
          for sr, sd, files in os.walk(da+"/"+ds+"/scan-data"):
            for f in files:
              if re.search(r"^bws-\S+-done-", f):
                fp = file(sr+"/"+f, "r")
                slicenum = sr+"/"+fp.readline()
                timestamp = float(fp.readline())
                fp.close()
                # old measurements are probably
                # better than no measurements. We may not
                # measure hibernating routers for days.
                # This filter is just to remove REALLY old files
                if time.time() - timestamp > MAX_AGE:
                  plog("DEBUG", "Skipping old file "+f)
                  continue
                if timestamp > newest_timestamp:
                  newest_timestamp = timestamp
                bw_files.append((slicenum, timestamp, sr+"/"+f))
                # FIXME: Can we kill this?
                if slicenum not in timestamps or \
                     timestamps[slicenum] < timestamp:
                  timestamps[slicenum] = timestamp
          scanner_timestamps[ds] = newest_timestamp

  # Need to only use most recent slice-file for each node..
  for (s,t,f) in bw_files:
    fp = file(f, "r")
    fp.readline() # slicenum
    fp.readline() # timestamp
    for l in fp.readlines():
      try:
        line = Line(l,s,t)
        if line.idhex not in nodes:
          n = Node()
          nodes[line.idhex] = n
        else:
          n = nodes[line.idhex]
        n.add_line(line)
      except ValueError,e:
        plog("NOTICE", "Conversion error "+str(e)+" at "+l)
      except AttributeError, e:
        plog("NOTICE", "Slice file format error "+str(e)+" at "+l)
      except Exception, e:
        plog("WARN", "Unknown slice parse error "+str(e)+" at "+l)
        traceback.print_exc()
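
BETA and GUARD_BETA are upgrade-rate estimates: each is the fraction of routers whose version passes a restriction (at least 0.2.1.17 for consensus-bandwidth support, and at least 0.2.1.23 excluding 0.2.2.0 through 0.2.2.6 for guards), used as a proxy for how many clients run the corresponding behavior. Both counting loops reduce to the same fraction; a sketch with a hypothetical helper name:

def fraction_ok(routers, restriction):
    # Fraction of routers passing a TorCtl NodeRestriction, mirroring
    # the BETA and GUARD_BETA counting loops above.
    ok = sum(1 for r in routers if restriction.r_is_ok(r))
    return float(ok) / len(routers)

# e.g. BETA = fraction_ok(sorted_rlist, VersionRangeRestriction("0.2.1.17"))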