Example #1
    def sort_routers(self, sorted_routers):
        routers = copy.copy(sorted_routers)

        def ratio_cmp(r1, r2):
            if r1.bw / float(r1.desc_bw) > r2.bw / float(r2.desc_bw):
                return -1
            elif r1.bw / float(r1.desc_bw) < r2.bw / float(r2.desc_bw):
                return 1
            else:
                return 0

        if self.speed == "fast":
            pass  # no action needed
        elif self.speed == "slow":
            routers.reverse()
        elif self.speed == "fastratio":
            routers.sort(ratio_cmp)
        elif self.speed == "slowratio":
            routers.sort(lambda x, y: ratio_cmp(y, x))

        # Print top 5 routers + ratios
        for i in xrange(5):
            TorUtil.plog(
                "DEBUG",
                self.speed + " router " + routers[i].nickname + " #" + str(i) +
                ": " + str(routers[i].bw) + "/" + str(routers[i].desc_bw) +
                " = " + str(routers[i].bw / float(routers[i].desc_bw)))

        return routers
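
Note: both sort calls above use the Python 2 comparator form of list.sort(), which Python 3 removed. A minimal sketch of the same ordering in version-neutral form (the Router stand-in below is hypothetical; the real objects come from TorCtl):

import functools
from collections import namedtuple

# Hypothetical stand-in for the TorCtl router objects used above.
Router = namedtuple("Router", ["nickname", "bw", "desc_bw"])
routers = [Router("a", 500, 1000), Router("b", 900, 1000)]

def ratio_cmp(r1, r2):
    # Same comparator as above: -1 if r1 has the higher bw/desc_bw ratio.
    ra, rb = r1.bw / float(r1.desc_bw), r2.bw / float(r2.desc_bw)
    return (ra < rb) - (ra > rb)

# functools.cmp_to_key (Python 2.7+) adapts an old-style comparator:
routers.sort(key=functools.cmp_to_key(ratio_cmp))

# Simpler: sort on the ratio directly. reverse=True matches "fastratio"
# (highest ratio first); omitting it matches "slowratio".
routers.sort(key=lambda r: r.bw / float(r.desc_bw), reverse=True)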
Example #2
File: entrycons.py Project: mrphs/torperf
  def sort_routers(self, sorted_routers):
    routers = copy.copy(sorted_routers)
    def ratio_cmp(r1, r2):
      if r1.bw/float(r1.desc_bw) > r2.bw/float(r2.desc_bw):
        return -1
      elif r1.bw/float(r1.desc_bw) < r2.bw/float(r2.desc_bw):
        return 1
      else:
        return 0

    if self.speed == "fast":
      pass # no action needed
    elif self.speed == "slow":
      routers.reverse()
    elif self.speed == "fastratio":
      routers.sort(ratio_cmp)
    elif self.speed == "slowratio":
      routers.sort(lambda x,y: ratio_cmp(y,x))

    # Print top SAMPLE_SIZE routers + ratios
    if len(routers) < SAMPLE_SIZE:
      TorUtil.plog("WARN", "Only "+str(len(routers))+" in our list!")
    else:
      for i in xrange(SAMPLE_SIZE):
        TorUtil.plog("INFO", self.speed+" router "+routers[i].nickname+" #"+str(i)+": "
                      +str(routers[i].bw)+"/"+str(routers[i].desc_bw)+" = "
                      +str(routers[i].bw/float(routers[i].desc_bw)))

    return routers
Example #3
  def sort_routers(self, sorted_routers):
    routers = copy.copy(sorted_routers)
    def ratio_cmp(r1, r2):
      if r1.bw/float(r1.desc_bw) > r2.bw/float(r2.desc_bw):
        return -1
      elif r1.bw/float(r1.desc_bw) < r2.bw/float(r2.desc_bw):
        return 1
      else:
        return 0

    if self.speed == "fast":
      pass # no action needed
    elif self.speed == "slow":
      routers.reverse()
    elif self.speed == "fastratio":
      routers.sort(ratio_cmp)
    elif self.speed == "slowratio":
      routers.sort(lambda x,y: ratio_cmp(y,x))

    # Print top 5 routers + ratios
    for i in xrange(5):
      TorUtil.plog("DEBUG", self.speed+" router "+routers[i].nickname+" #"+str(i)+": "
                    +str(routers[i].bw)+"/"+str(routers[i].desc_bw)+" = "
                    +str(routers[i].bw/float(routers[i].desc_bw)))

    return routers
Example #4
def main(argv):
    TorUtil.read_config(argv[1])
    (
        start_pct,
        stop_pct,
        nodes_per_slice,
        save_every,
        circs_per_node,
        out_dir,
        max_fetch_time,
        tor_dir,
        sleep_start,
        sleep_stop,
        min_streams,
        pid_file_name,
    ) = read_config(argv[1])

    if pid_file_name:
        pidfd = file(pid_file_name, "w")
        pidfd.write("%d\n" % os.getpid())
        pidfd.close()

    try:
        (c, hdlr) = setup_handler(out_dir, tor_dir + "/control_auth_cookie")
    except Exception, e:
        traceback.print_exc()
        plog("WARN", "Can't connect to Tor: " + str(e))
Example #5
File: entrycons.py Project: mrphs/torperf
 def new_desc_event(self, n):
   TorCtl.ConsensusTracker.new_desc_event(self, n)
   if self.need_guards and self.consensus_count >= DESCRIPTORS_NEEDED*len(self.ns_map):
     TorUtil.plog("INFO", "We have enough routers. Rejoice!")
     self.used_entries = []
     self.set_entries()
     self.need_guards = False
   else:
     self.need_guards = True
Example #6
def main(argv):
  plog("DEBUG", "Child Process Spawning...")
  TorUtil.read_config(argv[1])
  (start_pct, stop_pct, nodes_per_slice, save_every, circs_per_node, out_dir,
   max_fetch_time, tor_dir, sleep_start, sleep_stop,
   min_streams, pid_file_name, db_url) = read_config(argv[1])
 
  if pid_file_name:
    pidfd = file(pid_file_name, 'w')
    pidfd.write('%d\n' % os.getpid())
    pidfd.close()

  slice_num = int(argv[2])

  try:
    (c,hdlr) = setup_handler(out_dir, tor_dir+"/control_auth_cookie")
  except Exception, e:
    traceback.print_exc()
    plog("WARN", "Can't connect to Tor: "+str(e))

  if db_url:
    hdlr.attach_sql_listener(db_url)
    sql_file = None
  else:
    plog("INFO", "db_url not found in config. Defaulting to sqlite")
    sql_file = os.getcwd()+'/'+out_dir+'/bwauthority.sqlite'
    hdlr.attach_sql_listener('sqlite:///'+sql_file)

  # set SOCKS proxy
  socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, TorUtil.tor_host, TorUtil.tor_port)
  socket.socket = socks.socksocket
  plog("INFO", "Set socks proxy to "+TorUtil.tor_host+":"+str(TorUtil.tor_port))

  hdlr.wait_for_consensus()
  pct_step = hdlr.rank_to_percent(nodes_per_slice)

  # check to see if we are done
  if (slice_num * pct_step + start_pct > stop_pct):
    plog('INFO', 'stop_pct: %s reached. Exiting with %s' % (stop_pct, STOP_PCT_REACHED))
    sys.exit(STOP_PCT_REACHED)

  plog("DEBUG", "Starting slice number %s" % slice_num)
  speedrace(hdlr, slice_num*pct_step + start_pct, (slice_num + 1)*pct_step + start_pct, circs_per_node, save_every, out_dir,
            max_fetch_time, sleep_start, sleep_stop, slice_num,
            min_streams, sql_file)

  # For debugging memory leak..
  #TorUtil.dump_class_ref_counts(referrer_depth=1)

  # TODO: Change pathlen to 3 and kill exit+ConserveExit restrictions
  # And record circ failure rates..

  #circ_measure(hdlr, pct, pct+pct_step, circs_per_node, save_every,
  #  out_dir, max_fetch_time, sleep_start, sleep_stop, slice_num, sql_file)
  sys.exit(0)
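
The three SOCKS lines above are the usual SocksiPy idiom: socks.setdefaultproxy() sets a module-wide default, and rebinding socket.socket to socks.socksocket makes every socket created afterwards go through the proxy, so all of the scanner's fetches leave via Tor. A minimal sketch of the pattern in isolation (the host and port are placeholder values for a local tor):

import socket
import socks    # SocksiPy
import urllib2

# Route every socket created from here on through a SOCKS5 proxy.
# Sockets created before the rebinding are unaffected.
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
socket.socket = socks.socksocket

# Libraries that open sockets normally now use the proxy:
print urllib2.urlopen("http://example.com/").getcode()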
Example #7
File: entrycons.py Project: mrphs/torperf
 def __init__(self, conn, speed):
   TorCtl.ConsensusTracker.__init__(self, conn, consensus_only=False)
   self.speed = speed
   self.used_entries = []
   if self.consensus_count < DESCRIPTORS_NEEDED*len(self.ns_map):
     TorUtil.plog("NOTICE",
        "Insufficient routers to choose new guard. Waiting for more..")
     self.need_guards = True
   else:
     self.set_entries()
     self.need_guards = False
Example #8
File: entrycons.py Project: mrphs/torperf
  def set_entries(self):
    # XXX: This is inefficient, but if we do it now, we're sure that
    # we're always using the very latest networkstatus and descriptor data
    sorted_routers = self.sort_routers(self.current_consensus().sorted_r)

    entry_nodes = []
    for i in xrange(len(sorted_routers)):
      if len(entry_nodes) >= SAMPLE_SIZE: break
      if (not sorted_routers[i].down and "Guard" in sorted_routers[i].flags):
        entry_nodes.append(sorted_routers[i].idhex)
        self.used_entries.append(sorted_routers[i].idhex)
    self.c.set_option("EntryNodes", ",".join(entry_nodes))
    TorUtil.plog("NOTICE", self.speed+": Changed EntryNodes to: " +
                   ",".join(map(lambda x: self.ns_map[x].nickname+"="+x,
                                entry_nodes)))
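
set_entries() keeps the first SAMPLE_SIZE routers that are up and carry the Guard flag. The same selection sketched with a lazy generator, so the scan stops as soon as enough guards are found (SAMPLE_SIZE = 3 is assumed here, matching the top-3 print in Example #2):

from itertools import islice

SAMPLE_SIZE = 3  # assumed value

def pick_entry_guards(sorted_routers, n=SAMPLE_SIZE):
    # Yield idhexes of live Guard-flagged routers; islice stops
    # the scan after the first n matches.
    guards = (r.idhex for r in sorted_routers
              if not r.down and "Guard" in r.flags)
    return list(islice(guards, n))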
Example #9
File: cbttest.py Project: AdrienLE/torflow
def main():
  #guard_slices,ncircuits,max_circuits,begin,end,pct,dirname,use_sql = getargs()
  TorUtil.read_config('cbt.cfg')

  try:
    getargs()
    atexit.register(cleanup)
    return open_controller("cbtest")
  except PathSupport.NoNodesRemain:
    print 'No nodes remain at this percentile range.'
    return 1
  except Exception, e:
    plog("ERROR", "Misc exception: "+str(e))
    traceback.print_exc()
    return 23
Example #10
def main():
    #guard_slices,ncircuits,max_circuits,begin,end,pct,dirname,use_sql = getargs()
    TorUtil.read_config('cbt.cfg')

    try:
        getargs()
        atexit.register(cleanup)
        return open_controller("cbtest")
    except PathSupport.NoNodesRemain:
        print 'No nodes remain at this percentile range.'
        return 1
    except Exception, e:
        plog("ERROR", "Misc exception: " + str(e))
        traceback.print_exc()
        return 23
Example #11
 def stream_status_event(self, event):
   if event.status == "NEW":
     if event.purpose != "USER":
       self.ignore_streams[event.strm_id] = True
     return
   if event.strm_id in self.ignore_streams:
     if event.status == "CLOSED":
       del self.ignore_streams[event.strm_id]
     return
   if event.circ_id not in self.all_circs:
     if event.circ_id:
       TorUtil.plog("WARN",
          "Unknown circuit id %d has a stream event %d %s" % \
          (event.circ_id, event.strm_id, event.status))
     return
   circ = self.all_circs[event.circ_id]
   if event.status == 'DETACHED' or event.status == 'FAILED':
     # Detached usually means there was some failure
     assert not circ.strm_id
     circ.used = True
     circ.strm_id = event.strm_id
     circ.stream_failed = True
     circ.stream_end_time = event.arrived_at
     if event.reason:
       circ.stream_fail_reason = event.reason
       if event.remote_reason:
         circ.stream_fail_reason += ":"+event.remote_reason
     self.write_circ(circ)
     # We have no explicit assurance here that tor will not
     # try to reuse this circuit later... But we should
     # print out a warn above if that happens.
     del self.all_circs[event.circ_id]
     # Some STREAM FAILED events are paired with a CLOSED, some are not :(
     if event.status == "FAILED":
       self.ignore_streams[event.strm_id] = True
   if event.status == 'CLOSED':
     assert not circ.strm_id or circ.stream_failed
     circ.used = True
     circ.strm_id = event.strm_id
     circ.stream_end_time = event.arrived_at
     self.write_circ(circ)
     del self.all_circs[event.circ_id]
Example #12
File: entrycons.py Project: mrphs/torperf
def main():
  if len(sys.argv) < 3:
    usage()
    return

  port = int(sys.argv[1])
  speed = sys.argv[2]

  if speed not in ("fast", "slow", "fastratio", "slowratio"):
    TorUtil.plog("ERROR",
        "Second parameter must be 'fast', 'slow', 'fastratio', or 'slowratio'")
    return

  conn = TorCtl.connect(HOST, port)

  conn.set_option("StrictEntryNodes", "1")
  conn.set_option("UseEntryNodes", "1")

  EntryTracker(conn, speed)
  conn.set_events(["NEWCONSENSUS", "NEWDESC", "NS", "GUARD"])
  conn.block_until_close()
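
Typical invocation (the port value is hypothetical): python entrycons.py 9051 fastratio. The process pins StrictEntryNodes/UseEntryNodes, hands guard selection to EntryTracker, and then blocks in the event loop until the control connection closes.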
Example #13
File: entrycons.py Project: mrphs/torperf
 def handle_entry_deaths(self, event):
   state = event.status
   if (state == "DOWN" or state == "BAD" or state == "DROPPED"):
     if self.consensus_count < DESCRIPTORS_NEEDED*len(self.ns_map):
       self.need_guards = True
       TorUtil.plog("NOTICE",
          "Insufficient routers to choose new guard. Waiting for more..")
       return
     nodes_tuple = self.c.get_option("EntryNodes")
     nodes_list = nodes_tuple[0][1].split(",")
     try: 
       nodes_list.remove(event.idhex)
       nodes_list.append(self.get_next_guard())
       self.c.set_option("EntryNodes", ",".join(nodes_list))
       TorUtil.plog("NOTICE", "Entry: " + event.nick + ":" + event.idhex +
                    " died, and we replaced it with: " + nodes_list[-1] + "!")
       nodes_tuple = self.c.get_option("EntryNodes")
       nodes_list = nodes_tuple[0][1]
       TorUtil.plog("INFO", "New nodes_list: " + nodes_list)
     except ValueError:
       TorUtil.plog("INFO", "GUARD event notified of an entry death that " +
                    "is not in nodes_list! Mysterioush!")
       TorUtil.plog("INFO", "It was: " + event.nick + " : " + event.idhex)
Example #14
 def handle_entry_deaths(self, event):
   state = event.status
   if (state == "DOWN" or state == "BAD" or state == "DROPPED"):
     nodes_tuple = self.c.get_option("EntryNodes")
     nodes_list = nodes_tuple[0][1].split(",")
     try: 
       nodes_list.remove(event.idhex)
       nodes_list.append(self.get_next_router(event.idhex, nodes_list))
       self.c.set_option("EntryNodes", ",".join(nodes_list))
       TorUtil.plog("NOTICE", "Entry: " + event.nick + ":" + event.idhex +
                    " died, and we replaced it with: " + nodes_list[-1] + "!")
       nodes_tuple = self.c.get_option("EntryNodes")
       nodes_list = nodes_tuple[0][1]
       TorUtil.plog("INFO", "New nodes_list: " + nodes_list)
     except ValueError:
       TorUtil.plog("INFO", "GUARD event notified of an entry death that " +
                    "is not in nodes_list! Mysterioush!")
       TorUtil.plog("INFO", "It was: " + event.nick + " : " + event.idhex)
Example #15
 def handle_entry_deaths(self, event):
     state = event.status
     if (state == "DOWN" or state == "BAD" or state == "DROPPED"):
         nodes_tuple = self.c.get_option("EntryNodes")
         nodes_list = nodes_tuple[0][1].split(",")
         try:
             nodes_list.remove(event.idhex)
             nodes_list.append(self.get_next_router(event.idhex,
                                                    nodes_list))
             self.c.set_option("EntryNodes", ",".join(nodes_list))
             TorUtil.plog(
                 "NOTICE", "Entry: " + event.nick + ":" + event.idhex +
                 " died, and we replaced it with: " + nodes_list[-1] + "!")
             nodes_tuple = self.c.get_option("EntryNodes")
             nodes_list = nodes_tuple[0][1]
             TorUtil.plog("INFO", "New nodes_list: " + nodes_list)
         except ValueError:
             TorUtil.plog(
                 "INFO", "GUARD event notified of an entry death that " +
                 "is not in nodes_list! Mysterioush!")
             TorUtil.plog("INFO",
                          "It was: " + event.nick + " : " + event.idhex)
Example #16
def main(argv):
  TorUtil.read_config(argv[1])
  (start_pct, stop_pct, nodes_per_slice, save_every, circs_per_node, out_dir,
   max_fetch_time, tor_dir, sleep_start, sleep_stop,
   min_streams, pid_file_name, db_url, only_unmeasured,
   min_unmeasured) = read_config(argv[1])
  plog("NOTICE", "Child Process Spawned...")

  # make sure necessary out_dir directory exists
  path = os.getcwd()+'/'+out_dir
  if not os.path.exists(path):
    os.makedirs(path)
 
  if pid_file_name:
    pidfd = file(pid_file_name, 'w')
    pidfd.write('%d\n' % os.getpid())
    pidfd.close()

  slice_num = int(argv[2])

  try:
    (c,hdlr) = setup_handler(out_dir, tor_dir+"/control_auth_cookie")
  except Exception, e:
    traceback.print_exc()
    plog("WARN", "Can't connect to Tor: "+str(e))
    sys.exit(STOP_PCT_REACHED)

  if db_url:
    hdlr.attach_sql_listener(db_url)
    sql_file = None
  else:
    plog("INFO", "db_url not found in config. Defaulting to sqlite")
    sql_file = os.getcwd()+'/'+out_dir+'/bwauthority.sqlite'
    #hdlr.attach_sql_listener('sqlite:///'+sql_file)
    hdlr.attach_sql_listener('sqlite://')

  # set SOCKS proxy
  socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, TorUtil.tor_host, TorUtil.tor_port)
  socket.socket = socks.socksocket
  plog("INFO", "Set socks proxy to "+TorUtil.tor_host+":"+str(TorUtil.tor_port))

  hdlr.schedule_selmgr(lambda s: setattr(s, "only_unmeasured", only_unmeasured))

  hdlr.wait_for_consensus()

  # Now that we have the consensus, we shouldn't need to listen
  # for new consensus events.
  c.set_events([TorCtl.EVENT_TYPE.STREAM,
        TorCtl.EVENT_TYPE.BW,
        TorCtl.EVENT_TYPE.CIRC,
        TorCtl.EVENT_TYPE.STREAM_BW], True)

  # Go to sleep if there are fewer than min_unmeasured nodes after
  # the consensus update
  if min_unmeasured and hdlr.get_unmeasured() < min_unmeasured:
    plog("NOTICE", "Less than "+str(min_unmeasured)+" unmeasured nodes ("+str(hdlr.get_unmeasured())+"). Sleeping for a bit")
    time.sleep(3600) # Until next consensus arrives
    plog("NOTICE", "Woke up from waiting for more unmeasured nodes.  Requesting slice restart.")
    sys.exit(RESTART_SLICE)

  pct_step = hdlr.rank_to_percent(nodes_per_slice)
  plog("INFO", "Percent per slice is: "+str(pct_step))
  if pct_step > 100: pct_step = 100

  # check to see if we are done
  if (slice_num * pct_step + start_pct > stop_pct):
    plog('NOTICE', 'Child stop point %s reached. Exiting with %s' % (stop_pct, STOP_PCT_REACHED))
    sys.exit(STOP_PCT_REACHED)

  successful = speedrace(hdlr, slice_num*pct_step + start_pct, (slice_num + 1)*pct_step + start_pct, circs_per_node,
            save_every, out_dir, max_fetch_time, sleep_start, sleep_stop, slice_num,
            min_streams, sql_file, only_unmeasured)

  # For debugging memory leak..
  #TorUtil.dump_class_ref_counts(referrer_depth=1)

  # TODO: Change pathlen to 3 and kill exit+ConserveExit restrictions
  # And record circ failure rates..

  #circ_measure(hdlr, pct, pct+pct_step, circs_per_node, save_every,
  #  out_dir, max_fetch_time, sleep_start, sleep_stop, slice_num, sql_file)

  # XXX: Hack this to return a codelen double the slice size on failure?
  plog("INFO", "Slice success count: "+str(successful))
  if successful == 0:
    plog("WARN", "Slice success count was ZERO!")

  sys.exit(0)
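
The child communicates with its parent scanner purely through exit codes: STOP_PCT_REACHED when the slice range is exhausted, RESTART_SLICE when it slept waiting for unmeasured nodes and wants the same slice retried, 0 on success. A hypothetical sketch of the parent side of that loop (the constant values and the child script name are assumptions, not taken from the source):

import subprocess
import sys

STOP_PCT_REACHED = 9   # assumed sentinel values
RESTART_SLICE = 10

def run_slices(config_file):
    slice_num = 0
    while True:
        rc = subprocess.call([sys.executable, "bwauthority_child.py",
                              config_file, str(slice_num)])
        if rc == STOP_PCT_REACHED:
            break          # walked past stop_pct; the scan is done
        elif rc == RESTART_SLICE:
            continue       # child wants the same slice retried
        slice_num += 1     # normal completion: advance to the next slice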
Example #17
def main(argv):
    TorUtil.read_config(argv[1])
    (start_pct, stop_pct, nodes_per_slice, save_every, circs_per_node, out_dir,
     max_fetch_time, tor_dir, sleep_start, sleep_stop, min_streams,
     pid_file_name, db_url, only_unmeasured,
     min_unmeasured) = read_config(argv[1])
    plog("NOTICE", "Child Process Spawned...")

    # make sure necessary out_dir directory exists
    path = os.getcwd() + '/' + out_dir
    if not os.path.exists(path):
        os.makedirs(path)

    if pid_file_name:
        pidfd = file(pid_file_name, 'w')
        pidfd.write('%d\n' % os.getpid())
        pidfd.close()

    slice_num = int(argv[2])

    try:
        (c, hdlr) = setup_handler(out_dir,
                                  tor_dir + "/control_auth_cookie")
    except Exception, e:
        traceback.print_exc()
        plog("WARN", "Can't connect to Tor: " + str(e))
        sys.exit(STOP_PCT_REACHED)

    if db_url:
        hdlr.attach_sql_listener(db_url)
        sql_file = None
    else:
        plog("INFO", "db_url not found in config. Defaulting to sqlite")
        sql_file = os.getcwd() + '/' + out_dir + '/bwauthority.sqlite'
        #hdlr.attach_sql_listener('sqlite:///'+sql_file)
        hdlr.attach_sql_listener('sqlite://')

    # set SOCKS proxy
    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, TorUtil.tor_host,
                          TorUtil.tor_port)
    socket.socket = socks.socksocket
    plog(
        "INFO", "Set socks proxy to " + TorUtil.tor_host + ":" +
        str(TorUtil.tor_port))

    hdlr.schedule_selmgr(
        lambda s: setattr(s, "only_unmeasured", only_unmeasured))

    hdlr.wait_for_consensus()

    # Now that we have the consensus, we shouldn't need to listen
    # for new consensus events.
    c.set_events([
        TorCtl.EVENT_TYPE.STREAM, TorCtl.EVENT_TYPE.BW,
        TorCtl.EVENT_TYPE.CIRC, TorCtl.EVENT_TYPE.STREAM_BW
    ], True)

    # Go to sleep if there are fewer than min_unmeasured nodes after
    # the consensus update
    if min_unmeasured and hdlr.get_unmeasured() < min_unmeasured:
        plog(
            "NOTICE",
            "Less than " + str(min_unmeasured) + " unmeasured nodes (" +
            str(hdlr.get_unmeasured()) + "). Sleeping for a bit")
        time.sleep(3600)  # Until next consensus arrives
        plog(
            "NOTICE",
            "Woke up from waiting for more unmeasured nodes.  Requesting slice restart."
        )
        sys.exit(RESTART_SLICE)

    pct_step = hdlr.rank_to_percent(nodes_per_slice)
    plog("INFO", "Percent per slice is: " + str(pct_step))
    if pct_step > 100: pct_step = 100

    # check to see if we are done
    if (slice_num * pct_step + start_pct > stop_pct):
        plog(
            'NOTICE', 'Child stop point %s reached. Exiting with %s' %
            (stop_pct, STOP_PCT_REACHED))
        sys.exit(STOP_PCT_REACHED)

    successful = speedrace(hdlr, slice_num * pct_step + start_pct,
                           (slice_num + 1) * pct_step + start_pct,
                           circs_per_node, save_every, out_dir,
                           max_fetch_time, sleep_start, sleep_stop,
                           slice_num, min_streams, sql_file,
                           only_unmeasured)

    # For debugging memory leak..
    #TorUtil.dump_class_ref_counts(referrer_depth=1)

    # TODO: Change pathlen to 3 and kill exit+ConserveExit restrictions
    # And record circ failure rates..

    #circ_measure(hdlr, pct, pct+pct_step, circs_per_node, save_every,
    #  out_dir, max_fetch_time, sleep_start, sleep_stop, slice_num, sql_file)

    # XXX: Hack this to return a codelen double the slice size on failure?
    plog("INFO", "Slice success count: " + str(successful))
    if successful == 0:
        plog("WARN", "Slice success count was ZERO!")

    sys.exit(0)
Example #18
def main(argv):
    plog("DEBUG", "Child Process Spawning...")
    TorUtil.read_config(argv[1])
    (start_pct, stop_pct, nodes_per_slice, save_every, circs_per_node, out_dir,
     max_fetch_time, tor_dir, sleep_start, sleep_stop, min_streams,
     pid_file_name, db_url) = read_config(argv[1])

    # make sure necessary out_dir directory exists
    path = os.getcwd() + '/' + out_dir
    if not os.path.exists(path):
        os.makedirs(path)

    if pid_file_name:
        pidfd = file(pid_file_name, 'w')
        pidfd.write('%d\n' % os.getpid())
        pidfd.close()

    slice_num = int(argv[2])

    try:
        (c, hdlr) = setup_handler(out_dir,
                                  tor_dir + "/control_auth_cookie")
    except Exception, e:
        traceback.print_exc()
        plog("WARN", "Can't connect to Tor: " + str(e))
        sys.exit(STOP_PCT_REACHED)

    if db_url:
        hdlr.attach_sql_listener(db_url)
        sql_file = None
    else:
        plog("INFO", "db_url not found in config. Defaulting to sqlite")
        sql_file = os.getcwd() + '/' + out_dir + '/bwauthority.sqlite'
        #hdlr.attach_sql_listener('sqlite:///'+sql_file)
        hdlr.attach_sql_listener('sqlite://')

    # set SOCKS proxy
    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, TorUtil.tor_host,
                          TorUtil.tor_port)
    socket.socket = socks.socksocket
    plog(
        "INFO", "Set socks proxy to " + TorUtil.tor_host + ":" +
        str(TorUtil.tor_port))

    hdlr.wait_for_consensus()

    pct_step = hdlr.rank_to_percent(nodes_per_slice)

    # check to see if we are done
    if (slice_num * pct_step + start_pct > stop_pct):
        plog(
            'INFO', 'stop_pct: %s reached. Exiting with %s' %
            (stop_pct, STOP_PCT_REACHED))
        sys.exit(STOP_PCT_REACHED)

    plog("DEBUG", "Starting slice number %s" % slice_num)
    speedrace(hdlr, slice_num * pct_step + start_pct,
              (slice_num + 1) * pct_step + start_pct, circs_per_node,
              save_every, out_dir, max_fetch_time, sleep_start, sleep_stop,
              slice_num, min_streams, sql_file)

    # For debugging memory leak..
    #TorUtil.dump_class_ref_counts(referrer_depth=1)

    # TODO: Change pathlen to 3 and kill exit+ConserveExit restrictions
    # And record circ failure rates..

    #circ_measure(hdlr, pct, pct+pct_step, circs_per_node, save_every,
    #  out_dir, max_fetch_time, sleep_start, sleep_stop, slice_num, sql_file)
    sys.exit(0)
Example #19
def main(argv):
  TorUtil.read_config(argv[1]+"/scanner.1/bwauthority.cfg")
  TorUtil.loglevel = "NOTICE"

  (branch, head) = TorUtil.get_git_version(PATH_TO_TORFLOW_REPO)
  plog('INFO', 'TorFlow Version: %s' % branch+' '+head)
  (branch, head) = TorUtil.get_git_version(PATH_TO_TORCTL_REPO)
  plog('INFO', 'TorCtl Version: %s' % branch+' '+head)

  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  s.connect((TorUtil.control_host,TorUtil.control_port))
  c = TorCtl.Connection(s)
  c.debug(file(argv[1]+"/aggregate-control.log", "w", buffering=0))
  c.authenticate_cookie(file(argv[1]+"/tor/control_auth_cookie",
                         "r"))

  ns_list = c.get_network_status()
  for n in ns_list:
    if n.bandwidth == None: n.bandwidth = -1
  ns_list.sort(lambda x, y: int(y.bandwidth/10000.0 - x.bandwidth/10000.0))
  for n in ns_list:
    if n.bandwidth == -1: n.bandwidth = None
  got_ns_bw = False
  max_rank = len(ns_list)

  cs_junk = ConsensusJunk(c)

  # TODO: This is poor form.. We should subclass the Networkstatus class
  # instead of just adding members
  for i in xrange(max_rank):
    n = ns_list[i]
    n.list_rank = i
    if n.bandwidth == None:
      plog("NOTICE", "Your Tor is not providing NS w bandwidths for "+n.idhex)
    else:
      got_ns_bw = True
    n.measured = False
    prev_consensus["$"+n.idhex] = n

  if not got_ns_bw:
    # Sometimes the consensus lacks a descriptor. In that case,
    # it will skip outputting 
    plog("ERROR", "Your Tor is not providing NS w bandwidths!")
    sys.exit(0)

  # Take the most recent timestamp from each scanner 
  # and use the oldest for the timestamp of the result.
  # That way we can ensure all the scanners continue running.
  scanner_timestamps = {}
  for da in argv[1:-1]:
    # First, create a list of the most recent files in the
    # scan dirs that are recent enough
    for root, dirs, f in os.walk(da):
      for ds in dirs:
        if re.match(r"^scanner\.\d+$", ds):
          newest_timestamp = 0
          for sr, sd, files in os.walk(da+"/"+ds+"/scan-data"):
            for f in files:
              if re.search("^bws-[\S]+-done-", f):
                fp = file(sr+"/"+f, "r")
                slicenum = sr+"/"+fp.readline()
                timestamp = float(fp.readline())
                fp.close()
                # old measurements are probably
                # better than no measurements. We may not
                # measure hibernating routers for days.
                # This filter is just to remove REALLY old files
                if time.time() - timestamp > MAX_AGE:
                  sqlf = f.replace("bws-", "sql-")
                  plog("INFO", "Removing old file "+f+" and "+sqlf)
                  os.remove(sr+"/"+f)
                  try:
                    os.remove(sr+"/"+sqlf)
                  except:
                    pass # In some cases the sql file may not exist
                  continue
                if timestamp > newest_timestamp:
                  newest_timestamp = timestamp
                bw_files.append((slicenum, timestamp, sr+"/"+f))
          scanner_timestamps[ds] = newest_timestamp

  # Need to only use most recent slice-file for each node..
  for (s,t,f) in bw_files:
    fp = file(f, "r")
    fp.readline() # slicenum
    fp.readline() # timestamp
    for l in fp.readlines():
      try:
        line = Line(l,s,t)
        if line.idhex not in nodes:
          n = Node()
          nodes[line.idhex] = n
        else:
          n = nodes[line.idhex]
        n.add_line(line)
      except ValueError,e:
        plog("NOTICE", "Conversion error "+str(e)+" at "+l)
      except AttributeError, e:
        plog("NOTICE", "Slice file format error "+str(e)+" at "+l)
      except Exception, e:
        plog("WARN", "Unknown slice parse error "+str(e)+" at "+l)
        traceback.print_exc()
Example #20
def main(argv):
  TorUtil.read_config(argv[1]+"/scanner.1/bwauthority.cfg")
  TorUtil.logfile = "data/aggregate-debug.log"

  (branch, head) = TorUtil.get_git_version(PATH_TO_TORFLOW_REPO)
  plog('NOTICE', 'TorFlow Version: %s' % branch+' '+head)
  (branch, head) = TorUtil.get_git_version(PATH_TO_TORCTL_REPO)
  plog('NOTICE', 'TorCtl Version: %s' % branch+' '+head)

  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  s.connect((TorUtil.control_host,TorUtil.control_port))
  c = TorCtl.Connection(s)
  c.debug(file(argv[1]+"/aggregate-control.log", "w", buffering=0))
  c.authenticate_cookie(file(argv[1]+"/tor.1/control_auth_cookie",
                         "r"))

  ns_list = c.get_network_status()
  for n in ns_list:
    if n.bandwidth == None: n.bandwidth = -1
  ns_list.sort(lambda x, y: int(y.bandwidth/10000.0 - x.bandwidth/10000.0))
  for n in ns_list:
    if n.bandwidth == -1: n.bandwidth = None
  got_ns_bw = False
  max_rank = len(ns_list)

  cs_junk = ConsensusJunk(c)

  # TODO: This is poor form.. We should subclass the Networkstatus class
  # instead of just adding members
  for i in xrange(max_rank):
    n = ns_list[i]
    n.list_rank = i
    if n.bandwidth == None:
      plog("NOTICE", "Your Tor is not providing NS w bandwidths for "+n.idhex)
    else:
      got_ns_bw = True
    n.measured = False
    prev_consensus["$"+n.idhex] = n

  if not got_ns_bw:
    # Sometimes the consensus lacks a descriptor. In that case,
    # it will skip outputting 
    plog("ERROR", "Your Tor is not providing NS w bandwidths!")
    sys.exit(0)

  # Take the most recent timestamp from each scanner 
  # and use the oldest for the timestamp of the result.
  # That way we can ensure all the scanners continue running.
  scanner_timestamps = {}
  for da in argv[1:-1]:
    # First, create a list of the most recent files in the
    # scan dirs that are recent enough
    for root, dirs, f in os.walk(da):
      for ds in dirs:
        if re.match(r"^scanner\.\d+$", ds):
          newest_timestamp = 0
          for sr, sd, files in os.walk(da+"/"+ds+"/scan-data"):
            for f in files:
              if re.search("^bws-[\S]+-done-", f):
                fp = file(sr+"/"+f, "r")
                slicenum = sr+"/"+fp.readline()
                timestamp = float(fp.readline())
                fp.close()
                # old measurements are probably
                # better than no measurements. We may not
                # measure hibernating routers for days.
                # This filter is just to remove REALLY old files
                if time.time() - timestamp > MAX_AGE:
                  sqlf = f.replace("bws-", "sql-")
                  plog("INFO", "Removing old file "+f+" and "+sqlf)
                  os.remove(sr+"/"+f)
                  try:
                    os.remove(sr+"/"+sqlf)
                  except:
                    pass # In some cases the sql file may not exist
                  continue
                if timestamp > newest_timestamp:
                  newest_timestamp = timestamp
                bw_files.append((slicenum, timestamp, sr+"/"+f))
          scanner_timestamps[ds] = newest_timestamp

  # Need to only use most recent slice-file for each node..
  for (s,t,f) in bw_files:
    fp = file(f, "r")
    fp.readline() # slicenum
    fp.readline() # timestamp
    for l in fp.readlines():
      try:
        line = Line(l,s,t)
        if line.idhex not in nodes:
          n = Node()
          nodes[line.idhex] = n
        else:
          n = nodes[line.idhex]
        n.add_line(line)
      except ValueError,e:
        plog("NOTICE", "Conversion error "+str(e)+" at "+l)
      except AttributeError, e:
        plog("NOTICE", "Slice file format error "+str(e)+" at "+l)
      except Exception, e:
        plog("WARN", "Unknown slice parse error "+str(e)+" at "+l)
        traceback.print_exc()
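
The scanner_timestamps dict built above implements the comment's rule: record the newest done-file per scanner, then use the oldest of those as the timestamp of the combined result, so one stalled scanner keeps the published timestamp from advancing. A one-line sketch of the final step, assuming the dict is non-empty:

# Oldest of the per-scanner "newest file" timestamps: stops advancing
# if any scanner stops producing files.
result_timestamp = min(scanner_timestamps.values())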
Example #21
 def new_consensus_event(self, n):
     TorCtl.ConsensusTracker.new_consensus_event(self, n)
     TorUtil.plog("INFO", "New consensus arrived. Rejoice!")
     self.used_entries = []
     self.set_entries()
Example #22
File: aggregate.py Project: aagbsn/torflow
def main(argv):
  TorUtil.read_config(argv[1]+"/scanner.1/bwauthority.cfg")
  TorUtil.loglevel = "NOTICE"
 
  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  s.connect((TorUtil.control_host,TorUtil.control_port))
  c = TorCtl.Connection(s)
  c.debug(file(argv[1]+"/aggregate-control.log", "w", buffering=0))
  c.authenticate_cookie(file(argv[1]+"/tor/control_auth_cookie",
                         "r"))

  ns_list = c.get_network_status()
  for n in ns_list:
    if n.bandwidth == None: n.bandwidth = -1
  ns_list.sort(lambda x, y: y.bandwidth - x.bandwidth)
  for n in ns_list:
    if n.bandwidth == -1: n.bandwidth = None
  got_ns_bw = False
  max_rank = len(ns_list)

  global BETA
  sorted_rlist = None
  if BETA == -1:
    # Compute beta based on the upgrade rate for nsbw obeying routers
    # (karsten's data show this slightly underestimates client upgrade rate)
    nsbw_yes = VersionRangeRestriction("0.2.1.17")
    sorted_rlist = c.read_routers(ns_list)

    nsbw_cnt = 0
    non_nsbw_cnt = 0
    for r in sorted_rlist:
      if nsbw_yes.r_is_ok(r): nsbw_cnt += 1
      else: non_nsbw_cnt += 1
    BETA = float(nsbw_cnt)/(nsbw_cnt+non_nsbw_cnt)

  global GUARD_BETA
  if GUARD_BETA == -1:
    # Compute GUARD_BETA based on the upgrade rate for nsbw obeying routers
    # (karsten's data show this slightly underestimates client upgrade rate)
    guardbw_yes = NodeRestrictionList([VersionRangeRestriction("0.2.1.23"),
       NotNodeRestriction(VersionRangeRestriction("0.2.2.0", "0.2.2.6"))])

    if not sorted_rlist:
      sorted_rlist = c.read_routers(ns_list)

    guardbw_cnt = 0
    non_guardbw_cnt = 0
    for r in sorted_rlist:
      if guardbw_yes.r_is_ok(r): guardbw_cnt += 1
      else: non_guardbw_cnt += 1
    GUARD_BETA = float(guardbw_cnt)/(guardbw_cnt+non_guardbw_cnt)


  # FIXME: This is poor form.. We should subclass the Networkstatus class
  # instead of just adding members
  for i in xrange(max_rank):
    n = ns_list[i]
    n.list_rank = i
    if n.bandwidth == None:
      plog("NOTICE", "Your Tor is not providing NS w bandwidths for "+n.idhex)
    else:
      got_ns_bw = True
    n.measured = False
    prev_consensus["$"+n.idhex] = n

  if not got_ns_bw:
    # Sometimes the consensus lacks a descriptor. In that case,
    # it will skip outputting 
    plog("ERROR", "Your Tor is not providing NS w bandwidths!")
    sys.exit(0)

  # Take the most recent timestamp from each scanner 
  # and use the oldest for the timestamp of the result.
  # That way we can ensure all the scanners continue running.
  scanner_timestamps = {}
  for da in argv[1:-1]:
    # First, create a list of the most recent files in the
    # scan dirs that are recent enough
    for root, dirs, f in os.walk(da):
      for ds in dirs:
        if re.match(r"^scanner\.\d+$", ds):
          newest_timestamp = 0
          for sr, sd, files in os.walk(da+"/"+ds+"/scan-data"):
            for f in files:
              if re.search("^bws-[\S]+-done-", f):
                fp = file(sr+"/"+f, "r")
                slicenum = sr+"/"+fp.readline()
                timestamp = float(fp.readline())
                fp.close()
                # old measurements are probably
                # better than no measurements. We may not
                # measure hibernating routers for days.
                # This filter is just to remove REALLY old files
                if time.time() - timestamp > MAX_AGE:
                  plog("DEBUG", "Skipping old file "+f)
                  continue
                if timestamp > newest_timestamp:
                  newest_timestamp = timestamp
                bw_files.append((slicenum, timestamp, sr+"/"+f))
                # FIXME: Can we kill this?
                if slicenum not in timestamps or \
                     timestamps[slicenum] < timestamp:
                  timestamps[slicenum] = timestamp
          scanner_timestamps[ds] = newest_timestamp

  # Need to only use most recent slice-file for each node..
  for (s,t,f) in bw_files:
    fp = file(f, "r")
    fp.readline() # slicenum
    fp.readline() # timestamp
    for l in fp.readlines():
      try:
        line = Line(l,s,t)
        if line.idhex not in nodes:
          n = Node()
          nodes[line.idhex] = n
        else:
          n = nodes[line.idhex]
        n.add_line(line)
      except ValueError,e:
        plog("NOTICE", "Conversion error "+str(e)+" at "+l)
      except AttributeError, e:
        plog("NOTICE", "Slice file format error "+str(e)+" at "+l)
      except Exception, e:
        plog("WARN", "Unknown slice parse error "+str(e)+" at "+l)
        traceback.print_exc()
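
The comment before the loop above notes that only the most recent slice file should count for each node; Node.add_line is where the per-node lines accumulate. A minimal sketch of that dedup rule on its own, assuming each parsed Line carries an idhex and the timestamp of its slice file:

def newest_per_node(lines):
    # Keep, for each router, only the measurement from the newest
    # slice file it appears in.
    newest = {}
    for line in lines:
        prev = newest.get(line.idhex)
        if prev is None or line.timestamp > prev.timestamp:
            newest[line.idhex] = line
    return newest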
Example #23
 def new_consensus_event(self, n):
   TorCtl.ConsensusTracker.new_consensus_event(self, n)
   TorUtil.plog("INFO", "New consensus arrived. Rejoice!")
   self.used_entries = []
   self.set_entries()