def __init__(self, control_host = _CONTROL_HOST, control_port = _CONTROL_PORT, sock = None, authenticator = _AUTHENTICATOR):
    """Initialize the CtlUtil object, connect to TorCtl.

    Opens a TCP connection to Tor's control port (unless a pre-made
    socket is supplied), wraps it in a TorCtl.Connection, authenticates,
    and enables debug logging on the connection.

    @param control_host: host Tor's control listener is on.
    @param control_port: port Tor's control listener is on.
    @param sock: optional pre-created socket; a new TCP socket is
        created when this is None (or otherwise falsy).
    @param authenticator: secret used to authenticate to the control
        port.
    @raise socket.error: if the connection to the control port fails
        (logged, then re-raised).
    """
    self.sock = sock
    if not sock:
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    self.control_host = control_host
    self.control_port = control_port
    self.authenticator = authenticator

    # Try to connect
    try:
        self.sock.connect((self.control_host, self.control_port))
    except socket.error:
        # Narrowed from a bare "except:": connect() raises socket.error,
        # and a bare except would also intercept KeyboardInterrupt /
        # SystemExit with a misleading log message. Still re-raised.
        errormsg = "Could not connect to Tor control port.\n" + \
                   "Is Tor running on %s with its control port opened on %s?" %\
                   (control_host, control_port)
        logging.error(errormsg)
        raise

    self.control = TorCtl.Connection(self.sock)

    # Authenticate connection.
    # BUG FIX: authenticate with the authenticator passed to this
    # constructor (stored in self.authenticator above) instead of always
    # using config.authenticator, which silently ignored the parameter.
    # Assumes the module default _AUTHENTICATOR mirrors
    # config.authenticator, so default-argument callers are unaffected
    # -- TODO confirm against the module header.
    self.control.authenticate(self.authenticator)

    # Set up log file. debugfile is presumably a module-level file
    # object; not visible in this chunk -- TODO confirm.
    self.control.debug(debugfile)
def listen():
    """Connect to the local Tor control port and start listening for
    NEWCONSENSUS events on a background TorCtl thread.
    """
    # Open a TCP connection to the control port on localhost.
    conn_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    address = ('127.0.0.1', config.control_port)
    conn_sock.connect(address)

    # Wrap the socket in a TorCtl connection, spin up its event loop
    # in a non-daemon thread, and authenticate.
    conn = TorCtl.Connection(conn_sock)
    conn.launch_thread(daemon=0)
    conn.authenticate(config.authenticator)

    # Route new-consensus events to our handler.
    conn.set_event_handler(MyEventHandler())
    conn.set_events([TorCtl.EVENT_TYPE.NEWCONSENSUS])

    print('Listening for new consensus events.')
    logging.info('Listening for new consensus events.')
def main(argv):
  """Aggregate per-slice bandwidth scanner results.

  argv[1] is used as the base data directory (scanner config, control
  log, control-auth cookie); every element of argv[1:-1] is walked for
  completed scanner result files.  The last argv element is presumably
  an output path consumed later in this function -- the remainder is
  not visible in this chunk; TODO confirm.
  """
  # Load the scanner configuration and point TorUtil's log at our file.
  TorUtil.read_config(argv[1]+"/scanner.1/bwauthority.cfg")
  TorUtil.logfile = "data/aggregate-debug.log"

  # Record the git revisions of TorFlow and TorCtl for traceability.
  (branch, head) = TorUtil.get_git_version(PATH_TO_TORFLOW_REPO)
  plog('NOTICE', 'TorFlow Version: %s' % branch+' '+head)
  (branch, head) = TorUtil.get_git_version(PATH_TO_TORCTL_REPO)
  plog('NOTICE', 'TorCtl Version: %s' % branch+' '+head)

  # Connect to Tor's control port and authenticate with the control
  # auth cookie; control-port traffic is mirrored to an unbuffered log.
  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  s.connect((TorUtil.control_host,TorUtil.control_port))
  c = TorCtl.Connection(s)
  c.debug(file(argv[1]+"/aggregate-control.log", "w", buffering=0))
  c.authenticate_cookie(file(argv[1]+"/tor.1/control_auth_cookie", "r"))

  ns_list = c.get_network_status()

  # Sort descending by consensus bandwidth.  None bandwidths are
  # temporarily mapped to -1 so the comparator never sees None, then
  # restored afterwards.  NOTE(review): the cmp lambda buckets values
  # by 10000, so bandwidths within 10000 of each other compare equal --
  # a fuzzy (non-total) ordering; kept exactly as-is.
  for n in ns_list:
    if n.bandwidth == None: n.bandwidth = -1
  ns_list.sort(lambda x, y: int(y.bandwidth/10000.0 - x.bandwidth/10000.0))
  for n in ns_list:
    if n.bandwidth == -1: n.bandwidth = None

  got_ns_bw = False
  max_rank = len(ns_list)

  cs_junk = ConsensusJunk(c)

  # TODO: This is poor form.. We should subclass the Networkstatus class
  # instead of just adding members
  # Annotate each entry with its rank in the sorted list and index it
  # in prev_consensus (presumably a module-level dict keyed by
  # "$"-prefixed fingerprint -- TODO confirm).
  for i in xrange(max_rank):
    n = ns_list[i]
    n.list_rank = i
    if n.bandwidth == None:
      plog("NOTICE", "Your Tor is not providing NS w bandwidths for "+n.idhex)
    else:
      got_ns_bw = True
    n.measured = False
    prev_consensus["$"+n.idhex] = n

  # Abort early if no entry at all carried a consensus bandwidth.
  if not got_ns_bw:
    # Sometimes the consensus lacks a descriptor. In that case,
    # it will skip outputting
    plog("ERROR", "Your Tor is not providing NS w bandwidths!")
    sys.exit(0)

  # Take the most recent timestamp from each scanner
  # and use the oldest for the timestamp of the result.
  # That way we can ensure all the scanners continue running.
  scanner_timestamps = {}
  for da in argv[1:-1]:
    # First, create a list of the most recent files in the
    # scan dirs that are recent enough
    for root, dirs, f in os.walk(da):
      for ds in dirs:
        # NOTE(review): the unescaped "." and the char class "[\d+]"
        # make this regex looser than it looks (e.g. "scannerX5"
        # matches).  Probably meant r"^scanner\.\d+$"; left unchanged.
        if re.match("^scanner.[\d+]$", ds):
          newest_timestamp = 0
          for sr, sd, files in os.walk(da+"/"+ds+"/scan-data"):
            for f in files:
              # Completed slice results are named "bws-<slice>-done-*".
              if re.search("^bws-[\S]+-done-", f):
                # File layout: first line slice number, second line
                # the measurement timestamp.
                fp = file(sr+"/"+f, "r")
                slicenum = sr+"/"+fp.readline()
                timestamp = float(fp.readline())
                fp.close()
                # old measurements are probably
                # better than no measurements. We may not
                # measure hibernating routers for days.
                # This filter is just to remove REALLY old files
                if time.time() - timestamp > MAX_AGE:
                  sqlf = f.replace("bws-", "sql-")
                  plog("INFO", "Removing old file "+f+" and "+sqlf)
                  os.remove(sr+"/"+f)
                  try: os.remove(sr+"/"+sqlf)
                  except: pass # In some cases the sql file may not exist
                  continue
                if timestamp > newest_timestamp:
                  newest_timestamp = timestamp
                # bw_files is presumably a module-level list of
                # (slicenum, timestamp, path) tuples -- TODO confirm.
                bw_files.append((slicenum, timestamp, sr+"/"+f))
          scanner_timestamps[ds] = newest_timestamp

  # Need to only use most recent slice-file for each node..
  # Parse every measurement line into a Line object, grouped into
  # per-router Node objects in the module-level nodes map.
  for (s,t,f) in bw_files:
    fp = file(f, "r")
    fp.readline() # slicenum
    fp.readline() # timestamp
    for l in fp.readlines():
      try:
        line = Line(l,s,t)
        if line.idhex not in nodes:
          n = Node()
          nodes[line.idhex] = n
        else:
          n = nodes[line.idhex]
        n.add_line(line)
      except ValueError,e:
        # Malformed numeric field in the slice line; skip it.
        plog("NOTICE", "Conversion error "+str(e)+" at "+l)
      except AttributeError, e:
        # Line lacked an expected field; skip it.
        plog("NOTICE", "Slice file format error "+str(e)+" at "+l)
      except Exception, e:
        # Unexpected parse failure: log with traceback but keep going.
        plog("WARN", "Unknown slice parse error "+str(e)+" at "+l)
        traceback.print_exc()