Example #1
0
def import_descriptors(wherefrom, persistence_file):
  if not gc.isenabled():
    gc.enable()

  reader = DescriptorReader(wherefrom, persistence_path=persistence_file)
  log('info', 'recalled %d files processed from my source(s) provided' % len(reader.get_processed_files()))
  with reader:
    i = 0
    for i, desc in enumerate(reader): # 'enumerate' might be memory-inefficient here
      desc_model = Descriptor(
        descriptor=desc._path.split('/')[-1], # do we have to be so ugly, though?
        nickname=desc.nickname,
        address=desc.address,
        orport=desc.or_port,
        dirport=desc.dir_port,
        fingerprint=desc.fingerprint,
        platform=desc.platform,
        published=desc.published,
        uptime=desc.uptime)
      db_session.add(desc_model)
      if (i+1) % 100000 == 0: # total could be e.g. 323715 descriptors
                              # (find ../../data/server-descriptors-2013-02 -type f | wc -l)
        print 'committing..',
        db_session.commit() # committed ORM objects can be garbage collected, which is important
                            # when dealing with these amounts of rows
        print 'done. collecting garbage..',
        gc.collect()
        print 'done.'
  log('info', 'iterated over %d files' % i)
  db_session.commit()
Example #2
0
        def tutorial_example():
            from stem.descriptor.reader import DescriptorReader

            with DescriptorReader(
                ["/home/atagar/server-descriptors-2013-03.tar"]) as reader:
                for desc in reader:
                    print "found relay %s (%s)" % (desc.nickname,
                                                   desc.fingerprint)
Example #3
0
class VerboseDescriptorReader(object):
    """Wrapper around stem's DescriptorReader that reports progress.

    Behaves exactly like the wrapped reader as an iterator and context
    manager, but prints a running document count to stderr every 25 entries.
    """

    def __init__(self, targets, *args, **kwargs):
        self._targets = targets
        self._entries_seen = 0
        # All extra arguments are forwarded to the underlying reader.
        self._reader = DescriptorReader(targets, *args, **kwargs)

    def __iter__(self):
        for entry in self._reader:
            self._entries_seen += 1
            if not self._entries_seen % 25:
                print >>sys.stderr, "%d documents parsed…\r" % (self._entries_seen,),
            yield entry

    def __enter__(self):
        # Start the reader's worker thread, then hand back this wrapper.
        self._reader.start()
        return self

    def __exit__(self, exit_type, value, traceback):
        self._reader.stop()
Example #4
0
File: util.py  Project: zackw/exitmap
def relay_in_consensus(fingerprint, cached_consensus_path):
    """
    Check if a relay is part of the consensus.

    Returns True if the relay identified by `fingerprint' appears in the
    consensus stored at `cached_consensus_path', and False otherwise.
    """

    # Consensus fingerprints are upper-case hex; normalize before comparing.
    wanted = fingerprint.upper()

    with DescriptorReader(cached_consensus_path) as reader:
        return any(entry.fingerprint == wanted for entry in reader)
Example #5
0
 def __init__(self, targets, *args, **kwargs):
     """Remember the targets, zero the counter, and build the wrapped reader."""
     self._entries_seen = 0
     self._targets = targets
     # Extra positional/keyword arguments pass straight through to stem.
     self._reader = DescriptorReader(targets, *args, **kwargs)
Example #6
0
            # NOTE(review): fragment — the enclosing function header and
            # `try:` sit above this chunk.
            # NOTE(review): yaml.load() without an explicit Loader can run
            # arbitrary tags; prefer yaml.safe_load if the file is untrusted.
            hosts = yaml.load(f.read())
    except IOError:
        print("[!] Could not read hosts file.")
        sys.exit(-1)
#    except yaml.ReaderError:
#        print("[!] Error parsing YAML file.")
#        sys.exit(-1)

    # Flatten the per-host fingerprint lists into one flat list.
    fingerprints = []
    for hostname in hosts:
        fingerprints.extend(hosts[hostname])

    print("[i] %i fingerprints found." % len(fingerprints))

    descriptors = {}
    with DescriptorReader(args.descriptors) as reader:
        for descriptor in reader:
            # NOTE(review): linear membership test per descriptor; a
            # set(fingerprints) would be O(1) — confirm before changing.
            if descriptor.fingerprint in fingerprints:
                try:
                    # Bandwidth-history snapshot for matching relays.
                    # NOTE(review): strftime("%s") (epoch seconds) is a
                    # platform-specific extension — TODO confirm portability.
                    descriptor_data = {
                        'nickname':
                        descriptor.nickname,
                        'read_history_end':
                        descriptor.read_history_end.strftime("%s"),
                        'read_history_interval':
                        descriptor.read_history_interval,
                        'read_history_values':
                        descriptor.read_history_values,
                        'write_history_end':
                        descriptor.write_history_end.strftime("%s"),
                        'write_history_interval':
def unpacking_and_database_creation(database_name, file_name, database_csv_name):
    """Build/open the sqlite database and import server descriptors from file_name.

    On first run creates the 'server_descriptors' table plus a 'familyview'
    view; on later runs it just connects to the existing database.  Each
    descriptor stem reads from `file_name` becomes one row, and everything
    is committed once at the end.

    NOTE(review): `database_csv_name` only ever receives a header line and
    is deleted again before returning — apparently a leftover of an earlier
    CSV-based design; confirm before relying on it or removing the parameter.
    """
    debug_level = 2
    con = None
    table_name = 'server_descriptors'
    print("Processing file: " + file_name)

    # Creating the sqlite3 database and the view
    if not os.path.exists(database_name):
        if debug_level >= 2:
            print("Creating the database and connecting to it ..")
        con = lite.connect(database_name)
        sql_query = '\
    	CREATE TABLE ' + table_name + '\
    	(\
    	address 			TEXT,\
    	family 				TEXT,\
    	fingerprint 		TEXT,\
    	nickname 			TEXT,\
    	published 			TEXT\
    	);'
        if debug_level >= 3:
            print(".. with the following sql query: ")
            print(sql_query)
        try:
            con.execute(sql_query)
        except lite.Error as e:
            print("Error %s:" % e.args[0])
            sys.exit(1)
        if debug_level >= 2:
            print(".. finished.")
        # Create the view familyview: drops rows with an empty family set
        # and the header-like sentinel row.
        if debug_level >= 2:
            print("Creating the view 'familyview' ..")
        sql_query_familyview = "CREATE VIEW familyview AS SELECT nickname || '@' || address || ' ' || published AS hash, * FROM server_descriptors WHERE \
    								 family != 'set([])' AND hash != 'nickname@address published'"
        if debug_level >= 3:
            print(".. with the following sql query .. ")
            print(sql_query_familyview)
        try:
            con.execute(sql_query_familyview)
        except lite.Error as e:
            print("Error %s:" % e.args[0])
            sys.exit(1)
        if debug_level >= 2:
            print(".. finished.")

    else:
        if debug_level >= 2:
            print("Connecting to the database ..")
        con = lite.connect(database_name)
        if debug_level >= 2:
            print(".. finished.")

    # Only import when the source exists and no CSV marker file is present.
    if os.path.exists(file_name) and not os.path.exists(database_csv_name):
        with DescriptorReader(file_name) as reader:
            with open(database_csv_name, 'a') as csv_file:
                csv_file.write("address,family,fingerprint,nickname,published\n")
                insert_string = 'INSERT INTO ' + table_name + ' VALUES (?,?,?,?,?)'
                if debug_level >= 2:
                    print("Reading the server descriptors ..")
                for desc in reader:
                    # str() guards against None fields and stem's set-valued family.
                    con.execute(insert_string, (
                        str(desc.address), str(desc.family), str(desc.fingerprint), str(desc.nickname),
                        str(desc.published)))

    if con:
        if debug_level >= 2:
            print("Closing the database ..")
        con.commit()  # single commit for the whole import
        con.close()
        if debug_level >= 2:
            print(".. finished.")
    if os.path.exists(database_csv_name):
        os.remove(database_csv_name)
Example #8
0
from stem.descriptor.reader import DescriptorReader

# Walk one month's descriptor archive and report every relay found.
archive = ["/home/atagar/server-descriptors-2013-03.tar"]
with DescriptorReader(archive) as reader:
  for relay in reader:
    print("found relay %s (%s)" % (relay.nickname, relay.fingerprint))
Example #9
0
		# NOTE(review): Python 2 fragment (print statements, `except X, e`);
		# this is an older duplicate of unpacking_and_database_creation above,
		# and the enclosing try/if begin before this chunk.
		con.execute(sql_query_familyview)
	except lite.Error, e:
		print "Error %s:" % e.args[0]
		sys.exit(1)
	if debug_level >= 2:
		print ".. finished."
	
else:
	if debug_level >= 2:
		print "Connecting to the database .."
	con = lite.connect(database_name)
	if debug_level >= 2:
		print ".. finished."

# Only (re)import when the tarball exists and no CSV marker file is present.
if os.path.exists(file_name_tar) and not os.path.exists(database_csv_name):
	with DescriptorReader(file_name_tar) as reader:
		with open(database_csv_name, 'a') as csv_file:
			csv_file.write("address,family,fingerprint,nickname,published\n")
			values_list = []  # NOTE(review): never used below — dead variable
			insert_string = 'INSERT INTO ' + table_name + ' VALUES (?,?,?,?,?)'
			# i = 0
			# last_i = 0
			if debug_level >= 2:
				print "Reading the server descriptors .."
			# Parameterized insert per descriptor; the commit happens later.
			for desc in reader:
				con.execute(insert_string, (str(desc.address), str(desc.family), str(desc.fingerprint), str(desc.nickname), str(desc.published)))
				
# finally:
if con:
	if debug_level >= 2:
		print "Closing the database .."
        # NOTE(review): fragment — `pubKey`, `metadata`, `desc_r`,
        # `onion_metadata` and `rsa_key_data` are defined above this chunk.
        # Strip the PEM armor so only the base64 key body indexes the dict.
        # NOTE(review): unlike onionKey below, newlines are not stripped
        # here — presumably done earlier in the truncated part; confirm.
        pubKey = pubKey.replace('-----BEGIN RSA PUBLIC KEY-----', '')
        pubKey = pubKey.replace('-----END RSA PUBLIC KEY-----', '')
        metadata['key_type'] = 'signing key'
        rsa_key_data[pubKey] = metadata

    # Index the onion key the same way, tagged with its own metadata dict.
    onionKey = desc_r.onion_key
    if onionKey is not None:
        onionKey = onionKey.replace('\n', '')
        onionKey = onionKey.replace('-----BEGIN RSA PUBLIC KEY-----', '')
        onionKey = onionKey.replace('-----END RSA PUBLIC KEY-----', '')
        onion_metadata['key_type'] = 'onion key'
        rsa_key_data[onionKey] = onion_metadata

# Get RSA keys from archived server descriptors.
# NOTE(review): fragment — `rsa_key_data` is populated above this chunk, and
# the loop body is truncated at the dangling `contact` check below.
descriptors = ['archived_descriptors/']
with DescriptorReader(descriptors) as reader:
    for desc_a in reader:
        # Record only the fields stem actually parsed (absent ones are None).
        metadata = {}
        if desc_a.nickname is not None:
            metadata['nickname'] = desc_a.nickname
        if desc_a.fingerprint is not None:
            metadata['fingerprint'] = desc_a.fingerprint
        if desc_a.published is not None:
            metadata['date'] = desc_a.published
        if desc_a.address is not None:
            metadata['ip4_address'] = desc_a.address
        if desc_a.or_port is not None:
            metadata['port'] = desc_a.or_port
        if desc_a.platform is not None:
            metadata['platform'] = desc_a.platform
        if desc_a.contact is not None: