import json  # used by the document loaders below
import os    # used by the test fixture at the end of this section


def get_connections(db, source):
    # Open the primary database, plus one connection for each sibling
    # database found next to it.
    conn = get_db_connection(db, source)
    db_list = locate_other_dbs(db, '-')
    conn_list = []
    for fn in db_list:
        conn_list.append(get_db_connection(fn, source))
    return conn, conn_list
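# Every helper in this section leans on get_db_connection(), which is
# defined elsewhere. A minimal sketch of what it might look like, assuming
# SQLite (the test fixture at the end of this section opens a psrd.db
# file); the `source` keyword is accepted here only because the callers
# above pass it, and its real use is not shown in this section.
import sqlite3

def get_db_connection(db, source=None):
    # Sketch only: open (or create) the SQLite database at path `db`.
    conn = sqlite3.connect(db)
    conn.row_factory = sqlite3.Row  # assumption: rows are read by column name
    return conn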
def dump_db(output, db, book):
    conn = get_db_connection(db, source=book)
    curs = conn.cursor()
    # Create files for interesting subtypes
    dump_types(output, conn, book)
    # Create files for rules
    dump_rules(output, conn, book)
def load_rule_structure_documents(db, args, parent):
    conn = get_db_connection(db)
    for arg in args:
        fp = open(arg, 'r')
        struct = json.load(fp)
        fp.close()
        load_rule_structure_document(db, conn, arg, struct)
def load_url_references(db, args, parent):
    conn = get_db_connection(db)
    # Load each URL-reference file passed in args
    for arg in args:
        print arg
        fp = open(arg, 'r')
        struct = json.load(fp)
        fp.close()
        load_url_reference(conn, struct)
def load_section_index(db, args, parent):
    conn = get_db_connection(db)
    build_default_index(db, conn)
    # Used for loading additional index files
    for arg in args:
        fp = open(arg, 'r')
        struct = json.load(fp)
        fp.close()
        load_additional_index_entries(db, conn, arg, struct)
    strip_urls(conn)
def load_spell_list_documents(db, args, parent):
    conn = get_db_connection(db)
    last = []
    for arg in args:
        fp = open(arg, 'r')
        struct = json.load(fp)
        fp.close()
        try:
            load_spell_list_document(db, conn, arg, struct, parent)
        except ProcessLastException, pe:
            # Defer this document: roll back and queue it for a later pass
            conn.rollback()
            last.append((struct, arg))
def load_documents(db, args, parent):
    conn = get_db_connection(db)
    last = []
    for arg in args:
        fp = open(arg, 'r')
        struct = json.load(fp)
        fp.close()
        try:
            load_document(db, conn, arg, struct, parent)
        except ProcessLastException, pe:
            # Defer this document: roll back and queue it for a later pass
            conn.rollback()
            last.append((struct, arg))
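# In both loaders above, `last` collects the documents whose load raised
# ProcessLastException; the snippets end before showing what happens to
# them. A hedged sketch of one way the deferred list could be drained,
# assuming a second pass succeeds once the other documents are in place:
for struct, arg in last:
    load_document(db, conn, arg, struct, parent)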
def dump_table(output_dir, db, book):
    extensions = load_extension_file(output_dir, book)
    conn = get_db_connection(db, source=book)
    curs = conn.cursor()
    section_cache = {}
    # Re-parse each extension table and fold its items back into sections
    for table_data in extensions['tables']:
        url = table_data['url']
        find_section(curs, url=url)
        table = curs.fetchone()
        parser = get_parser(table_data['parser'])
        items = parser(table, table_data)
        for item in items:
            update_sections(curs, section_cache, table_data, table, item)
    # Emit the touched sections in stable (sorted-by-url) order
    urls = sorted(section_cache.keys())
    output = produce_output([section_cache[url] for url in urls])
    write_output(output_dir, book, output)
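# dump_table expects the extension file to carry a 'tables' list. A
# hypothetical entry, inferred only from the two keys read above ('url'
# selects the section, 'parser' names the parsing strategy); real entries
# may carry more fields:
example_table_data = {
    'url': 'pfsrd://Core Rulebook/Example',  # hypothetical URL
    'parser': 'default',                     # hypothetical parser name
}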
def output_creatures(db, args, parent):
    # parent means nothing in this case
    conn = get_db_connection(db)
    for arg in args:
        output_creature(conn, arg)
def load_extensions(db, extensions):
    conn = get_db_connection(db)
    for extension in extensions:
        load_extension(conn, extension)
# Test fixture (setUp, presumably from a unittest.TestCase): open the
# built psrd.db from the directory named by the DATA_DIR environment
# variable.
def setUp(self):
    self.db = os.path.join(os.getenv('DATA_DIR'), 'psrd.db')
    self.conn = get_db_connection(self.db)
    self.curs = self.conn.cursor()
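# A hedged example of a test method that could sit alongside the fixture
# above; the method name and query are hypothetical, and assume the
# connection is SQLite-compatible.
def test_connection_is_usable(self):
    # Sanity check only: the cursor accepts a trivial query.
    self.curs.execute('SELECT 1')
    self.assertEqual(self.curs.fetchone()[0], 1)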