def _save_json(self, node):
    """Export the catalog tree rooted at ``node`` to JSON at ``self.path``."""
    exporter = JsonExporter(indent=2, sort_keys=True)
    with open(self.path, 'w') as handle:
        exporter.write(node, handle)
    self._debug('Catalog saved to json \"{}\"'.format(self.path))
    return True
def main(ioc_file, output_dir):
    """Explore each IOC listed in ``ioc_file`` (CSV rows: type,id) and export
    the resulting relation tree to a TXT and a JSON file in ``output_dir``.
    """
    with open(ioc_file) as csvfile:
        iocreader = csv.reader(csvfile, delimiter=',')
        for row in iocreader:
            root = AnyNode(id=row[1], type=row[0])
            logger.info('=========Start to explore IOC: %s', root.id)
            # Builds the relation tree under ``root`` (side effect).
            ioc_list = build_ioc_relation(root)
            timestamp = datetime.now().strftime('%Y%m%d%H%M')
            query_depth = config.get('general', 'depth')
            # Build the common filename stem once instead of twice.
            base = output_dir + root.id + '_depth_' + query_depth + '_' + timestamp
            txtfile = base + '.txt'
            # ``with`` guarantees the handle is closed; the original used a
            # bare ``file = open(...)`` which also shadowed the builtin.
            with open(txtfile, "w") as f:
                f.write(str(RenderTree(root)))
            logger.info('Export IOCs to TXT file: %s', txtfile)
            jsonfile = base + '.json'
            with open(jsonfile, "w") as f:
                exporter = JsonExporter(indent=2, sort_keys=False)
                exporter.write(root, f)
            logger.info('Export IOCs to JSON file: %s', jsonfile)
            logger.info('=========Done exploration for IOC: %s', root.id)
    return
def test_tree():
    """Smoke-test anytree: build a small family tree, render it, export JSON."""
    from anytree import Node, RenderTree
    udo = Node("Udo")
    marc = Node("Marc", parent=udo)
    lian = Node("Lian", parent=marc)
    dan = Node("Dan", parent=udo)
    jet = Node("Jet", parent=dan)
    jan = Node("Jan", parent=dan)
    joe = Node("Joe", parent=dan)
    print("CHILD")
    print(dan.children)
    print(dan.name)
    # BUG FIX: the original executed ``Node('/Udo')`` and
    # ``Node('/Udo/Dan/Joe')`` as statements — doctest *output* pasted in as
    # code, silently creating two orphan nodes. Kept here as comments only.
    print(udo)   # -> Node('/Udo')
    print(joe)   # -> Node('/Udo/Dan/Joe')
    for pre, fill, node in RenderTree(udo):
        print("%s%s" % (pre, node.name))
    from anytree.exporter import JsonExporter
    exporter = JsonExporter(indent=2, sort_keys=True)
    print("EXPORT")
    print(exporter.export(udo))
def ast_anytree_to_json(root_node):
    """Return the JSON serialization of the anytree rooted at ``root_node``."""
    from anytree.exporter import JsonExporter
    return JsonExporter(indent=2, sort_keys=True).export(root_node)
def main(json_filepath, out_dot_path, htmlTitle):
    """IO: read a JSON tree, embed it into the treeViewer.html template and
    append the HTML body to ``out_dot_path``."""
    # Read JSON
    with open(json_filepath) as data_file:
        data = json.load(data_file)
    # Get edges
    edges, root = tree2graph(data)
    exporter = JsonExporter(indent=1, sort_keys=True)
    # Single quotes would terminate the value='...' attribute below, so they
    # are replaced with spaces. The original round-tripped through a temp
    # file with fileinput/re; operating on the string directly is equivalent
    # (rstrip each line, replace quotes, join without newlines).
    jsonTreeString = ''.join(
        line.rstrip().replace("'", " ")
        for line in exporter.export(root).split('\n'))
    body = '<body onload="onLoadDocument();">'
    body = body + '<h1>' + htmlTitle + '</h1>'
    body = body + ' <input id="vdspdata" type="hidden" value=\'' + jsonTreeString + '\' />'
    body = body + '</body>'
    src = 'treeViewer.html'
    filepath = pkg_resources.resource_filename(__name__, src)
    dst = out_dot_path
    copyfile(filepath, dst)
    with open(dst, "a") as myfile:
        myfile.write(body)
def json_export(self):
    """Export the tree whose root is registered in globals() under
    "<starting fen>0" to tree.json."""
    exporter = JsonExporter(indent=2, sort_keys=True, dictexporter=None)
    filename = "tree.json"
    # NOTE(review): root nodes appear to be kept in module globals keyed by
    # "<fen><depth>" — confirm against create_tree().
    root = globals()[str(self.starting_board_state.fen()) + str(0)]
    # BUG FIX: the original never closed the file handle.
    with open(filename, 'w') as filehandle:
        filehandle.write(exporter.export(root))
def create(tweetsFile):
    """Build a propagation tree from a file of tweet JSON lines.

    A node has a nodeNr (insertion order, starting at 0), an idStr (tweet id)
    and optionally a parent. Returns the populated PropTree.
    """
    propTree = PropTree()  # an instance of a tree
    nodeNr = 0  # to be ordered by time
    unknownNodeNr = 0
    requestCounter = 1
    posts = []
    print(tweetsFile)
    # Read one JSON document per line (handle closed deterministically).
    with open(tweetsFile, 'r') as fh:
        for line in fh:
            posts.append(json.loads(line))
    print("This file contains " + str(len(posts)) + " posts.")
    for post in posts:
        # Tree-local id for the post.
        post['tweet_nr'] = nodeNr
        idStr = post['id_str']
        idUser = post['user']['id_str']
        if 'retweeted_status' in post:
            parentIdStr = post['retweeted_status']['id_str']
            parentIdUser = post['retweeted_status']['user']['id_str']
        elif 'quoted_status' in post:
            parentIdStr = post['quoted_status']['id_str']
            parentIdUser = post['quoted_status']['user']['id_str']
        if 'retweeted_status' in post or 'quoted_status' in post:
            # make retweet or quote node
            parentNode = getFriendInTree(propTree, idUser, parentIdStr,
                                         requestCounter, len(posts))
            requestCounter += 1
            if parentNode is None:
                # This node has no parent, so create one artificially;
                # artificial parents are distinguished by an "ex" prefix.
                parentNodeNr = "ex" + str(unknownNodeNr)
                parentNode = AnyNode(nodeNr=parentNodeNr, idStr=parentIdStr,
                                     idUser=parentIdUser)
                propTree.addRoot(parentNode)
                unknownNodeNr += 1
            AnyNode(nodeNr=nodeNr, idStr=idStr, idUser=idUser, parent=parentNode)
        else:
            # this is original content
            reference = AnyNode(nodeNr=nodeNr, idStr=idStr, idUser=idUser)
            propTree.addRoot(reference)
        nodeNr += 1
    propTree.updatePosts(posts)
    exporter = JsonExporter(indent=2, sort_keys=True)
    # BUG FIX: the original called ``open(...).close`` WITHOUT parentheses,
    # leaking a handle, then reopened the file in 'r+' and never guaranteed
    # closing on error. Opening once with 'w' truncates and writes safely.
    # NOTE(review): tweetsFile[30:-4] assumes a fixed path prefix — confirm.
    outPath = './data/tree/trees/' + tweetsFile[30:-4] + '.txt'
    with open(outPath, 'w') as savedFile:
        for root in propTree.roots:
            exporter.write(root, savedFile)
            savedFile.write("&\n")
    propTree.makeNodeTree()
    writeToFile(propTree)
    return propTree
def test_json_exporter():
    """Json Exporter."""
    # Two-level sample tree; sibling order below is insertion order.
    root = AnyNode(id="root")
    s0 = AnyNode(id="sub0", parent=root)
    AnyNode(id="sub0B", parent=s0)
    AnyNode(id="sub0A", parent=s0)
    s1 = AnyNode(id="sub1", parent=root)
    AnyNode(id="sub1A", parent=s1)
    AnyNode(id="sub1B", parent=s1)
    s1c = AnyNode(id="sub1C", parent=s1)
    AnyNode(id="sub1Ca", parent=s1c)
    # Expected JsonExporter(indent=2, sort_keys=True) output, line by line:
    # children keep insertion order, attributes are alphabetically sorted.
    lines = [
        '{', ' "children": [', ' {', ' "children": [', ' {',
        ' "id": "sub0B"', ' },', ' {', ' "id": "sub0A"', ' }', ' ],',
        ' "id": "sub0"', ' },', ' {', ' "children": [', ' {',
        ' "id": "sub1A"', ' },', ' {', ' "id": "sub1B"', ' },', ' {',
        ' "children": [', ' {', ' "id": "sub1Ca"', ' }', ' ],',
        ' "id": "sub1C"', ' }', ' ],', ' "id": "sub1"', ' }', ' ],',
        ' "id": "root"', '}'
    ]
    exporter = JsonExporter(indent=2, sort_keys=True)
    exported = exporter.export(root).split("\n")
    exported = [e.rstrip() for e in exported]  # just a fix for a strange py2x behavior.
    eq_(exported, lines)
    # Round-trip through real files and compare byte-for-byte.
    # NOTE(review): neither temp file is explicitly flushed before
    # filecmp.cmp — verify buffering does not mask a mismatch here.
    with NamedTemporaryFile(mode="w+") as ref:
        with NamedTemporaryFile(mode="w+") as gen:
            ref.write("\n".join(lines))
            exporter.write(root, gen)
            assert filecmp.cmp(ref.name, gen.name)
def save(self):
    """Persist the policy tree for this agent to ../data/policies/<agent>.rpt."""
    extension = ".rpt"
    dir_name = "../data/policies/"
    file_name = dir_name + self.agent_name + extension
    exporter = JsonExporter(indent=2, sort_keys=True)
    # FIX: removed the redundant f.close() that sat inside the with-block.
    with open(file_name, 'w') as f:
        # NOTE(review): exporter.export() already returns a JSON string, so
        # json.dump() stores it as a single escaped string value. Kept for
        # compatibility with existing readers — confirm this is intended.
        json.dump(exporter.export(self.tree), f)
def write_arvore_no_arquivo(arvore, arquivo):
    """Write the tree ``arvore`` as JSON to the file path ``arquivo``."""
    exporter = JsonExporter(indent=2, sort_keys=True)
    # FIX: renamed the local from ``json`` to avoid shadowing the json module.
    serialized = exporter.export(arvore)
    with open(arquivo, 'w') as obj_file:
        obj_file.write(serialized)
def on_stop(self):
    """Persist the current note (if any) and the folder tree on shutdown."""
    if self.current.button:
        self.bank.add_note(self.current.note, self.current.text)
    self.bank.save_notes()
    storage = self.get_current_storage()
    exporter = TreeExporter(indent=2, sort_keys=True)
    self.bank.save_tree(exporter.export(storage.root_folder))
def __init__(self, output_folder, use_compression, process_nr=0):
    """Buffer ASTs in a DataFrame for later (optionally compressed) export."""
    self.output_folder = output_folder
    self.use_compression = use_compression
    self.process_nr = process_nr
    # Buffer of (id, serialized AST) rows; header written only on first save.
    self.first_save = True
    self.df = pd.DataFrame(columns=['id', 'AST'])
    # Exporter that turns an anytree AST into JSON text.
    self.exporter = JsonExporter(indent=2)
def export_tree(self, filename):
    """Export the CST as an ordered dict (stored on self) and as JSON to ``filename``."""
    self.export_cst_dict = DictExporter(dictcls=OrderedDict, attriter=sorted).export(self.cst)
    json_exporter = JsonExporter(indent=2, sort_keys=True)
    with open(filename, 'w') as filehandle:
        json_exporter.write(self.cst, filehandle)
    print('CST tree export to JSON successful!')
    return
def _serialize_ontology(root, filename=None):
    """Serializes an ontology given by its root to a JSON file.

    If no output filename is given, return the serialized tree as a string.
    """
    exporter = JsonExporter(indent=2, sort_keys=True)
    if filename:
        # BUG FIX: JsonExporter.write() expects a file handle, not a path;
        # the original passed the filename string straight through (the
        # sibling _serialize_ontologies opens the file first).
        with open(filename, 'w') as fh:
            exporter.write(root, fh)
    else:
        return exporter.export(root)
def main():
    """Make a full tree from the default targets, and export it in graphviz and JSON form."""
    tree = make_tree()
    DotExporter(tree).to_dotfile('full_tree.dot')
    exporter = JsonExporter(indent=4, sort_keys=True)
    with open('full_tree.json', 'w') as f:
        exporter.write(tree, f)
    print(f'node count: {len(tree.descendants)}')
def _serialize_ontologies(roots, filename):
    """Serializes ontologies given by their roots to a JSON file.

    If no output filename is given, return the list of serialized trees.
    """
    exporter = JsonExporter(indent=2, sort_keys=True)
    # NOTE(review): each entry is itself a JSON *string*, so the dumped file
    # holds a list of encoded strings — confirm readers expect that.
    forest = [exporter.export(root) for root in roots]
    if not filename:
        return forest
    with open(filename, 'w') as f:
        json.dump(forest, f, indent=2, sort_keys=True)
def prepare(self, query):
    """Validate processor configuration and open output sinks before a run."""
    if self.saveimage and not self.imagemetaid1:
        raise ProcessorError('You cannot save to image files without setting at least imagemetaid1.')
    # BUG FIX: the original tested ``not (a, b and c and d)`` — a non-empty
    # tuple, which is always truthy, so this check could never fire.
    if not (self.column_token and self.column_index and self.column_head and self.column_relation):
        raise ProcessorError('You have to set the column indices for the dependency information.')
    self.has_attributes = True if len(query.attributes) > 1 else False
    # Matches values fully enclosed in angle brackets.
    self.rex = re.compile('^<.+>$')
    if self.savejson:
        self.exporter = JsonExporter(indent = 2, sort_keys = False)
        # Handle stays open for the duration of the run; closed elsewhere.
        self.writer = open(self.fileprefix + '.json', 'w')
def load_serialize_evidence_dict():
    """Load the pickled evidence dict and JSON-serialize every entry in place."""
    global server_evidence_dict
    print("Loading evidence dict...", end='')
    with open(evidence_dict_path, 'rb') as fd:
        evidence_dict = pickle.load(fd)
    print("{} evidences".format(len(evidence_dict)))
    print("Serializing evidence dict...")
    exporter = JsonExporter()
    for key in tqdm(evidence_dict):
        entry = evidence_dict[key]
        entry['tree'] = exporter.export(entry['tree'])
        evidence_dict[key] = json.dumps(entry)
    server_evidence_dict = evidence_dict
def export_conversation_trees_to_db(user_id):
    """Collect conversation roots involving ``user_id``, build reply trees,
    and insert them (as JSON) into ``collection_trees``."""
    root_id_list = []
    root_nodes_list = []
    print("Selecting all tweets from id: {}...".format(user_id))
    conversation_query = collection.find(
        {'user.id': user_id, 'in_reply_to_user_id': {"$ne": None}})  # Mentioning somebody
    conversation_query2 = collection.find({'in_reply_to_user_id': user_id})  # Mentioned by somebody
    print("SET A: {} B: {} ".format(conversation_query.count(), conversation_query2.count()))

    def _collect_roots(query, label):
        # Walk each tweet up to its conversation root, deduplicating via the
        # module-level process_set. Factors out the previously duplicated loop.
        for i, tweet in enumerate(query):
            t = find_root_tweet(tweet)
            if t['id'] not in process_set:
                process_set.add(t['id'])
                root_id_list.append(t)
            if i % 1000 == 0:
                print("Processing {} {}".format(label, i))

    _collect_roots(conversation_query, "mentions")
    _collect_roots(conversation_query2, "mentioned")
    print("SET A: {} B: {} UNION: {}".format(conversation_query.count(),
                                             conversation_query2.count(), len(process_set)))
    for i, root_tweet in enumerate(root_id_list):
        # FIX: removed the redundant second assignment of root_nodes.id.
        root_node = AnyNode(id=root_tweet['id'], name=root_tweet['user']['name'],
                            text=root_tweet['text'])
        if i % 1000 == 0:
            pprint.pprint("Populating: {}".format(i))
        populate_node(root_node)
        root_nodes_list.append(root_node)
    pprint.pprint('Total sum of root nodes: {}'.format(len(root_id_list)))
    # DotExporter(root_nodes_list[0]).to_picture("test.png")
    count = 0
    # FIX: create the exporter once instead of once per tree.
    exporter = JsonExporter(indent=2, sort_keys=True)
    for tree in root_nodes_list:
        count = count + len(tree.descendants) + 1
        json_tree = exporter.export(tree)
        collection_trees.insert_one(json.loads(json_tree))
    print("Total conversation count: {}".format(count))
    # FIX: guard against ZeroDivisionError when no roots were found.
    if root_id_list:
        print("Average conversation length: {}".format(count / (len(root_id_list))))
def write_outputs(args, logger, tree):
    """Write outputs: JSON tree, CSV of nodes with rejection status, and the
    colour-graded tree of rejected hypotheses."""
    logger.info("Begin writing outputs")
    json_path = "%s/%s.json" % (args.output_dir, constants.HIERARCHICAL_FDR_OUTPUTS)
    csv_path = "%s/%s.csv" % (args.output_dir, constants.HIERARCHICAL_FDR_OUTPUTS)
    # Export JSON using anytree
    with open(json_path, "w") as output_file:
        JsonExporter(indent=2).write(tree, output_file)
    # Write CSV with an additional column for rejected-or-not
    with open(csv_path, "w", newline="") as output_file:
        writer = csv.writer(output_file)
        writer.writerow([constants.NODE_NAME, constants.PARENT_NAME,
                         constants.PVALUE_LOSSES, constants.REJECTED_STATUS,
                         constants.ADJUSTED_PVALUE])
        for node in anytree.LevelOrderIter(tree):
            parent_name = node.parent.name if node.parent else ""
            writer.writerow([node.name, parent_name, node.pvalue,
                             int(node.rejected), node.adjusted_pvalue])
    # Generate tree of rejected hypotheses with colour grading based on adjusted p-value
    generate_tree_of_rejected_hypotheses(args, logger, tree)
    logger.info("End writing outputs")
class AstFileHandler:
    """Buffers (id, AST-as-JSON) rows in a DataFrame and appends them to a
    per-process CSV file, optionally bz2-compressed."""

    def __init__(self, output_folder, use_compression, process_nr=0):
        self.output_folder = output_folder
        self.use_compression = use_compression
        self.df = pd.DataFrame(columns=['id', 'AST'])
        # Header is written only on the first flush.
        self.first_save = True
        self.process_nr = process_nr
        # Create exporter to export the tree to JSON format
        self.exporter = JsonExporter(indent=2)

    def add_ast(self, ast, id):
        """Serialize ``ast`` to JSON and buffer it under ``id``."""
        output = self.exporter.export(ast)
        # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
        # 2.0 — build a one-row frame and concat instead.
        row = pd.DataFrame([{'id': id, 'AST': output}])
        self.df = pd.concat([self.df, row], ignore_index=True)

    def save(self):
        """Flush the buffer to the per-process CSV and reset it."""
        target = f'{self.output_folder}asts{self.process_nr}.csv{".bz2" if self.use_compression else ""}'
        if self.first_save:
            self.df.to_csv(target, index=False)
            self.first_save = False
        else:
            # Subsequent flushes append without repeating the header.
            self.df.to_csv(target, header=False, index=False, mode='a')
        self.df = pd.DataFrame(columns=['id', 'AST'])
def to_json(self, sink=None, **kwargs):
    """Write region tree info to JSON.

    Arguments:
        sink (str or None): file to save to; if falsy, return the JSON string.
        kwargs: additional arguments passed to anytree's JsonExporter.
    """
    exporter = JsonExporter(indent=2, **kwargs)
    if not sink:
        return exporter.export(self.tree_root)
    with open(sink, 'w') as outfile:
        exporter.write(self.tree_root, outfile)
    return sink
def export(tree, filename, ext='json', **kwargs):
    """Export ``tree`` to ``<filename>.<ext>`` — JSON via anytree's
    JsonExporter, any other extension as a picture via DotExporter."""
    parent = _get_printable_tree(tree)
    if ext == 'json':
        # BUG FIX: the output path was a constant literal and ignored the
        # ``filename`` argument entirely.
        with io.open(f'{filename}.json', mode='w+', encoding='utf-8') as fp:
            JsonExporter(**kwargs).write(parent, fp)
    else:
        DotExporter(parent, **kwargs).to_picture(f'{filename}.{ext}')
def convert_to_json(input="GFOP.owl", output="GFOP.json"):
    """Convert the GFOP OWL ontology into a JSON tree file."""
    # create a root to bundle everything
    root: Node = Node("GFOP")
    nodes = {}
    # read owl file and cache all nodes in a dict{name, node}
    obo = Ontology(input)
    for term in obo.terms():
        term_id = term.id  # renamed: ``id`` shadowed the builtin
        name = term.name
        # find parents in distance 1 (exclude self)
        parent_terms = term.superclasses(with_self=False, distance=1).to_set()
        if not parent_terms:  # covers both None and empty set
            # create root node
            nodes[name] = Node(name, id=term_id)
        else:
            # currently only uses one parent
            parent = parent_terms.pop()
            nodes[name] = Node(name, id=term_id, parent_id=parent.id,
                               parent_name=parent.name)
    # link all nodes to their parents
    for key, node in nodes.items():
        # BUG FIX: was ``key is not root.name`` — string identity comparison,
        # which is implementation-dependent; use equality.
        if key != root.name:
            try:
                # find parent in cached nodes and set to node
                node.parent = nodes[node.parent_name]
            except AttributeError:
                # no parent - add to root
                node.parent = root
    # generate json string (renamed to avoid shadowing the json module)
    exporter = JsonExporter(indent=2, sort_keys=True)
    json_str = exporter.export(root)
    # print json and tree for debugging
    print(json_str)
    for pre, _, node in RenderTree(root):
        print("%s%s" % (pre, node.name))
    # export to json file
    print("Writing to {}".format(output))
    with open(output, "w") as file:
        print(json_str, file=file)
def convert_to_json(input="canopus_classyfire/classyfire.json", output="canopus_classyfire/classyfire_ontology.json"):
    """Convert the flat ClassyFire ontology JSON into a JSON tree file."""
    import json
    # create a root to bundle everything
    root: Node = None
    nodes = {}
    # read the flat ontology and cache all nodes in a dict{id, node}
    with open(input) as json_file:
        data = json.load(json_file)
    for term in data:
        term_id = term["chemont_id"]  # renamed: ``id`` shadowed the builtin
        name = term["name"]
        parent_id = term["parent_chemont_id"]
        # FIX: ``parent_id == None`` replaced by the idiomatic identity test.
        if parent_id is None or parent_id == "null":
            # create root node
            root = Node(name, id=term_id)
            nodes[term_id] = root
        else:
            # currently only uses one parent
            nodes[term_id] = Node(name, id=term_id, parent_id=parent_id)
    # link all nodes to their parents
    for key, node in nodes.items():
        # BUG FIX: was ``key is not root.id`` — string identity comparison,
        # which is implementation-dependent; use equality.
        if key != root.id:
            try:
                # find parent in cached nodes and set to node
                node.parent = nodes[node.parent_id]
            except AttributeError as ex:
                print(ex)
                raise ex
    # generate json string (renamed to avoid shadowing the json module)
    exporter = JsonExporter(indent=2, sort_keys=True)
    json_str = exporter.export(root)
    # print json and tree for debugging
    print(json_str)
    for pre, _, node in RenderTree(root):
        print("%s%s" % (pre, node.name))
    # export to json file
    print("Writing to {}".format(output))
    with open(output, "w") as file:
        print(json_str, file=file)
def build_tree(root):
    """Post-order walk: serialize each node's subtree, store its MD5 hash on
    the config, and save run info — temporarily marking the node as a root."""
    exporter = JsonExporter(sort_keys=True)
    for node in PostOrderIter(root):
        saved_is_root = node.config.is_root
        node.config.is_root = True
        node.name = str(node.config)
        serialized = exporter.export(node).encode('utf-8')
        digest = hashlib.md5(serialized).hexdigest()  # renamed: ``hash`` shadowed the builtin
        node.config.set_hash(digest)
        if node.config.is_run:
            save_info(node)
        # Restore the original root flag and refresh the display name.
        node.config.is_root = saved_is_root
        node.name = str(node.config)
def createJson():
    """Build a tree from the extracted PhilPapers taxonomy and export it to data.json.

    The taxonomy file holds CSV-like rows:
    ["name", "ID", "parent IDs", "primary parent ID"].
    """
    os.chdir(os.path.dirname(__file__))
    os.chdir("..")
    os.chdir("data")
    nodes = {"1": Node("root")}
    # FIX: the with-block guarantees the taxonomy file is closed even on error.
    with open('PhilpapersTaxonomy.txt', 'r') as philFile:
        # First pass: create a node per category, keyed by its numeric ID
        # (strip spaces, commas, etc. but keep the digits as a string so the
        # keys play nicely with the dictionary).
        for line in reader(philFile):
            currID = ''.join(filter(str.isdigit, line[1]))
            nodes[currID] = Node("temp")
        # Restart from the front for the second pass.
        philFile.seek(0)
        # Second pass: name each node and attach it to its primary parent
        # (the last field in the row).
        for line in reader(philFile):
            currID = ''.join(filter(str.isdigit, line[1]))
            currParentID = ''.join(filter(str.isdigit, line[-1]))
            nodes[currID].name = str(line[0])
            nodes[currID].parent = nodes[currParentID]
    # export our tree to json
    exporter = JsonExporter(indent=2, sort_keys=True)
    with open('data.json', 'w') as f:
        exporter.write(nodes["1"], f)
    # NOTE(review): these results are discarded — kept to preserve the
    # original read-back side effect; confirm whether they are still needed.
    data = pd.read_json("data.json")
    df = pd.DataFrame(data['children'])
def to_json(self, filename: str = "tree_WKO.json"):
    """Export the tree to JSON (NaN mapped to null), write it to ``filename``,
    print it, and return None."""
    serialized = JsonExporter(indent=4).export(self.tree)
    # JSON has no NaN literal; map pandas-style NaN to null in the text.
    serialized = serialized.replace("NaN", "null")
    with open(filename, 'w') as f:
        f.write(serialized)
    print(serialized)
def create_tree(self):
    """Build a small MCTS demo tree from the starting position, backpropagate
    one result, render it, and export it to test.json."""
    d0 = MCTSNode(state=str(self.starting_board_state.fen()), wins=1, sims=3)
    # parent_state = self.starting_board_state.copy()
    self.starting_board_state.push_san("e4")
    d1n1 = MCTSNode(state=str(self.starting_board_state.fen()), wins=0, sims=1, parent=d0)
    self.starting_board_state.push_san("e5")
    d1n2 = MCTSNode(state=str(self.starting_board_state.fen()), wins=1, sims=2, parent=d0)
    self.starting_board_state.push_san("Qh5")
    d2n1 = MCTSNode(state=str(self.starting_board_state.fen()), wins=1, sims=1, parent=d1n2)
    print(RenderTree(d0))
    print("\n")
    # Backpropagate the simulated result from the leaf up to the root.
    sim_node = d2n1
    while (sim_node.parent):
        sim_node.parent.wins += 1
        sim_node.parent.sims += 1
        sim_node = sim_node.parent
    for pre, _, node in RenderTree(d0):
        treestr = u"%s%s" % (pre, node.score)
        print(treestr.ljust(8), node.wins, node.sims)
    print("\n")
    exporter = JsonExporter(indent=2, sort_keys=True)
    # BUG FIX: the file handle was never closed; use a context manager.
    with open("test.json", 'w') as filehandle:
        filehandle.write(exporter.export(d0))
def treeBuilder():  # Tree Builder Function
    """Assemble the library tree from the SQLite tables (locations,
    directories, files) and return it serialized as JSON.

    On failure, returns the error message / exception object (legacy
    behavior kept for callers that display it).
    """
    # FIX: removed the dead inner helper ``returnJsonTree`` (never called)
    # and its duplicate exporter.
    exporter = JsonExporter(indent=2, sort_keys=True, ensure_ascii=False)
    # Dictionary to procedurally store node objects, keyed by full path.
    libTreeDict = {}
    try:
        libTreeDict["myRoot"] = libNode("my Library Folders", fullpath="/", topDir=1)
        # First layer: user-selected library locations (root is the parent).
        for row in c.execute('SELECT location FROM location'):
            libTreeDict[row[0]] = libNode(
                os.path.basename(Path(row[0])), fullpath=row[0],
                parent=libTreeDict["myRoot"], topDir=1)
        # Checked directories; parent is stored in the dir database as '/path'.
        for row in c.execute(
                '''SELECT name, location, parent_dir, checked FROM directories WHERE checked == 1'''):
            libTreeDict[row[1]] = libNode(row[0], fullpath=row[1],
                                          parent=libTreeDict[row[2]])
        # End points: files. Directories or locations are possible parents.
        for row in c.execute(
                '''SELECT name, location, parent_dir, track_id, type, size, checked FROM library WHERE checked == 1'''):
            libTreeDict[row[1]] = libNode(
                row[0], row[1], parent=libTreeDict[row[2]],
                track_data=[row[3], row[0], row[4], row[5]])
        return exporter.export(libTreeDict["myRoot"])
    except Exception as e:
        # NOTE(review): broad catch kept deliberately — callers display the
        # returned error rather than handling an exception.
        if hasattr(e, 'message'):
            return getattr(e, 'message', str(e))
        return e