def summarize_old(data, DG):
    """Print a three-part summary of the graph and optionally plot it.

    Reports the top 10% of nodes by degree centrality, by successor
    popularity and by predecessor popularity, then asks whether to draw
    the induced subgraph with networkx/matplotlib.
    """
    size = int(len(DG.nodes()) / 10)
    print("Size = " + str(size))

    central = [pair[0] for pair in most_degree_centrality(DG, limit=size)]
    successors = mostPopularSuc(data, DG, limit=size)
    predecessors = mostPopularPred(data, DG, limit=size)

    # Union of all three rankings, used for the optional plot below.
    combined = []
    combined.extend(central)
    combined.extend(successors)
    combined.extend(predecessors)

    def show_section(header, nodes):
        # One titled section: header, blank line, then "id: title" rows.
        print(header)
        print()
        for node in nodes:
            print(str(node.id) + ": " + str(node.title))

    show_section("Pred Nodes: ", predecessors)
    print()
    show_section("Central Nodes: ", central)
    print()
    show_section("Suc Nodes: ", successors)

    answer = input("plot? ")
    if answer == "y" or answer == "yes":
        sub = DG.subgraph(combined)
        node_labels = labeler(data)
        layout = nx.spring_layout(sub)
        # Only label nodes that actually appear in the computed layout.
        visible_labels = {node: text for node, text in node_labels.items() if node in layout}
        nx.draw(sub, layout, with_labels=True, font_size=12, labels=visible_labels)
        plt.draw()
        plt.show()
def writer(topic, data=0, init_description=""):
    """Interactively author a new Node for *topic* and persist it.

    Walks the user through summary text, title, type tags, past/future
    links, and optional related/keyword/edit phases, then cross-links the
    new node into the dataset and writes ``../data/<topic>.json`` back.

    The ``data`` parameter is ignored — it is immediately replaced by
    ``reader(topic)`` and is kept only for call compatibility.

    Returns "" on a premature "break"; otherwise returns None after the
    file has been written.
    """
    from writer import writeToFile, isint

    # ---- helpers -----------------------------------------------------

    def clean(data, curNode):
        # Keep links symmetric: every node curNode points at must hold a
        # back-reference to curNode.
        for n in curNode.future:
            if n >= len(data):
                continue
            # BUGFIX: the original tested `n not in data[n].past` (i.e.
            # "does the node link to itself"); the id that must be present
            # in the neighbour's list is curNode.id.
            if curNode.id not in data[n].past:
                data[n].flashback(curNode.id)
        for n in curNode.past:
            if n >= len(data):
                continue
            if curNode.id not in data[n].future:
                data[n].flashforward(curNode.id)
        for n in curNode.related:
            if n >= len(data):
                continue
            if curNode.id not in data[n].related:
                data[n].relate(curNode.id)

    def _search_loop(data, DG):
        # Sub-prompt: a numeric entry shows that node, any text searches titles.
        se = ""
        while se != "end":
            se = input("search: ")
            if se == "end":
                continue
            if isint(se):
                get(data, DG, id=int(se))
            for n in searcher(se, data):
                print(str(n.id) + ": " + n.title)

    def _collect_ids(label, data, DG):
        # Prompt for a list of node ids, with browse commands available.
        # Used for both the "Past: " and "Future: " phases.  Fixes two
        # defects of the original future loop: a duplicated nested `ls`
        # guard, and missing `continue` after suc/pre/cen/get (each command
        # fell through and printed a spurious title search).  It also makes
        # project/branch available in both phases (previously past-only).
        print(label)
        ids = []
        entry = ""
        while entry != "end":
            entry = input("")
            if entry == "end":
                continue
            if entry[:2] == "ls":
                ls(entry, data)
                continue
            if entry == "search":
                _search_loop(data, DG)
                continue
            if entry == "suc":
                for n in mostPopularSuc(data, DG, limit=10):
                    print(str(n.id) + ": " + n.title)
                continue
            if entry == "pre":
                for n in mostPopularPred(data, DG, limit=10):
                    print(str(n.id) + ": " + n.title)
                continue
            if entry == "cen":
                for n in most_degree_centrality(DG, limit=10):
                    print(str(n[0].id) + ": " + n[0].title)
                continue
            if entry == "project":
                get_project(data, DG)
                continue
            if entry == "branch":
                branch(data, DG)
                continue
            if entry == "get":
                get(data, DG)
                continue
            if isint(entry):
                ids.append(int(entry))
            else:
                # Not a command and not an id: treat as a title search.
                print([str(n.id) + ": " + str(n.title) for n in searcher(entry, data)])
        print(ids)
        return ids

    # ---- load the topic ---------------------------------------------
    file = "../data/" + topic + ".json"
    data = reader(topic)
    DG = init(data)
    DG = cleanPred(data, DG)
    next_id = len(data)  # id of the node being created (was `max`, shadowing the builtin)

    # ---- body / summary text ----------------------------------------
    content = ""
    summary = init_description
    print(topic + ".write: " + summary)
    while content != "end":
        content = input("")
        if content == "end":
            continue
        if content == "ls":        # show what has been written so far
            print(summary)
            continue
        if content == "\n":        # explicit newline in the summary
            summary += "\n"
            continue
        if content == "break":     # premature abort
            break
        summary += content + " "
    if content == "break":
        return ""

    # ---- title -------------------------------------------------------
    print("Title: ")
    title = input("")
    if title == "up":              # "up" promotes the whole summary to the title
        title = summary

    # ---- type tags ---------------------------------------------------
    print("Type: ")
    node_type = []                 # was `type`, shadowing the builtin
    t = ""
    while t != "end":
        t = input("")
        if t == "end":
            continue
        node_type.append(t)

    # ---- links -------------------------------------------------------
    back = _collect_ids("Past: ", data, DG)
    future = _collect_ids("Future: ", data, DG)

    # ---- optional extras: related / keywords / edit ------------------
    c = ""
    related = []
    keyword = []
    while c != "end":
        c = input("")
        if c == "end":
            continue
        if c == "break":
            break
        if c == "related":
            print("Related: ")
            r_temp = ""
            while r_temp != "end":
                r_temp = input("")
                if r_temp == "end":
                    continue
                if isint(r_temp):
                    related.append(int(r_temp))
                else:
                    print([str(n.id) + ": " + str(n.title) for n in searcher(r_temp, data)])
            print(related)
        if c == "keywords":
            print("Keywords: ")
            k_temp = ""
            while k_temp != "end":
                k_temp = input("")
                if k_temp == "end":
                    continue
                keyword.append(k_temp)
        if c == "edit":
            data = edit(data)
    if c == "break":
        return ""

    print(title)
    print(node_type)
    print(summary)

    # ---- build, cross-link, persist ----------------------------------
    current_Node = Node(title, node_type, summary, keyword, back, future, related, next_id)
    clean(data, current_Node)
    data.append(current_Node)
    # (the original's trailing `max += 1` was dead — the local was never read again)
    writeToFile(file, data)
# NOTE(review): orphaned fragment — this sits at top level between writer() and
# interface(), yet references `start`, `data` and `DG`, which are not defined in
# this scope.  It duplicates (verbatim, up to the truncation) the "ls"/"graph"/
# "summarize" command handling that already lives inside interface() below.
# Presumably a stray copy left behind by an edit — confirm with the author and
# delete rather than ship.  Code left byte-identical.
if start == "ls": for n in data: if isinstance(n, int): continue print(str(n.id) + ": " + str(n.title)) if start == "graph": draw_all(data, DG) #summarize data #degree centrality, pred nodes, suc nodes (page rank) if start == "summarize": size = int(len(DG.nodes()) / 10) print("Size = " + str(size)) centralnodes = [ n[0] for n in most_degree_centrality(DG, limit=size) ] suc = mostPopularSuc(data, DG, limit=size) pred = mostPopularPred(data, DG, limit=size) totalNodes = [] totalNodes.extend(centralnodes) totalNodes.extend(suc) totalNodes.extend(pred) print("Pred Nodes: ") print() for n in pred: print(str(n.id) + ": " + str(n.title)) print()
def interface(init_topic=""):
    """Top-level interactive REPL over the topic files in ``../data/``.

    Outer loop selects (or creates) a topic JSON file; inner loop dispatches
    single-word commands (print/edit/write/search/ls/graph/summarize/get/
    axioms/cycle/project/branch/pyvis/load) against that topic's data.

    If *init_topic* is given, that topic is opened first without prompting.
    """
    from mapper import reload
    topic = ""
    while topic != "end":
        # ---- choose a topic -----------------------------------------
        if init_topic == "":
            topic = input("topic: ")
            if topic == "end":
                continue
            if topic == "ls":
                # List every known topic (json files, extension stripped).
                for n in [n.replace(".json", "") for n in os.listdir("../data/") if ".json" in n]:
                    print(n)
                continue
            if len(searcher(topic, list(os.listdir("../data/")))) > 0:
                # Show fuzzy matches; we still fall through and open `topic`
                # literally below (original behavior).
                print()
                for n in [n.replace(".json", "") for n in searcher(topic, list(os.listdir("../data/"))) if ".json" in n]:
                    print(n)
        else:
            topic = init_topic
            # BUGFIX: clear the preset topic after first use — otherwise the
            # outer loop reopened the same topic forever and typing "end" at
            # the command prompt could never exit the function.
            init_topic = ""

        file = "../data/" + topic + ".json"
        if topic + ".json" not in set(os.listdir("../data/")):
            # Unknown topic: confirm before creating an empty dataset file.
            print("NEW")
            conf = input("confirm: ")
            if conf == "y" or conf == "yes":
                with open(file, "w+") as cur_file:
                    cur_file.write(json.dumps([]))
            else:
                continue

        start = ""
        data = reader(topic)
        DG = reload(data)
        # Dataset length at session start, so "print" can show what was added.
        preLen = len(data)
        path = ""
        walker = 0
        # NOTE(review): unused — appears to be an abandoned command registry;
        # kept in case unseen code references it, confirm before removing.
        SET_OF_COMMANDS = ["end", "print", "edit", "write", "search"]

        # ---- command loop -------------------------------------------
        while start != "end":
            start = input(topic + ": ")
            if start == "end":
                continue

            # Print nodes added this session, then optionally persist.
            if start == "print":
                # BUGFIX: was range(preLen + 1, len(data)), which skipped the
                # first node added during this session.
                for i in range(preLen, len(data)):
                    print(str(data[i].id) + ": " + data[i].title)
                print(data)
                conf = input("confirm?: ")
                if conf == "y" or conf == "yes":
                    writeToFile(file, data)
                else:
                    continue

            if start == "edit":
                data = edit(data)
                writeToFile(file, data)
                DG = reload(data)

            # Author new content, then reload dataset and graph.
            if start == "write":
                writer(topic)
                data = reader(topic)
                DG = reload(data)

            if start == "search":
                se = ""
                while se != "end":
                    se = input("search: ")
                    if se == "end":
                        continue
                    if isint(se):
                        get(data, DG, id=int(se))
                    for n in searcher(se, data):
                        print(str(n.id) + ": " + n.title)

            # A bare number fetches that node directly.
            if isint(start):
                get(data, DG, id=int(start))

            if start[:2] == "ls":
                ls(start, data)
                continue

            if start == "graph":
                draw_all(data, DG)

            if start == "summarize":
                # Same report (degree centrality, pred/suc popularity, optional
                # plot) as summarize_old(); delegate instead of keeping a
                # duplicated inline copy of the whole block.
                summarize_old(data, DG)

            if start == "get":
                get(data, DG)

            if start == "axioms":
                # Leaves = nodes with no outgoing edges.
                leaves = [n for n, d in DG.out_degree() if d == 0]
                print(len(leaves))
                dprint(leaves)

            if start == "cycle":
                isCycle(DG)

            if start == "project":
                get_project(data, DG)

            if start == "branch":
                branch(data, DG)

            if start == "pyvis":
                from net_vis import net_vis
                net_vis(data)

            # Load an external txt/pdf into the topic via the terminal editor.
            if start == "load":
                from load import file_finder, term_text_editor, pdf_loader, txt_loader
                if len(path) > 0:
                    # A file is already loaded: "new" picks another, anything
                    # else resumes editing from the saved walker position.
                    s = input("LOADED PATH: " + path + "\n")
                    if s == "new":
                        path = file_finder()
                        if ".txt" in path:
                            t_data = txt_loader(path)
                        elif ".pdf" in path:
                            t_data = pdf_loader(path)
                        else:
                            print("NO VALID FILE")
                        term_text_editor(path, from_topic=topic)
                        data = reader(topic)
                        DG = reload(data)
                    else:
                        walker = term_text_editor(t_data, init_index=walker, from_topic=topic)
                        data = reader(topic)
                        DG = reload(data)
                else:
                    path = file_finder()
                    if ".txt" in path:
                        t_data = txt_loader(path)
                    elif ".pdf" in path:
                        t_data = pdf_loader(path)
                    else:
                        print("NO VALID FILE")
                    walker = term_text_editor(t_data, from_topic=topic)
                    data = reader(topic)
                    DG = reload(data)