def reg():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    GSA_file_CSV = fileDict.get('GSA_Input_CSV')
    GSA_file_SHP = fileDict.get('GSA_Input_SHP')
    gsa_meta = fileDict.get('GSA_meta')
    svgNaming = fileDict.get('GSA_data')[0]
    with open('out/gsa/mymap.svg', 'r') as myfile:
        mymap = myfile.read()
    mymap = mymap.replace('"', "'")
    observations = weights.extractObservations(GSA_file_CSV, "ALL", gsa_meta[3])
    w = weights.generateWeightsUsingShapefile(GSA_file_SHP, idVariable=gsa_meta[2])
    regions = regionalization.generateRegions(w=w, observations=observations)[0]
    regions = regionalization.getNamesFromRegions(regions)
    nameMapping = util.getNameMapping('out/gsa/mymap.svg', gsa_meta[0], gsa_meta[1])
    nameMapping = {
        key: value.replace("'", "APOSTROPHE")
        for key, value in nameMapping.items()
    }
    numRegs = len(set(regions.values()))
    return render_template("regionalization.html",
                           case_num=case_num,
                           mymap=json.dumps(mymap),
                           regions=json.dumps(regions),
                           numRegs=numRegs,
                           svgNaming=svgNaming,
                           nameMapping=json.dumps(str(nameMapping)))
def shp_vars_post():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    name_var = "data-" + request.form.get('gsa-id').lower()
    fileDict['GSA_SHP_VARS'] = [request.form.get('gsa-id'), name_var]
    # fileDict['GSA_SHP_VARS'] = [request.form.get('gsa-id'), request.form.get('gsa-name-var')]
    return redirect(url_for("gsa_blueprint.upload_csv_get", case_num=case_num))
def upload_csv_post():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    # TODO: check for .csv extension errors
    fileDict['GSA_Input_CSV'] = io_service.storefile(
        request.files.get('GSA_Input_CSV'))
    return redirect(url_for("gsa_blueprint.gsa_select", case_num=case_num))
def checkExtensions(case_num):
    errors = []
    fileDict = dao.getFileDict(case_num)
    gsa_file_list = fileDict['GSA_file_list']
    exts = ['.shp', '.shx', '.dbf']
    if gsa_file_list is not None and len(gsa_file_list) > 0 and gsa_file_list[0].filename != '':
        for ext in exts:
            if not any(f.filename.endswith(ext) for f in gsa_file_list):
                errors.append("Error: please upload .shp, .shx, and .dbf files for GSA.")
                break
    sna_file = fileDict['SNA_Input']
    if sna_file is not None and not sna_file.endswith(('.xls', '.xlsx')):
        errors.append("Error: please upload an .xls or .xlsx file for SNA.")
    nlp_file = fileDict['NLP_Input_LDP']
    # terms = fileDict.get('NLP_LDP_terms')
    # if nlp_file is not None and not nlp_file.endswith('.txt'):
    #     errors.append("Error: please upload a .txt file for the NLP Lexical Dispersion Plot.")
    sentiment_file = fileDict["NLP_Input_Sentiment"]
    if sentiment_file is not None and not sentiment_file.endswith('.txt'):
        errors.append("Error: please upload a .txt file for Sentiment Analysis.")
    return errors
def sent():
    # the original variable was named `type`, shadowing the builtin; renamed
    tab = request.args.get('type', 0, type=str)
    print(tab)
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    NLP_new_example_file = fileDict.get('NLP_Input_corpus')
    if NLP_new_example_file is None:
        NLP_new_example_file = fileDict.get('NLP_New_Example')
    filename = ''
    if tab == "tab-five":
        filename = "f" + NLP_TO_NETWORK.sentiment_mining(NLP_new_example_file)
    elif tab == "tab-four":
        filename = "f" + NLP_TO_NETWORK.relationship_mining(NLP_new_example_file)
    elif tab == "tab-six":
        filename = "f" + NLP_OTHER.wordcloud(NLP_new_example_file)
    elif tab == "tab-seven":
        filename = "f" + NLP_OTHER.stemmerize(NLP_new_example_file)
    elif tab == "tab-eight":
        filename = "f" + NLP_OTHER.lemmatize(NLP_new_example_file)
    elif tab == "tab-nine":
        filename = "f" + NLP_OTHER.abstract(NLP_new_example_file)
    elif tab == "tab-ten":
        filename = "f" + NLP_OTHER.top20_verbs(NLP_new_example_file)
    elif tab == "tab-eleven":
        filename = "f" + NLP_OTHER.top20_persons(NLP_new_example_file)
    elif tab == "tab-twelve":
        filename = "f" + NLP_OTHER.top20_locations(NLP_new_example_file)
    elif tab == "tab-thirteen":
        filename = "f" + NLP_OTHER.top20_organizations(NLP_new_example_file)
    elif tab == "tab-fourteen":
        filename = "f" + NLP_OTHER.sentence_sentiment_distribution(NLP_new_example_file)
    print(filename)
    return jsonify(result=filename)
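# The if/elif ladder in sent() could be collapsed into a dispatch table. This
# is a minimal sketch, not part of the original code; TAB_HANDLERS and
# dispatch_tab are hypothetical names, while the tab keys and the NLP_OTHER /
# NLP_TO_NETWORK callables are taken from sent() above.
TAB_HANDLERS = {
    "tab-four": NLP_TO_NETWORK.relationship_mining,
    "tab-five": NLP_TO_NETWORK.sentiment_mining,
    "tab-six": NLP_OTHER.wordcloud,
    "tab-seven": NLP_OTHER.stemmerize,
    "tab-eight": NLP_OTHER.lemmatize,
    "tab-nine": NLP_OTHER.abstract,
    "tab-ten": NLP_OTHER.top20_verbs,
    "tab-eleven": NLP_OTHER.top20_persons,
    "tab-twelve": NLP_OTHER.top20_locations,
    "tab-thirteen": NLP_OTHER.top20_organizations,
    "tab-fourteen": NLP_OTHER.sentence_sentiment_distribution,
}

def dispatch_tab(tab, input_file):
    # Returns '' for unknown tabs, matching the ladder's fallthrough behavior.
    handler = TAB_HANDLERS.get(tab)
    return "f" + handler(input_file) if handler else ''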
def checkExtensionsSNA(case_num):
    errors = []
    fileDict = dao.getFileDict(case_num)
    sna_file = fileDict['SNA_Input']
    if sna_file is not None and not sna_file.endswith(('.xls', '.xlsx')):
        errors.append("Error: please upload an .xls or .xlsx file for SNA.")
    return errors
def geoNetwork(case_num):
    fileDict = dao.getFileDict(case_num)
    outputShape = network.create_network(fileDict['Geonet_Input_Streets'],
                                         fileDict['Geonet_Input_Crimes'])
    geojson.convert(outputShape + '.shp')
    # fileDict can be modified here so that the files are accessible in
    # visualization.py; add further processing here
    return fileDict
def upload_csv_get():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    GSA_file_SHP = fileDict.get('GSA_Input_SHP')
    gsaSVG = "out/gsa/mymap.svg"
    # gsa_service.generateMap(GSA_file_SHP, gsaSVG)
    nameMapping = gsa_service.getNameMapping(gsaSVG, fileDict['GSA_SHP_VARS'][1])
    return render_template("gsaUploadCsv.html", names=nameMapping)
def gsa_select():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    # csv = "usjoin.csv" if sample_path == "usa" else "IRQcasualties.csv"
    # shp = "us48.shp" if sample_path == "usa" else "IRQ_adm1.shp"
    # csv = "IRQcasualties.csv"
    csv = "IRQattacks_Oct27_GSA.csv"
    shp = "IRQ_adm1.shp"
    if 'GSA_Input_CSV' not in fileDict:
        fileDict['GSA_Input_CSV'] = url_for('static', filename="sample/gsa/" + csv)[1:]
        fileDict['GSA_Input_SHP'] = url_for('static', filename="sample/gsa/" + shp)[1:]

    # TODO: take in years and file names instead of hard coding
    # TODO: reorganize use of gsa_meta
    class Input:
        def __init__(self, autoRow, autoCol, dynRow, dynCol, id):
            self.autoRow = autoRow
            self.autoCol = autoCol
            self.dynRow = dynRow
            self.dynCol = dynCol
            self.id = id

    info = Input("ALL", ["2014.0"], "ALL",
                 np.arange(2014, 2017, 0.25).tolist(), "NAME_1")
    if request.method == 'GET':
        return render_template("gsaselect.html", info=info, case_num=case_num)
    if request.method == 'POST':
        info.autoRow = request.form.get('auto-row')
        info.autoCol = ast.literal_eval(request.form.get('auto-col'))
        info.dynRow = request.form.get('dyn-row')
        # info.dynCol = ast.literal_eval(request.form.get('dyn-col'))
        info.id = request.form.get('gsa-id')
        # id = fileDict['GSA_SHP_VARS'][0]
        localAutoCorrelation, globalAutoCorrelation, spatialDynamics = gsa_service.runGSA(
            case_num, info.autoRow, info.autoCol, info.dynRow, info.dynCol, info.id)
        fileDict['GSA_data'] = ('id-1', localAutoCorrelation,
                                globalAutoCorrelation, spatialDynamics[0],
                                spatialDynamics[1], spatialDynamics[2],
                                spatialDynamics[3])
        # fileDict['GSA_meta'] = ('data-id-1', 'data-name-1', "NAME_1",
        #                         np.arange(2014, 2017, 0.25).tolist(),
        #                         fileDict['GSA_SHP_VARS'][1])
        fileDict['GSA_meta'] = ('data-id-1', 'data-name-1', "NAME_1",
                                np.arange(2014, 2017, 0.25).tolist(), "name-1")
        return redirect(url_for('visualize_blueprint.visualize', case_num=case_num))
def upload():
    # Each new "session" has a random case number associated with it. There is
    # a small chance that case numbers collide; whoever used the number second
    # would then overwrite the first person's data. That is not how it should
    # be in the final version, but it is fine for now.
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    fileDict['research_question'] = request.form.get('smartsearch')
    if fileDict['research_question'] is not None and fileDict['research_question'].strip() != '':
        if validators.url(fileDict['research_question'].strip()):
            # temporary submission path for the SmartSearch demo
            return redirect(url_for('visualize_blueprint.visualize', case_num=case_num))
        else:
            # if it's not a URL, take it to the SmartSearch input
            return redirect(url_for('smart_search_blueprint.sheetSelect', case_num=case_num))

    # The strings used to index request.files come from the HTML names of the
    # input fields; see upload.html.
    fileDict['GSA_Input_CSV'] = io_service.storefile(request.files.get('GSA_Input_CSV'))
    fileDict['GSA_Input_SHP'] = io_service.storeGSA(request.files.getlist('GSA_Input_map'))
    fileDict['GSA_file_list'] = request.files.getlist('GSA_Input_map')
    fileDict['NLP_Input_corpus'] = io_service.storeNLP(request.files.getlist('NLP_Input_corpus'))
    fileDict['NLP_Input_LDP'] = io_service.storefile(request.files.get('NLP_Input_LDP'))
    fileDict['NLP_Input_Sentiment'] = io_service.storefile(request.files.get('NLP_Input_Sentiment'))
    fileDict["NLP_INPUT_NER"] = request.form.get("NLP_INPUT_NER")
    fileDict["NLP_INPUT_IOB"] = request.form.get("NLP_INPUT_IOB")
    fileDict['SNA_Input'] = io_service.storefile(request.files.get('SNA_Input'))
    # NOTE: 'SGA_Input' below looks like a typo for 'GSA_Input'; left as-is.
    fileDict['GSA_Input'] = io_service.storefile(request.files.get('SGA_Input'))
    fileDict['research_question'] = request.form.get('research_question')

    # make sure there are no input errors by the user; if there are,
    # we can't proceed, so we stay on the upload page
    errors = io_service.checkExtensions(case_num)
    if len(errors) > 0:
        return render_template('upload.html', errors=errors, case_num=case_num)

    # there are intermediary steps for the SNA and NLP analyses
    if fileDict['SNA_Input']:
        return redirect(url_for('sna_blueprint.sheetSelect', case_num=case_num))
    if fileDict['GSA_Input_CSV']:
        return redirect(url_for('gsa_blueprint.gsa_select', case_num=case_num))
    # FIXME: if a user uploads both SNA and NLP input, the NLP intermediary
    # step is never reached.
    if fileDict['NLP_Input_corpus']:
        return redirect(url_for('visualize_blueprint.visualize', case_num=case_num))
    # If NLP is chosen, let the user pick from the available tools: no extra
    # redirect is needed, just radio buttons under the file upload (before the
    # <hr> in the template).
    return redirect(url_for('visualize_blueprint.visualize', case_num=case_num))
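# The collision concern in upload()'s opening comment could be addressed with
# collision-resistant identifiers. A minimal sketch, assuming the DAO accepts
# arbitrary string keys; new_case_num is a hypothetical helper, not part of
# the original code:
import uuid

def new_case_num():
    # uuid4 carries 122 random bits, so collisions are practically impossible,
    # unlike short random integers.
    return uuid.uuid4().hex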
def checkExtensionsNLP(case_num):
    errors = []
    fileDict = dao.getFileDict(case_num)
    nlp_file = fileDict['NLP_Input_corpus']
    if nlp_file is not None and not nlp_file.endswith('.txt'):
        errors.append("Error: please upload a .txt file for NLP.")
    return errors
def runGSA(case_num, autocorrelationRows, autocorrelationCols, sdRows, sdCols,
           idVariable):
    fileDict = dao.getFileDict(case_num)
    observations = weights.extractObservations(fileDict['GSA_Input_CSV'],
                                               autocorrelationRows,
                                               autocorrelationCols)
    w = weights.generateWeightsUsingShapefile(fileDict['GSA_Input_SHP'],
                                              idVariable=idVariable)
    globalAutoCorrelation = autocorrelation.globalAutocorrelation(observations, w)
    localAutoCorrelation = autocorrelation.localAutocorrelation(observations, w)
    # re-extract observations for the spatial-dynamics rows/columns
    observations = weights.extractObservations(fileDict['GSA_Input_CSV'],
                                               sdRows, sdCols)
    spatialDynamics = spatial_dynamics.markov(observations, w, method="spatial")
    return localAutoCorrelation, globalAutoCorrelation, spatialDynamics
def view_sent_change():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    response = fileDict["SentimentChange"]
    return render_template("sentiment_change.html",
                           sent_json=json.dumps(response,
                                                sort_keys=True,
                                                indent=4,
                                                separators=(',', ':')))
def subgraph_viz():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    centralNode = request.args.get('centralNode', '', type=str)
    toJson = {}
    for item in fileDict['Cliques']:
        if item[0] == centralNode:
            toJson = item[1]
            return jsonify(toJson)
    # no clique matched the requested central node; return the empty dict
    return jsonify(toJson)
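# Because fileDict['Cliques'] holds (centralNode, clique) pairs, the linear
# scan in subgraph_viz() could become a dict lookup. A minimal sketch
# (clique_for is hypothetical; assumes central-node names are unique):
def clique_for(fileDict, centralNode):
    # dict() over the pair list maps each central node to its clique.
    return dict(fileDict['Cliques']).get(centralNode, {})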
def get_autocorrelation(case_num):
    # NOTE: the case_num parameter is immediately overwritten from the query string
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    GSA_file_CSV = fileDict.get('GSA_Input_CSV')
    GSA_file_SHP = fileDict.get('GSA_Input_SHP')
    year = request.args.get('year', 0, type=int)
    if year != 0:
        loc, glob = fileDict['ac'][year]
        return jsonify(year=year, loc=loc, glob=glob)
    return jsonify(year="something went wrong", loc=0, glob=0)
def sheetSelect():
    case_num = request.args.get("case_num", None)
    if request.method == 'GET':
        fileDict = dao.getFileDict(case_num)
        researchQuestion = fileDict.get('research_question', None).strip()
        sentences = top5CAMEO(researchQuestion)
        # read in the CAMEO codes and pass them on to the template
        cameoCodes = []
        with open("static/resources/smartsearch/cameocodes.txt", 'r') as file:
            for line in file:
                cameoCodes.append(line.rstrip('\n'))  # drop trailing newlines
        return render_template('smart_search_select.html',
                               sentences=sentences,
                               cameoCodes=cameoCodes)
    if request.method == 'POST':  # a form was submitted
        fileDict = dao.getFileDict(case_num)
        researchQuestion = fileDict.get('research_question', None).strip()
        sentences = top5CAMEO(researchQuestion)
        if request.form.get('cameo') != 'SeeDropdown':
            cameo_code = sentences[int(request.form.get('cameo', 0))]
        else:
            cameo_code = request.form.get('cameos', '')
        # NOTE: this currently only works for the radio buttons; the drop-down
        # menu needs a special case.
        articles_to_scrape = int(request.form.get('numArticlesScraped', 0))
        # named entities are treated as subjects only
        subjects = request.form.get('name_entities', '')
        search_question = subjects + ' ' + cameo_code
        return redirect(url_for('smart_search_blueprint.landing',
                                case_num=case_num,
                                sentence=search_question,
                                article_count=articles_to_scrape))
def textnets_sample(sample_path):
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    if sample_path == 'new_textnets_example':
        fileDict['textnets_New_Example'] = 'static/sample/nlp/State-of-the-Union-Addresses.csv'
    else:
        fileDict['Textnets_Input'] = url_for('static', filename="sample/nlp/" + sample_path + '/')[1:]
    return redirect(url_for('textnets_blueprint.textnetviz', case_num=case_num))
def jgvis():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    NLP_new_example_file = fileDict.get('NLP_New_Example')
    jgdata, graph = NLP_TO_NETWORK.sentiment3D(NLP_new_example_file)
    systemMeasures = {}
    systemMeasures["Description"] = "Description"
    return render_template("Jgraph.html",
                           jgdata=jgdata,
                           graph=graph,
                           case_num=case_num,
                           systemMeasures=systemMeasures)
def get_edge_data():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    graph = fileDict.get('copy_of_graph')
    name = request.args.get('name', '', type=str)
    if graph is None or len(graph.G) == 0:
        return jsonify(name=name)
    pair = name.split(",")
    link = graph.G[pair[0]][pair[1]]
    toJsonify = dict(name=name, source=pair[0], target=pair[1])
    for attr in link:
        toJsonify[attr] = link[attr]
    return jsonify(toJsonify)
def sheetSelect():
    if request.method == 'GET':
        case_num = request.args.get("case_num", None)
        fileDict = dao.getFileDict(case_num)
        researchQuestion = fileDict.get('research_question', None).strip()
        sentences = top5CAMEO(researchQuestion)
        # pass the candidate sentences on to the template
        return render_template('smart_search_select.html', sentences=sentences)
    # a form was submitted
    # TODO: fill in, following the example of sna_blueprint.sheetSelect
    return redirect(url_for('visualize_blueprint.visualize'))
def nlp_sample(sample_path):
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    if sample_path == 'iran':
        fileDict['NLP_Input_Sentiment'] = 'static/sample/nlp/sample_sentiment.txt'
    elif sample_path == 'new_nlp_example':
        fileDict['NLP_New_Example'] = 'static/sample/nlp/nlp_new_example.txt'
    else:
        fileDict['NLP_Input_corpus'] = url_for('static', filename="sample/nlp/" + sample_path + '/')[1:]
    return redirect(url_for('visualize_blueprint.visualize', case_num=case_num))
def nodeSelect():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    graph = SNA(fileDict['SNA_Input'],
                nodeSheet=fileDict['nodeSheet'],
                attrSheet=fileDict['attrSheet'])
    fileDict['graph'] = graph
    if request.method == 'POST':
        nodeColNames = []
        classAssignments = {}
        nodeColNames.append(graph.header[0])  # the first column is always the source
        # exclude the first column; it is automatically included as the source set
        for header in graph.header[1:]:
            fileDict[header + "IsNode"] = request.form.get(header + "IsNode") == "on"
            classAssignments[header] = request.form[header + "Class"]
            fileDict[header + "Name"] = request.form[header + "Name"]
            if fileDict[header + "IsNode"]:
                nodeColNames.append(fileDict[header + "Name"])
        fileDict['nodeColNames'] = nodeColNames
        fileDict['propToggle'] = {
            'emo': request.form.get("emo") == "on",
            'infl': request.form.get("infl") == "on",
            'role': request.form.get("role") == "on"
        }
        graph.createNodeList(nodeColNames)
        if fileDict['attrSheet'] is not None:
            graph.loadAttributes()
        graph.createEdgeList(nodeColNames[0])  # only the first column is a source
        graph.loadOntology(source=nodeColNames[0], classAssignments=classAssignments)
        if fileDict['attrSheet'] is not None:
            graph.calculatePropensities(fileDict['propToggle'])
        graph.closeness_centrality()
        graph.degree_centrality()
        graph.betweenness_centrality()
        return redirect(url_for('visualize_blueprint.visualize', case_num=case_num))
    return render_template("nodeselect.html", nodes=graph.header, case_num=case_num)
def gsa_sample(sample_path):
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    if sample_path == "geonet":
        # populate fileDict with the paths to the input files, run
        # gsa_service.geonet, then redirect to the visualization page
        fileDict['Geonet_Input_Streets'] = 'static/mygeodata/streets.shp'
        fileDict['Geonet_Input_Crimes'] = 'static/mygeodata/crimes.shp'
        return redirect(url_for('gsa_blueprint.get_json', case_num=case_num))
    elif sample_path == "emoSpace":
        fileDict["emotionalSpace"] = True
        return redirect(url_for('gsa_blueprint.emotional_space', case_num=case_num))
    else:
        return redirect(url_for('gsa_blueprint.gsa_select', case_num=case_num))
def jgvis():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    graph = fileDict.get('graph')
    jgdata, SNAbpPlot, attr, systemMeasures = sna_service.SNA2Dand3D(
        graph, request, case_num, _2D=False)
    return render_template("Jgraph.html",
                           jgdata=jgdata,
                           SNAbpPlot=SNAbpPlot,
                           attr=attr,
                           graph=graph,
                           colors=sna_service.colors,
                           case_num=case_num,
                           systemMeasures=systemMeasures)
def checkExtensionsGSA(case_num):
    errors = []
    fileDict = dao.getFileDict(case_num)
    gsa_file_list = fileDict['GSA_file_list']
    exts = ['.shp', '.shx', '.dbf']
    if gsa_file_list is not None and len(gsa_file_list) > 0 and gsa_file_list[0].filename != '':
        for ext in exts:
            if not any(f.filename.endswith(ext) for f in gsa_file_list):
                errors.append("Error: please upload .shp, .shx, and .dbf files for GSA.")
                break
    return errors
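# The shapefile-extension loop above duplicates the one in checkExtensions().
# A minimal deduplication sketch; check_required_exts is a hypothetical helper
# (not in the original code) that both callers could delegate to. It assumes
# the werkzeug FileStorage objects used elsewhere in this module:
def check_required_exts(file_list, exts=('.shp', '.shx', '.dbf')):
    # Returns the required extensions missing from the uploaded file set.
    names = [f.filename for f in file_list]
    return [ext for ext in exts if not any(n.endswith(ext) for n in names)]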
def textnetviz():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    systemMeasures = {}
    systemMeasures["Description"] = "Description"
    try:
        if fileDict['Textnets_Input']:
            textnetAllText(csvPath=fileDict['Textnets_Input'],
                           textColumnName='Text',
                           groupVarColumnName='President')
            # move the generated page to where Flask can find it; note this
            # fails if templates/sotu_textnet.html already exists
            os.rename("sotu_textnet.html", "templates/sotu_textnet.html")
            return render_template("sotu_textnet.html",
                                   case_num=case_num,
                                   systemMeasures=systemMeasures)
    except Exception:
        pass
    return render_template("textnets_new_example.html",
                           case_num=case_num,
                           systemMeasures=systemMeasures)
def get_node_data():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    graph = fileDict.get('copy_of_graph')
    name = request.args.get('name', '', type=str)
    if graph is None or len(graph.G) == 0:
        return jsonify(name=name, eigenvector=None, betweenness=None, sentiment=None)
    graph.closeness_centrality()
    graph.betweenness_centrality()
    graph.degree_centrality()
    # graph.katz_centrality()
    image = graph.getImage(name)
    graph.eigenvector_centrality()
    graph.load_centrality()
    if graph.eigenvector_centrality_dict and graph.eigenvector_centrality_dict.get(name) is not None:
        eigenvector = str(round(graph.eigenvector_centrality_dict.get(name), 4))
    else:
        eigenvector = "eigenvector centrality not available"
    if graph.betweenness_centrality_dict and graph.betweenness_centrality_dict.get(name) is not None:
        betweenness = str(round(graph.betweenness_centrality_dict.get(name), 4))
    else:
        betweenness = "betweenness centrality not available"
    if graph.sentiment_dict and graph.sentiment_dict.get(name) is not None:
        sentiment = str(round(graph.sentiment_dict.get(name), 4))
    else:
        sentiment = "Sentiment not available for this node."
    attributes = graph.get_node_attributes(name)
    toJsonify = dict(name=name,
                     img=image,
                     eigenvector=eigenvector,
                     betweenness=betweenness,
                     sentiment=sentiment,
                     attributes=attributes)
    return jsonify(toJsonify)
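# The three near-identical blocks in get_node_data() could share a formatting
# helper. A minimal sketch; fmt_measure is hypothetical, not part of the
# original code:
def fmt_measure(measure_dict, name, missing_msg):
    # Round to 4 places when the node has a value, else return the message.
    if measure_dict and measure_dict.get(name) is not None:
        return str(round(measure_dict[name], 4))
    return missing_msg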
def sheetSelect():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    inputFile = fileDict['SNA_Input']
    workbook = xlrd.open_workbook(inputFile, on_demand=True)
    fileDict['sheets'] = workbook.sheet_names()
    # if the workbook only has one sheet, the user shouldn't have to specify it
    if len(fileDict['sheets']) == 1:
        fileDict['nodeSheet'] = fileDict['sheets'][0]
        fileDict['attrSheet'] = None
        return redirect(url_for('sna_blueprint.nodeSelect', case_num=case_num))
    if request.method == 'POST':
        fileDict['nodeSheet'] = request.form.get('nodeSheet')
        fileDict['attrSheet'] = request.form.get('attrSheet')
        return redirect(url_for('sna_blueprint.nodeSelect', case_num=case_num))
    return render_template("sheetselect.html", sheets=fileDict['sheets'], case_num=case_num)
def SNA2Dand3D(graph, request, case_num, _3D=True, _2D=False, label=False):
    fileDict = dao.getFileDict(case_num)
    systemMeasures = {}
    if graph is None:
        return None, None, None, None

    # make both the attribute map (for 2D) and the color list (for 3D)
    attr = {}
    colorInput = []
    if request.form.get("options") is None:
        i = 0
        for nodeSet in graph.classList:
            attr[nodeSet] = [colors[i], 50]
            colorInput.append(hexColors[colors[i]])
            i += 1
            if i == 8:  # wrap around the eight available colors
                i = 0
    else:
        for nodeSet in graph.classList:
            c = request.form.get(nodeSet + "Color")
            attr[nodeSet] = [c, 50]
            colorInput.append(hexColors[c])

    if request.form.get("removeNodeSubmit") is not None:
        graph.removeNode(request.form.get("a"))

    # Get new node info, if available
    if request.form.get("addNodeSubmit") is not None:
        node = request.form.get("nodeName")
        attrDict = {
            'block': request.form.get("classList"),
            'class': request.form.get("classList")
        }
        i = 0
        while (request.form.get("attribute" + str(i)) not in (None, '')
               and request.form.get("value" + str(i)) not in (None, '')):
            key = request.form.get("attribute" + str(i))
            value = request.form.get("value" + str(i))
            if request.form.get("weight" + str(i)) not in (None, ''):
                value = [value, {'W': request.form.get("weight" + str(i))}]
            attrDict.update({key: value})
            i += 1
        links = []
        j = 0
        while request.form.get("link" + str(j)) is not None:
            links.append(request.form.get("link" + str(j)))
            j += 1
        graph.addNode(node, attrDict, links)

    if request.form.get("eventSubmit") is not None:
        fileDict['SNA_Events'] = 'static/sample/sna/suicide_attacks_subset.xlsx'
        # TODO: add a blueprint route for the event sheet here
        inputFile = fileDict['SNA_Events']
        iters = int(request.form.get("iters"))
        systemMeasures['SentimentDict'] = True
        fileDict['SentimentChange'] = graph.event_update(inputFile, iters)

    # Add the system measures dictionary; each measure is optional
    try:
        systemMeasures["Node Connectivity"] = graph.node_connectivity()  # currently only returning zero...
    except Exception:
        pass  # no node connectivity
    try:
        systemMeasures["Average Clustering"] = graph.average_clustering()
    except Exception:
        pass  # no average clustering
    # try:
    #     systemMeasures["Average Degree Connectivity"] = graph.average_degree_connectivity()
    # except Exception:
    #     pass  # no average degree connectivity
    try:
        systemMeasures["Degree Assortativity"] = graph.degree_assortativity()
    except Exception:
        pass  # no degree assortativity
    try:
        systemMeasures["Center"] = graph.center()
    except Exception:
        pass  # no center
    try:
        systemMeasures["Diameter"] = graph.diameter()
    except Exception:
        pass  # no diameter
    try:
        systemMeasures["Periphery"] = graph.periphery()
    except Exception:
        pass  # no periphery
    systemMeasures["Overall Sentiment"] = graph.sentiment(
        types=["Belief", "Audience", "Actor"], key='W')
    # try:
    #     systemMeasures["Triadic Census"] = graph.triadic_census()
    # except Exception:
    #     pass  # no triadic census
    # systemMeasures["Attribute Assortivity"] = graph.attribute_assortivity()  # Which attributes...? UI?
    if graph.is_strongly_connected():
        systemMeasures["Connection Strength"] = "Strong"
    elif graph.is_weakly_connected():
        systemMeasures["Connection Strength"] = "Weak"

    # Add the system measure descriptions to the dictionary
    systemMeasures["Description"] = {
        'Average Clustering':
            'A high clustering coefficient indicates that actors within the '
            'network are closely connected to a statistically significant '
            'degree. It is a sophisticated measure of the density of a network.',
        'Connection Strength':
            'Knowing whether a graph is strongly or weakly connected is helpful '
            'because it demonstrates the robustness of the graph based on its '
            'redundancy. If a graph is strongly connected, there are two links '
            'between each actor in the network, one in each direction. A '
            'strongly connected graph thus would likely have more redundant '
            'communication/information flow and be more difficult to perturb '
            'than a weakly connected graph.',
        'Resilience':
            'The baseline value for resilience is determined by perturbing each '
            'community in the network and measuring the mean shortest path '
            'average over several perturbations. The results are scaled on a '
            'normal curve across all cliques and a percentile resilience is '
            'determined for each clique. A high percentile resilience denotes '
            'resilience to perturbation. These values are visualized on a color '
            'spectrum from red to blue, where red is low relative resilience '
            'and blue is high relative resilience.',
        'AddNode':
            'Introduces a new node to the network, complete with a user-defined '
            'name, user-defined attributes and known links. Using the DRAG link '
            'prediction model, node attributes are used to form likely '
            'connections and intelligently model the effects of external change '
            'on the network. New nodes and their predicted links are colored '
            'red for easy identification.',
        'RemoveNode':
            'Removes the node inputted in the box below and any links to which '
            'it belongs.',
        'eigenvector':
            'Centrality measure which sums the centralities of all adjacent nodes.',
        'betweenness':
            'Centrality based on the shortest path that passes through the node.',
        'sentiment':
            'The sum of all actor sentiments towards this node.',
        'Overall Sentiment':
            'The sum of all actor sentiments towards this node.',
        'Cliques':
            'Influence communities are detected in a two-step Louvain modularity '
            'optimization. First, the core myth-symbol complexes are identified '
            'and named. Second, very proximate actors are grouped with the '
            'myth-symbol complex to form a full influence network.',
        'EventAddition':
            'Choose a number of iterations to simulate event addition into the '
            'network. Events are drawn from the input file.',
    }

    # Find cliques when requested
    if request.form.get("cliqueSubmit") is not None:
        cliques, names = graph.communityDetection()
        systemMeasures["Cliques"] = []
        fileDict["Cliques"] = []
        for name, clique in zip(names, cliques):
            central = (graph.G.node[name].get('Name')[0]
                       if graph.G.node[name].get('Name') is not None else name)
            nodes = []
            json_clique = {}
            i = 0
            for node in clique.nodes():
                nodes.append(graph.G.node[node].get('Name')[0]
                             if graph.G.node[node].get('Name') is not None else node)
                json_clique["node" + str(i)] = node
                i += 1
            systemMeasures["Cliques"].append((central, nodes))
            fileDict["Cliques"].append((central, json_clique))

    # Calculate resilience when requested
    if request.form.get("resilienceSubmit") is not None:
        try:
            # gets a scaled resilience value for each clique identified in the network
            (systemMeasures["Baseline"], systemMeasures["Resilience"],
             systemMeasures["Robustness"]) = graph.calculateResilience()

            # Add colors for each resilience measure: map each percentile onto
            # a red-to-blue spectrum
            def addColors(systemMeasure):
                for cluster in systemMeasure:
                    percentile = int(systemMeasure[cluster])
                    b = int(percentile)
                    r = int(100 - percentile)
                    systemMeasure[cluster] = [percentile, r, b]

            addColors(systemMeasures["Baseline"])
            addColors(systemMeasures["Resilience"])
            addColors(systemMeasures["Robustness"])
        except nx.exception.NetworkXError:
            systemMeasures["Resilience"] = "Could not calculate resilience, NetworkX error."

    copy_of_graph = copy.deepcopy(graph)
    fileDict['copy_of_graph'] = copy_of_graph

    # return based on inputs
    ret3D = graph.create_json(graph.classList, colorInput) if _3D else None
    label = not label and len(graph.nodes) < 20  # auto-label small graphs
    ret2D = graph.plot_2D(attr, label) if _2D else None
    fileDict['jgdata'] = ret3D
    return ret3D, ret2D, attr, systemMeasures
def visualize():
    case_num = request.args.get('case_num', None)
    fileDict = dao.getFileDict(case_num)
    GSA_file_CSV = fileDict.get('GSA_Input_CSV')
    GSA_file_SHP = fileDict.get('GSA_Input_SHP')
    GSA_file_SVG = fileDict.get('GSA_Input_SVG')
    NLP_dir = fileDict.get('NLP_Input_corpus')
    NLP_urls = fileDict.get('NLP_LDP_terms')
    NLP_file_sentiment = fileDict.get('NLP_Input_Sentiment')
    NLP_new_example_file = fileDict.get('NLP_New_Example')
    research_question = fileDict.get('research_question')
    tropes = fileDict.get('tropes')
    graph = fileDict.get('graph')
    GSA_sample = fileDict.get('GSA_data')

    error = False
    auto = None
    sp_dyn = None
    svgNaming = None
    if GSA_sample is not None:
        svgNaming = GSA_sample[0]
        auto = GSA_sample[1:3]
        sp_dyn = [mat for mat in GSA_sample[3:]]

    gsaCSV = None
    mymap = None
    nameMapping = None
    if (GSA_file_CSV is not None and GSA_file_SHP is not None
            and fileDict.get('GSA_meta') is not None):
        gsaCSV, mymap, nameMapping = gsa_service.tempParseGSA(
            GSA_file_CSV, GSA_file_SHP,
            fileDict['GSA_meta'][0], fileDict['GSA_meta'][1])
    if GSA_file_SVG is not None:
        gsaCSV, mymap = gsa_service.parseGSA(GSA_file_CSV, GSA_file_SVG)
    if gsaCSV is None and mymap is True:
        error = True
        mymap = None

    sna_service.prep(graph)
    jgdata, SNAbpPlot, attr, systemMeasures = sna_service.SNA2Dand3D(
        graph, request, case_num, _2D=True)
    fileDict['SNAbpPlot'] = '/' + SNAbpPlot if SNAbpPlot is not None else None
    copy_of_graph = copy.deepcopy(graph)
    fileDict['copy_of_graph'] = copy_of_graph

    if NLP_dir:
        nlp_summary, nlp_entities, nlp_network, nlp_sources, nlp_tropes = nlp_service.nlp_dir(NLP_dir)
    else:
        nlp_summary, nlp_entities, nlp_network, nlp_sources, nlp_tropes = nlp_service.nlp_urls(NLP_urls)
    nlp_sentiment = nlp_service.sentiment(NLP_file_sentiment)
    research_question = scraper_service.scrape(research_question)

    nlp_new_example_sentiment = ''
    nlp_new_example_relationship = ''
    if NLP_new_example_file is not None:
        nlp_new_example_sentiment = NLP_TO_NETWORK.sentiment_mining(NLP_new_example_file)
        nlp_new_example_relationship = NLP_TO_NETWORK.relationship_mining(NLP_new_example_file)
        nlp_summary = 'Enable'

    return render_template(
        'visualizations.html',
        research_question=research_question,
        SNAbpPlot=SNAbpPlot,
        graph=copy_of_graph,
        attr=attr,
        colors=sna_service.colors,
        gsaCSV=gsaCSV,
        mymap=mymap,
        svgNaming=svgNaming,
        nameMapping=nameMapping,
        jgdata=jgdata,
        tropes=tropes,
        GSA_sample=GSA_sample,
        auto=auto,
        sp_dyn=sp_dyn,
        error=error,
        case_num=case_num,
        nlp_sentiment=nlp_sentiment,
        nlp_summary=nlp_summary,
        nlp_entities=nlp_entities,
        nlp_sources=nlp_sources,
        nlp_tropes=nlp_tropes,
        systemMeasures=systemMeasures,
        NLP_new_example_file=NLP_new_example_file,
        nlp_new_example_sentiment=nlp_new_example_sentiment,
        nlp_new_example_relationship=nlp_new_example_relationship,
    )