def quality(self):
    """Export the whole actor/message/system graph as a D3 matrix JSON file.

    Builds a node/link structure from the non-redundant AMS graph and
    writes it to ucframe/public/data/ams_matrix.json for the quality page.

    Returns:
        dict: template context with page='quality'.
    """
    import json

    nodes, edges = utils.get_no_redundant_graph()
    # Map node categories to D3 cluster groups.  # clustering is needed
    type2group = {'Actor': 1, 'Message': 2, 'System': 3}
    export_data = {
        # prepare the nodes
        'nodes': [{'name': anode['text'],
                   'group': type2group[anode['type']]}
                  for anode in nodes],
        # prepare the edges
        'links': [{'source': aedge['source-index'],
                   'target': aedge['target-index'],
                   'value': aedge['count']}
                  for aedge in edges],
    }
    # return the json data to the page visualization;
    # 'with' guarantees the handle is closed even if serialization fails
    with open('ucframe/public/data/ams_matrix.json', 'w') as jsonfile:
        json.dump(export_data, jsonfile)
    return dict(page='quality')
def contribute_pre(self, **kw):
    """Split submitted form fields into precondition fields vs. pass-through.

    Fields with an empty value are silently dropped.  Fields named
    'PreCondition-Status-<n>' or 'PreCondition-Condition-<n>' (n = 1..9)
    go into self_input; every other non-empty field is accumulated in
    'parameters' and passed on to the next page.  The current AMS graph
    is also returned for the embedded view test.

    Returns:
        dict with 'parameters', 'self_input', 'nodes' and 'edges'.
    """
    #[seLain] TOFIX: the hard-coded 1..9 range caps the number of
    # precondition rows; derive the limit from the form if possible.
    self_parameters = set(
        prefix + str(i)
        for i in range(1, 10)
        for prefix in ("PreCondition-Status-", "PreCondition-Condition-")
    )
    other_input = {}
    self_input = {}
    for key in kw:
        value = kw[key]
        if value == '':
            # empty fields are discarded entirely (original behavior)
            continue
        if key in self_parameters:
            self_input[key] = value
        else:
            #[seLain] passed to the next page, for params accumulation
            other_input[key] = value
    # for embedded view test
    export_nodes_data, export_edges_data = utils.get_no_redundant_graph()
    return {
        'parameters': other_input,
        'self_input': self_input,
        'nodes': export_nodes_data,
        'edges': export_edges_data
    }
def view(self):
    """Render the graph view page and dump the current graph to JSON files.

    Writes the non-redundant AMS graph nodes to graph.json and the edges
    to edges.json, then returns both for the template.

    Returns:
        dict: page name plus 'nodes' and 'edges'.
    """
    import json

    export_nodes_data, export_edges_data = utils.get_no_redundant_graph()
    # return the json data to the page visualization;
    # 'with' ensures the handles are closed even on serialization errors
    with open('graph.json', 'w') as jsonfile:
        json.dump(export_nodes_data, jsonfile)
    with open('edges.json', 'w') as jsonfile:
        json.dump(export_edges_data, jsonfile)
    return dict(page='view',
                nodes=export_nodes_data,
                edges=export_edges_data)
def postcon_metrics(self, **kw):
    """Compute CRT metrics for the AMS terms used in the postcondition input.

    Extracts AMS terms from the submitted postcondition strings (the form
    field *names* carry the text), then evaluates the Contributor Ratio
    of a Term (CRT) per term and on average.

    Returns:
        dict: {'CRT': {'terms': {...}, 'average': float, 'description': str}}
    """
    import metrics

    postcon_list = [key.strip() for key in kw.keys()]
    # Collect the distinct AMS terms appearing in the postconditions.
    current_terms = []
    for postcon in postcon_list:
        for aterm in postcon.split(' '):
            if self.is_ams_term(aterm) and aterm not in current_terms:
                current_terms.append(aterm)
    # single-string form prints identically under Python 2 and 3
    print('current_terms in postcon_metrics %s' % current_terms)
    # metrics evaluation -- graph currently unused here, call kept as-is
    nodes, edges = utils.get_no_redundant_graph()
    # CRT computation
    CRT_data = {}  # data to be returned
    CRT_dict = metrics.CRT()
    CRT_data['terms'] = {}
    # Average the CRT over only the terms used in this context.
    CRT_current_terms = []
    for aterm in current_terms:
        if aterm in CRT_dict:  # direct membership test, no keys() list
            CRT_current_terms.append(CRT_dict[aterm])
            CRT_data['terms'][aterm] = CRT_dict[aterm]  # only context terms
    # Guard the average against an empty term list.
    if CRT_current_terms:
        CRT_average = round(
            sum(CRT_current_terms) / len(CRT_current_terms), 2)
    else:
        CRT_average = 0
    CRT_data['average'] = CRT_average
    CRT_data['description'] = 'Contributor Ratio of a Term (CRT)' +'\\n\\n' + \
        '在目前輸入頁面中您所使用的 AMS 詞彙(Terms), ' + \
        '平均來說也被多少比例的其他使用案例貢獻者所使用.' + \
        '\\n\\n' + '舉例來說: PuzzleBreaker(57%) 代表有 57% 的貢獻者' + \
        '也使用了 PuzzleBreaker 在他們的使用案例中.'
    return {'CRT': CRT_data}
def create_tagtable(self):
    """Regenerate the Tag Table config file from the current AMS graph.

    Each graph node becomes one Tag Table entry keyed by its text,
    recorded as an Instance of the node's category.  The edge data is
    fetched but not used here.
    """
    # os.path.join is the idiomatic, portable path constructor
    table_location = os.path.join('ucframe', 'model', 'Tag',
                                  'Tag Table.txt')
    TagTable = ConfigObj(table_location, encoding='UTF8')
    export_nodes_data, export_edges_data = utils.get_no_redundant_graph()
    for anode in export_nodes_data:
        text = anode['text']
        TagTable[text] = {
            'Name': text,
            'RealWord': text,
            'Category': anode['type'],
            'Type': 'Instance',
            'Abstraction': text
        }
    TagTable.write()
def cuc_metrics(self, **kw):
    """Compute CRT and NLLC metrics for the submitted use-case events.

    Extracts AMS terms from the event descriptions (the form field
    *names* carry the text) and evaluates:
      * CRT  - Contributor Ratio of a Term, per term plus the average
      * NLLC - Normalized Local Logical Coherence of the event list,
               plus the average NLLC over all contributions

    Returns:
        dict: {'CRT': {...}, 'NLLC': {...}}
    """
    import metrics

    event_list = [key.strip() for key in kw.keys()]
    # Collect the distinct AMS terms appearing in the events.
    current_terms = []
    for event in event_list:
        for aterm in event.split(' '):
            if self.is_ams_term(aterm) and aterm not in current_terms:
                current_terms.append(aterm)
    # single-string form prints identically under Python 2 and 3
    print('current_terms in cuc_metrics %s' % current_terms)
    # metrics evaluation -- graph currently unused here, call kept as-is
    # (a dead commented-out NOS/NOR sender/receiver count was removed)
    nodes, edges = utils.get_no_redundant_graph()
    # CRT computation
    CRT_data = {}  # data to be returned
    CRT_dict = metrics.CRT()
    CRT_data['terms'] = {}
    # Average the CRT over only the terms used in this context.
    CRT_current_terms = []
    for aterm in current_terms:
        if aterm in CRT_dict:  # direct membership test, no keys() list
            CRT_current_terms.append(CRT_dict[aterm])
            CRT_data['terms'][aterm] = CRT_dict[aterm]  # only context terms
    # Bug fix: the original divided unconditionally and raised
    # ZeroDivisionError when no terms matched; postcon_metrics already
    # guards this case, so mirror that behavior here.
    if CRT_current_terms:
        CRT_average = round(
            sum(CRT_current_terms) / len(CRT_current_terms), 2)
    else:
        CRT_average = 0
    CRT_data['average'] = CRT_average
    CRT_data['description'] = 'Contributor Ratio of a Term (CRT)' +'\\n\\n' + \
        '在目前輸入頁面中您所使用的 AMS 詞彙(Terms), ' + \
        '平均來說也被多少比例的其他使用案例貢獻者所使用.' + \
        '\\n\\n' + '舉例來說: PuzzleBreaker(57%) 代表有 57% 的貢獻者' + \
        '也使用了 PuzzleBreaker 在他們的使用案例中.'
    # NLLC computation
    NLLC_data = {}
    NLLC_result = metrics.NLLC(event_list)
    NLLC_data['value'] = NLLC_result
    NLLC_data['description'] = 'Normalized Local Logical Conherence (NLLC)' +'\\n\\n' + \
        '在目前輸入頁面中您所有敘述之間的邏輯連貫性高低.' + \
        '通常越高的 NLLC 代表所有敘述整體越容易被閱讀及理解.' + \
        '\\n\\n' + '我們提供了 Average NLLC 做為參考.' + \
        '這是其他已經被貢獻的使用案例之平均 NLLC 值.'
    ANLLC_result = metrics.ANLLC()
    NLLC_data['average'] = ANLLC_result
    return {'CRT': CRT_data, 'NLLC': NLLC_data}
def quality_indiv(self):
    """Export a pairwise similarity matrix of Message nodes as D3 JSON.

    For every ordered pair of Message-type nodes, similarity is the sum
    of min(shared outgoing edge counts).  The matrix is written to
    ucframe/public/data/ams_matrix_indiv.json.

    Bug fixes vs. the original:
      * string/int comparisons used 'is', which only worked by CPython
        interning / small-int caching accident -- now '==' / '!='
      * nodes.index(anode) was recomputed per edge (O(n) each, and wrong
        for duplicate node dicts) -- now taken from enumerate
      * dict.keys().index(...) (Python-2-only, O(n) per link) replaced
        by one hoisted key list with enumerate indices
      * the output file is now closed via 'with' even on errors

    Returns:
        dict: template context with page='quality_indiv'.
    """
    import json

    type_constraint = "Message"
    nodes, edges = utils.get_no_redundant_graph()
    export_data = {'nodes': [], "links": []}
    type2group = {'Actor': 1, 'Message': 2, 'System': 3}  # clustering is needed
    # prepare the nodes; only the Message type is considered this time
    # (NOTE(review): original comments said "actor type" -- the code
    # clearly filters on 'Message')
    for anode in nodes:
        if anode['type'] == type_constraint:
            export_data['nodes'].append({
                'name': anode['text'],
                'group': type2group[anode['type']]
            })
    # Build, per Message node, a map of target-index -> edge count over
    # its outgoing edges.
    node_array = {}
    for node_index, anode in enumerate(nodes):
        if anode['type'] != type_constraint:
            continue
        outgoing = {}
        for aedge in edges:
            if aedge['source-index'] == node_index:
                outgoing[aedge['target-index']] = aedge['count']
        node_array[anode['text']] = outgoing
    # Compare similarity for every ordered pair (self-pairs included,
    # matching the original behavior).
    ###### DO THE CODE REVIEW: the self-link / symmetric-duplicate
    ###### semantics are preserved from the original but questionable.
    key_list = list(node_array)
    for index_1, key_1 in enumerate(key_list):
        node_1 = node_array[key_1]
        for index_2, key_2 in enumerate(key_list):
            node_2 = node_array[key_2]
            overlaps = [
                min(node_1[x], node_2[x]) for x in node_1 if x in node_2
            ]
            export_data['links'].append({
                'source': index_1,
                'target': index_2,
                'value': sum(overlaps)
            })
    # return the json data to the page visualization
    with open('ucframe/public/data/ams_matrix_indiv.json', 'w') as jsonfile:
        json.dump(export_data, jsonfile)
    return dict(page='quality_indiv')