def get_docglobalm2m(globalm2m, decomposition, doc_m2m_prob_threshold=None, doc_m2m_overlap_threshold=None):
    # experiment = mass2motif.experiment
    experiment = Experiment.objects.get(pk=decomposition.experiment_id)
    ## default prob_threshold 0.05, default overlap_threshold 0.0
    if not doc_m2m_prob_threshold:
        doc_m2m_prob_threshold = get_option('doc_m2m_prob_threshold', experiment=experiment)
        if doc_m2m_prob_threshold:
            doc_m2m_prob_threshold = float(doc_m2m_prob_threshold)
        else:
            doc_m2m_prob_threshold = 0.05
    if not doc_m2m_overlap_threshold:
        doc_m2m_overlap_threshold = get_option('doc_m2m_overlap_threshold', experiment=experiment)
        if doc_m2m_overlap_threshold:
            doc_m2m_overlap_threshold = float(doc_m2m_overlap_threshold)
        else:
            doc_m2m_overlap_threshold = 0.0
    ## Note that we need to filter on *decomposition* when searching the database for *DocumentGlobalMass2Motif*.
    ## For an LDA experiment, *DocumentMass2Motif* has no *experiment* entry, so that filter is not needed there.
    dm2m = DocumentGlobalMass2Motif.objects.filter(
        mass2motif=globalm2m,
        decomposition=decomposition,
        probability__gte=doc_m2m_prob_threshold,
        overlap_score__gte=doc_m2m_overlap_threshold).order_by('-probability')
    return dm2m
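# The option-with-fallback pattern above (fetch, cast to float, else use a
# default) recurs throughout this module. A minimal sketch of a shared helper;
# the name get_float_option is hypothetical, and it assumes get_option returns
# a string or None as it does above:
def get_float_option(key, experiment, default):
    value = get_option(key, experiment=experiment)
    return float(value) if value else default

# e.g. doc_m2m_prob_threshold = get_float_option('doc_m2m_prob_threshold', experiment, 0.05)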
def get_degree_matrix(request, mf_id):
    if request.is_ajax():
        mfe = MultiFileExperiment.objects.get(id=mf_id)
        if not mfe.degree_matrix:
            links = MultiLink.objects.filter(
                multifileexperiment=mfe).order_by('experiment__name')
            individuals = [l.experiment for l in links]
            deg_vals = []
            for individual in individuals:
                doc_m2m_threshold = get_option('doc_m2m_threshold', experiment=individual)
                if doc_m2m_threshold:
                    doc_m2m_threshold = float(doc_m2m_threshold)
                else:
                    doc_m2m_threshold = 0.0
                default_score = get_option('default_doc_m2m_score', experiment=individual)
                if not default_score:
                    default_score = 'probability'
                new_row = []
                motif_set = individual.mass2motif_set.all().order_by('name')
                for motif in motif_set:
                    motif_name = motif.name
                    m2m = Mass2Motif.objects.get(name=motif_name, experiment=individual)
                    docs = get_docm2m(m2m)
                    # Modified to keep the degrees here consistent with the plots:
                    # dm2m = motif.documentmass2motif_set.all()
                    # if default_score == 'probability':
                    #     new_row.append(len([d for d in dm2m if d.probability > doc_m2m_threshold]))
                    # else:
                    #     new_row.append(len([d for d in dm2m if d.overlap_score > doc_m2m_threshold]))
                    new_row.append(len(docs))
                deg_vals.append(new_row)
            deg_vals = [list(row) for row in zip(*deg_vals)]
            deg_vals = [[motif_set[i].name, motif_set[i].annotation] + dv
                        for i, dv in enumerate(deg_vals)]
            data = json.dumps(deg_vals)
            mfe.degree_matrix = jsonpickle.encode(deg_vals)
            mfe.save()
        else:
            deg_vals = jsonpickle.decode(mfe.degree_matrix)
            data = json.dumps(deg_vals)
        return HttpResponse(data, content_type='application/json')
    else:
        raise Http404
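# A minimal illustration (hypothetical values, for documentation only) of the
# zip(*...) transpose used in get_degree_matrix: it flips rows-per-experiment
# into rows-per-motif.
def _transpose_example():
    deg_vals = [[3, 1], [2, 5]]                         # [experiment][motif]
    transposed = [list(row) for row in zip(*deg_vals)]
    assert transposed == [[3, 2], [1, 5]]               # [motif][experiment]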
def get_parents_decomposition(motif_id, decomposition, vo_id=None, experiment=None):
    if vo_id:
        viz_options = VizOptions.objects.get(id=vo_id)
        edge_choice = viz_options.edge_choice
        edge_thresh = viz_options.edge_thresh
    elif experiment:
        edge_choice = get_option('default_doc_m2m_score', experiment=experiment)
        edge_thresh = get_option('doc_m2m_threshold', experiment=experiment)
    else:
        edge_choice = 'probability'
        edge_thresh = 0.05
    motif = GlobalMotif.objects.get(id=motif_id)
    parent_data = []
    if edge_choice == 'probability':
        docm2m = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            probability__gte=edge_thresh,
            decomposition=decomposition).order_by('-probability')
    elif edge_choice == 'overlap_score':
        docm2m = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            overlap_score__gte=edge_thresh,
            decomposition=decomposition).order_by('-overlap_score')
    elif edge_choice == 'both':
        docm2m = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            overlap_score__gte=edge_thresh,
            probability__gte=edge_thresh,
            decomposition=decomposition).order_by('-overlap_score')
    else:
        # fall back to probability for any unrecognised choice
        docm2m = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            probability__gte=edge_thresh,
            decomposition=decomposition).order_by('-probability')
    for dm in docm2m:
        document = dm.document
        parent_data.append(
            get_parent_for_plot_decomp(decomposition, document,
                                       motif=motif, edge_choice=edge_choice))
    return parent_data
def view_parents(request, mass2motif_id, decomposition_id):
    context_dict = {}
    decomposition = Decomposition.objects.get(id=decomposition_id)
    experiment = decomposition.experiment
    mass2motif = GlobalMotif.objects.get(id=mass2motif_id)
    context_dict['mass2motif'] = mass2motif
    context_dict['experiment'] = experiment
    context_dict['decomposition'] = decomposition
    # Thought -- should these options be decomposition specific?
    edge_choice = get_option('default_doc_m2m_score', experiment=experiment)
    edge_thresh = get_option('doc_m2m_threshold', experiment=experiment)
    if edge_choice == 'probability':
        dm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=mass2motif,
            probability__gte=edge_thresh,
            decomposition=decomposition).order_by('-probability')
    elif edge_choice == 'overlap_score':
        dm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=mass2motif,
            overlap_score__gte=edge_thresh,
            decomposition=decomposition).order_by('-overlap_score')
    elif edge_choice == 'both':
        dm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=mass2motif,
            overlap_score__gte=edge_thresh,
            probability__gte=edge_thresh,
            decomposition=decomposition).order_by('-overlap_score')
    else:
        dm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=mass2motif,
            probability__gte=edge_thresh,
            decomposition=decomposition).order_by('-probability')
    originalfeatures = Mass2MotifInstance.objects.filter(
        mass2motif=mass2motif.originalmotif)
    context_dict['motif_features'] = originalfeatures
    context_dict['dm2ms'] = dm2ms
    return render(request, 'decomposition/view_parents.html', context_dict)
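# The edge_choice branching above is repeated in get_parents_decomposition,
# view_parents, make_intensity_graph and make_word_graph. A sketch of a shared
# helper that could replace it; the name filter_docm2ms is hypothetical, and
# the final return mirrors the probability fallback used in view_parents:
def filter_docm2ms(mass2motif, decomposition, edge_choice, edge_thresh):
    qs = DocumentGlobalMass2Motif.objects.filter(
        mass2motif=mass2motif, decomposition=decomposition)
    if edge_choice == 'overlap_score':
        return qs.filter(overlap_score__gte=edge_thresh).order_by('-overlap_score')
    if edge_choice == 'both':
        return qs.filter(probability__gte=edge_thresh,
                         overlap_score__gte=edge_thresh).order_by('-overlap_score')
    # 'probability', and the fallback for any unrecognised choice
    return qs.filter(probability__gte=edge_thresh).order_by('-probability')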
def get_doc_topics(request, decomposition_id, doc_id):
    document = Document.objects.get(id=doc_id)
    decomposition = Decomposition.objects.get(id=decomposition_id)
    score_type = get_option('default_doc_m2m_score', experiment=document.experiment)
    plot_fragments = [
        get_parent_for_plot_decomp(decomposition, document,
                                   edge_choice=score_type, get_key=True)
    ]
    return HttpResponse(json.dumps(plot_fragments), content_type='application/json')
import django
django.setup()

from basicviz.models import Experiment, User, UserExperiment, SystemOptions
from options.views import get_option

exp_id_set = set()
for opt in SystemOptions.objects.all():
    exp_id_set.add(opt.experiment_id)

## check whether anything is wrong with fetching the doc_m2m_threshold data
for exp_id in exp_id_set:
    if exp_id:
        experiment = Experiment.objects.get(id=exp_id)
        doc_m2m_threshold = get_option('doc_m2m_threshold', experiment=experiment)
        if not doc_m2m_threshold:
            print("!!!get_option for experiment {} failed".format(exp_id))
            break

'''
Updating Rules:
for one experiment:
    if there is no doc_m2m_threshold setting:
        1. if no default_doc_m2m_score => DO NOTHING
        2. if default_doc_m2m_score is 'probability' => DO NOTHING
        3. if default_doc_m2m_score is 'overlap_score'
            => probability threshold to 0.0
            => overlap threshold to the original global doc_m2m_threshold
        4. if default_doc_m2m_score is 'both'
'''
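# A sketch of how rules 1-3 above might be applied (rule 4, 'both', is
# truncated in the notes, so it is omitted here). The key names match those
# read by get_docglobalm2m; the value/experiment fields of SystemOptions are
# assumed from how the model is queried elsewhere in this codebase -- verify
# against the real model before running.
def migrate_experiment_thresholds(experiment, global_threshold):
    if get_option('doc_m2m_threshold', experiment=experiment):
        return  # the rules only cover experiments without their own threshold
    score = get_option('default_doc_m2m_score', experiment=experiment)
    if score == 'overlap_score':
        SystemOptions.objects.create(
            experiment=experiment, key='doc_m2m_prob_threshold', value='0.0')
        SystemOptions.objects.create(
            experiment=experiment, key='doc_m2m_overlap_threshold',
            value=str(global_threshold))
    # rules 1 and 2 (no score set, or 'probability') require no change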
def get_doc_table(request, mf_id, motif_name):
    mfe = MultiFileExperiment.objects.get(id=mf_id)
    links = MultiLink.objects.filter(multifileexperiment=mfe).order_by('experiment__name')
    individuals = [l.experiment for l in links]
    individual_motifs = {}
    for individual in individuals:
        thismotif = Mass2Motif.objects.get(experiment=individual, name=motif_name)
        individual_motifs[individual] = thismotif
    doc_table = []
    individual_names = []
    peaksets = {}
    peakset_list = []
    peakset_masses = []
    for i, individual in enumerate(individuals):
        individual_names.append(individual.name)
        docs = get_docm2m(individual_motifs[individual])
        for doc in docs:
            peakset_index = -1
            ii = doc.document.intensityinstance_set.all()
            if len(ii) > 0:
                ii = ii[0]
                ps = ii.peakset
                if ps not in peaksets:
                    peaksets[ps] = {}
                    peakset_list.append(ps)
                    peakset_masses.append(ps.mz)
                peakset_index = peakset_list.index(ps)
                peaksets[ps][individual] = ii.intensity
            mz = 0
            rt = 0
            md = jsonpickle.decode(doc.document.metadata)
            if 'parentmass' in md:
                mz = md['parentmass']
            elif 'mz' in md:
                mz = md['mz']
            elif '_' in doc.document.name:
                split_name = doc.document.name.split('_')
                mz = float(split_name[0])
            if 'rt' in md:
                rt = md['rt']
            elif '_' in doc.document.name:
                split_name = doc.document.name.split('_')
                rt = float(split_name[1])
            doc_table.append([rt, mz, i, doc.probability, peakset_index])

    # Add the peaks to the peakset object that are not linked to a document
    # (i.e. the MS1 peak is present, but it wasn't fragmented)
    for ps in peaksets:
        # Grab the intensity instances for this peakset
        intensity_instances = ps.intensityinstance_set.all()
        # Extract the individual experiments that are represented
        individuals_present = [inst.experiment for inst in intensity_instances]
        # Loop over the experiments
        for individual in individuals:
            # If the experiment is not in the current peakset but there is an intensity instance
            if (individual not in peaksets[ps]) and individual in individuals_present:
                # Find the intensity instance
                int_int = [x for x in intensity_instances if x.experiment == individual]
                peaksets[ps][individual] = int_int[0].intensity
                print(ps, individual, int_int[0].intensity)

    intensity_table = []
    unnormalised_intensity_table = []
    counts = []
    final_peaksets = []

    min_count_option = get_option('heatmap_minimum_display_count', experiment=individuals[0])
    # min_count_options = SystemOptions.objects.filter(key='heatmap_minimum_display_count')
    if min_count_option:
        min_count = int(min_count_option)
    else:
        min_count = 5
    log_intensities_option = get_option('log_peakset_intensities', experiment=individuals[0])
    # log_intensities_options = SystemOptions.objects.filter(key='log_peakset_intensities')
    if log_intensities_option:
        log_peakset_intensities = (log_intensities_option == 'true')
    else:
        log_peakset_intensities = True
    normalise_heatmap_option = get_option('heatmap_normalisation', experiment=individuals[0])
    if not normalise_heatmap_option:
        normalise_heatmap_option = 'none'

    for peakset in peaksets:
        new_row = []
        for individual in individuals:
            new_row.append(peaksets[peakset].get(individual, 0))
        count = sum([1 for v in new_row if v > 0])
        if count >= min_count:
            nz_vals = [v for v in new_row if v > 0]
            if log_peakset_intensities:
                nz_vals = [np.log(v) for v in nz_vals]
                new_row = [np.log(v) if v > 0 else 0 for v in new_row]
            me = sum(nz_vals) / (1.0 * len(nz_vals))
            va = sum([v ** 2 for v in nz_vals]) / len(nz_vals) - me ** 2
            va = math.sqrt(va)
            maxval = max(nz_vals)
            if normalise_heatmap_option == 'none':
                intensity_table.append(new_row)
                unnormalised_intensity_table.append(new_row)
                counts.append(count)
                final_peaksets.append(peakset)
            elif normalise_heatmap_option == 'max':
                new_row_n = [v / maxval for v in new_row]
                intensity_table.append(new_row_n)
                unnormalised_intensity_table.append(new_row)
                counts.append(count)
                final_peaksets.append(peakset)
            elif normalise_heatmap_option == 'standard' and va > 0:
                # if the variance is zero, skip...
                unnormalised_intensity_table.append(new_row)
                new_row_n = [(v - me) / va if v > 0 else 0 for v in new_row]
                intensity_table.append(new_row_n)
                counts.append(count)
                final_peaksets.append(peakset)

    # Order so that the most popular are at the top
    if len(final_peaksets) > 0:
        temp = zip(counts, intensity_table, final_peaksets)
        temp = sorted(temp, key=lambda x: x[0], reverse=True)
        counts, intensity_table, final_peaksets = zip(*temp)
        intensity_table = list(intensity_table)

        # Change the indexes in the doc table to match the new ordering
        for row in doc_table:
            old_ps_index = row[-1]
            if old_ps_index > -1:
                old_ps = peakset_list[old_ps_index]
                if old_ps in final_peaksets:
                    new_ps_index = final_peaksets.index(old_ps)
                else:
                    new_ps_index = -1
                row[-1] = new_ps_index

    final_peakset_masses = [p.mz for p in final_peaksets]
    final_peakset_rt = [p.rt for p in final_peaksets]
    final_peakset_rt_variance = np.array(final_peakset_rt).var()

    return HttpResponse(json.dumps((individual_names, doc_table, intensity_table,
                                    final_peakset_masses, final_peakset_rt,
                                    unnormalised_intensity_table,
                                    final_peakset_rt_variance)),
                        content_type='application/json')
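# Standalone sketch of the 'standard' heatmap normalisation in get_doc_table:
# zeros mark missing peaks and stay zero, and the non-zero values are z-scored
# against the mean and standard deviation of the non-zero entries only. Any
# log transform is assumed to have been applied already, as above.
def standardise_row(row):
    nz_vals = [v for v in row if v > 0]
    mean = sum(nz_vals) / (1.0 * len(nz_vals))
    std = math.sqrt(sum(v ** 2 for v in nz_vals) / len(nz_vals) - mean ** 2)
    if std == 0:
        return None  # mirrors the code above, which skips zero-variance rows
    return [(v - mean) / std if v > 0 else 0 for v in row]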
def make_decomposition_graph(decomposition, experiment, min_degree=5,
                             topic_scale_factor=5, edge_scale_factor=5,
                             ms1_analysis_id=None, doc_max_size=200,
                             motif_max_size=1000):
    # This is the graph maker for a decomposition experiment.
    ## Note that mass2motif here is a GlobalMotif object.
    ## Get all unique global mass2motifs in preparation for *get_docglobalm2m*.
    all_docm2ms = DocumentGlobalMass2Motif.objects.filter(decomposition=decomposition)
    mass2motifs = set([docm2m.mass2motif for docm2m in all_docm2ms])
    # Find the degrees
    topics = {}
    docm2m_dict = {}
    for mass2motif in mass2motifs:
        topics[mass2motif] = 0
        docm2ms = get_docglobalm2m(mass2motif, decomposition)
        docm2m_dict[mass2motif] = list(docm2ms)
        for d in docm2ms:
            topics[mass2motif] += 1
    to_remove = []
    for topic in topics:
        if topics[topic] < min_degree:
            to_remove.append(topic)
    for topic in to_remove:
        del topics[topic]
    docm2mset = []
    for topic in topics:
        docm2mset += docm2m_dict[topic]

    do_plage_flag = True
    if ms1_analysis_id:
        analysis = DecompositionAnalysis.objects.filter(id=ms1_analysis_id)[0]
        all_logfc_vals = []
        res = DecompositionAnalysisResult.objects.filter(
            analysis=analysis,
            document__in=[docm2m.document for docm2m in docm2mset])
        for analysis_result in res:
            foldChange = analysis_result.foldChange
            logfc = np.log(foldChange)
            if not np.abs(logfc) == np.inf:
                all_logfc_vals.append(logfc)
        min_logfc = np.min(all_logfc_vals)
        max_logfc = np.max(all_logfc_vals)

        ## try to make the graph for plage
        all_plage_vals = []
        for plage_result in DecompositionAnalysisResultPlage.objects.filter(
                analysis=analysis, globalmotif__in=topics.keys()):
            plage_t_value = plage_result.plage_t_value
            all_plage_vals.append(plage_t_value)
        if all_plage_vals:
            min_plage = np.min(all_plage_vals)
            max_plage = np.max(all_plage_vals)
        else:
            do_plage_flag = False

    print("First")
    # Add the topics to the graph
    G = nx.Graph()
    for topic in topics:
        mass2motif = topic.originalmotif
        metadata = jsonpickle.decode(mass2motif.metadata)
        ## try to make the graph for plage
        if ms1_analysis_id and do_plage_flag:
            ## white to green
            lowcol = [255, 255, 255]
            endcol = [0, 255, 0]
            plage_result = DecompositionAnalysisResultPlage.objects.filter(
                analysis=analysis, globalmotif=topic)[0]
            plage_t_value = plage_result.plage_t_value
            plage_p_value = plage_result.plage_p_value
            pos = (plage_t_value - min_plage) / (max_plage - min_plage)
            r = lowcol[0] + int(pos * (endcol[0] - lowcol[0]))
            g = lowcol[1] + int(pos * (endcol[1] - lowcol[1]))
            b = lowcol[2] + int(pos * (endcol[2] - lowcol[2]))
            col = "#{:02x}{:02x}{:02x}".format(r, g, b)
            if plage_p_value is None:
                size = 10
            elif plage_p_value == 0:
                size = motif_max_size
            else:
                size = min(10 - np.log(plage_p_value) * 200, motif_max_size)
            na = mass2motif.short_annotation
            if na:
                na += ' (' + topic.name + ')'
            else:
                na = topic.name
            G.add_node(
                topic.name,
                group=2,
                name=na + ", " + str(plage_t_value) + ", " + str(plage_p_value),
                # size=topic_scale_factor * topics[topic],
                size=size,
                special=True,
                in_degree=topics[topic],
                highlight_colour=col,
                score=1,
                node_id=topic.id,
                is_topic=True)
        else:
            if mass2motif.short_annotation:
                # if 'annotation' in metadata:
                G.add_node(topic.name,
                           group=2,
                           name=mass2motif.short_annotation,
                           size=topic_scale_factor * topics[topic],
                           special=True,
                           in_degree=topics[topic],
                           score=1,
                           node_id=topic.id,
                           is_topic=True)
            else:
                G.add_node(topic.name,
                           group=2,
                           name=topic.name,
                           size=topic_scale_factor * topics[topic],
                           special=False,
                           in_degree=topics[topic],
                           score=1,
                           node_id=topic.id,
                           is_topic=True)
    doc_nodes = []
    print("Second")
    edge_choice = get_option('default_doc_m2m_score', experiment=experiment)
    for docm2m in docm2mset:
        # if docm2m.mass2motif in topics:
        if docm2m.document not in doc_nodes:
            metadata = jsonpickle.decode(docm2m.document.metadata)
            if 'compound' in metadata:
                name = metadata['compound']
            elif 'annotation' in metadata:
                name = metadata['annotation']
            else:
                name = docm2m.document.name
            ## do MS1 expression analysis only when the user has chosen an MS1 analysis setting
            if not ms1_analysis_id:
                G.add_node(docm2m.document.name,
                           group=1,
                           name=name,
                           size=20,
                           type='square',
                           peakid=docm2m.document.name,
                           special=False,
                           in_degree=0,
                           score=0,
                           is_topic=False)
            else:
                analysis_result = DecompositionAnalysisResult.objects.filter(
                    analysis=analysis, document=docm2m.document)[0]
                foldChange = analysis_result.foldChange
                pValue = analysis_result.pValue
                logfc = np.log(foldChange)
                ## lowest: blue, logfc == 0: white, highest: red
                ## use a scaled colour to represent the logfc of the document
                if logfc == np.inf:
                    col = "#FF0000"
                elif logfc == -np.inf:
                    col = "#0000FF"
                else:
                    lowcol = [0, 0, 255]
                    endcol = [255, 0, 0]
                    midcol = [255, 255, 255]
                    if logfc < 0:
                        # if logfc < -3:
                        #     logfc = -3
                        pos = logfc / min_logfc
                        r = midcol[0] + int(pos * (lowcol[0] - midcol[0]))
                        g = midcol[1] + int(pos * (lowcol[1] - midcol[1]))
                        b = midcol[2] + int(pos * (lowcol[2] - midcol[2]))
                    else:
                        pos = logfc / max_logfc
                        r = midcol[0] + int(pos * (endcol[0] - midcol[0]))
                        g = midcol[1] + int(pos * (endcol[1] - midcol[1]))
                        b = midcol[2] + int(pos * (endcol[2] - midcol[2]))
                    col = "#{:02x}{:02x}{:02x}".format(r, g, b)
                ## use size to represent the pValue of the document
                if not pValue:
                    size = 5
                else:
                    size = min(5 - np.log(pValue) * 15, doc_max_size)
                ## represent the document node with name + logfc + pValue
                if pValue:
                    name = "{}, {:.3f}, {:.3f}".format(name, logfc, pValue)
                else:
                    name = "{}, {:.3f}, None".format(name, logfc)
                # name += ", " + str(logfc) + ", " + str(pValue)
                G.add_node(docm2m.document.name,
                           group=1,
                           name=name,
                           size=size,
                           type='square',
                           peakid=docm2m.document.name,
                           special=True,
                           highlight_colour=col,
                           logfc=docm2m.document.logfc,
                           in_degree=0,
                           score=0,
                           is_topic=False)
            doc_nodes.append(docm2m.document)
        if edge_choice == 'probability':
            weight = edge_scale_factor * docm2m.probability
        elif edge_choice == 'both':
            weight = docm2m.overlap_score
        else:
            weight = edge_scale_factor * docm2m.overlap_score
        G.add_edge(docm2m.mass2motif.name, docm2m.document.name, weight=weight)
    print("Third")
    return G
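# Both colour ramps above (white->green for plage t-values, blue/white/red for
# document logfc) are linear interpolations between RGB endpoints. A minimal
# sketch of the shared arithmetic; the helper name lerp_colour is hypothetical:
def lerp_colour(startcol, endcol, pos):
    # startcol/endcol are [r, g, b] lists, pos is in [0, 1]
    channels = [s + int(pos * (e - s)) for s, e in zip(startcol, endcol)]
    return "#{:02x}{:02x}{:02x}".format(*channels)

# e.g. lerp_colour([255, 255, 255], [0, 255, 0], 0.5) -> '#80ff80'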
def make_intensity_graph(request, motif_id, vo_id, decomposition_id):
    decomposition = Decomposition.objects.get(id=decomposition_id)
    experiment = decomposition.experiment
    if not vo_id == 'nan':
        viz_options = VizOptions.objects.get(id=vo_id)
        experiment = viz_options.experiment
        edge_thresh = viz_options.edge_thresh
        edge_choice = viz_options.edge_choice
    elif experiment:
        edge_choice = get_option('default_doc_m2m_score', experiment=experiment)
        edge_thresh = get_option('doc_m2m_threshold', experiment=experiment)
    else:
        edge_choice = 'probability'
        edge_thresh = 0.05
    # colours = ['#404080', '#0080C0']
    colours = ['red', 'blue']
    data_for_json = []
    motif = GlobalMotif.objects.get(id=motif_id)
    originalmotif = motif.originalmotif
    originalfeatures = Mass2MotifInstance.objects.filter(
        mass2motif=originalmotif, probability__gte=0.01)
    globalfeatures = FeatureMap.objects.filter(
        localfeature__in=[o.feature for o in originalfeatures])
    globalfeatures = [g.globalfeature for g in globalfeatures]
    if edge_choice == 'probability':
        docm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            probability__gte=edge_thresh,
            decomposition=decomposition)
    elif edge_choice == 'overlap_score':
        docm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            overlap_score__gte=edge_thresh,
            decomposition=decomposition)
    elif edge_choice == 'both':
        docm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            overlap_score__gte=edge_thresh,
            probability__gte=edge_thresh,
            decomposition=decomposition)
    else:
        docm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            probability__gte=edge_thresh,
            decomposition=decomposition)
    documents = [d.document for d in docm2ms]
    feat_total_intensity = {}
    feat_motif_intensity = {}
    for feature in globalfeatures:
        feat_total_intensity[feature] = 0.0
        feat_motif_intensity[feature] = 0.0
    for feature in globalfeatures:
        fi = DocumentGlobalFeature.objects.filter(
            document__experiment=experiment, feature=feature)
        for ft in fi:
            feat_total_intensity[feature] += ft.intensity
            if ft.document in documents:
                feat_motif_intensity[feature] += ft.intensity
    feat_list = []
    feat_tot_intensity = zip(feat_total_intensity.keys(),
                             feat_total_intensity.values())
    feat_tot_intensity = sorted(feat_tot_intensity, key=lambda x: x[1], reverse=True)
    for feature, tot_intensity in feat_tot_intensity:
        feat_type = feature.name.split('_')[0]
        feat_mz = feature.name.split('_')[1]
        short_name = "{}_{:.4f}".format(feat_type, float(feat_mz))
        feat_list.append([short_name, feat_total_intensity[feature], colours[0]])
        feat_list.append(['', feat_motif_intensity[feature], colours[1]])
        feat_list.append(('', 0, ''))
    data_for_json.append(feat_tot_intensity[0][1])
    data_for_json.append(feat_list)
    return data_for_json
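# Example of the feat_list layout produced above (values made up): for each
# feature there is a total-intensity bar, a motif-only bar, and a separator.
_example_feat_list = [
    ['fragment_119.0864', 1523.7, 'red'],   # total intensity across all documents
    ['', 310.2, 'blue'],                    # intensity within motif-linked documents
    ('', 0, ''),                            # separator row between features
]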
def make_word_graph(request, motif_id, vo_id, decomposition_id):
    decomposition = Decomposition.objects.get(id=decomposition_id)
    experiment = decomposition.experiment
    if not vo_id == 'nan':
        viz_options = VizOptions.objects.get(id=vo_id)
        experiment = viz_options.experiment
        edge_thresh = viz_options.edge_thresh
        edge_choice = viz_options.edge_choice
    elif experiment:
        edge_choice = get_option('default_doc_m2m_score', experiment=experiment)
        edge_thresh = get_option('doc_m2m_threshold', experiment=experiment)
    else:
        edge_choice = 'probability'
        edge_thresh = 0.05
    data_for_json = []
    motif = GlobalMotif.objects.get(id=motif_id)
    originalmotif = motif.originalmotif
    originalfeatures = Mass2MotifInstance.objects.filter(
        mass2motif=originalmotif, probability__gte=0.01)
    globalfeatures = FeatureMap.objects.filter(
        localfeature__in=[o.feature for o in originalfeatures])
    globalfeatures = [g.globalfeature for g in globalfeatures]
    if edge_choice == 'probability':
        docm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            probability__gte=edge_thresh,
            decomposition=decomposition)
    elif edge_choice == 'overlap_score':
        docm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            overlap_score__gte=edge_thresh,
            decomposition=decomposition)
    elif edge_choice == 'both':
        docm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            overlap_score__gte=edge_thresh,
            probability__gte=edge_thresh,
            decomposition=decomposition)
    else:
        docm2ms = DocumentGlobalMass2Motif.objects.filter(
            mass2motif=motif,
            probability__gte=edge_thresh,
            decomposition=decomposition)
    data_for_json.append(len(docm2ms))
    feat_counts = {}
    for feature in globalfeatures:
        feat_counts[feature] = 0
    for dm2m in docm2ms:
        fi = DocumentGlobalFeature.objects.filter(document=dm2m.document)
        for ft in fi:
            if ft.feature in feat_counts:
                feat_counts[ft.feature] += 1
    colours = '#404080'
    feat_list = []
    for feature in feat_counts:
        feat_type = feature.name.split('_')[0]
        feat_mz = feature.name.split('_')[1]
        short_name = "{}_{:.4f}".format(feat_type, float(feat_mz))
        feat_list.append([short_name, feat_counts[feature], colours])
    feat_list = sorted(feat_list, key=lambda x: x[1], reverse=True)
    data_for_json.append(feat_list)
    return data_for_json