def get_input_data(input_dir):
    file_names = (
        'alternatives.xml',
        'categories.xml',
        'categoriesProfiles.xml',
        'credibility.xml',
        'method_parameters.xml',
    )
    trees = get_trees(input_dir, file_names)
    alternatives = px.getAlternativesID(trees['alternatives'])
    categories = px.getCategoriesID(trees['categories'])
    categories_rank = px.getCategoriesRank(trees['categories'], categories)
    categories_profiles = get_categories_profiles_central(trees['categoriesProfiles'])
    credibility = getAlternativesComparisons(trees['credibility'], alternatives,
                                             categories_profiles)
    cut_threshold = px.getParameterByName(trees['method_parameters'], 'cut_threshold')
    check_cut_threshold(cut_threshold)
    ret = {
        'alternatives': alternatives,
        'categories_rank': categories_rank,
        'categories_profiles': categories_profiles,
        'credibility': credibility,
        'cut_threshold': cut_threshold,
    }
    return ret
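# A minimal usage sketch, assuming the (hypothetical) directory 'in' contains
# the five XML files listed in file_names above and that the module-level
# helpers (get_trees, check_cut_threshold, ...) are importable.
data = get_input_data('in')
print(data['alternatives'])      # e.g. ['a01', 'a02', ...]
print(data['cut_threshold'])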
def _sort_profiles(category_profiles, categories_names, rank_tree):
    # Re-index the per-category profiles by rank: rank -> {'classes': ..., 'id': category}
    sorted_categories = {}
    categories_rank = px.getCategoriesRank(rank_tree, categories_names)
    for category in categories_rank:
        rank = categories_rank[category]
        sorted_categories[rank] = {}
        sorted_categories[rank]["classes"] = category_profiles[category]
        sorted_categories[rank]["id"] = category
    return sorted_categories
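# Illustration of the structure _sort_profiles builds, on made-up data and
# without the XMCDA rank tree: ranks simply index the per-category profiles.
category_profiles = {'good': ['b2'], 'medium': ['b1', 'b2'], 'bad': ['b1']}   # hypothetical ids
categories_rank = {'good': 1, 'medium': 2, 'bad': 3}                          # hypothetical ranks
sorted_categories = {
    rank: {'classes': category_profiles[cat], 'id': cat}
    for cat, rank in categories_rank.items()
}
print(sorted_categories[1])   # {'classes': ['b2'], 'id': 'good'}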
def parse_xmcda_files(in_dir):
    xml_crit = PyXMCDA.parseValidate(in_dir + "/criteria.xml")
    xml_alt = PyXMCDA.parseValidate(in_dir + "/alternatives.xml")
    xml_pt = PyXMCDA.parseValidate(in_dir + "/perfs_table.xml")
    xml_assign = PyXMCDA.parseValidate(in_dir + "/assign.xml")
    xml_cat = PyXMCDA.parseValidate(in_dir + "/categories.xml")
    if xml_crit is None:
        error_list.append("Invalid criteria file")
        return
    if xml_alt is None:
        error_list.append("Invalid alternatives file")
        return
    if xml_pt is None:
        error_list.append("Invalid performance table file")
        return
    if xml_assign is None:
        error_list.append("Invalid assignment file")
        return
    if xml_cat is None:
        error_list.append("Invalid categories file")
        return
    try:
        alt_id = PyXMCDA.getAlternativesID(xml_alt)
        crit_id = PyXMCDA.getCriteriaID(xml_crit)
        pt = PyXMCDA.getPerformanceTable(xml_pt, alt_id, crit_id)
        cat_id = PyXMCDA.getCategoriesID(xml_cat)
        cat_rank = PyXMCDA.getCategoriesRank(xml_cat, cat_id)
        assign = PyXMCDA.getAlternativesAffectations(xml_assign)
        pref_dir = PyXMCDA.getCriteriaPreferenceDirections(xml_crit, crit_id)
    except Exception:
        error_list.append("Failed to parse one or more files")
        return
    return (alt_id, crit_id, pt, cat_id, cat_rank, assign, pref_dir)
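# A hedged usage sketch: 'in' is a hypothetical directory holding the five
# XMCDA files above, and error_list is the module-level list the function
# appends its messages to.
result = parse_xmcda_files('in')
if result is None:
    print('\n'.join(error_list))
else:
    alt_id, crit_id, pt, cat_id, cat_rank, assign, pref_dir = result
    print(cat_rank)   # e.g. {'good': 1, 'medium': 2, 'bad': 3}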
def main(argv=None):
    if argv is None:
        argv = sys.argv
    parser = OptionParser()
    parser.add_option("-i", "--in", dest="in_dir")
    parser.add_option("-o", "--out", dest="out_dir")
    (options, args) = parser.parse_args(argv[1:])
    in_dir = options.in_dir
    out_dir = options.out_dir

    # Creating lists for error and log messages
    errorList = []
    logList = []

    # If some mandatory input files are missing
    if not os.path.isfile(in_dir + "/alternatives.xml") \
            or not os.path.isfile(in_dir + "/categories.xml") \
            or not os.path.isfile(in_dir + "/categoriesProfiles.xml") \
            or not os.path.isfile(in_dir + "/stabilityRelation.xml"):
        errorList.append("Some input files are missing")
    else:
        # Default sorting mode (assumption: used when no sortingMode.xml is provided)
        mode = "pessimistic"
        if os.path.isfile(in_dir + "/sortingMode.xml"):
            xmltree_mode = PyXMCDA.parseValidate(in_dir + "/sortingMode.xml")
            if xmltree_mode is None:
                errorList.append("sortingMode file cannot be validated.")
            else:
                mode = PyXMCDA.getParameterByName(xmltree_mode, "sortingMode")
                if mode not in ("pessimistic", "optimistic"):
                    errorList.append("Value of parameter sortingMode should be 'pessimistic' or 'optimistic'.")

        xmltree_alternatives = PyXMCDA.parseValidate(in_dir + "/alternatives.xml")
        xmltree_categories = PyXMCDA.parseValidate(in_dir + "/categories.xml")
        xmltree_profiles = PyXMCDA.parseValidate(in_dir + "/categoriesProfiles.xml")
        xmltree_altStability = PyXMCDA.parseValidate(in_dir + "/stabilityRelation.xml")

        if xmltree_alternatives is None:
            errorList.append("The alternatives file cannot be validated.")
        if xmltree_categories is None:
            errorList.append("The categories file cannot be validated.")
        if xmltree_profiles is None:
            errorList.append("The categoriesProfiles file cannot be validated.")
        if xmltree_altStability is None:
            errorList.append("The alternatives comparisons file cannot be validated.")

        if not errorList:
            alternativesId = PyXMCDA.getAlternativesID(xmltree_alternatives, "ACTIVEREAL")
            allalt = PyXMCDA.getAlternativesID(xmltree_alternatives, "ACTIVE")
            categoriesId = PyXMCDA.getCategoriesID(xmltree_categories)
            categoriesRank = PyXMCDA.getCategoriesRank(xmltree_categories, categoriesId)
            altStability = PyXMCDA.getAlternativesComparisons(xmltree_altStability, allalt)

            if not alternativesId:
                errorList.append("No alternatives found.")
            if not categoriesId:
                errorList.append("No categories found.")
            if not altStability:
                errorList.append("No alternatives comparisons found.")

            if not errorList:
                catPro = PyXMCDA.getCategoriesProfiles(xmltree_profiles, categoriesId)
                proCat = PyXMCDA.getProfilesCategories(xmltree_profiles, categoriesId)
                profilesId = list(proCat.keys())

                # Invert the mapping so categories can be sorted by rank
                rankCategories = {}
                for i, j in categoriesRank.items():
                    rankCategories[j] = i
                ranks = sorted(rankCategories.keys())
                lowestRank = ranks.pop()

                # Table holding the assignments
                affectations = {}

                if mode == "pessimistic":
                    # ELECTRE TRI pessimistic rule
                    for alt in alternativesId:
                        affectations[alt] = []
                        for rank in ranks:
                            profile = catPro[rankCategories[rank]]["lower"]
                            if -1 <= altStability[alt][profile] <= 1:
                                # Unstable outranking: add both the lower and the upper category
                                if affectations[alt].count(proCat[profile]["lower"]) == 0:
                                    affectations[alt].append(proCat[profile]["lower"])
                                if affectations[alt].count(proCat[profile]["upper"]) == 0:
                                    affectations[alt].append(proCat[profile]["upper"])
                            if altStability[alt][profile] > 1:
                                # Stable outranking: add only the upper category and stop
                                if affectations[alt].count(proCat[profile]["upper"]) == 0:
                                    affectations[alt].append(proCat[profile]["upper"])
                                break
                        if affectations[alt] == []:
                            # All outrankings stable and negative: force the lowest category
                            affectations[alt] = [rankCategories[lowestRank]]
                else:
                    errorList.append("Optimistic rule is not taken into account yet")

                if not errorList:
                    # Creating alternativesAffectations file
                    fileAffectations = open(out_dir + "/alternativesAffectations.xml", 'w')
                    PyXMCDA.writeHeader(fileAffectations)
                    # Write some information about the generated file
                    fileAffectations.write("\t<projectReference>\n\t\t<title>Stable alternatives affectation</title>\n\t\t<version>" + VERSION + "</version>\n\t\t<author>ws_PyXMCDA suite (TV)</author>\n\t</projectReference>\n\n")
                    fileAffectations.write("\t<alternativesAffectations>\n")
                    for alt in alternativesId:
                        fileAffectations.write("\t\t<alternativeAffectation>\n\t\t\t<alternativeID>" + alt + "</alternativeID>\n\t\t\t<categoriesSet>\n")
                        for cat in affectations[alt]:
                            fileAffectations.write("\t\t\t\t<element><categoryID>" + cat + "</categoryID></element>\n")
                        fileAffectations.write("\t\t\t</categoriesSet>\n\t\t</alternativeAffectation>\n")
                    fileAffectations.write("\t</alternativesAffectations>\n")
                    PyXMCDA.writeFooter(fileAffectations)
                    fileAffectations.close()

    # Creating log and error file, messages.xml
    fileMessages = open(out_dir + "/messages.xml", 'w')
    PyXMCDA.writeHeader(fileMessages)
    if not errorList:
        logList.append("Execution ok")
        PyXMCDA.writeLogMessages(fileMessages, logList)
    else:
        PyXMCDA.writeErrorMessages(fileMessages, errorList)
    PyXMCDA.writeFooter(fileMessages)
    fileMessages.close()
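# A standalone sketch of the pessimistic stability rule used in main(), on plain
# dictionaries. All ids are made up; 'below'/'above' play the role of
# proCat[profile]['lower'/'upper'] and 'lower_profile_of' the role of
# catPro[category]['lower'].
def assign_pessimistic(stability, lower_profile_of, below, above, ranked_categories):
    """ranked_categories: category ids ordered from best to worst."""
    assignments = {}
    for alt, row in stability.items():
        assignments[alt] = []
        for cat in ranked_categories[:-1]:       # the worst category has no lower profile
            profile = lower_profile_of[cat]
            s = row[profile]
            if -1 <= s <= 1:                     # unstable outranking: keep both sides
                for c in (below[profile], above[profile]):
                    if c not in assignments[alt]:
                        assignments[alt].append(c)
            elif s > 1:                          # stable outranking: keep the upper side and stop
                if above[profile] not in assignments[alt]:
                    assignments[alt].append(above[profile])
                break
        if not assignments[alt]:                 # stable negative everywhere: worst category
            assignments[alt] = [ranked_categories[-1]]
    return assignments

print(assign_pessimistic(
    stability={'a1': {'b2': 0, 'b1': 2}},
    lower_profile_of={'good': 'b2', 'medium': 'b1'},
    below={'b2': 'medium', 'b1': 'bad'},
    above={'b2': 'good', 'b1': 'medium'},
    ranked_categories=['good', 'medium', 'bad'],
))  # -> {'a1': ['medium', 'good']}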
def get_input_data(input_dir, filenames, params, **kwargs):
    trees = _get_trees(input_dir, filenames)
    d = _create_data_object(params)
    for p in params:
        if p == 'alternatives':
            d.alternatives = px.getAlternativesID(trees['alternatives'])
        elif p == 'categories_profiles':
            comparison_with = kwargs.get('comparison_with')
            if comparison_with is None:
                comparison_with = px.getParameterByName(
                    trees['method_parameters'], 'comparison_with')
            d.categories_profiles = _get_categories_profiles(
                trees.get('categories_profiles'), comparison_with)
        elif p == 'categories_rank':
            categories = px.getCategoriesID(trees['categories'])
            d.categories_rank = px.getCategoriesRank(trees['categories'], categories)
        elif p == 'comparison_with':
            d.comparison_with = px.getParameterByName(
                trees['method_parameters'], 'comparison_with')
        elif p == 'concordance':
            alternatives = px.getAlternativesID(trees['alternatives'])
            comparison_with = px.getParameterByName(trees['method_parameters'],
                                                    'comparison_with')
            if comparison_with in ('boundary_profiles', 'central_profiles'):
                categories_profiles = _get_categories_profiles(
                    trees['categories_profiles'], comparison_with)
                d.concordance = _get_alternatives_comparisons(
                    trees['concordance'], alternatives, categories_profiles)
            else:
                d.concordance = px.getAlternativesComparisons(
                    trees['concordance'], alternatives)
        elif p == 'credibility':
            alternatives = px.getAlternativesID(trees['alternatives'])
            comparison_with = kwargs.get('comparison_with')
            if not comparison_with:
                comparison_with = px.getParameterByName(
                    trees['method_parameters'], 'comparison_with')
            if comparison_with in ('boundary_profiles', 'central_profiles'):
                categories_profiles = _get_categories_profiles(
                    trees['categories_profiles'], comparison_with)
            else:
                categories_profiles = None
            eliminate_cycles_method = px.getParameterByName(
                trees.get('method_parameters'), 'eliminate_cycles_method')
            tree = trees.get('credibility')
            if eliminate_cycles_method == 'cut_weakest' and tree is None:
                raise RuntimeError(
                    "'cut_weakest' option requires credibility as "
                    "an additional input (apart from outranking).")
            d.credibility = _get_alternatives_comparisons(
                tree, alternatives, categories_profiles=categories_profiles)
        elif p == 'criteria':
            d.criteria = px.getCriteriaID(trees['criteria'])
        elif p == 'cut_threshold':
            cut_threshold = px.getParameterByName(trees['method_parameters'],
                                                  'cut_threshold')
            if cut_threshold is None or not (0 <= float(cut_threshold) <= 1):
                raise RuntimeError(
                    "'cut_threshold' should be in range [0, 1] "
                    "(most commonly used values are 0.6 or 0.7).")
            d.cut_threshold = cut_threshold
        # 'cv_crossed' == 'counter-veto crossed'
        elif p == 'cv_crossed':
            alternatives = px.getAlternativesID(trees['alternatives'])
            comparison_with = px.getParameterByName(trees['method_parameters'],
                                                    'comparison_with')
            if comparison_with in ('boundary_profiles', 'central_profiles'):
                categories_profiles = _get_categories_profiles(
                    trees['categories_profiles'], comparison_with)
            else:
                categories_profiles = None
            d.cv_crossed = _get_alternatives_comparisons(
                trees['counter_veto_crossed'], alternatives,
                categories_profiles=categories_profiles, use_partials=True,
                mcda_concept='counterVetoCrossed')
        elif p == 'discordance':
            alternatives = px.getAlternativesID(trees['alternatives'])
            comparison_with = px.getParameterByName(trees['method_parameters'],
                                                    'comparison_with')
            if kwargs.get('use_partials') is not None:
                use_partials = kwargs.get('use_partials')
            else:
                parameter = px.getParameterByName(trees['method_parameters'],
                                                  'use_partials')
                use_partials = True if parameter == 'true' else False
            if comparison_with in ('boundary_profiles', 'central_profiles'):
                categories_profiles = _get_categories_profiles(
                    trees['categories_profiles'], comparison_with)
            else:
                categories_profiles = None
            d.discordance = _get_alternatives_comparisons(
                trees['discordance'], alternatives,
                categories_profiles=categories_profiles,
                use_partials=use_partials)
        elif p == 'eliminate_cycles_method':
            d.eliminate_cycles_method = px.getParameterByName(
                trees['method_parameters'], 'eliminate_cycles_method')
        elif p == 'interactions':
            criteria = px.getCriteriaID(trees['criteria'])
            d.interactions = _get_criteria_interactions(
                trees['interactions'], criteria)
        elif p == 'outranking':
            d.outranking = _get_outranking_crisp(trees['outranking'])
        elif p == 'performances':
            d.performances = px.getPerformanceTable(trees['performance_table'],
                                                    None, None)
        elif p == 'pref_directions':
            criteria = px.getCriteriaID(trees['criteria'])
            d.pref_directions = px.getCriteriaPreferenceDirections(
                trees['criteria'], criteria)
        elif p == 'profiles_performance_table':
            if comparison_with in ('boundary_profiles', 'central_profiles'):
                tree = trees.get('profiles_performance_table')
                if tree is None:
                    msg = ("Missing profiles performance table (did you forget "
                           "to provide 'profiles_performance_table.xml' file?).")
                    raise RuntimeError(msg)
                d.profiles_performance_table = px.getPerformanceTable(tree, None, None)
            else:
                d.profiles_performance_table = None
        elif p == 'reinforcement_factors':
            criteria = px.getCriteriaID(trees['criteria'])
            factors = {}
            for c in criteria:
                rf = px.getCriterionValue(trees['reinforcement_factors'], c,
                                          'reinforcement_factors')
                if len(rf) == 0:
                    continue
                if rf.get(c) <= 1:
                    msg = ("Reinforcement factor for criterion '{}' should be "
                           "higher than 1.0 (ideally between 1.2 and 1.5).")
                    raise RuntimeError(msg.format(c))
                factors.update(rf)
            d.reinforcement_factors = factors
        elif p == 'thresholds':
            criteria = px.getCriteriaID(trees['criteria'])
            d.thresholds = _get_thresholds(trees['criteria'])
        elif p == 'weights':
            criteria = px.getCriteriaID(trees['criteria'])
            d.weights = px.getCriterionValue(trees['weights'], criteria)
        elif p == 'z_function':
            d.z_function = px.getParameterByName(trees['method_parameters'],
                                                 'z_function')
        elif p == 'with_denominator':
            parameter = px.getParameterByName(trees['method_parameters'],
                                              'with_denominator')
            d.with_denominator = True if parameter == 'true' else False
        elif p == 'only_max_discordance':
            parameter = px.getParameterByName(trees['method_parameters'],
                                              'only_max_discordance')
            d.only_max_discordance = True if parameter == 'true' else False
        elif p == 'use_partials':
            parameter = px.getParameterByName(trees['method_parameters'],
                                              'use_partials')
            d.use_partials = True if parameter == 'true' else False
        elif p == 'use_pre_veto':
            parameter = px.getParameterByName(trees['method_parameters'],
                                              'use_pre_veto')
            d.use_pre_veto = True if parameter == 'true' else False
        else:
            raise RuntimeError("Unknown parameter '{}' specified.".format(p))
    for param in params:
        data = getattr(d, param)
        # Check for empty list/dict content (the original compared against
        # type(list)/type(dict), which never matches actual lists or dicts).
        if isinstance(data, (list, dict)) and len(data) == 0:
            raise RuntimeError(
                "No content for '{}' parameter provided.".format(param))
    return d
def get_categories_rank(trees, *args, **kwargs):
    # Assumes the caller passes the parsed XML trees mapping; the original
    # snippet referenced an otherwise-undefined 'trees'.
    categories = px.getCategoriesID(trees['categories'])
    categories_rank = px.getCategoriesRank(trees['categories'], categories)
    return categories_rank  # dict: category id -> rank
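# Hypothetical shape of the dict returned above, plus the inverted mapping
# (rank -> category id) that main() builds before sorting categories by rank.
categories_rank = {'good': 1, 'medium': 2, 'bad': 3}             # made-up ids
rank_to_category = {rank: cat for cat, rank in categories_rank.items()}
print(rank_to_category[min(rank_to_category)])                   # 'good'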
def get_input_data(input_dir, filenames, params, **kwargs):
    trees = _get_trees(input_dir, filenames)
    d = _create_data_object(params)
    for p in params:
        if p == "alternatives":
            d.alternatives = px.getAlternativesID(trees["alternatives"])
        elif p == "profiles":
            d.profiles = px.getProfilesID(trees["profiles"])
        elif p == "categories_profiles":
            comparison_with = kwargs.get("comparison_with")
            if comparison_with is None:
                comparison_with = px.getParameterByName(
                    trees["method_parameters"], "comparison_with")
            d.categories_profiles = _get_categories_profiles(
                trees.get("categories_profiles"), comparison_with)
        elif p == "categories_rank":
            categories = px.getCategoriesID(trees["categories"])
            d.categories_rank = px.getCategoriesRank(trees["categories"], categories)
        elif p == "comparison_with":
            d.comparison_with = px.getParameterByName(
                trees["method_parameters"], "comparison_with")
        elif p == "concordance":
            alternatives = px.getAlternativesID(trees["alternatives"])
            comparison_with = kwargs.get("comparison_with")
            if "method_parameters" in trees:
                comparison_with = px.getParameterByName(
                    trees["method_parameters"], "comparison_with")
            if kwargs.get("use_partials") is not None:
                use_partials = kwargs.get("use_partials")
            else:
                if "method_parameters" in trees:
                    parameter = px.getParameterByName(
                        trees["method_parameters"], "use_partials")
                    use_partials = True if parameter == "true" else False
            categories_profiles = None
            profiles = None
            if comparison_with in ("boundary_profiles", "central_profiles"):
                categories_profiles = _get_categories_profiles(
                    trees["categories_profiles"], comparison_with)
            if comparison_with == "profiles":
                profiles = px.getProfilesID(trees["profiles"])
            d.concordance = _get_alternatives_comparisons(
                trees["concordance"],
                alternatives,
                profiles=profiles,
                categories_profiles=categories_profiles,
                use_partials=use_partials,
            )
        elif p == "crisp_concordance":
            alternatives = px.getAlternativesID(trees["alternatives"])
            comparison_with = kwargs.get("comparison_with")
            if "method_parameters" in trees:
                comparison_with = px.getParameterByName(
                    trees["method_parameters"], "comparison_with")
            if kwargs.get("use_partials") is not None:
                use_partials = kwargs.get("use_partials")
            else:
                if "method_parameters" in trees:
                    parameter = px.getParameterByName(
                        trees["method_parameters"], "use_partials")
                    use_partials = True if parameter == "true" else False
            categories_profiles = None
            profiles = None
            if comparison_with in ("boundary_profiles", "central_profiles"):
                categories_profiles = _get_categories_profiles(
                    trees["categories_profiles"], comparison_with)
            if comparison_with == "profiles":
                profiles = px.getProfilesID(trees["profiles"])
            d.concordance = _get_alternatives_comparisons(
                trees["concordance"],
                alternatives,
                profiles=profiles,
                categories_profiles=categories_profiles,
                use_partials=use_partials,
                use_value=False,
            )
        elif p == "credibility":
            alternatives = px.getAlternativesID(trees["alternatives"])
            comparison_with = kwargs.get("comparison_with")
            if not comparison_with:
                comparison_with = px.getParameterByName(
                    trees["method_parameters"], "comparison_with")
            if comparison_with in ("boundary_profiles", "central_profiles"):
                categories_profiles = _get_categories_profiles(
                    trees["categories_profiles"], comparison_with)
            else:
                categories_profiles = None
            eliminate_cycles_method = px.getParameterByName(
                trees.get("method_parameters"), "eliminate_cycles_method")
            tree = trees.get("credibility")
            if eliminate_cycles_method == "cut_weakest" and tree is None:
                raise RuntimeError(
                    "'cut_weakest' option requires credibility as "
                    "an additional input (apart from outranking).")
            d.credibility = _get_alternatives_comparisons(
                tree, alternatives, categories_profiles=categories_profiles)
        elif p == "criteria":
            if "criteria" in trees:
                d.criteria = px.getCriteriaID(trees["criteria"])
        elif p == "cut_threshold":
            cut_threshold = px.getParameterByName(
                trees["method_parameters"], "cut_threshold")
            if cut_threshold is None or not (0 <= float(cut_threshold) <= 1):
                raise RuntimeError(
                    "'cut_threshold' should be in range [0, 1] "
                    "(most commonly used values are 0.6 or 0.7).")
            d.cut_threshold = cut_threshold
        # 'cv_crossed' == 'counter-veto crossed'
        elif p == "cv_crossed":
            alternatives = px.getAlternativesID(trees["alternatives"])
            comparison_with = px.getParameterByName(
                trees["method_parameters"], "comparison_with")
            if comparison_with in ("boundary_profiles", "central_profiles"):
                categories_profiles = _get_categories_profiles(
                    trees["categories_profiles"], comparison_with)
            else:
                categories_profiles = None
            d.cv_crossed = _get_alternatives_comparisons(
                trees["counter_veto_crossed"],
                alternatives,
                categories_profiles=categories_profiles,
                use_partials=True,
                mcda_concept="counterVetoCrossed",
            )
        elif p == "discordance":
            alternatives = px.getAlternativesID(trees["alternatives"])
            comparison_with = kwargs.get("comparison_with")
            if "method_parameters" in trees:
                comparison_with = px.getParameterByName(
                    trees["method_parameters"], "comparison_with")
            if kwargs.get("use_partials") is not None:
                use_partials = kwargs.get("use_partials")
            else:
                if "method_parameters" in trees:
                    parameter = px.getParameterByName(
                        trees["method_parameters"], "use_partials")
                    use_partials = True if parameter == "true" else False
            categories_profiles = None
            profiles = None
            if comparison_with in ("boundary_profiles", "central_profiles"):
                categories_profiles = _get_categories_profiles(
                    trees["categories_profiles"], comparison_with)
            if comparison_with == "profiles":
                profiles = px.getProfilesID(trees["profiles"])
            d.discordance = _get_alternatives_comparisons(
                trees["discordance"],
                alternatives,
                profiles=profiles,
                categories_profiles=categories_profiles,
                use_partials=use_partials,
            )
        elif p == "crisp_discordance":
            alternatives = px.getAlternativesID(trees["alternatives"])
            comparison_with = kwargs.get("comparison_with")
            if "method_parameters" in trees:
                comparison_with = px.getParameterByName(
                    trees["method_parameters"], "comparison_with")
            if kwargs.get("use_partials") is not None:
                use_partials = kwargs.get("use_partials")
            else:
                if "method_parameters" in trees:
                    parameter = px.getParameterByName(
                        trees["method_parameters"], "use_partials")
                    use_partials = True if parameter == "true" else False
            categories_profiles = None
            profiles = None
            if comparison_with in ("boundary_profiles", "central_profiles"):
                categories_profiles = _get_categories_profiles(
                    trees["categories_profiles"], comparison_with)
            if comparison_with == "profiles":
                profiles = px.getProfilesID(trees["profiles"])
            d.discordance = _get_alternatives_comparisons(
                trees["discordance"],
                alternatives,
                profiles=profiles,
                categories_profiles=categories_profiles,
                use_partials=use_partials,
                use_value=False,
            )
        elif p == "preorder":
            if "preorder" in trees:
                alternatives = px.getAlternativesID(trees["alternatives"])
                d.preorder = px.getAlternativeValue(trees["preorder"], alternatives, None)
        elif p == "downwards":
            alternatives = px.getAlternativesID(trees["alternatives"])
            d.downwards = px.getAlternativeValue(trees["downwards"], alternatives, None)
        elif p == "upwards":
            alternatives = px.getAlternativesID(trees["alternatives"])
            d.upwards = px.getAlternativeValue(trees["upwards"], alternatives, None)
        elif p == "eliminate_cycles_method":
            d.eliminate_cycles_method = px.getParameterByName(
                trees["method_parameters"], "eliminate_cycles_method")
        elif p == "interactions":
            criteria = px.getCriteriaID(trees["criteria"])
            d.interactions = _get_criteria_interactions(trees["interactions"], criteria)
        elif p == "outranking":
            alternatives = px.getAlternativesID(trees["alternatives"])
            outranking = _get_intersection_distillation(trees["outranking"], alternatives)
            if outranking is None:
                outranking = px.getAlternativesComparisons(trees["outranking"], alternatives)
            if outranking == {}:
                outranking = _get_outranking(trees["outranking"])
            d.outranking = outranking
        elif p == "nonoutranking":
            if "nonoutranking" in trees:
                alternatives = px.getAlternativesID(trees["alternatives"])
                nonoutranking = _get_intersection_distillation(
                    trees["nonoutranking"], alternatives)
                if nonoutranking is None:
                    nonoutranking = px.getAlternativesComparisons(
                        trees["nonoutranking"], alternatives)
                if nonoutranking == {}:
                    nonoutranking = _get_outranking(trees["nonoutranking"])
                d.nonoutranking = nonoutranking
        elif p == "performances":
            d.performances = px.getPerformanceTable(trees["performance_table"], None, None)
        elif p == "pref_directions":
            criteria = px.getCriteriaID(trees["criteria"])
            d.pref_directions = px.getCriteriaPreferenceDirections(trees["criteria"], criteria)
        elif p == "profiles_performance_table":
            if comparison_with in ("boundary_profiles", "central_profiles"):
                tree = trees.get("profiles_performance_table")
                if tree is None:
                    msg = ("Missing profiles performance table (did you forget "
                           "to provide 'profiles_performance_table.xml' file?).")
                    raise RuntimeError(msg)
                d.profiles_performance_table = px.getPerformanceTable(tree, None, None)
            else:
                d.profiles_performance_table = None
        elif p == "reinforcement_factors":
            criteria = px.getCriteriaID(trees["criteria"])
            factors = {}
            for c in criteria:
                rf = px.getCriterionValue(trees["reinforcement_factors"], c,
                                          "reinforcement_factors")
                if len(rf) == 0:
                    continue
                if rf.get(c) <= 1:
                    msg = ("Reinforcement factor for criterion '{}' should be "
                           "higher than 1.0 (ideally between 1.2 and 1.5).")
                    raise RuntimeError(msg.format(c))
                factors.update(rf)
            d.reinforcement_factors = factors
        elif p == "thresholds":
            criteria = px.getCriteriaID(trees["criteria"])
            d.thresholds = _get_thresholds(trees["criteria"])
        elif p == "weights":
            criteria = px.getCriteriaID(trees["criteria"])
            d.weights = px.getCriterionValue(trees["weights"], criteria)
        elif p == "z_function":
            d.z_function = px.getParameterByName(trees["method_parameters"], "z_function")
        elif p == "with_denominator":
            parameter = px.getParameterByName(trees["method_parameters"], "with_denominator")
            d.with_denominator = True if parameter == "true" else False
        elif p == "use_partials":
            parameter = px.getParameterByName(trees["method_parameters"], "use_partials")
            d.use_partials = True if parameter == "true" else False
        elif p == "use_pre_veto":
            parameter = px.getParameterByName(trees["method_parameters"], "use_pre_veto")
            d.use_pre_veto = True if parameter == "true" else False
        elif p == "alpha":
            d.alpha = px.getParameterByName(trees["method_parameters"], "alpha")
        elif p == "beta":
            d.beta = px.getParameterByName(trees["method_parameters"], "beta")
        elif p == "s1":
            d.s1 = px.getParameterByName(trees["method_parameters"], "s1")
        elif p == "s2":
            d.s2 = px.getParameterByName(trees["method_parameters"], "s2")
        elif p == "crisp_outranking":
            d.crisp_outranking = px.getParameterByName(trees["method_parameters"], "crisp_outranking")
        elif p == "direction":
            d.direction = px.getParameterByName(trees["method_parameters"], "direction")
        elif p == "conc_threshold":
            d.conc_threshold = px.getParameterByName(trees["method_parameters"], "conc_threshold")
        elif p == "disc_threshold":
            d.disc_threshold = px.getParameterByName(trees["method_parameters"], "disc_threshold")
        elif p == "comprehensive":
            d.comprehensive = px.getParameterByName(trees["method_parameters"], "comprehensive")
        else:
            raise RuntimeError("Unknown parameter '{}' specified.".format(p))
    return d
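# A hedged usage sketch of the parameter-driven loader above. The directory,
# file names, and params list are hypothetical, and _get_trees is assumed to
# map each file to the tree key the branches expect (e.g. a performance table
# file ending up under trees["performance_table"]).
data = get_input_data(
    "in",
    ["alternatives.xml", "criteria.xml", "performanceTable.xml",
     "weights.xml", "method_parameters.xml"],
    ["alternatives", "criteria", "performances", "pref_directions",
     "thresholds", "weights"],
)
print(data.alternatives, data.weights)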