def main():
    """Entry point: run the PrometheeTri sorting procedure on XMCDA inputs.

    Reads the XMCDA input files from the input directory given on the
    command line, assigns alternatives to categories using central
    profiles, and writes the result to 'assignments.xml' in the output
    directory.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('classes.xml', False),
            ('classes_profiles.xml', False),
            ('flows.xml', False),
        ]
        params = [
            'alternatives',
            'categories',
            'alternatives_flows',
            'categories_flows',
            'categories_rank',
            'profiles_categories',
        ]
        d = get_input_data(input_dir, filenames, params,
                           comparison_with='central_profiles')
        assignments = sortPrometheeTri(d.alternatives, d.categories,
                                       d.profiles_categories,
                                       d.alternatives_flows,
                                       d.categories_flows)
        xmcda_assign = assignments_to_xmcda(assignments)
        write_xmcda(xmcda_assign, os.path.join(output_dir, 'assignments.xml'))
        # Consistency fix: the sibling entry points in this file emit a
        # success messages file and return 0 explicitly; previously this
        # function fell off the end returning None.
        create_messages_file(None, ('Everything OK.', ), output_dir)
        return 0
    except Exception as err:
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: compute the credibility matrix from XMCDA inputs.

    Compares alternatives either with each other or with category
    profiles (boundary/central, per 'comparison_with'), computes the
    credibility of outranking and serializes it to 'credibility.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('classes_profiles.xml', True),
            ('criteria.xml', False),
            ('method_parameters.xml', False),
            ('performance_table.xml', False),
            ('profiles_performance_table.xml', True),
        ]
        params = [
            'alternatives',
            'categories_profiles',
            'comparison_with',
            'criteria',
            'performances',
            'pref_directions',
            'profiles_performance_table',
            'thresholds',
        ]
        d = get_input_data(input_dir, filenames, params)

        # getting the elements to compare
        comparables_a = d.alternatives
        comparables_perf_a = d.performances
        if d.comparison_with in ('boundary_profiles', 'central_profiles'):
            comparables_b = d.categories_profiles
            comparables_perf_b = d.profiles_performance_table
        else:
            # alternatives vs alternatives comparison
            comparables_b = d.alternatives
            comparables_perf_b = d.performances

        credibility = get_credibility(comparables_a, comparables_perf_a,
                                      comparables_b, comparables_perf_b,
                                      d.criteria, d.pref_directions,
                                      d.thresholds)

        # serialization etc.
        if d.comparison_with in ('boundary_profiles', 'central_profiles'):
            mcda_concept = 'alternativesProfilesComparisons'
        else:
            mcda_concept = None
        comparables = (comparables_a, comparables_b)
        xmcda = comparisons_to_xmcda(credibility, comparables,
                                     mcda_concept=mcda_concept)
        write_xmcda(xmcda, os.path.join(output_dir, 'credibility.xml'))
        create_messages_file(None, ('Everything OK.', ), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: compute the credibility matrix from XMCDA inputs.

    Compares alternatives either with each other or with category
    profiles (boundary/central, per 'comparison_with'), computes the
    credibility of outranking and serializes it to 'credibility.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('classes_profiles.xml', True),
            ('criteria.xml', False),
            ('method_parameters.xml', False),
            ('performance_table.xml', False),
            ('profiles_performance_table.xml', True),
        ]
        params = [
            'alternatives',
            'categories_profiles',
            'comparison_with',
            'criteria',
            'performances',
            'pref_directions',
            'profiles_performance_table',
            'thresholds',
        ]
        d = get_input_data(input_dir, filenames, params)

        # getting the elements to compare
        comparables_a = d.alternatives
        comparables_perf_a = d.performances
        if d.comparison_with in ('boundary_profiles', 'central_profiles'):
            comparables_b = d.categories_profiles
            comparables_perf_b = d.profiles_performance_table
        else:
            # alternatives vs alternatives comparison
            comparables_b = d.alternatives
            comparables_perf_b = d.performances

        credibility = get_credibility(comparables_a, comparables_perf_a,
                                      comparables_b, comparables_perf_b,
                                      d.criteria, d.pref_directions,
                                      d.thresholds)

        # serialization etc.
        if d.comparison_with in ('boundary_profiles', 'central_profiles'):
            mcda_concept = 'alternativesProfilesComparisons'
        else:
            mcda_concept = None
        comparables = (comparables_a, comparables_b)
        xmcda = comparisons_to_xmcda(credibility, comparables,
                                     mcda_concept=mcda_concept)
        write_xmcda(xmcda, os.path.join(output_dir, 'credibility.xml'))
        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: compute credibility from concordance/discordance.

    Combines pre-computed (partial) concordance and discordance matrices
    — optionally crossed by counter-veto — into a credibility matrix and
    serializes it to 'credibility.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('classes_profiles.xml', True),
            ('concordance.xml', False),
            ('counter_veto_crossed.xml', False),
            ('discordance.xml', False),
            ('method_parameters.xml', False),
        ]
        params = [
            'alternatives',
            'categories_profiles',
            'comparison_with',
            'concordance',
            'cv_crossed',
            'discordance',
            'only_max_discordance',
            'with_denominator',
        ]
        d = get_input_data(input_dir, filenames, params, use_partials=True)

        # getting the elements to compare
        comparables_a = d.alternatives
        if d.comparison_with in ('boundary_profiles', 'central_profiles'):
            # central_profiles is a dict, so we need to get the keys
            comparables_b = [i for i in d.categories_profiles]
        else:
            comparables_b = d.alternatives

        credibility = get_credibility(comparables_a, comparables_b,
                                      d.concordance, d.discordance,
                                      d.with_denominator,
                                      d.only_max_discordance, d.cv_crossed)

        # serialization etc.
        if d.comparison_with in ('boundary_profiles', 'central_profiles'):
            mcda_concept = 'alternativesProfilesComparisons'
        else:
            mcda_concept = None
        comparables = (comparables_a, comparables_b)
        xmcda = comparisons_to_xmcda(credibility, comparables,
                                     mcda_concept=mcda_concept)
        write_xmcda(xmcda, os.path.join(output_dir, 'credibility.xml'))
        create_messages_file(None, ('Everything OK.', ), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: compute credibility from concordance/discordance.

    Combines pre-computed (partial) concordance and discordance matrices
    — optionally crossed by counter-veto — into a credibility matrix and
    serializes it to 'credibility.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('classes_profiles.xml', True),
            ('concordance.xml', False),
            ('counter_veto_crossed.xml', False),
            ('discordance.xml', False),
            ('method_parameters.xml', False),
        ]
        params = [
            'alternatives',
            'categories_profiles',
            'comparison_with',
            'concordance',
            'cv_crossed',
            'discordance',
            'only_max_discordance',
            'with_denominator',
        ]
        d = get_input_data(input_dir, filenames, params, use_partials=True)

        # getting the elements to compare
        comparables_a = d.alternatives
        if d.comparison_with in ('boundary_profiles', 'central_profiles'):
            # central_profiles is a dict, so we need to get the keys
            comparables_b = [i for i in d.categories_profiles]
        else:
            comparables_b = d.alternatives

        credibility = get_credibility(comparables_a, comparables_b,
                                      d.concordance, d.discordance,
                                      d.with_denominator,
                                      d.only_max_discordance, d.cv_crossed)

        # serialization etc.
        if d.comparison_with in ('boundary_profiles', 'central_profiles'):
            mcda_concept = 'alternativesProfilesComparisons'
        else:
            mcda_concept = None
        comparables = (comparables_a, comparables_b)
        xmcda = comparisons_to_xmcda(credibility, comparables,
                                     mcda_concept=mcda_concept)
        write_xmcda(xmcda, os.path.join(output_dir, 'credibility.xml'))
        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def run_part2(args):
    """Run part 2 of the pipeline on the given command-line arguments.

    Parses the input arguments, builds the train/test vectors, runs the
    'analyseorder' analysis with the 'sigma' and 'lambda' parameters, and
    prints the results.  Returns None.
    """
    # Get the input arguments
    input_args = get_input_data(args)
    # Fix: Py2-only "print input_args" statement; the call form prints the
    # same single argument on both Python 2 and 3.
    print(input_args)
    X_train, Y_train, X_test = get_vectors(input_args)
    # NOTE(review): 'sigma'/'lambda' are assumed to be keys of input_args
    # as produced by get_input_data — confirm against its implementation.
    output = analyseorder(input_args['sigma'], input_args['lambda'],
                          X_train, Y_train, X_test)
    print_results(output, input_args['lambda'], input_args['sigma'])
def main():
    """Entry point: run the PromSort procedure on XMCDA inputs.

    Sorts alternatives against boundary profiles using positive/negative
    flows and a cut point, then writes both the final assignments
    ('assignments.xml') and the first-step interval assignments
    ('first_step_assignments.xml').

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('classes.xml', False),
            ('classes_profiles.xml', False),
            ('method_parameters.xml', False),
            ('positive_flows.xml', False),
            ('negative_flows.xml', False),
        ]
        params = [
            'alternatives',
            'categories',
            'alternatives_positive_flows',
            'alternatives_negative_flows',
            'categories_positive_flows',
            'categories_negative_flows',
            'categories_rank',
            'profiles_categories',
            'cut_point',
        ]
        d = get_input_data(input_dir, filenames, params,
                           comparison_with='boundary_profiles')
        output = sortPromsort(d.alternatives, d.categories,
                              d.profiles_categories,
                              d.alternatives_positive_flows,
                              d.alternatives_negative_flows,
                              d.categories_positive_flows,
                              d.categories_negative_flows,
                              d.cut_point)
        # output == (final assignments, first-step interval assignments)
        assignments = output[0]
        first_step_assignments = output[1]
        xmcda_assign = assignments_to_xmcda(assignments)
        xmcda_first_step_assign = assignments_as_intervals_to_xmcda(
            first_step_assignments)
        write_xmcda(xmcda_assign, os.path.join(output_dir, 'assignments.xml'))
        write_xmcda(xmcda_first_step_assign,
                    os.path.join(output_dir, 'first_step_assignments.xml'))
        # Consistency fix: the sibling entry points in this file emit a
        # success messages file and return 0 explicitly; previously this
        # function fell off the end returning None.
        create_messages_file(None, ('Everything OK.', ), output_dir)
        return 0
    except Exception as err:
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: rank alternatives from (non-)outranking matrices.

    Runs the algorithm on the outranking (and optional non-outranking)
    matrices and writes three rankings: 'nfs.xml', 'strength.xml' and
    'weakness.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            ('alternatives.xml', False),
            ('outranking.xml', False),
            ('nonoutranking.xml', True),
            ('method_parameters.xml', False),
        ]
        params = [
            'alternatives',
            'outranking',
            'nonoutranking',
            'crisp_outranking',
        ]
        d = get_input_data(input_dir, filenames, params)
        alternativesId = d.alternatives
        outranking = d.outranking
        nonoutranking = d.nonoutranking
        crisp_outranking = d.crisp_outranking
        alg = algorithm(alternativesId, outranking, nonoutranking,
                        crisp_outranking)
        result = alg.Run()
        # Renamed from 'type' to avoid shadowing the builtin.
        # NOTE(review): crisp_outranking is compared against the literal
        # string "true" — presumably as parsed from method_parameters.xml.
        value_type = 'real'
        if crisp_outranking == "true":
            value_type = 'integer'
        # result == (nfs, strength, weakness) rankings
        xmcda = ranks_to_xmcda(result[0], value_type, None)
        write_xmcda(xmcda, os.path.join(output_dir, 'nfs.xml'))
        xmcda = ranks_to_xmcda(result[1], value_type, None)
        write_xmcda(xmcda, os.path.join(output_dir, 'strength.xml'))
        xmcda = ranks_to_xmcda(result[2], value_type, None)
        write_xmcda(xmcda, os.path.join(output_dir, 'weakness.xml'))
        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: flow-based sorting with boundary or central profiles.

    Dispatches on 'comparison_with' to sort alternatives using either
    boundary or central profiles and writes the result to
    'assignments.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('classes.xml', False),
            ('classes_profiles.xml', False),
            ('method_parameters.xml', False),
            ('flows.xml', False),
        ]
        params = [
            'alternatives',
            'categories',
            'comparison_with',
            'alternatives_flows',
            'categories_flows',
            'categories_rank',
            'profiles_categories',
        ]
        d = get_input_data(input_dir, filenames, params)
        if d.comparison_with == 'boundary_profiles':
            assignments = sortWithBoundaryProfiles(d.alternatives,
                                                   d.categories,
                                                   d.profiles_categories,
                                                   d.alternatives_flows,
                                                   d.categories_flows)
            xmcda_assign = assignments_to_xmcda(assignments)
        elif d.comparison_with == 'central_profiles':
            assignments = sortWithCentralProfiles(d.alternatives,
                                                  d.categories,
                                                  d.profiles_categories,
                                                  d.alternatives_flows,
                                                  d.categories_flows)
            xmcda_assign = assignments_to_xmcda(assignments)
        else:
            # Bug fix: the message previously referenced an undefined
            # bare name 'comparison_with', which raised NameError instead
            # of the intended InputDataError.
            raise InputDataError(
                "Wrong comparison type ('{}') specified.".format(
                    d.comparison_with))
        write_xmcda(xmcda_assign, os.path.join(output_dir, 'assignments.xml'))
        # Consistency fix: the sibling entry points in this file emit a
        # success messages file and return 0 explicitly; previously this
        # function fell off the end returning None.
        create_messages_file(None, ('Everything OK.', ), output_dir)
        return 0
    except Exception as err:
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: assign classes from an outranking relation.

    Computes (conjunctive, disjunctive) class-interval assignments and
    writes them to two separate files:
    'assignments_conjuctive.xml' and 'assignments_disjunctive.xml'.
    (The 'conjuctive' spelling is kept as-is — downstream consumers may
    depend on the existing filename.)

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('classes.xml', False),
            ('classes_profiles.xml', False),
            ('outranking.xml', False),
        ]
        params = [
            'alternatives',
            'categories_profiles',
            'categories_rank',
            'outranking',
        ]
        d = get_input_data(input_dir, filenames, params,
                           comparison_with='boundary_profiles')
        assignments = assign_class(d.alternatives, d.categories_rank,
                                   d.categories_profiles, d.outranking)
        # uncomment this if you want output combined as a single file (and
        # remember to import assignments_as_intervals_to_xmcda):
        # xmcda_intervals = assignments_as_intervals_to_xmcda(assignments)
        # write_xmcda(xmcda_intervals,
        #             os.path.join(output_dir, 'assignments_intervals.xml'))

        # Fix: dict.iteritems() is Python-2-only; .items() works on both
        # Python 2 and 3.  Each value is a (conjunctive, disjunctive) pair.
        assignments_con = {alt: pair[0] for alt, pair in assignments.items()}
        xmcda_con = assignments_to_xmcda(assignments_con)
        write_xmcda(xmcda_con,
                    os.path.join(output_dir, 'assignments_conjuctive.xml'))

        assignments_dis = {alt: pair[1] for alt, pair in assignments.items()}
        xmcda_dis = assignments_to_xmcda(assignments_dis)
        write_xmcda(xmcda_dis,
                    os.path.join(output_dir, 'assignments_disjunctive.xml'))

        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: combine downwards/upwards preorders.

    Runs the algorithm on the downwards and upwards preorders and writes
    their intersection ('intersection.xml'), the rank ('rank.xml') and
    the median ranking ('median.xml').

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            ('alternatives.xml', False),
            ('downwards.xml', False),
            ('upwards.xml', False),
        ]
        params = [
            'alternatives',
            'downwards',
            'upwards',
        ]
        d = get_input_data(input_dir, filenames, params)
        alternativesId = d.alternatives
        downwards = d.downwards
        upwards = d.upwards
        alg = algorithm(alternativesId, downwards, upwards)
        result = alg.Run()
        comparables = (alternativesId, alternativesId)
        # xmcda = comparisons_to_xmcda(result[0], comparables)
        xmcda = outranking_to_xmcda(result[0])
        write_xmcda(xmcda, os.path.join(output_dir, 'intersection.xml'))
        xmcda = ranks_to_xmcda(result[1], 'integer', None)
        write_xmcda(xmcda, os.path.join(output_dir, 'rank.xml'))
        xmcda = ranks_to_xmcda(result[2], 'integer', None)
        write_xmcda(xmcda, os.path.join(output_dir, 'median.xml'))
        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: rank alternatives from an outranking relation.

    Runs the algorithm on the outranking relation (with an optional
    preorder and direction parameter) and writes the resulting ranking
    to 'ranking.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            ('alternatives.xml', False),
            ('outranking.xml', False),
            ('preorder.xml', True),        # optional
            ('method_parameters.xml', True),  # optional
        ]
        params = [
            'alternatives',
            'outranking',
            'preorder',
            'direction',
        ]
        d = get_input_data(input_dir, filenames, params)
        alternatives = d.alternatives
        # preorder.xml is optional, so d.preorder may be None
        preorder = d.preorder if d.preorder is not None else None
        outranking = d.outranking
        direction = d.direction
        alg = algorithm(alternatives, outranking, preorder, direction)
        result = alg.Run()
        xmcda = ranks_to_xmcda(result, 'integer', None)
        write_xmcda(xmcda, os.path.join(output_dir, 'ranking.xml'))
        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def run_part1(sysargs):
    """Run part 1: ridge-regression estimation on the training data.

    Parses the input arguments, builds the train/test vectors, computes
    the Ridge Regression weight vector w_RR for the given 'lambda', and
    writes it out via output_results (one element per row in
    'wRR_{lbd}.csv').  Returns None.
    """
    # Get the input arguments
    input_args = get_input_data(sysargs)
    # Fix: Py2-only "print x" statements converted to the call form,
    # which behaves identically for a single argument on Python 2 and 3.
    print(input_args)
    X_train, Y_train, X_test = get_vectors(input_args)
    n, d = X_train.shape
    print("X_train shape:%s" % str(X_train.shape))
    print("Y_train shape:%s" % str(Y_train.shape))
    print("X_test shape:%s" % str(X_test.shape))
    # We compute the Ridge Regression estimate using the training set and
    # the lambda input
    w_rr = compute_wrr(X_train, Y_train, input_args['lambda'])
    print("w_rr computed%s" % str(w_rr))
    print("w_rr shape is %s" % str(w_rr.reshape(1, d).shape))
    # NOTE(review): this repeats the "w_rr computed" print above — kept to
    # preserve output, but it looks like an accidental duplicate.
    print("w_rr computed%s" % str(w_rr))
    # We output the results in the file 'wRR_{lbd}.csv' with each element
    # of the vector wrr in a row
    output_results(w_rr, input_args['lambda'])
def main():
    """Entry point: rank alternatives from a credibility matrix.

    Runs the algorithm on the credibility matrix with the 'direction',
    'alpha' and 'beta' parameters and writes the resulting ranking to
    'ranking.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            ('alternatives.xml', False),
            ('credibility.xml', False),
            ('method_parameters.xml', True),  # optional
        ]
        params = [
            'alternatives',
            'credibility',
            'direction',
            'alpha',
            'beta',
        ]
        d = get_input_data(input_dir, filenames, params)
        alternativesId = d.alternatives
        credibility = d.credibility
        direction = d.direction
        alpha = d.alpha
        beta = d.beta
        alg = algorithm(alternativesId, credibility, direction, alpha, beta)
        result = alg.Run()
        xmcda = ranks_to_xmcda(result, 'integer', None)
        write_xmcda(xmcda, os.path.join(output_dir, 'ranking.xml'))
        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: interval class assignment with central profiles.

    Assigns alternatives to class intervals from the outranking and
    credibility relations and writes the intervals to 'assignments.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('classes.xml', False),
            ('classes_profiles.xml', False),
            ('credibility.xml', False),
            ('outranking.xml', False),
        ]
        params = [
            'alternatives',
            'categories_profiles',
            'categories_rank',
            'credibility',
            'outranking',
        ]
        d = get_input_data(input_dir, filenames, params,
                           comparison_with='central_profiles')
        assignments = assign_class(d.alternatives, d.categories_rank,
                                   d.categories_profiles, d.outranking,
                                   d.credibility)
        # serialization etc.
        xmcda_assign = assignments_as_intervals_to_xmcda(assignments)
        write_xmcda(xmcda_assign, os.path.join(output_dir, 'assignments.xml'))
        create_messages_file(None, ('Everything OK.', ), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: interval class assignment with central profiles.

    Assigns alternatives to class intervals from the outranking and
    credibility relations and writes the intervals to 'assignments.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('classes.xml', False),
            ('classes_profiles.xml', False),
            ('credibility.xml', False),
            ('outranking.xml', False),
        ]
        params = [
            'alternatives',
            'categories_profiles',
            'categories_rank',
            'credibility',
            'outranking',
        ]
        d = get_input_data(input_dir, filenames, params,
                           comparison_with='central_profiles')
        assignments = assign_class(d.alternatives, d.categories_rank,
                                   d.categories_profiles, d.outranking,
                                   d.credibility)
        # serialization etc.
        xmcda_assign = assignments_as_intervals_to_xmcda(assignments)
        write_xmcda(xmcda_assign, os.path.join(output_dir, 'assignments.xml'))
        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: find the kernel of the outranking graph.

    Builds a graph from the outranking (and optional credibility)
    relation, eliminates cycles with the configured method, extracts the
    kernel and writes it to 'kernel.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)
        filenames = [
            # every tuple below == (filename, is_optional)
            ('alternatives.xml', False),
            ('credibility.xml', True),
            ('method_parameters.xml', False),
            ('outranking.xml', False),
        ]
        params = [
            'alternatives',
            'credibility',
            'eliminate_cycles_method',
            'outranking',
        ]
        d = get_input_data(input_dir, filenames, params)
        graph = build_graph(d.alternatives, d.outranking, d.credibility)
        # because of the 'eliminate_cycles' routine used by 'find_kernel',
        # a graph is returned with the kernel which allows for further
        # examination / debugging
        kernel, graph_without_cycles = find_kernel(graph,
                                                   d.eliminate_cycles_method)
        kernel_as_labels = get_kernel_as_labels(kernel, graph_without_cycles)
        xmcda = kernel_to_xmcda(kernel_as_labels)
        write_xmcda(xmcda, os.path.join(output_dir, 'kernel.xml'))
        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1
def main():
    """Entry point: compute the discordance matrix.

    Depending on the 'comparison_with' parameter, compares alternatives
    either with profiles or with each other, and writes the (partial)
    discordance to 'discordance.xml'.

    Returns 0 on success and 1 on failure; a messages file is written in
    both cases.
    """
    try:
        args = docopt(__doc__, version=__version__)
        # Pre-initialize so the except clause can reference output_dir
        # even if get_dirs() itself raises.
        output_dir = None
        input_dir, output_dir = get_dirs(args)

        # First pass: read only the comparison mode, which determines
        # which input files are required.
        filenames = [
            ('method_parameters.xml', False),
        ]
        params = [
            'comparison_with',
        ]
        d = get_input_data(input_dir, filenames, params)
        comparison_with = d.comparison_with

        profilesId = None
        if comparison_with == "profiles":
            filenames = [
                ('alternatives.xml', False),
                ('profiles.xml', False),
                ('criteria.xml', False),
                ('weights.xml', False),
                ('discordance.xml', False),
            ]
            params = [
                'alternatives',
                'profiles',
                'criteria',
                'weights',
                'discordance',
            ]
            kwargs = {'use_partials': True,
                      'comparison_with': comparison_with}
            d = get_input_data(input_dir, filenames, params, **kwargs)
            profilesId = d.profiles
        else:
            filenames = [
                ('alternatives.xml', False),
                ('criteria.xml', False),
                ('weights.xml', False),
                ('discordance.xml', False),
            ]
            params = [
                'alternatives',
                'criteria',
                'weights',
                'discordance',
            ]
            kwargs = {'use_partials': True}
            d = get_input_data(input_dir, filenames, params, **kwargs)
        alternativesId = d.alternatives
        criteriaId = d.criteria
        weights = d.weights
        discordance = d.discordance

        alg = algorithm(alternativesId, profilesId, criteriaId, weights,
                        discordance)
        result = alg.Run()

        # Fix: identity comparison with None ("== None" -> "is None").
        if profilesId is None:
            comparables = (alternativesId, alternativesId)
            xmcda = comparisons_to_xmcda(result, comparables, None)
        else:
            comparables = (alternativesId, profilesId)
            xmcda = comparisons_to_xmcda(result, comparables, None,
                                         with_profile=True)
        write_xmcda(xmcda, os.path.join(output_dir, 'discordance.xml'))
        create_messages_file(None, ('Everything OK.',), output_dir)
        return 0
    except Exception as err:
        # Fix: "except Exception, err" is Python-2-only syntax; "as err"
        # is valid on Python 2.6+ and Python 3.
        err_msg = get_error_message(err)
        log_msg = traceback.format_exc()
        print(log_msg.strip())
        create_messages_file((err_msg, ), (log_msg, ), output_dir)
        return 1