def main(argv=None): if argv is None: argv = sys.argv # Parse command line (nalternatives, ncriteria, nprofiles, nlearning, seed, error) = parse_cmdline(argv) print "Input parameters" print "================" print "Nalternatives:", nalternatives print "Ncriteria:", ncriteria print "Nprofiles:", nprofiles print "Nlearning:", nlearning print "Seed:", seed # Create a model (alternatives, criteria, palternatives) = create_model(nalternatives, ncriteria, nprofiles) (pt, profiles, weights, lbda) = generate_random_data(seed, alternatives, criteria, palternatives) model = etri.electre_tri(pt, profiles, weights, lbda) affectations = model.pessimist() # Add errors in learning alternatives learning_alts = [ "a%d" % (i+1) for i in range(nlearning) ] add_errors_in_learning_alts(affectations, learning_alts, nprofiles, error) profs = [] for profile in profiles: profs.append(profile['refs']) # Infer ELECTRE Tri parameters (iweights, iprofiles, ilbda, icompat, info) = etri_infer_parameters(learning_alts, criteria, pt, affectations, nprofiles, "models/etri_bm_weights_compat.mod", profiles=profs) # Apply ELECTRE Tri model with infered parameters modeli = etri.electre_tri(pt, iprofiles, iweights, ilbda) iaffectations = modeli.pessimist() # Print result print "Output" print "======" print "Time used:", info[0] print "Memory used:", info[1] debug.print_lambda(lbda, ilbda) debug.print_weights(weights, criteria, iweights) debug.print_profiles(profiles, criteria, iprofiles) debug.print_performance_table_with_assignements(pt, alternatives, criteria, affectations, iaffectations, icompat)
# Three profiles sharing the same indifference (q), preference (p) and
# veto (v) thresholds; only the reference vectors b1..b3 differ.
prof1 = {'refs': b1, 'q': q, 'p': p, 'v': v}
prof2 = {'refs': b2, 'q': q, 'p': p, 'v': v}
prof3 = {'refs': b3, 'q': q, 'p': p, 'v': v}
profiles = [prof1, prof2, prof3]

# Categories: n profiles delimit n+1 categories, numbered from 1.
categories = [(i + 1) for i in range(len(profiles) + 1)]
categories_rank = {}
for i, cat in enumerate(categories):
    categories_rank[cat] = i + 1

# 50 learning alternatives taken from the performance table.
# NOTE(review): dict key order is arbitrary in Python 2, so this is an
# arbitrary (not deterministic) subset — confirm this is intended.
learning_alts = pt.keys()[0:50]

# ELECTRE TRI model: assign alternatives with both rules.
model = etri.electre_tri(pt, profiles, weights, lbda)
pessimist = model.pessimist()
optimist = model.optimist()

#debug.print_performance_table_with_assignements(pt, alternatives, criteria, pessimist)

# Build the GLPK input from the pessimistic assignments and solve.
infile = glpk.create_input_file(learning_alts, criteria, pt, categories,
                                categories_rank, pessimist)
(status, output) = glpk.solve_normalized(infile.name)
infile.close()
if status:
    # Fix: error message misspelled the solver name ("gklp").
    sys.exit("glpk returned status %d" % status)

# Recover the inferred model parameters from the solver output.
(iweights, iprofiles, ilbda, icompat) = \
    glpk.parse_output(output, learning_alts, criteria)
# Fix: compare with None using identity, not equality (PEP 8); stray
# trailing semicolon removed.
if iweights is None:
    sys.exit("Invalid weights")