Example #1
if __name__ == "__main__":
    #rel_closeness, rankings = alt_crisp_TOPSIS("alternative_data.txt", 'C', 'C', 0)
    #print rel_closeness
    #print rankings

    weights = [['001', 3378.],['002', 2881.],['003', 1751.],['004', 1472.],['005', 1436.]]
    weight_ranges = [ ['001', [0.09, 1.00]], \
                      ['002', [0.08, 0.90]], \
                      ['003', [0.04, 0.60]], \
                      ['004', [0.04, 0.56]], \
                      ['005', [0.04, 0.55]] ]

    #TESTING:
    import alt_data_reader as data_reader
    print "Testing... GO"
    qual_data, quant_data = data_reader.read_data('alt_evals_data.csv', 10)    
    data1 = data_reader.reduce_data(qual_data, 'AVG')
    data2 = data_reader.reduce_data(qual_data, 'AVG_RANGE')

    results = alt_crisp_TOPSIS(data1, weights, 1)


    #TEST MONTECARLO
    from itertools import cycle
    
    n = 3000
    quants = 50
    alts = 10
    rank_counter, full_ranks, quantiles, q_counter, norm_fits, full_RCs = prob_TOPSIS_uniform(data2, weight_ranges, n, quants, alts, 'U')
    
    print "RANK COUNTER"
Example #2
weights_tri =   [ ['001', [0.09, 0.51, 1.00]], \
                  ['002', [0.08, 0.44, 0.90]], \
                  ['003', [0.04, 0.26, 0.60]], \
                  ['004', [0.04, 0.22, 0.56]], \
                  ['005', [0.04, 0.22, 0.55]] ]
weights_trap =   [ ['001', [0.09, 0.26, 0.77, 1.00]], \
                   ['002', [0.08, 0.21, 0.68, 0.90]], \
                   ['003', [0.04, 0.13, 0.43, 0.60]], \
                   ['004', [0.04, 0.10, 0.38, 0.56]], \
                   ['005', [0.04, 0.10, 0.37, 0.55]] ]

input_ranges = [[1, 9], [100., 550.], [0.0, 1.0], [0., 25.],
                [0.0, 1.0]]  #display input ranges

########### Get Data ###########
#qual_data, quant_data = data_reader.read_data('alt_evals_data.csv', 10)
#qual_data_old, quant_data_old = data_reader.read_data('alt_evals_data_real.csv', 10)
qual_data2, quant_data2 = data_reader.read_data(
    'alt_evals_data_thesis_final_31aug15.csv', 10)
data_avg = data_reader.reduce_data(quant_data2, 'AVG')
data_avgRange = data_reader.reduce_data(quant_data2, 'AVG_RANGE')
data_tri = data_reader.reduce_data(quant_data2, 'FUZZ_TRI_1')
data_trap = data_reader.reduce_data(quant_data2, 'FUZZ_TRAP_UNI')

########### Run Monte Carlo TOPSIS ###########
alts = 10  #number of alternatives
n = 10000  #number of iterations
quants = 20  #quantiles
top_N = 4  #number of top alternatives to plot (CDF, PDF, etc.)
create_plots = True  #flag to create plots

rank_counter, full_ranks, quantiles, q_counter, norm_fits, full_RCs = \
    TOPSIS.prob_TOPSIS_uniform(data_tri, weights_tri, n, quants, alts, 'T')
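
(Aside: prob_TOPSIS_uniform takes per-criterion weight specs, [min, max] ranges for the 'U' flag and [low, mode, high] triples for 'T'. A minimal sketch of that sampling idea, reusing the generic crisp_topsis() above with plain random draws, is shown below; it only illustrates the Monte Carlo loop, not the project's function or its return values.)

import random
from collections import Counter

def sample_weights(weight_specs, dist='U'):
    # weight_specs is a list of [criterion_id, spec] pairs, as in the examples above
    draws = []
    for _, spec in weight_specs:
        if dist == 'U':                                    # spec = [min, max]
            draws.append(random.uniform(spec[0], spec[1]))
        else:                                              # 'T': spec = [low, mode, high]
            draws.append(random.triangular(spec[0], spec[2], spec[1]))
    total = sum(draws)
    return [d / total for d in draws]                      # weights sum to 1

def monte_carlo_first_place(matrix, weight_specs, n=3000, dist='U'):
    # count how often each alternative ranks first over n random weight draws
    counts = Counter()
    for _ in range(n):
        w = sample_weights(weight_specs, dist)
        _, ranking = crisp_topsis(matrix, w)               # generic sketch from above
        counts[int(ranking[0])] += 1
    return counts
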
Example #3
 
if __name__ == "__main__":
    weight_labels = ['Empty Wt.', 'Max AS', 'Hover Eff.', 'L/D', 'Prop. Eff.']
    weights =       [ ['001', 0.51],['002', 0.44],['003', 0.26], \
                      ['004', 0.22],['005', 0.22]]
    weights_ranges = [ ['001', [0.09, 1.00]], \
                       ['002', [0.08, 0.90]], \
                       ['003', [0.04, 0.60]], \
                       ['004', [0.04, 0.56]], \
                       ['005', [0.04, 0.55]] ]
    weights_tri =   [ ['001', [0.09, 0.51, 1.00]], \
                      ['002', [0.08, 0.44, 0.90]], \
                      ['003', [0.04, 0.26, 0.60]], \
                      ['004', [0.04, 0.22, 0.56]], \
                      ['005', [0.04, 0.22, 0.55]] ]
    weights_trap =   [ ['001', [0.09, 0.26, 0.77, 1.00]], \
                       ['002', [0.08, 0.21, 0.68, 0.90]], \
                       ['003', [0.04, 0.13, 0.43, 0.60]], \
                       ['004', [0.04, 0.10, 0.38, 0.56]], \
                       ['005', [0.04, 0.10, 0.37, 0.55]] ]
    #TESTING:
    import alt_data_reader as data_reader
    print "Testing... GO"
    qual_data, quant_data = data_reader.read_data('alt_evals_data.csv', 10)    
    data1 = data_reader.reduce_data(qual_data, 'AHP_RANGE')
    data_tri = data_reader.reduce_data(qual_data, 'AHP_FUZZ_TRI_1')
    data_trap = data_reader.reduce_data(qual_data, 'AHP_FUZZ_TRAP_1')
    
    results = alt_fuzzy_AHP3(data_tri, weights_tri, 'FT', 0)
    
    for r in results: print r
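
(Aside: the [low, mode, high] triples passed to alt_fuzzy_AHP3 are triangular fuzzy numbers. The standard approximate TFN arithmetic behind this kind of fuzzy weighting, with a centroid defuzzification for ranking, is sketched generically below; the ratings are illustrative and this is not the project's alt_fuzzy_AHP3.)

def tfn_add(a, b):
    # exact sum of two triangular fuzzy numbers (low, mode, high)
    return (a[0] + b[0], a[1] + b[1], a[2] + b[2])

def tfn_mult(a, b):
    # standard approximation of the product of two positive TFNs
    return (a[0] * b[0], a[1] * b[1], a[2] * b[2])

def tfn_centroid(a):
    # defuzzify a TFN to a crisp score via its centroid
    return (a[0] + a[1] + a[2]) / 3.0

weights_tri = [(0.09, 0.51, 1.00), (0.08, 0.44, 0.90), (0.04, 0.26, 0.60),
               (0.04, 0.22, 0.56), (0.04, 0.22, 0.55)]
ratings_tri = [(3., 5., 7.)] * 5              # illustrative TFN ratings for one alternative
score = (0.0, 0.0, 0.0)
for w, r in zip(weights_tri, ratings_tri):
    score = tfn_add(score, tfn_mult(w, r))    # fuzzy weighted sum over the criteria
print(tfn_centroid(score))                    # crisp value used to compare alternatives
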
Example #4
weights_tri =   [ ['001', [0.09, 0.51, 1.00]], \
                  ['002', [0.08, 0.44, 0.90]], \
                  ['003', [0.04, 0.26, 0.60]], \
                  ['004', [0.04, 0.22, 0.56]], \
                  ['005', [0.04, 0.22, 0.55]] ]
weights_trap =   [ ['001', [0.09, 0.26, 0.77, 1.00]], \
                   ['002', [0.08, 0.21, 0.68, 0.90]], \
                   ['003', [0.04, 0.13, 0.43, 0.60]], \
                   ['004', [0.04, 0.10, 0.38, 0.56]], \
                   ['005', [0.04, 0.10, 0.37, 0.55]] ]
                   
input_ranges = [[1,9],[100.,550.],[0.0,1.0],[0.,25.],[0.0, 1.0]] #display input ranges

########### Get Data ###########
#qual_data, quant_data = data_reader.read_data('alt_evals_data.csv', 10)    
#qual_data_old, quant_data_old = data_reader.read_data('alt_evals_data_real.csv', 10)  
qual_data2, quant_data2 = data_reader.read_data('alt_evals_data_thesis_final_31aug15.csv', 10)  
data_avg = data_reader.reduce_data(quant_data2, 'AVG')
data_avgRange = data_reader.reduce_data(quant_data2, 'AVG_RANGE')
data_tri = data_reader.reduce_data(quant_data2, 'FUZZ_TRI_1')
data_trap = data_reader.reduce_data(quant_data2, 'FUZZ_TRAP_UNI')

########### Run Monte Carlo TOPSIS ###########
alts = 10                   #number of alternatives
n = 10000                   #number of iterations
quants = 20                 #quantiles
top_N = 4                   #number of top alternatives to plot (CDF, PDF, etc.)
create_plots = True        #flag to create plots

rank_counter, full_ranks, quantiles, q_counter, norm_fits, full_RCs = \
    TOPSIS.prob_TOPSIS_uniform(data_tri, weights_tri, n, quants, alts, 'T')
Example #5
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from itertools import cycle

    weights = [['001', 3378.], ['002', 2881.], ['003', 1751.], ['004', 1472.],
               ['005', 1436.]]
    weight_ranges = [ ['001', [0.09, 1.00]], \
                      ['002', [0.08, 0.90]], \
                      ['003', [0.04, 0.60]], \
                      ['004', [0.04, 0.56]], \
                      ['005', [0.04, 0.55]] ]

    #TESTING:
    import alt_data_reader as data_reader
    print "Testing... GO"
    qual_data, quant_data = data_reader.read_data(
        'alt_evals_data_thesis_final.csv', 10)
    data1 = data_reader.reduce_data(quant_data, 'AHP_CRISP')
    data2 = data_reader.reduce_data(quant_data, 'AHP_RANGE')
    data22 = data_reader.reduce_data(quant_data, 'AHP_RANGE_LARGE')
    data3 = data_reader.reduce_data(quant_data, 'AHP_FUZZ_TRI_1')

    results = alt_crisp_AHP1(data1, weights)
    results = alt_crisp_AHP2(data1, weights)
    for r in results:
        print r

    n = 2000
    alts = 10
    priority_quantiles = 70  #high number due to limited range of priority vector

    rank_counter, full_ranks, quantiles, q_counter, norm_fits, full_results = \
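
(Aside: alt_crisp_AHP1 and alt_crisp_AHP2 operate on crisp AHP-style inputs. For reference, the textbook way to turn one reciprocal pairwise-comparison matrix into a priority vector, here via row geometric means with NumPy, is sketched below; the matrix is made up and this is not the project's implementation.)

import numpy as np

def ahp_priorities(pairwise):
    # priority vector from a reciprocal pairwise comparison matrix,
    # computed with the row geometric-mean method
    A = np.asarray(pairwise, dtype=float)
    geo = A.prod(axis=1) ** (1.0 / A.shape[1])
    return geo / geo.sum()                     # normalized to sum to 1

# illustrative 3x3 comparison of three alternatives on one criterion (1-9 scale)
pairwise = [[1.0,  3.0,  5.0],
            [1/3., 1.0,  2.0],
            [1/5., 1/2., 1.0]]
print(ahp_priorities(pairwise))                # roughly [0.65, 0.23, 0.12]
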
Example #6
if __name__ == "__main__":
    
    import matplotlib.pyplot as plt
    from itertools import cycle
    
    weights = [['001', 3378.],['002', 2881.],['003', 1751.],['004', 1472.],['005', 1436.]]
    weight_ranges = [ ['001', [0.09, 1.00]], \
                      ['002', [0.08, 0.90]], \
                      ['003', [0.04, 0.60]], \
                      ['004', [0.04, 0.56]], \
                      ['005', [0.04, 0.55]] ]
                      
    #TESTING:
    import alt_data_reader as data_reader
    print "Testing... GO"
    qual_data, quant_data = data_reader.read_data('alt_evals_data_thesis_final.csv', 10)    
    data1 = data_reader.reduce_data(quant_data, 'AHP_CRISP')
    data2 = data_reader.reduce_data(quant_data, 'AHP_RANGE')
    data22= data_reader.reduce_data(quant_data, 'AHP_RANGE_LARGE')
    data3 = data_reader.reduce_data(quant_data, 'AHP_FUZZ_TRI_1')
    
    results = alt_crisp_AHP1(data1, weights)
    results = alt_crisp_AHP2(data1, weights)
    for r in results: print r  
    
    
    n = 2000
    alts = 10
    priority_quantiles = 70 #high number due to limited range of priority vector
    
    rank_counter, full_ranks, quantiles, q_counter, norm_fits, full_results = \
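
(Aside: the Monte Carlo runs above summarize sampled results into quantile bins, e.g. priority_quantiles = 70. As a generic usage note, empirical quantiles of any Monte Carlo sample can be taken directly with NumPy, as in the short sketch below; the sample here is synthetic.)

import numpy as np

# synthetic sample standing in for one alternative's Monte Carlo priorities
sample = np.random.uniform(0.05, 0.35, size=2000)

# 70 evenly spaced percentile levels, mirroring priority_quantiles above
levels = np.linspace(0., 100., 70)
quantile_values = np.percentile(sample, levels)
print(quantile_values[:5])
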