示例#1
0
def get_quality_energy_values_directly(src_file, symbol, lOfPoints, points_to_graph, limit=False, lower_bound=-100, upper_bound=100):
    """Collect quality, energy and setup values from point objects and append
    them, bundled with src_file, as a single entry to points_to_graph.

    src_file        -- tag stored alongside the extracted values
    symbol          -- plotting symbol (unused in the visible portion)
    lOfPoints       -- point objects exposing get_quality(), get_energy(),
                       get_raw_setUp() and get_input_number()
    points_to_graph -- output list; one 5-element entry is appended
    limit           -- if True, keep only (quality, energy) pairs whose
                       quality lies strictly inside (lower_bound, upper_bound)

    NOTE(review): Python 2 semantics are assumed -- map()/filter()/zip()
    must return lists because their results are reused/indexed below.
    """

    reminder(True,"the following needs to be uncommented if we want to show s6 results")
    #only change here
#    for el in lOfPoints:
#        el.set_input_number(0)

    lOfInput_number =  map(lambda x: x.get_input_number(), lOfPoints) #this is used for
                                                                      #only s4 and can be 
                                                                      #commented OW
    
    lOfQualityVals = map(lambda x: x.get_quality(), lOfPoints)
    lOfEnergyVals = map(lambda x: x.get_energy(), lOfPoints)
    lOfSetUps = map(lambda x: x.get_raw_setUp(), lOfPoints)
    # optionally drop pairs whose quality is outside the requested bounds
    if (limit):
        result = filter(lambda x: x[0] > lower_bound and x[0] <upper_bound, zip(lOfQualityVals, lOfEnergyVals))
        lOfQualityVals = map(lambda x: x[0], result)
        lOfEnergyVals = map(lambda x: x[1], result)
    
    
    
    reminder(True, "The normalization shouldn't been done for other quality metrics that already consider the accurate desing in their quality calculations")
    reminder(True, "normalization needs to be done automatically");
    reminder(True, "normalization of energy also needs to be automated") 
    print "before normialization" + str(lOfQualityVals) 
    #lOfAccurate_PSNR = [41.14, 39.67, 43.35, 40.34, 39.67, 41.14, 43.35]
    # all-ones reference values make the normalization below a no-op here;
    # per the author's comments the real normalization already happens in
    # specializedEval
    lOfAccurate_PSNR = [1,1, 1,1,1,1,1] #no need for other applications besides 
    lOfAccurate_Energy = [1,1,1,1,1,1,1,1,1,1,1] #dont matter anymore, cuase
    # I already normalize at in specializedEval
    # the ones with image outputs
    
    lOfQualityVals_normalized = normalized_quality(lOfQualityVals, lOfInput_number,lOfAccurate_PSNR) 
    #accurate_design_energy  = 516918 #for jpeg
    #lOfAccurate_Energy = [2.20284e+08,2.39439e+08,2.562e+08, 2.63383e+8,2.562e+08,2.53805e+08]  #for disparity
    #accurate_design_energy  = [516882432,516882432,516882432,516882432,516882432,516882432,516882432,516882432] #for jpeg
    
    lOfEnergyVals_normalized = normalized_energy(lOfEnergyVals, lOfInput_number, lOfAccurate_Energy)
    
    #lOfEnergyVals_normalized = map(lambda x: float(x)/float(accurate_design_energy), lOfEnergyVals)
    lOfQualityVals = lOfQualityVals_normalized 
    lOfEnergyVals= lOfEnergyVals_normalized
    
    #here
    points_to_graph.append([lOfQualityVals, lOfEnergyVals, lOfInput_number, lOfSetUps, src_file])
    pts = points_to_graph[0] #am not using scenario where points_to_graph is more than oneelement deep 
    
    
    """ 
示例#2
0
def generateGraph_for_all(valueList, xName, yName, benchmark_name, graph_title="pareto comparison for", name = "various_inputs", graph_dim = "2d", graph_type ="Q_vs_E", n_graphs="one"):
    """Plot quality/energy curves for every input image found in valueList.

    Each valueList entry is zipped into (quality, energy, input_number,
    setUp) tuples, bucketed per input image, sorted by increasing energy,
    and drawn either as a 3d scatter (graph_dim == "3d") or as 2d lines;
    with n_graphs == "multiple" a fresh figure is started after each curve.
    Python 2 semantics are assumed (map/zip return lists).

    BUG FIX: the guard below was written as ``assert(1==0, "...")`` --
    asserting a non-empty tuple, which is always true, so the guard never
    fired.  Per its message the author intended to disable this tool, so
    the assert is now spelled correctly and raises AssertionError as
    intended.  (Note: asserts are stripped under ``python -O``.)
    """
    assert 1 == 0, "this graph generation tool can not be used b/c it uses meani"

    name_counter = 0 
    fig = plt.figure(figsize=plt.figaspect(0.5)) 
    #--- sanity check 
    if (graph_dim == "3d"): 
        if (graph_type == "Q_E_product"): 
            print "ERROR: graph_teyp and graph_dim are incompatible"
            sys.exit()
        ax = fig.gca(projection='3d')
        #ax = fig.add_subplot(111, projection='3d') 
        ax.set_xlabel('Quality')
        ax.set_ylabel('mean')
        ax.set_zlabel('Energy')
    else: 
        fig, ax = plt.subplots()
        if (graph_type == "Q_E_product"):
            plt.ylabel("Q_E_product")
            plt.xlabel("mean")
        else: 
            #plt.xscale('log')
            plt.xlabel("mean")
            plt.ylabel("Quality")
#            plt.ylabel(yName)
#            plt.xlabel(xName)
    
    #here
    #----comment if not proving th s4 pont
    symbolsToChooseFrom = ['*', 'x', "o", "+","^", '1', '2', "3"] 
    color =['g', 'y', 'r', 'm']
    
    lOf_run_input_list = input_list.lOf_run_input_list
    number_of_inputs_used = len(lOf_run_input_list)
    input_results = map(list, [[]]*number_of_inputs_used) 
    base_dir = "/home/local/bulkhead/behzad/usr/local/apx_tool_chain/inputPics/"
    counter = 0
    energy_list_to_be_drawn = []
    quality_list_to_be_drawn = []
    std_list_to_be_drawn = []
    image_list_to_be_drawn = [] 
    z_vals = [] 
    

    for val in valueList:
        # bucket the (quality, energy, input_number, setUp) tuples per input
        input_results = map(list, [[]]*number_of_inputs_used) 
        zipped = zip(*val[:-1])  
        for el in zipped:
            input_results[el[2]].append(el)
        for index,res in enumerate(input_results):
            """ 
            if (counter > 50 ):
                break
            """ 
            print counter 
            if len(res) > 0:
                image_addr =  base_dir+lOf_run_input_list[index][0] + ".ppm"
                mR, mG, mB, stdR, stdG, stdB = cluster_images.calc_image_mean_std(image_addr)
                # skip inputs whose (integer) mean was already plotted
                if (int(np.mean([mR,mG,mB]))) in z_vals:
                    continue
                el = map(lambda x: list(x), zip(*res))
                quality_values_shifted = map(lambda x: x+1, el[0]) 
                #--here 
                #---un comment the next line whenever you want to provide the resuls t professor, 
                

                #--- sort based on the quality
                Q = quality_values_shifted
                E = el[1]
#                Q_index_sorted = sorted(enumerate(Q), key=lambda x: x[1])
#                index_of_Q_sorted = map(lambda y: y[0], Q_index_sorted)
                
                E_index_sorted = sorted(enumerate(E), key=lambda x: x[1])
                index_of_E_sorted = map(lambda y: y[0], E_index_sorted)
                
#                
#                Q_sorted = [Q[i] for i in index_of_Q_sorted]
#                E_sorted = [E[i] for i in index_of_Q_sorted]

                Q_sorted = [Q[i] for i in index_of_E_sorted]
                E_sorted = [E[i] for i in index_of_E_sorted]

                quality_list_to_be_drawn.append(Q_sorted)
                energy_list_to_be_drawn.append(E_sorted)
                
#                std_list_to_be_drawn.append([int(np.mean([stdR,stdG,stdB]))]*len(E_sorted))
#                z_vals.append( int(np.mean([stdR,stdG,stdB])))

                std_list_to_be_drawn.append([int(np.mean([mR,mG,mB]))]*len(E_sorted))
                image_list_to_be_drawn.append([lOf_run_input_list[index][0]]*len(E_sorted))

                z_vals.append( int(np.mean([mR,mG,mB])))

                counter +=1
        
        reminder(True,"the following lines which creates a new image every len(symbolsToChooseFrom) should be commented if we use any flag but various_inputs")
        
        
        #--sorting the data. This is necessary for wire frame 
        zvals_index_sorted = sorted(enumerate(z_vals), key=lambda x: x[1])
        index_of_zvals_sorted = map(lambda y: y[0], zvals_index_sorted)
        quality_list_sorted_based_on_z = [quality_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        std_list_sorted_based_on_z = [std_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        energy_list_sorted_based_on_z = [energy_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        
        image_list_sorted_based_on_z = [image_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        
        #--- generate a spectrum of colors  
        #colors = ['b', 'g'] 
        n_energy_levels = 4
        #colors = gen_color_spec.gen_color(len(quality_list_sorted_based_on_z[0]), 'seismic') 
        colors = gen_color_spec.gen_color(n_energy_levels, 'seismic') 
        
                     
        print "here is the list of (images in the z_order, quality in z order, energy in z order)" 
        print zip([i[0] for i in image_list_sorted_based_on_z], [i[0] for i in quality_list_sorted_based_on_z], [i[0] for i in  std_list_sorted_based_on_z])
        print zip([i[0] for i in image_list_sorted_based_on_z], [i[1] for i in quality_list_sorted_based_on_z], [i[0] for i in  std_list_sorted_based_on_z])

        for x in range(len(energy_list_sorted_based_on_z[0][:n_energy_levels])):
        #for x in range(len(quality_list_sorted_based_on_z)):
            #my_label =  'mean:' + str(int(std_list_sorted_based_on_z[x][0]))
            my_label =  'En:' + str(float(energy_list_sorted_based_on_z[0][x]))
            if (graph_dim == "3d"): 
                """ the following is for plotting a wire_frame or surface plot
        surf = ax.plot_surface(np.asarray(energy_list_sorted_based_on_z), np.asarray(quality_list_sorted_based_on_z), np.asarray(std_list_sorted_based_on_z), rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
        fig.colorbar(surf, shrink=0.5, aspect=5)
        #ax.plot_wireframe(np.asarray(quality_list_sorted_based_on_z), np.asarray(std_list_sorted_based_on_z), np.asarray(energy_list_sorted_based_on_z))
        """ 
                ax.scatter(quality_list_sorted_based_on_z[x], std_list_sorted_based_on_z[x] , energy_list_sorted_based_on_z[x], c=colors[x], marker = symbolsToChooseFrom[x%len(symbolsToChooseFrom)], depthshade=False)
            else:
                #my_label +=  lOf_run_input_list[index][0] 
                #--- note: get rid of linestyle='None' if you want a line through the graph 
                #line_style = 'None'
                line_style = '-'
                if (graph_type == "Q_E_product") :
                    Q_E_list = [a*b for a,b in zip(quality_list_sorted_based_on_z[x],energy_list_sorted_based_on_z[x])]
                    ax.plot(std_list_sorted_based_on_z[x], Q_E_list, marker = symbolsToChooseFrom[x%len(symbolsToChooseFrom)], c= colors[x], label=my_label, linestyle=line_style)
                else:
                    #ax.plot(quality_list_sorted_based_on_z[x], energy_list_sorted_based_on_z[x], marker = symbolsToChooseFrom[x%len(symbolsToChooseFrom)], c= colors[x], label=my_label, linestyle=line_style)
                    quality_as_w_diff_mean = map(lambda y: y[x], quality_list_sorted_based_on_z)


                    #--- if no quality value under 150, it is possibly the acc\
                    #    setUP. The accurate set up make seeing other setUPs difficult
                    """ 
                    found_one = False 
                    for el in quality_as_w_diff_mean:
                        if el < 150:
                            found_one = True
                            break
                    if not(found_one):
                        continue
                    """

                    l_mean = map(lambda y: y[x], std_list_sorted_based_on_z)
                    
                    ax.plot(l_mean, quality_as_w_diff_mean, marker = symbolsToChooseFrom[x%len(symbolsToChooseFrom)], c= colors[x], label=my_label)
                            #, linestyle=line_style)
                if (n_graphs  == "multiple"):
                    # flush the current figure and start a fresh one per curve
                    finish_up_making_graph(ax, name, graph_title, benchmark_name, name_counter) 
                    fig = plt.figure(figsize=plt.figaspect(0.5)) 
                    #--- sanity check 
                    if (graph_dim == "3d"): 
                        ax = fig.gca(projection='3d')
                        #ax = fig.add_subplot(111, projection='3d') 
                        ax.set_xlabel('1/Q')
                        ax.set_ylabel('mean')
                        ax.set_zlabel('Energy')
                    else: 
                        fig, ax = plt.subplots()
                        if (graph_type == "Q_E_product"):
                            plt.ylabel("Q_E_product")
                            plt.xlabel("mean")
                        else: 
                            #plt.xscale('log')
                            plt.xlabel("mean")
                            plt.ylabel("Quality")
#                            plt.ylabel(yName)
#                            plt.xlabel(xName)

                    
                    name_counter += 1
        
        if (n_graphs  == "one"):
            finish_up_making_graph(ax, name, graph_title, benchmark_name,  0) 
示例#3
0
def sort_values(valueList):
    """Bucket (quality, energy, setUp) tuples per input, sort each bucket by
    increasing energy, and return the buckets ordered by a synthetic z value.

    Returns a 3-tuple (quality_lists, std_lists, energy_lists), or
    ([], [], []) when valueList is empty.

    NOTE(review): the return statement sits inside the ``for val`` loop, so
    only the first element of valueList is ever processed -- confirm this is
    intentional.

    Python 2 semantics assumed (map/zip return lists).
    """
    lOf_run_input_list = input_list.lOf_run_input_list
    number_of_inputs_used = len(lOf_run_input_list)
    input_results = map(list, [[]]*number_of_inputs_used) 
    base_dir = "/home/local/bulkhead/behzad/usr/local/apx_tool_chain/inputPics/"
    counter = 0
    energy_list_to_be_drawn = []
    setup_list_to_be_drawn = [] 
    quality_list_to_be_drawn = []
    std_list_to_be_drawn = []
    image_list_to_be_drawn = [] 
    z_vals = [] 
    
    # mR/mG/mB are plain counters here: the real per-image mean/std
    # computation is commented out below to support non-image benchmarks,
    # so each processed bucket just gets the next integer as its z value
    mR =0 
    mG =0
    mB =0



    for val in valueList:
        # bucket the zipped tuples by their input index (el[2])
        input_results = map(list, [[]]*number_of_inputs_used) 
        zipped = zip(*val[:-1])  
        for el in zipped:
            input_results[el[2]].append(el)
        for index,res in enumerate(input_results):
            """ 
            if (counter > 50 ):
                break
            """ 
            print counter 
            if len(res) > 0:
                image_addr =  base_dir+lOf_run_input_list[index][0] + ".ppm"
                # the following line is commented to incorperate applications
                # that are not images, b/c the following line is only applicable
                # for images
                #mR, mG, mB, stdR, stdG, stdB = cluster_images.calc_image_mean_std(image_addr)
                mR +=1 
                mG +=1
                mB +=1
                stdB = 0
                stdR = 0
                stdG = 0
                
                # skip buckets whose z value was already recorded
                if (int(np.mean([mR,mG,mB]))) in z_vals:
                    continue
                el = map(lambda x: list(x), zip(*res))
                quality_values_shifted = map(lambda x: x+1, el[0]) 
                
                print "mean of image : " + (lOf_run_input_list[index][0]) + " is: " +  str(np.mean([mR,mG,mB]))
                #--- sort based on the quality
                Q = quality_values_shifted
                E = el[1]
                SetUps =  el[2]
                # sort this bucket by increasing energy and reorder Q/SetUps
                # with the same permutation
                E_index_sorted = sorted(enumerate(E), key=lambda x: x[1])
                index_of_E_sorted = map(lambda y: y[0], E_index_sorted)
                Q_sorted = [Q[i] for i in index_of_E_sorted]
                E_sorted = [E[i] for i in index_of_E_sorted]
                SetUp_sorted = [SetUps[i] for i in index_of_E_sorted]
                quality_list_to_be_drawn.append(Q_sorted)
                energy_list_to_be_drawn.append(E_sorted)
                setup_list_to_be_drawn.append(SetUp_sorted)
                std_list_to_be_drawn.append([int(np.mean([mR,mG,mB]))]*len(E_sorted))
                image_list_to_be_drawn.append([lOf_run_input_list[index][0]]*len(E_sorted))
                z_vals.append( int(np.mean([mR,mG,mB])))
                counter +=1
        
        reminder(True,"the following lines which creates a new image every len(symbolsToChooseFrom) should be commented if we use any flag but various_inputs")
        
        #--sorting the data. This is necessary for wire frame 
        zvals_index_sorted = sorted(enumerate(z_vals), key=lambda x: x[1])
        index_of_zvals_sorted = map(lambda y: y[0], zvals_index_sorted)
        quality_list_sorted_based_on_z = [quality_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        std_list_sorted_based_on_z = [std_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        energy_list_sorted_based_on_z = [energy_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        
        image_list_sorted_based_on_z = [image_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        
        SetUp_list_sorted_based_on_z = [setup_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        print quality_list_sorted_based_on_z
        print std_list_sorted_based_on_z
        print energy_list_sorted_based_on_z
        
        # NOTE(review): returning here ends the loop after the first val
        return quality_list_sorted_based_on_z, std_list_sorted_based_on_z,energy_list_sorted_based_on_z            
    return [],[],[]        
示例#4
0
def generateGraph_for_all_alternative(valueList, valueList_2, xName, yName, benchmark_name, input_number, graph_title="pareto comparison for"):
    """Plot two value lists for a single input_number on one log-x figure.

    Points from valueList are drawn with color[0] and points from
    valueList_2 with color[1]; only tuples whose input index (el[2])
    equals input_number are plotted.  The legend is placed to the right
    of the axes and a title is set from graph_title/benchmark_name.

    Python 2 semantics assumed (map/zip return lists).
    """
    fig, ax = plt.subplots()
    #plt.yscale('log')
    plt.xscale('log')
    plt.ylabel(yName)
    plt.xlabel(xName)
    
    symbolsToChooseFrom = ['*', 'x', "o", "+","^"] #symbols to draw the plots with
    symbolsToChooseFrom += ['1', '2', "3"] #symbols to draw the plots with
    
    #color =['r','y', 'g', 'b', 'w']
    color =['b','g', 'r', 'c', 'm', 'y', 'k', 'w']
    # NOTE(review): hard-coded bucket count and input list -- must be kept
    # in sync with the benchmark configuration manually (see comments below)
    number_of_inputs_used = 25 
    
    #lOf_run_input_list = [["flowerpots_1"], ["aloe_1"], ["monopoly_1"], ["baby1_1"], ["plastic_1"], ["rocks1_1"]]
    lOf_run_input_list = [["room_1.bmp", "room_2.bmp"], ["papers_1.bmp", "papers_2.bmp"], ["odd_1.bmp", "odd_2.bmp"], ["baby1_1.bmp", "baby1_2.bmp"], ["plastic_1.bmp", "plastic_2.bmp"], ["rocks1_1.bmp", "rocks1_2.bmp"]]
    #= [[] for i in range(settings_obj.n_clusters)]
    
    input_results = map(list, [[]]*number_of_inputs_used) 
    counter = 0
    # --- first data set: drawn with color[0]
    for val in valueList:
        input_results = map(list, [[]]*number_of_inputs_used) 
        zipped = zip(*val[:-1])  
        for el in zipped:
            if (el[2] == input_number):
                input_results[el[2]].append(el)
        for index,res in enumerate(input_results):
            if len(res) > 0:
                el = map(lambda x: list(x), zip(*res))
                quality_values_shifted = map(lambda x: x+1, el[0]) 
                #ax.plot(quality_values_shifted, el[1], symbolsToChooseFrom[counter%len(symbolsToChooseFrom)]+color[counter/len(symbolsToChooseFrom)], label=val[3]) 
                
                
                reminder(True,"this label generation requires lOf_run_input_list which needs to be copied over manually")
                #---un comment(from here) the next line whenever you want to provide the resuls t professor, 
                #-- this requires manually updating lOf_run_input_list (by copying it from test_bench_mark_4.._)
                my_label =  lOf_run_input_list[index][0]
                # NOTE(review): the extra positional argument ``3`` does not
                # match plt.plot's (x, y, fmt) argument grouping -- compare
                # the equivalent call for valueList_2 below, which omits it;
                # confirm it is not a leftover
                ax.plot(quality_values_shifted, el[1], 3, symbolsToChooseFrom[input_number]+color[0], label=my_label)
                # to here
                
                #--uncomment if you want to use regular labels 
                #ax.plot(quality_values_shifted, el[1], symbolsToChooseFrom[counter%len(symbolsToChooseFrom)]+color[counter%len(symbolsToChooseFrom)], label=val[3])
                
                counter +=1
    #---comment up to here if not using proviing s4 point
    #--uncomment the following two lines to return back to without s4 inut consideration
#    for el in valueList: 
#        quality_values_shifted = map(lambda x: x+1, el[0]) 
#        ax.plot(quality_values_shifted, el[1], el[2], label=el[3])
#
    
    
    # --- second data set: same bucketing, drawn with color[1]
    input_results = map(list, [[]]*number_of_inputs_used) 
    counter = 0
    for val in valueList_2:
        input_results = map(list, [[]]*number_of_inputs_used) 
        zipped = zip(*val[:-1])  
        for el in zipped:
            if (el[2] == input_number):
                input_results[el[2]].append(el)
        for index,res in enumerate(input_results):
            if len(res) > 0:
                el = map(lambda x: list(x), zip(*res))
                quality_values_shifted = map(lambda x: x+1, el[0]) 
                #ax.plot(quality_values_shifted, el[1], symbolsToChooseFrom[counter%len(symbolsToChooseFrom)]+color[counter/len(symbolsToChooseFrom)], label=val[3]) 
                
                #--here 
                #---un comment the next line whenever you want to provide the resuls t professor, 
                #-- this requires manually updating lOf_run_input_list (by copying it from test_bench_mark_4.._)
                my_label =  lOf_run_input_list[index][0]
                ax.plot(quality_values_shifted, el[1], symbolsToChooseFrom[input_number]+color[1], label=my_label)
                #ax.plot(quality_values_shifted, el[1], symbolsToChooseFrom[counter%len(symbolsToChooseFrom)]+color[counter%len(symbolsToChooseFrom)], label=val[3])
                
                counter +=1

    # ---- moving the legend outside of the graph (look bellow for placing inside)
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.85, box.height])
    # Put a legend to the right of the current axis (note: prop changes the fontsize)
    ax.legend(loc='center left', bbox_to_anchor=(1, .9), prop={'size':8})
    plt.title(graph_title + str(benchmark_name) + " benchmark")
示例#5
0
def run_task_and_collect_points(settings_obj):
#if __name__ == "__main__":
    start = time.time() 
    
    #---------guide:::  promting ther user regarding the required input
#    print "the following inputs needs to be provided in the " + str(settings.userInputFile)
#    print "1.source folder address"
#    print "2.source file address"
#    print "3.generate Makefile (with YES or NO)"
#    print "4.CBuilderFolder"
#    print "5.AllInputScenariosInOneFile" #whether all the operand scenarios can be found in one file or no
#    print "6. AllInputFileOrDirectoryName" #the user should be providing a file name if AllInputScenariosInOneFile is true and a direcoty other   
#    print "7. finalResulstFileName"
#    
    symbolsToChooseFrom = ['*', 'x', "o", "+", "*", "-", "^", "1", "2", "3", "4"] #symbols to draw the plots with
    #settings_obj = settingsClass()
    inputObj = inputClass(settings_obj)
    inputObj.expandAddress()
    maxX = settings_obj.maxX
    maxY = settings_obj.maxY
    lOfAllPointsTried = []
    lOfPoints_out_of_heuristic = []  
    opIndexSelectedFile =settings_obj.opIndexSelectedFile
    open(opIndexSelectedFile, "w").close()
    CSrcFolderAddress = inputObj.CSrcFolderAddress
    lOfCSrcFileAddress = inputObj.lOfCSrcFileAddress 
    CBuildFolderName = inputObj.CBuildFolderName 
    generateMakeFile = inputObj.generateMakeFile
    rootFolder = inputObj.rootFolder 
    AllInputScenariosInOneFile = inputObj.AllInputScenariosInOneFile
    AllInputFileOrDirectoryName = inputObj.AllInputFileOrDirectoryName 
    finalResultFileName = inputObj.finalResultFileName
    PIK_all_points = inputObj.PIK_all_points
    PIK_pareto  = inputObj.PIK_pareto
    PIK_pareto_of_all = inputObj.PIK_pareto_of_all 
    PIK_UTC_file = inputObj.PIK_UTC_file
    input_for_s4_file = inputObj.input_for_s4_file
    bench_suit_name = inputObj.bench_suit_name; 
    
    #---------guide:::  checking the validity of the input and making necessary files
    #and folders
    rootResultFolderName = rootFolder + "/" + settings_obj.generatedTextFolderName
    rootResultFolderBackupName =  rootFolder + "/" + settings_obj.resultsBackups # is used to get a back up of the results generated in the previuos run of this program
    if not(os.path.isdir(rootResultFolderBackupName)):
        os.system("mkdir" + " " + rootResultFolderBackupName)
    os.system("rm -r " + rootResultFolderName)
    os.system("mkdir " + rootResultFolderName)
    executableName = "tool_exe" #src file to be analyzed
    CBuildFolder = rootFolder + "/" + CBuildFolderName
    #get the input to the executable 
    executableInputList = []
    if (settings_obj.runMode == "parallel"): 
        #the_lock = multiprocessing.Lock() 
        pool = multiprocessing.Pool() 


    
    # print "please provide the inputs to the executable. when done, type type" 
    # input = raw_input('provide the input: ')
    # while (input != "done"):
        # executableInputList.append(input)
        # input = raw_input('provide the next input: ')
    
   
    #checking whether the file (or directory) containging the operands(input) exist or no
    if (AllInputScenariosInOneFile): #if a file
        #print AllInputFileOrDirectoryName
        if not(os.path.isfile(AllInputFileOrDirectoryName)):
            print "All OperandsFile:" + AllInputFileOrDirectoryName + " does not exist"
            exit();
    else: #checking for the directory
        if not(os.path.isdir(AllInputFileOrDirectoryName)):
            print "All OperandsDir does not exist"
            exit();

    #---------guide:::  generate make file or no
    if not((generateMakeFile == "YES") or (generateMakeFile == "NO")): 
        #print generateMakeFile 
        print "generateMakeFile can only take YES or NO value (capital letters)"
        exit()

    #removing the result file
    os.system("rm " + rootResultFolderName + "/" + settings_obj.rawresultFileName)

    
    #---if make file needs to be re generated (generated) 
    if (generateMakeFile == "YES"): 
        currentDir = os.getcwd() #getting the current directory
        #CBuildFolder = "./../../" 
        os.chdir(rootFolder) #chaning the directory
        # os.system("cp CMakeLists_tool_chain.txt CMakeLists.txt") #restoring the correct CMakeLists.txt file
        os.chdir(currentDir) 
        #generate the makefile using CMAKE 
        print "**********************************************************************"
        print "******************************GENERATING MAKE FILE********************"
        print "**********************************************************************"
        currentDir = os.getcwd() #getting the current directory
        if not(os.path.isdir(CBuildFolder)):
            os.system("mkdir " + CBuildFolder); #making a new one
        os.chdir(CBuildFolder) #chaning the directory
        # os.system("export CC=clang++; export CXX=clang++") 
        os.environ["CC"] = "clag++";
        os.environ["CXX"] = "clag++";
        os.system("cmake ..");
        print "**********************************************************************"
        print "done generating the makeFile using CMake"
        print "**********************************************************************"
        
        os.chdir(currentDir) #chaning the directory
        #done generating the make file 

    
    #---------guide:::  removing the results associated with the previous runs
    AllOperandScenariosFullAddress = AllInputFileOrDirectoryName
    inputNumber = 0 
    #os.system("rm -r" + " " +  rootResultFolderName + "/" +settings.AllOperandsFolderName)
    #os.system("rm -r" + " " +  rootResultFolderName + "/" + settings.rawResultFolderName)
    os.system("mkdir" + " " + rootResultFolderName + "/" + settings_obj.rawResultFolderName)
    #---------guide:::  if the operands were all given in a file: separate them to different files
    #...................else: use the folder that they are in, as an input to the C source files
    #if all in one file 
    if (AllInputScenariosInOneFile):
        #check for error 
        if not(os.path.isfile(AllOperandScenariosFullAddress)):
            print AllOperandScenariosFullAddress + " does not exist"
            exit();

        #make a directory for all operand inputs 
        AllOperandsFolderName = rootResultFolderName + "/" + settings_obj.AllOperandsFolderName
        os.system("mkdir " + AllOperandsFolderName)
        #---------guide::: separates operands and put in a folder 
        with open(AllOperandScenariosFullAddress) as f:
            for line in f:
                if len(line.split())>0: 
                    fileToWriteName = AllOperandsFolderName + "/" + str(inputNumber) +".txt"
                    fileToWriteP = open(fileToWriteName ,"w");  
                    fileToWriteP.write(line)
                    fileToWriteP.close()
                    inputNumber +=1
    else: #this case is the case in which they are in a foler already ready
        if not(os.path.isdir(AllOperandScenariosFullAddress)):
            print "***********************ERRROR**************" 
            print "the folder that is told to contain the operands does not exist: " + AllOperandsFolderName
            exit();
        else: 
            AllOperandsFolderName = AllInputFileOrDirectoryName

    
    
    
                                                 #inputs. This means that we have multiple operand sets)
    #---------guide:::   parse the C source file to collect all the operands that can 
    #                        be approximatable
    lAllOpsInSrcFile = [] 
    for CSrcFileAddressItem in lOfCSrcFileAddress:
        lAllOpsInSrcFile += sourceFileParse(CSrcFileAddressItem, settings_obj)
    settings_obj.totalNumberOfOpCombinations = 1;
    energy = []
    error = []
    config = []
    inputFileNameList = []
    
    #---------guide:::  sampling operands
    inputNumber = 0 
    nameOfAllOperandFilesList = getNameOfFilesInAFolder(AllOperandsFolderName)
    #operandIndex = 0
    
    numberOfTriesList = [] 
    numberOfSuccessfulTriesList = []
    errorRequirementList = []
    errorDiffList =[] #contains the difference between the error request and the error recieved from simulated annealing ( in percentage)
     
    allPossibleScenariosForEachOperator, limitedListIndecies, ignoreListIndecies, accurateSetUp = generateAllPossibleScenariosForEachOperator(rootResultFolderName, lAllOpsInSrcFile, settings_obj)
    #---------guide:::  generate all possible apx setUps Possible (mainly used for full permutation design exploration, otherwise called exhustive search)
    IOAndProcessCharFileName = rootResultFolderName + "/" + settings_obj.IOAndProcessCharFileName
    IOAndProcessCharP = open(IOAndProcessCharFileName, "w")
    
    open(settings_obj.annealerProgressionOutputFileName, "w").close()
    open(rootResultFolderName +  "/" + settings_obj.annealerOutputFileName, "w").close()
    
    #---------guide::: go through operand files and sweep the apx space
    # lOfOperandSet = [] 
    timeBeforeFindingResults = datetime.datetime.now()
    lOfAccurateValues = []
    for inputNumber,operandSampleFileName in enumerate(nameOfAllOperandFilesList):
        countSoFar = 0 
        #clearly state where the new results associated with the new input starts 
        CSourceOutputForVariousSetUpFileName =  rootResultFolderName + "/" + settings_obj.rawResultFolderName + "/" + settings_obj.csourceOutputFileName + str(inputNumber) + ".txt" #where to collect C++ source results
        # ---- tail of the per-operand loop (loop header is above this chunk):
        #      reset the per-input accumulators and run the C source once with the
        #      accurate (non-approximate) setUp to collect baseline output values.
        # newOperand =  operandSet(get_operand_values(operandSampleFileName))
        
        accurateValues = []
        error.append([])
        energy.append( [])
        config.append( [])
        inputFileNameList.append([])
        mode = settings_obj.mode 
        operatorSampleFileFullAddress = rootResultFolderName + "/"+ settings_obj.operatorSampleFileName + str(0) + ".txt"
        
        
        #---------guide:::  getting accurate values associated with the CSource output
        #accurateSetUp,workingList = generateAccurateScenario(allPossibleScenariosForEachOperator,ignoreListIndecies )
        workingList = generateWorkingList(ignoreListIndecies, allPossibleScenariosForEachOperator )
        
        apxIndexSetUp = 0 #zero is associated with the accurate results (this is a contract that needs to be obeyed)
        # status, setUp = generateAPossibleApxScenarios(rootResultFolderName + "/" + settings.AllPossibleApxOpScenarios, allPossibleApxScenarioursList , apxIndexSetUp, mode) 
        #---------guide:::  erasing the previous content of the file
        # NOTE(review): open(...).close() is used only to truncate the file, so
        # CSourceOutputForVariousSetUpP is always None afterwards.
        CSourceOutputForVariousSetUpP = open(CSourceOutputForVariousSetUpFileName, "w").close()
        #---------guide:::  modify the operator sample file
        modifyOperatorSampleFile(operatorSampleFileFullAddress, accurateSetUp)
        
        
        sys.stdout.flush()
        #---------guide:::  run the CSrouce file with the new setUp(operators)
        if not(settings_obj.errorTest): 
            print("\n........running to get accurate values\n"); 
            reminder(settings_obj.reminder_flag,"make sure to change make_run to make_run_compile if you change the content of any of the cSRC files")
            make_run(executableName, executableInputList, rootResultFolderName, CSourceOutputForVariousSetUpFileName, CBuildFolder, operandSampleFileName, bench_suit_name, 0, settings_obj) #first make_run
            accurateValues = extractCurrentValuesForOneInput(CSourceOutputForVariousSetUpFileName, inputObj, settings_obj)
        else:
            # NOTE(review): errorTest short-circuits the real run and reads a canned
            # output file from a machine-specific absolute path — this only works on
            # the original author's machine; confirm before relying on errorTest mode.
            newPath = "/home/local/bulkhead/behzad/usr/local/apx_tool_chain/src/python_files/scratch/acc.txt"
            accurateValues = extractCurrentValuesForOneInput(newPath, inputObj, settings_obj)
        
        assert(accurateValues != None)
        lOfAccurateValues.append(accurateValues)
        # print lOfAccurateValues
        # lOfOperandSet.append(newOperand)
        #---------guide:::  make a apx set up and get values associated with it
        
    # ---- prepare the search bookkeeping lists, then (optionally) load the
    #      previously-found ideal setUps from the pickled UTC file, reduce them to
    #      their pareto frontier, and derive the generation count (NGEN) to use.
    lOfPoints = []  
    allPointsTried = []
    unique_point_list = []
    output_list = []
    previous_ideal_setUp_list = []
    #previous_ideal_setUp_output_list = []
    previous_ideal_setUp_list_reduced = []
    # ---- read previous ideal setUps and populate the ideal_pts(only contain the # of apx bits)
    print "right here" 
    sys.stdout.flush()
    

    if(settings_obj.get_UTC_optimal_configs or settings_obj.adjust_NGEN): 
        ideal_pts = []
        # read pickled points until EOF; pickle signals end-of-stream with EOFError
        with open(PIK_UTC_file , "rb") as f:
            while True: 
                try: 
                    point = pickle.load(f)
                    ideal_pts.append(point) 
                    # listOfPeople.append(copy.copy(person))# 
                except Exception as ex:
                    # NOTE(review): EOFError is detected by class-name string rather
                    # than `except EOFError` — any other exception is printed and
                    # still swallowed by the break below.
                    if not (type(ex).__name__ == "EOFError"):
                        print type(ex).__name__ 
                        print ex.args
                        print "something went wrong"
                    break
        
         
        for pt in ideal_pts:
            previous_ideal_setUp_list.append(pt.get_raw_setUp())
            #previous_ideal_setUp_output_list.append(pt.get_raw_values())
    
        #from here
        # keep only the pareto-optimal UTC points; this discards and rebuilds
        # previous_ideal_setUp_list from the frontier alone
        lOf_UTC_PF = pareto_frontier(ideal_pts, maxX, maxY, settings_obj) 
        previous_ideal_setUp_list = []  
        for el in lOf_UTC_PF: 
            previous_ideal_setUp_list.append(el.get_raw_setUp())
        # persist the frontier (file name "lOf_UTC_PF" is relative to the cwd)
        with open("lOf_UTC_PF", "wb") as f:
            for el in lOf_UTC_PF:     
                pickle.dump(copy.deepcopy(el), f)

        #to here
        
        # ---- sanity check
        assert not(settings_obj.get_UTC_optimal_configs and len(previous_ideal_setUp_list)== 0)
    
        # ---- more sanity check: every ideal setUp must be the same length as the accurate one
        for el in previous_ideal_setUp_list: 
            print "accurate and el" 
            print accurateSetUp
            print el
            assert (len(accurateSetUp) == len(el))
   
        # ---- reduce the ideal_setUp_list to reduce computation time
        #previous_ideal_setUp_list_reduced = reduce_ideal_setUp_list(previous_ideal_setUp_list, previous_ideal_setUp_output_list)
        previous_ideal_setUp_list_reduced = reduce_ideal_setUp_list(previous_ideal_setUp_list)
    
    
    # total generations: either scaled by the number of ideal setUps or the flat setting
    if (settings_obj.adjust_NGEN):
        NGEN_to_use = settings_obj.NGEN*len(previous_ideal_setUp_list_reduced)
    else:
        NGEN_to_use = settings_obj.NGEN
    
    if not(settings_obj.get_UTC_optimal_configs):
        # fall back to a single "ideal" setUp derived from the accurate one;
        # x[2] is presumably the per-operator apx-bit field — TODO confirm against
        # the setUp tuple layout defined elsewhere in the project.
        previous_ideal_setUp_list_reduced = [(map(lambda x:x[2], accurateSetUp))]
        previous_ideal_setUp_output_list = []
    # ---- dispatch on the configured search mode: exhaustive enumeration
    #      ("allPermutations") or a heuristic search (SPEA2 genetic algorithm or
    #      particle swarm), both driven through specializedEval which mutates the
    #      shared bookkeeping lists (allPointsTried, unique_point_list, output_list).
    print "NGEN_to_use" + str(NGEN_to_use) 
    if (mode == "allPermutations"): 
        lengthSoFar = 1 
        
        """ ---- guide: making sure that it is possible to use permuation
                 if the number of permutations are too big to be held in memoery
                 we error out """
        for opOptions in allPossibleScenariosForEachOperator:
            print opOptions
            lengthSoFar *= len(opOptions)
            assert(lengthSoFar < settings_obj.veryHugeNumber), """numbr of permuations:""" + str(lengthSoFar)+""" is too big. 
            it is bigger than:""" + str(settings_obj.veryHugeNumber)

        allPossibleApxScenarioursList = generateAllPossibleApxScenariousList(allPossibleScenariosForEachOperator)
        # NOTE(review): the loop variable `config` shadows the `config` list that
        # was appended to earlier in this function (config.append([]) above).
        for previous_ideal_setUp in previous_ideal_setUp_list_reduced:
            for index,config in enumerate(allPossibleApxScenarioursList):
                individual = map(lambda x: x[2], config) 
                specializedEval(False, 1, accurateSetUp, ignoreListIndecies, accurateSetUp, inputObj,nameOfAllOperandFilesList, rootResultFolderName, executableName,
                executableInputList, CBuildFolder, operandSampleFileName,lOfAccurateValues, allPointsTried, True, unique_point_list, output_list, previous_ideal_setUp, 0, settings_obj, individual)
            lOfPoints_out_of_heuristic = allPointsTried
    elif (mode == "genetic_algorithm" or mode == "swarm_particle"):
        input_Point_list = [] 
        if (settings_obj.runMode == "parallel"): 
            the_lock = multiprocessing.Lock() 
        
        allConfs = [] #first generation
        numberOfIndividualsToStartWith = settings_obj.numberOfIndividualsToStartWith
        tempAcc = accurateSetUp
        opIndexSelectedFile  = settings_obj.opIndexSelectedFile
        limitedList = [] 
        limitedListValues = getLimitedList(opIndexSelectedFile)
        allConfs = generateInitialPopulation(tempAcc, numberOfIndividualsToStartWith, inputObj,ignoreListIndecies, limitedListValues, limitedListIndecies, settings_obj)
        possibly_worse_case_setup = generate_possibly_worse_case_setup(tempAcc, settings_obj)
        population = []

        #---geting the possibly_worse_case_result info
        # evaluate the (presumed) worst-case setUp once; its quality is later used
        # as a normalization reference by the heuristic runs
        possibly_worse_case_setup_individual = map (lambda x: x[2],  possibly_worse_case_setup[0])
        print("\n.......running to get possibly_worse_case_result\n"); 
        possibly_worse_case_result = specializedEval(False, 1, accurateSetUp, [], accurateSetUp, inputObj,nameOfAllOperandFilesList, rootResultFolderName, executableName,
                executableInputList, CBuildFolder, operandSampleFileName,lOfAccurateValues, allPointsTried,True, unique_point_list, output_list,[], 0, settings_obj,
                possibly_worse_case_setup_individual)
        possibly_worse_case_result_energy = possibly_worse_case_result[0]   
        possibly_worse_case_result_quality = possibly_worse_case_result[1]   
        
        if (settings_obj.benchmark_name == "sift"): 
            # benchmark-specific override: sift skips worst-case normalization
            #print "here is the possibly_worse_case quality " + str(possibly_worse_case_result_quality)
            possibly_worse_case_result_energy = 1
            possibly_worse_case_result_quality = 1
        #----printing the possibly_worse_case_result info and exiting
        if (settings_obj.DEBUG): 
            print "worse_case energy: " + str(possibly_worse_case_result[0])
            print "worse_case quality: " + str(possibly_worse_case_result[1])
 
        
        
        # one heuristic run per (reduced) previous ideal setUp; each run first
        # evaluates the accurate version of the succeeding stage for that setUp
        print "total Number of itrations: " + str(len(previous_ideal_setUp_list_reduced))
        for iteration,previous_ideal_setUp in enumerate(previous_ideal_setUp_list_reduced):
            print  "iteration number: " + str(iteration)
            print "\n...... running the  accurate version of the succeeding stage with " + str(iteration) + "th"  + "iteration"
            UTC_acc = specializedEval(False, 1, accurateSetUp, [], accurateSetUp, inputObj,nameOfAllOperandFilesList, rootResultFolderName, executableName,
                    executableInputList, CBuildFolder, operandSampleFileName,lOfAccurateValues, allPointsTried,False, unique_point_list, output_list,[], 0, settings_obj,
                    previous_ideal_setUp)
            previous_ideal_setUp_energy = UTC_acc[0]   
            previous_ideal_setUp_quality = UTC_acc[1]   
            
            # record the accurate reference point for this iteration (dumped to
            # input_for_s4_file after the loop)
            input_Point = points()
            input_Point.set_energy(UTC_acc[0])
            input_Point.set_quality(UTC_acc[1])
            input_Point.set_setUp(previous_ideal_setUp)
            #input_Point.set_raw_setUp(new_individual_raw_setUp)
            input_Point.set_input_number(iteration) 
            input_Point.set_setUp_number(0)
            input_Point_list.append(input_Point) 
             


            print "energy associated with acc version of idealSet of iteration number" + str(iteration) + ": " + str(previous_ideal_setUp_energy)
            print "quality associated with acc version of idealSet of iteration number" + str(iteration) + ": " + str(previous_ideal_setUp_quality)
             
            if (mode == "genetic_algorithm"): 
                population = run_spea2(population,
                            CSourceOutputForVariousSetUpFileName, operatorSampleFileFullAddress,
                            executableName, executableInputList, rootResultFolderName, CBuildFolder,

                            operandSampleFileName, lOfAccurateValues, nameOfAllOperandFilesList, inputObj, ignoreListIndecies, possibly_worse_case_result_quality, accurateSetUp, allConfs, NGEN_to_use,
                            settings_obj.MU, settings_obj.LAMBDA, unique_point_list, output_list,allPointsTried,  previous_ideal_setUp, iteration, settings_obj)
            
            elif (mode == "swarm_particle"):
                population = run_SP(population, NGEN_to_use,
                            CSourceOutputForVariousSetUpFileName, operatorSampleFileFullAddress,
                            executableName, executableInputList, rootResultFolderName, CBuildFolder,
                            operandSampleFileName, lOfAccurateValues, nameOfAllOperandFilesList, inputObj, ignoreListIndecies, possibly_worse_case_result_quality, accurateSetUp, allConfs,
                            unique_point_list, output_list, allPointsTried, previous_ideal_setUp, settings_obj)
            else:
                # unreachable given the enclosing elif condition, kept as a guard
                print "this mode" + str(mode) +" not defined"
                exit()

            #---some sanity check 
            #if (settings.get_UTC_optimal_configs): 
            assert (len(unique_point_list) > 0)
            assert(len(output_list) > 0)
            assert(len(allPointsTried) > 0)
            
            #--store all the points acquired by the heuristic in the list
            # fitness.values is (energy, quality) — index 0 energy, index 1 quality
            for individual in population:
                newPoint = points()
                if(eval(inputObj.dealingWithPics)): 
                    newPoint.set_PSNR(individual.fitness.values[1])
                else:
                    newPoint.set_quality((individual.fitness.values[1])) 
                newPoint.set_energy(individual.fitness.values[0])
                newPoint.set_setUp(modifyMold(accurateSetUp, individual))
                individual_converted_to_list = map(lambda x: x, individual)
                newPoint.set_raw_setUp(individual_converted_to_list)
                newPoint.set_setUp_number(0)
                newPoint.set_input_number(iteration) 
                #print "here is newPoint" + str(newPoint.get_input_number()) 
                # NOTE(review): lOfPoints_out_of_heuristic is only bound in the
                # allPermutations branch above — confirm it is defined before this
                # append when running genetic/swarm mode.
                lOfPoints_out_of_heuristic.append(newPoint)
        
        # NOTE(review): pickling to a text-mode ("w") file; works on POSIX with
        # protocol 0 but "wb" would be the safe choice.
        with open(input_for_s4_file, "w") as f:
            for el in input_Point_list:
                pickle.dump(el, f)   
        # lOfOperandSet[operandIndex].set_lOfPoints(copy.deepcopy(lOfPoints))
        #operandIndex += 1
    
    
    # --- update output_list, unique_point_list
    #if (settings.get_UTC_optimal_configs): 
#    for el in lOfPoints_out_of_heuristic: 
#        print "999" 
#        el.get_raw_values() 
#        update_unique(el, output_list, unique_point_list)
#
    
    #-- dumping the the points associated w/ new input to a file
    if (settings_obj.write_UTC_optimal_configs): 
        with open(PIK_UTC_file, "wb") as f:
            reminder(settings_obj.reminder_flag,"if UTC is used to extract rawValues and AccurateValues associated with points an error would happen b/c we empty the two list out. We did this to avoid the massive size of UTC otherwise")
            for el in unique_point_list:
                el.lOfAccurateValues = [] #I empty out the list b/c the size of the file would be massive
                                          #other wise
                el.lOfRawValues = [] 
                pickle.dump(copy.deepcopy(el), f)

    for individual in allPointsTried:
        newPoint = points()
        # newPoint.set_SNR(individual.fitness.values[1])
        if(eval(inputObj.dealingWithPics)): 
            newPoint.set_PSNR(individual.get_quality())
        else:
            newPoint.set_quality(individual.get_quality()) #normalizing the quality to the possibly_worse_case
        newPoint.set_energy(individual.get_energy())
        newPoint.set_setUp(list(individual.get_setUp()))
        newPoint.set_raw_setUp(individual.get_raw_setUp())
        newPoint.set_setUp_number(0)
        newPoint.set_input_number(individual.get_input_number()) 
        lOfAllPointsTried.append(newPoint)
    
    #---uncomment to compare prob_heur_points to genetic algo
    """
    #----note: lOfAllPointsTried need to be populated
    prob_heur_points = probabilistic_heuristic(pareto_frontier(lOfPoints_out_of_heuristic, maxX, maxY), CSourceOutputForVariousSetUpFileName, operatorSampleFileFullAddress,
                        executableName, executableInputList, rootResultFolderName, CBuildFolder,
                        operandSampleFileName, lOfAccurateValues, nameOfAllOperandFilesList, inputObj, ignoreListIndecies, possibly_worse_case_result_quality, accurateSetUp, allConfs,
                        lOfAllPointsTried)

    lOfAllPointsTried_cleaned_of_doubles = clean_doubles(lOfAllPointsTried)
    all_pareto_fronts_list = all_pareto_frontiers(lOfAllPointsTried_cleaned_of_doubles, maxX, maxY)
    
    #---- preparing the initial population for the next genetic run 
    new_allConfs =[]
    for el in lOfPoints_out_of_heuristic:
        new_allConfs.append(el.get_setUp())

    new_NGEN = int((len(lOfPoints_out_of_heuristic)*settings.number_of_probabilistic_trial)/settings.MU) + 1
    print "new_NGEN is " +str(new_NGEN)

    _, population = run_spea2(population, CSourceOutputForVariousSetUpFileName, operatorSampleFileFullAddress, executableName, executableInputList, rootResultFolderName, CBuildFolder,
            operandSampleFileName, lOfAccurateValues, nameOfAllOperandFilesList, inputObj, ignoreListIndecies, possibly_worse_case_result_quality, accurateSetUp, new_allConfs, new_NGEN,
            settings.MU, settings.LAMBDA, unique_point_list, output_list)


    lOfPoints_out_of_heuristic_2nd_round = []
    for individual in population:
        newPoint = points()
        # newPoint.set_SNR(individual.fitness.values[1])
        if(eval(inputObj.dealingWithPics)): 
            newPoint.set_PSNR(individual.fitness.values[1])
        else:
            newPoint.set_quality((individual.fitness.values[1])) 
        newPoint.set_energy(individual.fitness.values[0])
        newPoint.set_setUp(modifyMold(accurateSetUp, individual))
        individual_converted_to_list = map(lambda x: x, individual)
        newPoint.set_raw_setUp(individual_converted_to_list)
        newPoint.set_setUp_number(0)
        #lOfPoints.append(newPoint)
        lOfPoints_out_of_heuristic_2nd_round.append(newPoint)

#    if (tests.test_extracting_all_pareto_frontiers):
#        points_to_graph = [] 
#        for index, lOfPoints in enumerate(all_pareto_fronts_list):
#            get_quality_energy_values_directly(lOfPoints,symbolsToChooseFrom[index],  points_to_graph, symbolsToChooseFrom[index])
#            if (index >=2):
#                break
#        generateGraph_for_all(points_to_graph, "1/quality", "energy", "blah") 
#        
#        pylab.savefig("results.png") #saving the figure generated by generateGraph
#        print all_pareto_fronts_list
#        sys.exit()
    
    """
    """ 
    while(True): 
        number = raw_input('provide the num: ')
        total = 0 
        for el in my_histogram.keys():
            if my_histogram[el] > int(number):
                total +=1;
        print "\n" + str(total) + "number of moves"    
    """
    #---------guide:::  getting the end time
    timeAfterFindingResults = datetime.datetime.now()
    totalTime = findTotalTime(timeBeforeFindingResults, timeAfterFindingResults) 
    print "total Time: " + str(totalTime)
    
    
    #---------guide::: populating the IOAndProcessCharP 
    IOAndProcessCharP.write("the mode is: " + mode + "\n")
    IOAndProcessCharP.write("number of operators in the CSource file: " + str(len(lAllOpsInSrcFile)) + "\n")
    IOAndProcessCharP.write("number of Operands: " + str(len(nameOfAllOperandFilesList)) +"\n")
    IOAndProcessCharP.write("numberOfTriesList: " + str(numberOfTriesList) + "\n")
    IOAndProcessCharP.write("numberOfSuccessfulTriesList: " + str(numberOfSuccessfulTriesList) + "\n")
    
    #---------guide:::  find the pareto points and store them in resultTuple
    # resultTuple = [] #this is a list of pareto Triplets(setup, error, energy) associated with each 
                       #one of the inputs 
    #setting up the resultTupleList with the right length 
    # for i in range(0, len(error),1):
        # resultTuple.append([])
   
    # ---- pickle and write the results
    if not(mode == "only_read_values"):
        with open(PIK_pareto, "wb") as f:
            points_to_dump = pareto_frontier(lOfPoints_out_of_heuristic, maxX, maxY, settings_obj)
            for point in points_to_dump:
                pickle.dump(copy.deepcopy(point), f)
        with open(PIK_all_points, "wb") as f:
            points_to_dump = lOfAllPointsTried
            #points_to_dump = pareto_frontier(prob_heur_points[:], maxX, maxY)
            for point in points_to_dump:
                pickle.dump(copy.deepcopy(point), f)
        with open(PIK_pareto_of_all, "wb") as f:
            #points_to_dump = pareto_frontier(lOfPoints_out_of_heuristic_2nd_round, maxX, maxY) #righthere
            points_to_dump = pareto_frontier(lOfAllPointsTried, maxX, maxY, settings_obj) #righthere
            for point in points_to_dump:
                pickle.dump(copy.deepcopy(point), f)




    # ---- reading the values back
    # In "only_read_values" mode no search was run; repopulate the in-memory
    # lists from the three pickle files instead. Each loop reads records until
    # pickle raises EOFError (detected by class-name string, like above).
    if (mode == "only_read_values"):
        with open(PIK_pareto, "rb") as f:
            # pickle.load(f)
            while True: 
                try: 
                    point = pickle.load(f)
                    print point 
                    lOfPoints_out_of_heuristic.append(point) 
                    # listOfPeople.append(copy.copy(person))# 
                except Exception as ex:
                    if not (type(ex).__name__ == "EOFError"):
                        print type(ex).__name__ 
                        print ex.args
                        print "something went wrong"
                    break
        with open(PIK_all_points, "rb") as f:
            # pickle.load(f)
            while True: 
                try: 
                    point = pickle.load(f)
                    lOfAllPointsTried.append(point) 
                    # listOfPeople.append(copy.copy(person))# 
                except Exception as ex:
                    if not (type(ex).__name__ == "EOFError"):
                        print type(ex).__name__ 
                        print ex.args
                        print "something went wrong"
                    break
        # NOTE(review): the pareto-of-all points are appended into the SAME
        # lOfAllPointsTried list as the all-points file above, so entries on the
        # frontier appear twice — confirm this duplication is intended.
        with open(PIK_pareto_of_all, "rb") as f:
            # pickle.load(f)
            while True: 
                try: 
                    point = pickle.load(f)
                    lOfAllPointsTried.append(point) 
                    # listOfPeople.append(copy.copy(person))# 
                except Exception as ex:
                    if not (type(ex).__name__ == "EOFError"):
                        print type(ex).__name__ 
                        print ex.args
                        print "something went wrong"
                    break
   

   
   #------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 
   #------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 
    # ---- find the pareto curve of lOfPoints
    # delimeter marks the [first, last+1) operator-index range covered by this run
    delimeter = [workingList[0], workingList[-1] +1] 
    """ 
    if settings.method == "localParetoPieceParetoResult":
        resultPoints = pareto_frontier(lOfPoints, maxX, maxY)
        delimeter = [workingList[0], workingList[-1] +1] 
        pointSet  = point_set(resultPoints, "pareto", maxX, maxY);
        pointSet.set_delimeter(delimeter)
        with open(settings.lOfParetoSetFileName, "a") as f:
            pickle.dump(copy.deepcopy(pointSet), f)

    elif settings.method == "uniqueNoiseParetoResult":
        lOfUniqueNoisePoints = extract_unique_noise(lOfPoints, inputObj.dealingWithPics)
        
        resultPoints = lOfUniqueNoisePoints
        opIndexSelectedFile = open(settings.opIndexSelectedFile, "w");
        for myPoints in lOfUniqueNoisePoints:
            #fix req: we shouldn't be writing the whole set up but only part of it
            opIndexSelectedFile.write(str(myPoints.get_setUp())) 
        
        #fix req: we don't need a paretoSet, instead a point set as a parent,
        #and then later the child is the type of the set such as pareto set
        pointSet= point_set(resultPoints, "unique")
        #fix req: delmiter should be defined properly, change the numbers
        pointSet.set_delimeter(delimeter)
        with open(settings.lOfParetoSetFileName, "w") as f:
            pickle.dump(copy.deepcopy(pointSet), f)
    elif (settings.method == "allPoints"):
    """ 
    #resultPoints = lOfPoints_out_of_heuristic
    # wrap the pareto frontier of the heuristic points in a point_set and append
    # it to the pickled pareto-set file (skipped when only extracting UTC configs)
    pareto_points =  pareto_frontier(lOfPoints_out_of_heuristic, maxX, maxY, settings_obj)
    pointSet= point_set(pareto_points, "pareto", maxX, maxY)
    #fix req: delmiter should be defined properly, change the numbers
    pointSet.set_delimeter(delimeter)
    if not(settings_obj.get_UTC_optimal_configs): 
        with open(settings_obj.lOfParetoSetFileName, "a") as f:
            pickle.dump(copy.deepcopy(pointSet), f)
#    if (runMode == "parallel"): 
#        print str(multiprocessing.current_process()._identity)
#        print str(multiprocessing.current_process()._identity[0])  + "at the end" 
    # ---- drawing the pareto set
    symbolsCollected = [] #this list contains the symbols collected for every new input 
    symbolIndex = 0  
    # ---- generate the graph
#    if settings.runToolChainGenerateGraph: 
#        plotPareto =settings.runToolChainPlotPareto
#        #symbolsCollected = generate_snr_energy_graph(inputObj.dealingWithPics, resultPoints, plotPareto, symbolsToChooseFrom, lOfAccurateValues, symbolIndex, maxY, maxX) 
#        if(len(lOfAllPointsTried) > 0):
#            symbolsCollected = generate_snr_energy_graph(inputObj.dealingWithPics, lOfAllPointsTried, plotPareto, symbolsToChooseFrom, lOfAccurateValues, 1, maxY, maxX) 
#        symbolsCollected = generate_snr_energy_graph(inputObj.dealingWithPics, pareto_points, plotPareto, symbolsToChooseFrom, lOfAccurateValues, 3, maxY, maxX) 
    # symbolsCollected.append(symbolsToChooseFrom[symbolIndex]) 
    #generateGraph(map(lambda x: x.get_lOfError(), lOfParetoPoints), map(lambda x: x.get_energy(), lOfParetoPoints), "Noise", "Energy", symbolsToChooseFrom[i])
            
        
    
    # ---- collecting the result in a list (for later printing)
    # each entry: (setUp, quality-or-PSNR, energy)
    resultPoints = pareto_points
    resultTuple = [] 
    for index, point in enumerate(resultPoints):
        print index 
        if(eval(inputObj.dealingWithPics)): 
            resultTuple.append((point.get_setUp(), point.get_PSNR(), point.get_energy()))
        else:
            resultTuple.append((point.get_setUp(), point.get_quality(), point.get_energy()))

    if(settings_obj.DEBUG):
        print "---printing the results:" 
        for el in resultTuple:
            print el
    
    finalResultFileFullAddress = rootResultFolderName + "/" + finalResultFileName
#    if (settings.runToolChainGenerateGraph): 
#        writeReadableOutput(resultTuple,  symbolsCollected, finalResultFileFullAddress)
#        pylab.savefig(finalResultFileFullAddress[:-4]+".png") #saving the figure generated by generateGraph
    #----:::  getting back up of the results
    folderToCopyToNameProcessed = comeUpWithNewFolderNameAccordingly(rootFolder + "/" + settings_obj.resultsBackups) 
    listOfFoldersToCopyFrom = [rootResultFolderName, CSrcFolderAddress]  
    #generateBackup(rootResultFolderBackupName, listOfFoldersToCopyFrom, folderToCopyToNameProcessed) #generating a back of the results
    cleanUpExtras(rootResultFolderName, settings_obj) 
    #---------guide::: show the graph
    #plt.show() 
    end = time.time()
    #print "here is the total time:"

    #print end - start
    sys.stdout.flush()
# ----- scraped example delimiter: 示例#6 (Example #6, score 0) -----
def main():
    """Command-line driver: for each argv flag, load pickled point files and
    render the corresponding quality/energy graphs.

    NOTE(review): the unconditional graph-generation section below ends with
    sys.exit(), so every `if (arg == ...)` handler after it is unreachable
    dead code; additionally the "various_inputs" branch body is entirely
    commented out, so that flag currently has no effect.
    """
    assert(len(sys.argv) >= 2) 
    # quality-filter bounds passed through to get_quality_energy_values;
    # filtering is disabled while limit is False
    limit = False
    lower_bound = -100
    upper_bound = .001
    points_to_graph = [] 
    for arg in sys.argv[1:]:
        if (arg == "various_inputs"):
#            points_to_graph = [] 
#            get_quality_energy_values("various_inputs.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
#            generateGraph_for_all(points_to_graph, "1/quality", "energy", get_benchmark_name(), "optimal setUp for various prime inputs for ", "various_inputs") 
            """ 
            points_to_graph = [] 
            
            get_quality_energy_values("various_inputs_same_setUp.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            generateGraph_for_all(points_to_graph, "1/quality", "energy", get_benchmark_name(), "One input optimal setUp imposed on others for ", "various_inputs_same_setUp") 
            #pylab.savefig("various_inputs_same_setUp.png") #saving the figure generated by generateGraph
            points_to_graph = [] 
            get_quality_energy_values("pickled_results_all_points.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            generateGraph_for_all(points_to_graph, "1/quality", "energy", get_benchmark_name(), "all input", "all_points_tried") 
            #pylab.savefig("all_points_tried.png") #saving the figure generated by generateGraph
            exit()
            """

        #--- all results 
        # (this section runs for EVERY arg value, not just specific flags)
        get_quality_energy_values("pickled_results_all_points.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "","", "E_vs_Q_all_points", "E_vs_Q") 
        #--- various inputs 
        points_to_graph = [] 
        points_to_graph_2 = [] 
        get_quality_energy_values("various_inputs.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "", "", "same_Q_vs_input", "same_Q_vs_input") 
        generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "", "", "E_vs_Q", "E_vs_Q") 
        
        #--- imposed on various inputs
        points_to_graph = [] 
        get_quality_energy_values("various_inputs_same_setUp.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        
        generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "", "" , "E_vs_Q_imposed", "E_vs_Q") 
        generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "", "", "Q_vs_mean_imposed", "same_E_vs_input") 
        generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "", "" , "Qstd_vs_E_imposed", "Qstd_vs_E_imposed") 

        reminder(True, "make sure to creat a file with universal name that holds the imposed values so we  can sample", "URGENT") 
        points_to_graph_3 =[] 
        
        #get_quality_energy_values("various_inputs_avg_setUp.PIK", "+", points_to_graph_3, limit, lower_bound, upper_bound)
        get_quality_energy_values("imposed_setUp.PIK", "+", points_to_graph_3, limit, lower_bound, upper_bound)
        print points_to_graph_3
        #get_quality_energy_values("various_inputs_worse_case_setUp.PIK", "+", points_to_graph_3, limit, lower_bound, upper_bound)
        generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "", "" , "Qmean_normalized_to_Q_promissed", "Qmean_normalized_to_Q_promissed", False, False, "one", points_to_graph_3) 
        generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "", "" , "Q_satisfaction_success_rate", "Q_satisfaction_success_rate", False, False, "one", points_to_graph_3) 
        #--- various inputs imposed vs regular 
        # ax/fig are threaded through so the second call draws onto the same figure
        points_to_graph = [] 
        get_quality_energy_values("various_inputs_same_setUp.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        ax, fig = generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "", "", "E_vs_Q_imposed_vs_optimal", "E_vs_Q", True) #-- post pone saving
        
        points_to_graph = [] 
        get_quality_energy_values("various_inputs.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), ax, fig, "E_vs_Q_impose_vs_optimal", "E_vs_Q", False, True) 

        
        #--- various inputs E vs Q adjusted
        points_to_graph = [] 
        get_quality_energy_values("various_inputs.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        ax,fig =generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "", "", "E_vs_Q_adjusted", "E_vs_Q_adjusted")
        
        
        #--- (E vs Q adjusted) and vs (E vs Q adjusted imposed)
        points_to_graph = [] 
        get_quality_energy_values("various_inputs_same_setUp.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        ax,fig =generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), "", "", "E_vs_Q_adjusted_vs_imposed", "E_vs_Q_adjusted", True)
        points_to_graph = [] 
        get_quality_energy_values("various_inputs.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        fig = generateGraph_for_all_simplified(points_to_graph, "1/quality", "energy", get_benchmark_name(), ax, fig, "E_vs_Q_adjusted_vs_imposed", "E_vs_Q_adjusted", False, True) #-- post pone saving
        

        # NOTE(review): everything below this exit is unreachable dead code.
        sys.exit()
        if (arg == "various_inputs_alter"):
            for index in range(6): 
                generateGraph_for_all_alternative(points_to_graph, points_to_graph_2, "1/quality", "energy", get_benchmark_name(), index, "One input optimal setUp imposed on others for ") 
                pylab.savefig("cmp_ideal_vs_imposed_for_input"+str(index)+".png") #saving the figure generated by generateGraph
            """ 
            points_to_graph = [] 
            get_quality_energy_values("pickled_results_all_points.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            generateGraph_for_all(points_to_graph, "1/quality", "energy", get_benchmark_name()) 
            pylab.savefig("all_points_tried.png") #saving the figure generated by generateGraph
            """ 
            exit()
        if (arg == "hierarchical"):
            get_quality_energy_values("all_of_s2.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("all_of_s3.PIK", "1", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_all_of_s3.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_all_of_s2.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("all_of_combined.PIK", "x", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_combined.PIK", "o", points_to_graph, limit, lower_bound, upper_bound)
        
        if (arg == "ref"): #---ref all
            get_quality_energy_values("pareto_of_heur_flattened.PIK", "^", points_to_graph, limit, lower_bound, upper_bound) 
            get_quality_energy_values("all_of_flattened.PIK", "+", points_to_graph, limit, lower_bound, upper_bound) 
            get_quality_energy_values("pareto_of_all_of_flattened.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        
        if(arg == "only_pareto"):
            get_quality_energy_values("pareto_of_all_of_s3.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_all_of_s2.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_heur_flattened.PIK", "^", points_to_graph, limit,  lower_bound, upper_bound) 
            get_quality_energy_values("pareto_of_all_of_flattened.PIK", "^", points_to_graph, limit, lower_bound, upper_bound) 
        if(arg == "s4" or arg == "s6"):
            get_quality_energy_values("all_of_s4.PIK", "*", points_to_graph, limit, lower_bound, upper_bound) 
        
        if(arg == "compare_pareto"):
            get_quality_energy_values("pareto_of_all_of_flattened.PIK", "^", points_to_graph, limit, lower_bound, upper_bound) 
            get_quality_energy_values("pareto_of_all_of_s4.PIK", "*", points_to_graph, limit, lower_bound, upper_bound) 
            get_quality_energy_values("pareto_of_combined.PIK", "o", points_to_graph, limit, lower_bound, upper_bound)
        
        if(arg == "all"): #--all graph
            get_quality_energy_values("all_of_s2.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("all_of_s3.PIK", "1", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_all_of_s3.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_all_of_s2.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("all_of_combined.PIK", "x", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_combined.PIK", "o", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_heur_flattened.PIK", "^", points_to_graph, limit,  lower_bound, upper_bound) 
            get_quality_energy_values("all_of_flattened.PIK", "+", points_to_graph, limit, lower_bound, upper_bound) 
            get_quality_energy_values("pareto_of_all_of_flattened.PIK", "^", points_to_graph, limit, lower_bound, upper_bound)
        
        if (arg == "main_two"): 
            get_quality_energy_values("pareto_of_combined.PIK", "o", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_heur_flattened.PIK", "^", points_to_graph, limit, lower_bound, upper_bound) 

        if (arg == "s2"): #---stage 2 points
            get_quality_energy_values("all_of_s2.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_all_of_s2.PIK", "o", points_to_graph, limit, lower_bound, upper_bound)
        
        if (arg == "s3"): #---stage 3 points
            get_quality_energy_values("all_of_s3.PIK", "1", points_to_graph, limit, lower_bound, upper_bound)
            get_quality_energy_values("pareto_of_all_of_s3.PIK", "o", points_to_graph, limit, lower_bound, upper_bound)
        
        if (arg == "combined_all"): #combined_all 
            get_quality_energy_values("all_of_combined.PIK", "x", points_to_graph, limit, lower_bound, upper_bound)
        
        if (arg == "combined_pareto"): #combined_pareto
            get_quality_energy_values("pareto_of_combined.PIK", "o", points_to_graph, limit, lower_bound, upper_bound)
        
        if (arg == "ref_pareto"): #---pareto points for ref 
            get_quality_energy_values("pareto_of_heur_flattened.PIK", "^", points_to_graph, limit, lower_bound, upper_bound) 
            get_quality_energy_values("pareto_of_all_of_flattened.PIK", "+", points_to_graph, limit, lower_bound, upper_bound) 
        
        if (arg == "ref_all"): #---ref all
            get_quality_energy_values("all_of_flattened.PIK", "+", points_to_graph, limit, lower_bound, upper_bound) 
         
    generateGraph_for_all(points_to_graph, "1/quality", "energy", get_benchmark_name()) 
    pylab.savefig("results.png") #saving the figure generated by generateGraph
    
    if (arg == "s4" or arg=="s6"):
        points_to_graph = [] 
        get_quality_energy_values("input_for_s4.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        generateGraph_for_all(points_to_graph, "1/quality", "energy", get_benchmark_name()) 
        pylab.savefig("s4_inputs.png") #saving the figure generated by generateGraph
    if (arg=="s6"):
        points_to_graph = [] 
        get_quality_energy_values("all_of_combined.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        generateGraph_for_all(points_to_graph, "1/quality", "energy", get_benchmark_name()) 
        pylab.savefig("s6_combined.png") #saving the figure generated by generateGraph
    
    if (arg=="clusters"):
        points_to_graph = [] 
        get_quality_energy_values("s2_output_acc.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        generateGraph_for_all(points_to_graph, "1/quality", "energy", get_benchmark_name()) 
        pylab.savefig("s2_output_acc.png") #saving the figure generated by generateGraph
        points_to_graph = [] 
        get_quality_energy_values("cluster_rep.PIK", "+", points_to_graph, limit, lower_bound, upper_bound)
        generateGraph_for_all(points_to_graph, "1/quality", "energy", get_benchmark_name()) 
        pylab.savefig("cluster_rep.png") #saving the figure generated by generateGraph
def compare_adjusted(points_collected, points_collected_imposed):
    """Plot the per-quality energy difference between imposed and reference runs.

    Both arguments are processed by two near-identical passes: group results by
    input index, energy-sort each input's (quality, energy, setup) triples,
    dedup inputs by a synthetic z value, then feed the grouped lists to
    adjust.adjust_vals_2.  Finally the per-input energy difference
    (imposed - reference) is plotted against quality, along with its average,
    and saved as "Ediff_vs_Q<counter>.png" / "Ediff_avg_vs_Q<counter>.png".

    Assumes (TODO confirm against the caller): each element `val` of both
    collections transposes via zip(*val[:-1]) into tuples whose fields are
    (quality, energy, input_index, ...), with input_index valid for
    input_list.lOf_run_input_list.

    NOTE(review): the two grouping passes are copy-paste duplicates and are a
    strong candidate for extraction into a shared helper.
    """
    fig, ax = plt.subplots()

    symbolsToChooseFrom = ['*', 'x', "o", "+","^", '1', '2', "3"] 
    color =['g', 'y', 'r', 'm']
    
    """ 
    limit = False
    lower_bound = -100
    upper_bound = .001

    get_quality_energy_values("various_inputs.PIK", "+", points_collected, limit, lower_bound, upper_bound)
    """ 
    lOf_run_input_list = input_list.lOf_run_input_list
    number_of_inputs_used = len(lOf_run_input_list)
    # Python 2: map returns a list; map(list, [[]]*n) yields n *independent*
    # empty lists (a bare [[]]*n would alias a single list n times).
    input_results = map(list, [[]]*number_of_inputs_used) 
    base_dir = "/home/local/bulkhead/behzad/usr/local/apx_tool_chain/inputPics/"
    counter = 0
    energy_list_to_be_drawn = []
    setup_list_to_be_drawn = [] 
    quality_list_to_be_drawn = []
    std_list_to_be_drawn = []
    image_list_to_be_drawn = [] 
    z_vals = [] 

    mR =0 
    mG =0
    mB =0
    # ---- pass 1: reference points ----
    for val in points_collected:
        input_results = map(list, [[]]*number_of_inputs_used) 
        # Transpose the per-run record (dropping its last field) into
        # per-sample tuples; el[2] is presumably the input index -- TODO confirm.
        zipped = zip(*val[:-1])  
        for el in zipped:
            input_results[el[2]].append(el)
        for index,res in enumerate(input_results):
            """ 
            if (counter > 50 ):
                break
            """ 
            print counter 
            if len(res) > 0:
                image_addr =  base_dir+lOf_run_input_list[index][0] + ".ppm"
                #mR, mG, mB, stdR, stdG, stdB = cluster_images.calc_image_mean_std(image_addr)
                # NOTE(review): with the image-stats call above commented out,
                # mR/mG/mB are plain running counters, so the "z" value below is
                # just an arbitrary sequence number per processed input.
                mR +=1 
                mG +=1
                mB +=1
                stdB = 0
                stdR = 0
                stdG = 0

                # Skip inputs whose synthetic z value was already recorded (dedup).
                if (int(np.mean([mR,mG,mB]))) in z_vals:
                    continue
                el = map(lambda x: list(x), zip(*res))
                quality_values_shifted = map(lambda x: x+1, el[0]) 
                
                #--- sort based on the quality
                # NOTE(review): despite the comment above, the sort key below is
                # energy (E), not quality -- confirm which ordering is intended.
                Q = quality_values_shifted
                E = el[1]
                SetUps =  el[2]
                E_index_sorted = sorted(enumerate(E), key=lambda x: x[1])
                index_of_E_sorted = map(lambda y: y[0], E_index_sorted)
                Q_sorted = [Q[i] for i in index_of_E_sorted]
                E_sorted = [E[i] for i in index_of_E_sorted]
                SetUp_sorted = [SetUps[i] for i in index_of_E_sorted]
                quality_list_to_be_drawn.append(Q_sorted)
                energy_list_to_be_drawn.append(E_sorted)
                setup_list_to_be_drawn.append(SetUp_sorted)
                std_list_to_be_drawn.append([int(np.mean([mR,mG,mB]))]*len(E_sorted))
                image_list_to_be_drawn.append([lOf_run_input_list[index][0]]*len(E_sorted))
                z_vals.append( int(np.mean([mR,mG,mB])))
                counter +=1
        
        reminder(True,"the following lines which creates a new image every len(symbolsToChooseFrom) should be commented if we use any flag but various_inputs")
        
        
        #--sorting the data. This is necessary for wire frame 
        zvals_index_sorted = sorted(enumerate(z_vals), key=lambda x: x[1])
        index_of_zvals_sorted = map(lambda y: y[0], zvals_index_sorted)
        quality_list_sorted_based_on_z = [quality_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        std_list_sorted_based_on_z = [std_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        energy_list_sorted_based_on_z = [energy_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        
        image_list_sorted_based_on_z = [image_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        
        SetUp_list_sorted_based_on_z = [setup_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
    # NOTE(review): the *_sorted_based_on_z names are assigned inside the loop
    # above; this call uses whatever the last iteration left behind and raises
    # NameError if points_collected is empty.
    Qs_ref, Es_ref, stds_ref, QSs_ref = adjust.adjust_vals_2(quality_list_sorted_based_on_z, energy_list_sorted_based_on_z, std_list_sorted_based_on_z)
    
    
    # ---- pass 2: imposed points (near duplicate of pass 1) ----
    lOf_run_input_list = input_list.lOf_run_input_list
    number_of_inputs_used = len(lOf_run_input_list)
    input_results = map(list, [[]]*number_of_inputs_used) 
    base_dir = "/home/local/bulkhead/behzad/usr/local/apx_tool_chain/inputPics/"
    counter = 0
    energy_list_to_be_drawn = []
    setup_list_to_be_drawn = [] 
    quality_list_to_be_drawn = []
    std_list_to_be_drawn = []
    image_list_to_be_drawn = [] 
    z_vals = [] 

    mR =0 
    mG =0
    mB =0

    for val in points_collected_imposed:
        input_results = map(list, [[]]*number_of_inputs_used) 
        zipped = zip(*val[:-1])  
        for el in zipped:
            input_results[el[2]].append(el)
        for index,res in enumerate(input_results):
            """ 
            if (counter > 50 ):
                break
            """ 
            print counter 
            if len(res) > 0:
                # Same synthetic-z counter scheme as pass 1 (real image stats
                # commented out below).
                mR +=1 
                mG +=1
                mB +=1
                stdB = 0
                stdR = 0
                stdG = 0

                image_addr =  base_dir+lOf_run_input_list[index][0] + ".ppm"
                #mR, mG, mB, stdR, stdG, stdB = cluster_images.calc_image_mean_std(image_addr)
                if (int(np.mean([mR,mG,mB]))) in z_vals:
                    continue
                el = map(lambda x: list(x), zip(*res))
                quality_values_shifted = map(lambda x: x+1, el[0]) 
                
                #--- sort based on the quality
                # NOTE(review): as in pass 1, the sort key is energy, not quality.
                Q = quality_values_shifted
                E = el[1]
                SetUps =  el[2]
                E_index_sorted = sorted(enumerate(E), key=lambda x: x[1])
                index_of_E_sorted = map(lambda y: y[0], E_index_sorted)
                Q_sorted = [Q[i] for i in index_of_E_sorted]
                E_sorted = [E[i] for i in index_of_E_sorted]
                SetUp_sorted = [SetUps[i] for i in index_of_E_sorted]
                quality_list_to_be_drawn.append(Q_sorted)
                energy_list_to_be_drawn.append(E_sorted)
                setup_list_to_be_drawn.append(SetUp_sorted)
                std_list_to_be_drawn.append([int(np.mean([mR,mG,mB]))]*len(E_sorted))
                image_list_to_be_drawn.append([lOf_run_input_list[index][0]]*len(E_sorted))
                z_vals.append( int(np.mean([mR,mG,mB])))
                counter +=1
        
        reminder(True,"the following lines which creates a new image every len(symbolsToChooseFrom) should be commented if we use any flag but various_inputs")
        
        
        #--sorting the data. This is necessary for wire frame 
        zvals_index_sorted = sorted(enumerate(z_vals), key=lambda x: x[1])
        index_of_zvals_sorted = map(lambda y: y[0], zvals_index_sorted)
        quality_list_sorted_based_on_z = [quality_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        std_list_sorted_based_on_z = [std_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        energy_list_sorted_based_on_z = [energy_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        
        image_list_sorted_based_on_z = [image_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
        
        SetUp_list_sorted_based_on_z = [setup_list_to_be_drawn[i] for i in index_of_zvals_sorted]                
    # Same loop-variable-leak caveat as the pass-1 adjust call above.
    Qs_imposed, Es_imposed, stds_imposed, QSs_imposed = adjust.adjust_vals_2(quality_list_sorted_based_on_z, energy_list_sorted_based_on_z, std_list_sorted_based_on_z)
        
    
    # Element-wise energy difference per input: imposed minus reference.
    Es_diff = []
    for input_index in range(len(Es_imposed)):
#        Es_diff_el = [] 
#        for el in range(len(Es_imposed[input_index])):
#            Es_diff_el.append(Es_imposed[input_index][el] - Es_ref[input_index][el])
         
        #Es_diff.append(Es_diff_el) 
        Es_diff.append(map(operator.sub, Es_imposed[input_index], Es_ref[input_index]))
     
    # One line per input: energy difference vs quality.
    line_style = '-'
    plt.xlabel("Quality")
    plt.ylabel("Energy")
    second_axis = Es_diff; 
    third_axis = std_list_sorted_based_on_z; 
    third_axis_name = "input" 
    n_lines = len(std_list_sorted_based_on_z)
    colors = gen_color_spec.gen_color(n_lines+1, 'seismic') 
    for x in range(len(third_axis)):
        my_label =  third_axis_name +": " + str(int(third_axis[x][0]))
        #if (int(third_axis[x][0]) == 151): 
        # NOTE(review): every line shares QSs_imposed as its x axis -- confirm
        # the reference and imposed quality grids are actually aligned.
        ax.plot(QSs_imposed, Es_diff[x], marker = symbolsToChooseFrom[x%len(symbolsToChooseFrom)], c= colors[x], label=my_label, linestyle=line_style)
#
    #   

    # Shrink the axes to make room for a legend on the right.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width*.8 ,  box.height])
    # Put a legend to the right of the current axis (note: prop changes the fontsize)
    ax.legend(loc='center left', bbox_to_anchor=(1, .9), prop={'size':6})
    graph_title = "Ediff_vs_Q"
    name  = graph_title
    # NOTE(review): benchmark name is hard-coded to "jpeg" here instead of
    # using get_benchmark_name() like the rest of the file -- confirm.
    benchmark_name = "jpeg" 
    plt.title(graph_title + str(benchmark_name) + " benchmark")
    pylab.savefig(name+str(counter)+".png") #saving the figure generated by generateGraph
     
    
    # Average the energy difference across inputs at each quality point.
    Es_diff_avg_per_quality = [] 
    for x in range(len(QSs_imposed)):
        Es_diff_avg_per_quality.append(numpy.mean(map(lambda y: y[x], Es_diff)))
        #if (int(third_axis[x][0]) == 151): 
     
    my_label =  "AVG"
    # NOTE(review): `x` here leaks from the loop above, so the marker choice
    # reuses the last index rather than a dedicated AVG marker.
    ax.plot(QSs_imposed, Es_diff_avg_per_quality, marker = symbolsToChooseFrom[x%len(symbolsToChooseFrom)], c= colors[n_lines], label=my_label, linestyle=line_style)
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width*.8 ,  box.height])
    # Put a legend to the right of the current axis (note: prop changes the fontsize)
    ax.legend(loc='center left', bbox_to_anchor=(1, .9), prop={'size':6})
    graph_title = "Ediff_avg_vs_Q"
    name  = graph_title
    benchmark_name = "jpeg" 
    plt.title(graph_title + str(benchmark_name) + " benchmark")
    pylab.savefig(name+str(counter)+".png") #saving the figure generated by generateGraph
    
    #plt.close()
    fig, ax = plt.subplots()
    print "avg of ES_diff" + str(numpy.mean(map(lambda x: numpy.mean(x), Es_diff)))
    """