import sys

import shared

def main():
    # check the given arguments
    if len(sys.argv) < 3:
        usage()
    elif len(sys.argv) == 4:
        if sys.argv[1] == "-c" or sys.argv[1] == "--no-color":
            shared.terminalRed = ""
            shared.terminalReset = ""
            filename = sys.argv[2]
            directory = sys.argv[3]
        else:
            usage()
    else:
        filename = sys.argv[1]
        directory = sys.argv[2]

    # ensure the given directory exists
    directory = shared.ensureDir(directory)
    if directory[-1] != '/':
        directory = directory + '/'

    # find the minimum and maximum values of the concentrations
    minCon, maxCon = findMinMax(filename)
    # find the width and height of the cell tissue and parse the concentrations file into an array
    width, height, parsedFile = readData(filename)
    # configure the hexagon edge and window size based on the grid size
    edge, size = findSizes(width, height)

    # plot every tenth time step
    index = 0
    for line in parsedFile:
        if index % 10 == 0:
            plotHexagons(directory, size, index, line[1:], minCon, maxCon, edge, width, height)
        index += 1
import sys

import shared

def main():
    # Default argument values.
    global Y_BOUNDS
    global JOB_NAME
    global PARAM_NAMES
    nodes = 1
    feature = -1
    points = 4
    percent = 20
    file_out = "sensitivity_graphs"
    nominal_file = "../sensitivity-analysis/nominal.params"
    data_dir = "../sensitivity-analysis/sense-for-plot"
    image_dir = "plots"
    perturb_file = "../simulation/input.perturb"
    nominal_count = 1
    ex_path = "../sensitivity-analysis/sensitivity"
    sim_path = "../simulation/simulation"
    ppn = "2"
    graph = False
    elasticity = False
    sim_args = " "
    additional_args = " "
    cname = None

    # Check the command-line input:
    if len(sys.argv) < 2 or ("-h" in sys.argv) or ("--help" in sys.argv):
        usage()
    ishift = 0
    for i in range(1, len(sys.argv), 2):
        i += ishift
        if i + 1 >= len(sys.argv):
            usage()
        elif sys.argv[i] == "-n" or sys.argv[i] == "--nominal-file":
            nominal_file = sys.argv[i + 1]
        elif sys.argv[i] == "-c" or sys.argv[i] == "--nominal-count":
            nominal_count = shared.toInt(sys.argv[i + 1])
        elif sys.argv[i] == "-p" or sys.argv[i] == "--percent":
            percent = shared.toInt(sys.argv[i + 1])
        elif sys.argv[i] == "-P" or sys.argv[i] == "--Points":
            points = shared.toInt(sys.argv[i + 1])
        elif sys.argv[i] == "-l" or sys.argv[i] == "--ppn":
            ppn = sys.argv[i + 1]
        elif sys.argv[i] == "-N" or sys.argv[i] == "--nodes":
            nodes = shared.toInt(sys.argv[i + 1])
        elif sys.argv[i] == "-f" or sys.argv[i] == "--feature":
            feature = shared.toInt(sys.argv[i + 1])
        elif sys.argv[i] == "-e" or sys.argv[i] == "--exec":
            ex_path = sys.argv[i + 1]
        elif sys.argv[i] == "-s" or sys.argv[i] == "--sim":
            sim_path = sys.argv[i + 1]
        elif sys.argv[i] == "-o" or sys.argv[i] == "--output":
            file_out = sys.argv[i + 1]
        elif sys.argv[i] == "-d" or sys.argv[i] == "--dir":
            image_dir = sys.argv[i + 1]
        elif sys.argv[i] == "-D" or sys.argv[i] == "--data-dir":
            data_dir = sys.argv[i + 1]
        elif sys.argv[i] == "-j" or sys.argv[i] == "--job-name":
            JOB_NAME = sys.argv[i + 1]
        elif sys.argv[i] == "-C" or sys.argv[i] == "--cluster-name":
            cname = sys.argv[i + 1]
        elif sys.argv[i] == "--ymin":
            val = float(sys.argv[i + 1])
            # Y_BOUNDS is kept as a list so that the other bound can still be
            # overwritten later; a tuple here would make Y_BOUNDS[0]/[1] assignment fail.
            if Y_BOUNDS is None:
                Y_BOUNDS = [val, max(1.5, 2 * val)]
            else:
                Y_BOUNDS[0] = val
        elif sys.argv[i] == "--ymax":
            val = float(sys.argv[i + 1])
            if Y_BOUNDS is None:
                Y_BOUNDS = [min(0, 2 * val), val]
            else:
                Y_BOUNDS[1] = val
        elif sys.argv[i] == "-E" or sys.argv[i] == "--elasticity":
            elasticity = True
            ishift -= 1  # this flag takes no value, so shift the option index back by one
        elif sys.argv[i] == "-g" or sys.argv[i] == "--graph":
            graph = True
            ishift -= 1
        elif sys.argv[i] == "-a" or sys.argv[i] == "--args":
            for a in sys.argv[i + 1:]:
                additional_args += " " + a + " "
            break

    # Ensure that the necessary directories exist -- if not, make them.
    shared.ensureDir(data_dir)
    shared.ensureDir(image_dir)

    # additional_args is a string that is attached to the final arguments sent to the sensitivity analysis program.
    additional_args = " -p " + str(percent) + " -P " + str(points) + " " + additional_args

    # Depending on whether elasticity is chosen, either create sensitivity bar graphs or scatter-line plots.
    if not elasticity:
        # Check whether simulations actually need to be run. They usually do, but if all the data
        # has already been created then '-g' makes the script skip straight to the plotting.
        if not graph:
            # dispatch_jobs takes care of running the program locally or making the PBS jobs.
            dispatch_jobs(nodes, file_out, nominal_file, data_dir, image_dir, perturb_file,
                          nominal_count, 0, ex_path, sim_path, ppn, sim_args, None,
                          additional_args, cname)
            print "\t~ Done with runs ~"

        # Once the data has been collected, load it in and make the graphs.
        print "\t ~ Generating graphs ~ "

        # Load all of the data from the sensitivity results.
        # This uses "/normalized_[number]" as the file name because that is how it's set in sensitivity-analysis/init.hpp.
        # The struct input_params has two strings, norm_file and sense_file, that determine the names of specific files to load.
        # These could be specified more generally by adding a new command-line argument to the sensitivity
        # executable, but that has not seemed necessary because there is already so much customization of
        # the directories these files end up in.
        data = []
        names = []
        for i in range(nominal_count):
            temp_data, names = parse_files(data_dir + "/normalized_" + str(i))
            data.append(temp_data)

        # If just one feature is specified, make just one graph. Otherwise loop through all features
        # and make a graph for each.
        bar_data = []   # the data that was actually plotted, i.e. average sensitivity values for each parameter
        bar_error = []  # the standard error for each parameter
        if feature > 0:
            temp_sense, temp_error = sense_bar(data, image_dir, feature, feat_name=names[feature])
            bar_data.append(temp_sense)
            bar_error.append(temp_error)
        else:
            sys.stdout.write("Done with normalized graphs: ")
            sys.stdout.flush()
            for i in range(len(data[0][0])):
                temp_sense, temp_error = sense_bar(data, image_dir, i, feat_name=names[i])
                bar_data.append(temp_sense)
                bar_error.append(temp_error)
                sys.stdout.write(str(i) + "... ")
                sys.stdout.flush()

        # Write out the bar graph data to file.
        write_bar_data(bar_data, bar_error, data_dir + "/bar_graph_data_normalized.csv",
                       ynames=names, xnames=PARAM_NAMES)

        # Absolute sensitivity graphs.
        # Similarly, this uses "/LSA_[number]" as the file name because that is how it's set in sensitivity-analysis/init.hpp.
        data = []
        names = []
        for i in range(nominal_count):
            temp_data, names = parse_files(data_dir + "/LSA_" + str(i))
            data.append(temp_data)

        bar_data = []
        bar_error = []
        if feature > 0:
            temp_sense, temp_error = sense_bar(data, image_dir, feature, feat_name=names[feature], normal=False)
            bar_data.append(temp_sense)
            bar_error.append(temp_error)
        else:
            sys.stdout.write("Done with absolute graphs: ")
            sys.stdout.flush()
            for i in range(len(data[0][0])):
                temp_sense, temp_error = sense_bar(data, image_dir, i, feat_name=names[i], normal=False)
                bar_data.append(temp_sense)
                bar_error.append(temp_error)
                sys.stdout.write(str(i) + "... ")
                sys.stdout.flush()

        write_bar_data(bar_data, bar_error, data_dir + "/bar_graph_data_absolute.csv",
                       ynames=names, xnames=PARAM_NAMES)

    # If the elasticity option was included, make scatter plots of the oscillation features data
    # at different perturbations of each nominal parameter.
    else:
        # Add a command-line argument that tells the sensitivity analysis program to gather
        # the data without calculating the sensitivity.
        additional_args = " --generate-only " + additional_args

        # Note that for the elasticity/scatter-line plots each instance of sensitivity used to gather
        # the data is given only one parameter set, to ensure data files are unique (and don't get
        # overwritten). This makes it slower than the sensitivity graphs.
        print "\n\t ~ Elasticity data collection ~ "
        # data is a four-dimensional list indexed by:
        # data[which nominal set][which parameter][which perturbation amount][which oscillation feature value]
        data = []
        names = []
        # nominal is a three-dimensional list indexed by: nominal[which nominal set][0][which oscillation feature];
        # the middle index is zero because there is only one parameter set in the nominal features file.
        nominal = []

        # This loop runs if the data needs to be collected. The unintuitive additions keep track of how
        # many jobs should be sent out and the index of which nominal parameter set to use.
        if not graph:
            disp = 0            # counter that keeps track of how many jobs to dispatch
            raw_data_dirs = []  # directory name strings that get passed as arguments to the sensitivity program
            for c in range(0, nominal_count):
                raw_data_dirs.append(data_dir + "/elastic_data_" + str(c))
                disp += 1
                if disp == nodes or c == nominal_count - 1:
                    dispatch_jobs(disp, file_out, nominal_file, data_dir, image_dir, perturb_file,
                                  disp, c - disp + 1, ex_path, sim_path, ppn, sim_args,
                                  raw_data_dirs, additional_args, cname)
                    raw_data_dirs = []
                    disp = 0

        # Now that the data files exist, load them and parse them into the appropriate arrays.
        # The "/dim_[number]" and "/nominal_0" strings are the file names that the sensitivity analysis
        # program uses to distinguish output features files. Modifying these file names would require
        # changing nom_file and dim_file in the constructor of input_params in sensitivity-analysis/init.hpp.
        # For every nominal parameter set this opens its /elastic_data_[number] directory, parses the
        # files in it, and stores the results in data[number] and nominal[number].
        for c in range(0, nominal_count):
            data.append([])
            for d in range(44):
                temp_data, names = parse_files(data_dir + "/elastic_data_" + str(c) + "/dim_" + str(d))
                data[c].append(temp_data)
            temp_data, names = parse_files(data_dir + "/elastic_data_" + str(c) + "/nominal_0")
            nominal.append(temp_data)

        # data[] and nominal[] now hold everything needed for the graphs, so plot them.
        print "\n\t ~ Elasticity graphing ~ "
        sys.stdout.write("Done with parameter: ")
        sys.stdout.flush()
        # Loop through each parameter...
        for p in range(len(data[0])):
            # ...and each feature.
            for f in range(len(data[0][0][0])):
                line_plot(data, nominal, p, f, names[f], PARAM_NAMES[p], image_dir, percent, points)
            sys.stdout.write(str(p) + "...")
            sys.stdout.flush()
        print "\n\t ~ Graphs complete ~ "
    return
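For reference, a hypothetical invocation under the flags parsed above; the script name plot-sensitivity.py is a placeholder, and the paths simply echo the defaults:

    # Run the sensitivity executable for two nominal parameter sets, then plot:
    python plot-sensitivity.py -n ../sensitivity-analysis/nominal.params -c 2 -p 20 -P 4 -d plots
    # Re-plot from already-generated data, skipping the simulation runs:
    python plot-sensitivity.py -c 2 -g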
import subprocess
import sys

import shared

def main():
    # check the given arguments
    print "Reading command-line arguments..."
    args = sys.argv[1:]
    num_args = len(args)
    req_args = [False] * 6
    num_seeds = 0
    sim_arguments = ""
    if num_args >= 6:
        for arg in range(0, num_args - 1, 2):
            option = args[arg]
            value = args[arg + 1]
            if option == '-i' or option == '--input-file':
                ifile = value
                req_args[0] = True
            elif option == '-n' or option == '--num-params':
                num_params = shared.toInt(value)
                req_args[1] = True
            elif option == '-p' or option == '--pars-per-job':
                pars_per_job = shared.toInt(value)
                req_args[2] = True
            elif option == '-d' or option == '--directory':
                folder = value
                req_args[3] = True
            elif option == '-s' or option == '--simulation':
                simulation = value
                req_args[4] = True
            elif option == '-S' or option == '--seeds':
                num_seeds = int(value)
                req_args[5] = True
            elif option == '-a' or option == '--arguments':
                for a in range(arg + 1, num_args):
                    sim_arguments += ' ' + args[a]
                break
            elif option == '-h' or option == '--help':
                usage()
            else:
                usage()
        # every required argument must have been seen
        for arg in req_args:
            if not arg:
                usage()
    else:
        usage()

    # split the input parameter sets into one file per job
    index = 0
    input_file = shared.openFile(ifile, "r")
    shared.ensureDir(folder)
    for parset in range(0, num_params, pars_per_job):
        params = shared.openFile(folder + "/input" + str(index) + ".params", "w")
        for line in range(pars_per_job):
            params.write(input_file.readline())
        params.close()
        index += 1

    # write and submit one PBS job per (seed, parameter file) pair
    for seeds in range(num_seeds):
        seed = (seeds + 1) * 1000
        for parset in range(index):
            job = shared.openFile(folder + "/pbs-job-" + str(seed) + "-" + str(parset), 'w')
            job.write('''#PBS -N robust-test
#PBS -l nodes=1:ppn=1
#PBS -l mem=500mb
#PBS -l file=300mb
#PBS -q biomath
#PBS -j oe
#PBS -o ''' + folder + '''/output''' + str(seed) + "-" + str(parset) + '''.txt
#PBS -l walltime=06:00:00

cd $PBS_O_WORKDIR
''' + simulation + ' ' + sim_arguments + ' -p ' + str(pars_per_job) + ' -i ' + ifile +
                      ' -s ' + str(seed) + " -M 6 -E " + folder + "/scores-" + str(seed) +
                      "-" + str(parset) + ".csv")
            job.close()
            subprocess.call(["qsub", folder + "/pbs-job-" + str(seed) + "-" + str(parset)])
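A sketch of how this script might be invoked, assuming it is saved as make-robustness-jobs.py (the name is hypothetical); all six required flags must be present:

    python make-robustness-jobs.py -i input.params -n 1000 -p 100 -d jobs \
        -s ../simulation/simulation -S 3

With these values the script writes jobs/input0.params through jobs/input9.params, then submits one pbs-job-[seed]-[parset] script per (seed, parameter file) pair via qsub, for seeds 1000, 2000, and 3000.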
import sys

import shared

def main():
    # check the given arguments
    if len(sys.argv) < 3:
        usage()
    elif len(sys.argv) == 4:
        if sys.argv[1] == "-c" or sys.argv[1] == "--no-color":
            shared.terminalRed = ""
            shared.terminalReset = ""
            directory = sys.argv[2]
            filename = sys.argv[3]
        else:
            usage()
    else:
        directory = sys.argv[1]
        filename = sys.argv[2]

    # ensure the directory exists and open the output file
    directory = shared.ensureDir(directory)
    ofile = shared.openFile(filename, "w")

    # scan the first run to find the simulation duration (the time stamp on the last line)
    fsize = 0
    filename = directory + "/run0.txt"
    f = shared.openFile(filename, "r")
    width, height = shared.widthAndHeight(f.readline().split(), filename)
    for line in f:
        data = line.split(",")  # the data lines are comma-separated; only the time column matters here
        fsize = shared.toFlo(data[0])
    f.close()

    # avg[time slot][cell] accumulates concentrations across runs; -1 marks an empty slot
    # 'runs' (the number of run files to average) is assumed to be defined at module level
    avg = [[-1 for i in range(width * height)] for j in range(int(fsize * 10) + 50)]
    end = 0
    for run in range(0, runs):
        filename = directory + "/run" + str(run) + ".txt"
        f = shared.openFile(filename, "r")
        width, height = shared.widthAndHeight(f.readline().split(), filename)
        for line in f:
            lineList = line.split(",")
            temptime = shared.toFlo(lineList[0])
            # round the time stamp to the nearest tenth-of-a-unit slot
            place = int(temptime * 10)
            if float(place + 1) - (temptime * 10) < (temptime * 10 - float(place)):
                index = place + 1
            else:
                index = place
            if index > end:
                end = index
            for cell in range(0, width * height):
                if avg[index][cell] == -1:
                    avg[index][cell] = shared.toFlo(lineList[cell + 1])
                else:
                    avg[index][cell] += shared.toFlo(lineList[cell + 1])
        f.close()

    ofile.write(str(width) + " " + str(height) + "\n")

    # linearly interpolate across time slots that no run filled in
    for cell in range(0, width * height):
        i = 0
        minend = end
        while i < end:
            if avg[i + 1][cell] == -1:
                x1 = i
                y1 = avg[i][cell]
                y2 = 0
                x2 = -1
                # find the next filled slot
                for k in range(i + 2, end + 1):
                    if avg[k][cell] != -1:
                        y2 = avg[k][cell]
                        x2 = k
                        break
                if x2 != -1:
                    m = (y2 - y1) / (x2 - x1)
                    for k in range(i + 1, x2):
                        avg[k][cell] = y1 + m * (k - x1)
                    i = x2
                else:
                    # no later data for this cell; truncate the averaged series here
                    end = i
                    if end < minend:
                        minend = end
                    break
            else:
                i += 1
        end = minend

    # write the averaged concentrations, one time slot per line
    for i in range(1, end + 1):
        row = avg[i]
        ofile.write(str(float(i) / 10) + " ")
        for cell in range(0, width * height):
            ofile.write(str(row[cell] / runs) + " ")
        ofile.write("\n")
    ofile.close()
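The place/index arithmetic above just rounds the time stamp to the nearest tenth-of-a-unit slot, with ties rounding down. A minimal standalone sketch of the same computation, for clarity:

def nearest_slot(temptime):
    # Equivalent to the place/index comparison above: round temptime * 10
    # to the nearest integer slot; ties round down, matching the '<' test.
    place = int(temptime * 10)
    if (place + 1) - temptime * 10 < temptime * 10 - place:
        return place + 1
    return place

assert nearest_slot(1.23) == 12
assert nearest_slot(1.27) == 13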
import struct
import sys

import shared

def main():
    print 'Reading command-line arguments...'
    args = sys.argv[1:]
    if len(args) == 3:
        cons_fname1 = args[0]
        cons_fname2 = args[1]
        directory = args[2]
    else:
        usage()

    print 'Reading concentrations file 1...'
    min_con1 = float('inf')
    max_con1 = 0
    cons_data1 = []
    if cons_fname1.endswith('.cons'):  # Read ASCII file
        cons_file1 = shared.openFile(cons_fname1, 'r')
        # The first line contains the width and height
        width, height = map(lambda num: shared.toInt(num), cons_file1.readline().split(' '))
        checkSize(width, height)
        for line in cons_file1:
            # Remove the time step column and newline when taking the concentrations
            cons = map(lambda num: shared.toFlo(num), line.split(' ')[1:-1])
            for con in cons:
                min_con1 = min(min_con1, con)
                max_con1 = max(max_con1, con)
            cons_data1.append(cons)
    elif cons_fname1.endswith('.bcons'):  # Read binary file
        cons_file1 = shared.openFile(cons_fname1, 'rb')
        # The first two ints are the width and height
        width, = struct.unpack('i', cons_file1.read(4))
        height, = struct.unpack('i', cons_file1.read(4))
        checkSize(width, height)
        size = width * height
        cons1 = []
        cons_length1 = 0
        while True:
            con_str1 = cons_file1.read(4)
            if con_str1 == '':  # EOF
                break
            # There are width * height concentration floats per time step
            con, = struct.unpack('f', con_str1)
            min_con1 = min(min_con1, con)
            max_con1 = max(max_con1, con)
            cons1.append(con)
            cons_length1 += 1
            if cons_length1 == size:
                cons_data1.append(cons1)
                cons1 = []
                cons_length1 = 0
    else:
        usage()

    print 'Reading concentrations file 2...'
    min_con2 = float('inf')
    max_con2 = 0
    cons_data2 = []
    if cons_fname2.endswith('.cons'):  # Read ASCII file
        cons_file2 = shared.openFile(cons_fname2, 'r')
        # The first line contains the width and height
        width, height = map(lambda num: shared.toInt(num), cons_file2.readline().split(' '))
        checkSize(width, height)
        for line in cons_file2:
            # Remove the time step column and newline when taking the concentrations
            cons = map(lambda num: shared.toFlo(num), line.split(' ')[1:-1])
            for con in cons:
                min_con2 = min(min_con2, con)
                max_con2 = max(max_con2, con)
            cons_data2.append(cons)
    elif cons_fname2.endswith('.bcons'):  # Read binary file
        cons_file2 = shared.openFile(cons_fname2, 'rb')
        # The first two ints are the width and height
        width, = struct.unpack('i', cons_file2.read(4))
        height, = struct.unpack('i', cons_file2.read(4))
        checkSize(width, height)
        size = width * height
        cons2 = []
        cons_length2 = 0
        while True:
            con_str2 = cons_file2.read(4)
            if con_str2 == '':  # EOF
                break
            # There are width * height concentration floats per time step
            con, = struct.unpack('f', con_str2)
            min_con2 = min(min_con2, con)
            max_con2 = max(max_con2, con)
            cons2.append(con)
            cons_length2 += 1
            if cons_length2 == size:
                cons_data2.append(cons2)
                cons2 = []
                cons_length2 = 0
    else:
        usage()

    print 'Creating the directory if necessary...'
    directory = shared.ensureDir(directory)
    if directory[-1] != '/':
        directory = directory + '/'

    cons_data = combine_cons(cons_data1, cons_data2, max_con1, min_con1, max_con2, min_con2)

    print 'Creating snapshots...'
    # Configure the hexagon edge and window size based on the grid size
    edge, size = findSizes(width, height)
    index = 0
    for line in cons_data:
        if index % 10 == 0 and index >= 21000:
            plotHexagons(directory, size, index, line, edge, width, height)
        index += 1
    print 'Done. Your snapshots are stored in ' + directory
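Both readers above assume the same .bcons layout: two 4-byte ints (width, height) followed by raw 4-byte floats, width * height of them per time step. A minimal sketch that writes a tiny file in that layout, handy for testing the loops above (the helper name is illustrative, not part of the original scripts):

import struct

def write_bcons(fname, width, height, time_steps):
    # time_steps is a list of time steps, each a list of width * height floats
    f = open(fname, 'wb')
    f.write(struct.pack('i', width))
    f.write(struct.pack('i', height))
    for step in time_steps:
        for con in step:
            f.write(struct.pack('f', con))
    f.close()

# Two time steps for a 2x2 grid:
write_bcons('tiny.bcons', 2, 2, [[0.0, 0.1, 0.2, 0.3], [0.4, 0.5, 0.6, 0.7]])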
def main(): print "Reading command-line arguments..." args = sys.argv[1:] if len(args) == 3: cons_fname1 = args[0] cons_fname2 = args[1] directory = args[2] else: usage() print "Reading concentrations file 1..." min_con1 = float("inf") max_con1 = 0 cons_data1 = [] if cons_fname1.endswith(".cons"): # Read ASCII file cons_file1 = shared.openFile(cons_fname1, "r") width, height = map( lambda num: shared.toInt(num), cons_file1.readline().split(" ") ) # The first line contains the width and height checkSize(width, height) for line in cons_file1: cons = map( lambda num: shared.toFlo(num), line.split(" ")[1:-1] ) # Remove the time step column and newline when taking the concentrations for con in cons: min_con1 = min(min_con1, con) max_con1 = max(max_con1, con) cons_data1.append(cons) elif cons_fname1.endswith(".bcons"): # Read binary file cons_file1 = shared.openFile(cons_fname1, "rb") # Read the file as a binary # The first two ints are the width and height width, = struct.unpack("i", cons_file1.read(4)) height, = struct.unpack("i", cons_file1.read(4)) checkSize(width, height) size = width * height cons1 = [] cons_length1 = 0 while True: con_str1 = cons_file1.read(4) if con_str1 == "": # While not EOF break else: # There are width * height concentration floats per time step con, = struct.unpack("f", con_str1) min_con1 = min(min_con1, con) max_con1 = max(max_con1, con) cons1.append(con) cons_length1 += 1 if cons_length1 == height: cons_data1.append(cons) cons1 = [] else: usage() print "Reading concentrations file 2..." min_con2 = float("inf") max_con2 = 0 cons_data2 = [] if cons_fname2.endswith(".cons"): # Read ASCII file cons_file2 = shared.openFile(cons_fname2, "r") width, height = map( lambda num: shared.toInt(num), cons_file2.readline().split(" ") ) # The first line contains the width and height checkSize(width, height) for line in cons_file2: cons = map( lambda num: shared.toFlo(num), line.split(" ")[1:-1] ) # Remove the time step column and newline when taking the concentrations for con in cons: min_con2 = min(min_con2, con) max_con2 = max(max_con2, con) cons_data2.append(cons) elif cons_fname2.endswith(".bcons"): # Read binary file cons_file2 = shared.openFile(cons_fname2, "rb") # Read the file as a binary # The first two ints are the width and height width, = struct.unpack("i", cons_file2.read(4)) height, = struct.unpack("i", cons_file2.read(4)) checkSize(width, height) size = width * height cons2 = [] cons_length2 = 0 while True: con_str2 = cons_file2.read(4) if con_str2 == "": # While not EOF break else: # There are width * height concentration floats per time step con, = struct.unpack("f", con_str2) min_con2 = min(min_con2, con) max_con2 = max(max_con2, con) cons2.append(con) cons_length2 += 1 if cons_length2 == height: cons_data2.append(cons) cons2 = [] else: usage() print "Creating the directory if necessary..." directory = shared.ensureDir(directory) if directory[-1] != "/": directory = directory + "/" cons_data = combine_cons(cons_data1, cons_data2, max_con1, min_con1, max_con2, min_con2) print "Creating snapshots..." edge, size = findSizes(width, height) # Configure the hexagon edge and window size based on the grid size index = 0 for line in cons_data: if index % 10 == 0 and index >= 21000: plotHexagons(directory, size, index, line, edge, width, height) index += 1 print "Done. Your snapshots are stored in " + directory
import numpy
import pylab as pl  # assumed: pl is matplotlib's pylab interface
import sys

import shared

def main():
    # check the given arguments
    if len(sys.argv) < 6:
        usage()
    elif len(sys.argv) == 7:
        if sys.argv[1] == "-c" or sys.argv[1] == "--no-color":
            shared.terminalRed = ""
            shared.terminalReset = ""
            filename = sys.argv[2]
            filename2 = sys.argv[3]
            directory = sys.argv[4]
            measuring = sys.argv[5]
            mutation = sys.argv[6]
        else:
            usage()
    else:
        filename = sys.argv[1]
        filename2 = sys.argv[2]
        directory = sys.argv[3]
        measuring = sys.argv[4]
        mutation = sys.argv[5]

    # open the input files and ensure the directory exists
    f = shared.openFile(filename, "r")
    f2 = shared.openFile(filename2, "r")
    directory = shared.ensureDir(directory)

    # split the lines to get data
    data = [line.split() for line in f]
    file_len = len(data) - 1
    max_x = file_len
    f.close()
    data2 = [line.split() for line in f2]
    file_len2 = len(data2) - 1
    max_x2 = file_len2
    f2.close()
    # sanity check: the two files should cover the same number of time steps
    if max_x != max_x2:
        print "Warning: the two files have different numbers of time steps."

    # number of columns in each file: one per cell, plus the time column
    cn = shared.toInt(data[0][0]) * shared.toInt(data[0][1]) + 1
    cn2 = shared.toInt(data2[0][0]) * shared.toInt(data2[0][1]) + 1

    # create a matrix to store the data obtained from the files
    m2p = numpy.zeros(shape=(max_x, cn + cn2))

    # put the data coming from the files into the matrix
    for i in range(2, file_len):
        for j in range(0, cn + cn2):
            if j < cn:
                m2p[i][j] = shared.toFlo(data[i][j])
            elif j == cn:
                # column cn is the second file's time column; it is echoed rather than stored
                print data2[i][j - cn]
            else:
                # the second file's concentrations are scaled by 2
                m2p[i][j] = 2 * shared.toFlo(data2[i][j - cn])

    # plot every cell's column, coloring by column index modulo 4
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    color = 0
    for i in range(1, cn + cn2):
        if i % 4 == 0:
            pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'r')
        elif i % 4 == 1:
            pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'g')
        elif i % 4 == 2:
            pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'b')
        else:
            pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'c')
    pl.title(measuring + " " + mutation + " All Cells")
    pl.savefig(directory + "/" + mutation + "_all.png", format="png")
    pl.close()

    # plot the per-time-step average across cells
    average = []
    for i in range(0, max_x):
        average.append(float(sum(m2p[i][1:])) / float(len(m2p[i][1:])))
    pl.plot(m2p[0:max_x, 0], average, colors[color])
    if color == len(colors) - 1:
        color = 0
    else:
        color += 1
    pl.title(measuring + " " + mutation + " Average")
    pl.savefig(directory + "/" + mutation + "_avg.png", format="png")
    pl.close()
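Assuming this script is saved as plot-two-files.py (a placeholder name, as are the file and label values), the positional form parsed above would be invoked like:

    python plot-two-files.py run1.txt run2.txt plots concentration wildtype
    # or, with colored terminal output disabled:
    python plot-two-files.py -c run1.txt run2.txt plots concentration wildtype

This writes plots/wildtype_all.png and plots/wildtype_avg.png.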
import struct
import sys

import shared

def main():
    print 'Reading command-line arguments...'
    args = sys.argv[1:]
    if len(args) == 2:
        cons_fname = args[0]
        directory = args[1]
    else:
        usage()

    print 'Reading the concentrations file...'
    min_con = float('inf')
    max_con = 0
    cons_data = []
    if cons_fname.endswith('.cons'):  # Read ASCII file
        cons_file = shared.openFile(cons_fname, 'r')
        # The first line contains the width and height
        width, height = map(lambda num: shared.toInt(num), cons_file.readline().split(' '))
        checkSize(width, height)
        for line in cons_file:
            # Remove the time step column and newline when taking the concentrations
            cons = map(lambda num: shared.toFlo(num), line.split(' ')[1:-1])
            for con in cons:
                min_con = min(min_con, con)
                max_con = max(max_con, con)
            cons_data.append(cons)
    elif cons_fname.endswith('.bcons'):  # Read binary file
        cons_file = shared.openFile(cons_fname, 'rb')
        # The first two ints are the width and height
        width, = struct.unpack('i', cons_file.read(4))
        height, = struct.unpack('i', cons_file.read(4))
        checkSize(width, height)
        size = width * height
        cons = []
        cons_length = 0
        while True:
            con_str = cons_file.read(4)
            if con_str == '':  # EOF
                break
            # There are width * height concentration floats per time step
            con, = struct.unpack('f', con_str)
            min_con = min(min_con, con)
            max_con = max(max_con, con)
            cons.append(con)
            cons_length += 1
            if cons_length == size:
                cons_data.append(cons)
                cons = []
                cons_length = 0
    else:
        usage()

    print 'Creating the directory if necessary...'
    directory = shared.ensureDir(directory)
    if directory[-1] != '/':
        directory = directory + '/'

    print 'Creating snapshots...'
    # Configure the hexagon edge and window size based on the grid size
    edge, size = findSizes(width, height)
    index = 0
    for line in cons_data:
        if index % 10 == 0 and index >= 50000:
            plotHexagons(directory, size, index, line, min_con, max_con, edge, width, height)
        index += 1
    print 'Done. Your snapshots are stored in ' + directory