def main():
	# check the given arguments
	if len(sys.argv) < 3:
		usage()
	elif len(sys.argv) == 4:
		if sys.argv[1] == "-c" or sys.argv[1] == "--no-color":
			shared.terminalRed = ""
			shared.terminalReset = ""
			filename = sys.argv[2]
			spaces = sys.argv[3]
		else:
			usage()
	else:
		filename = sys.argv[1]
		spaces = sys.argv[2]
	
	# open the input file and check to ensure 'spaces' is an integer
	f = shared.openFile(filename, "r")
	spaces = shared.toInt(spaces)
	
	# replace spaces with tabs
	ofile = ""
	for line in f:
		count = 0
		while line[:spaces] == " " * spaces:
			line = line[spaces:]
			count += 1
		ofile = ofile + "\t" * count + line
	
	f.close()
	newfile = shared.openFile(filename, "w")
	newfile.write(ofile)
	newfile.close()
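
# NOTE: every example in this listing relies on a small helper module named
# 'shared'. Its implementation is not shown here; the sketch below is an
# assumption about what shared.py roughly contains, reconstructed from how the
# helpers are called in these examples (openFile, toInt, toFlo, ensureDir,
# widthAndHeight, terminalRed/terminalReset) -- it is not the project's actual code.
import os
import sys

terminalRed = "\x1b[31m"    # ANSI escape codes; the -c/--no-color flag clears these
terminalReset = "\x1b[0m"

def openFile(filename, mode="r"):
	# open a file, printing an error and exiting on failure
	try:
		return open(filename, mode)
	except IOError:
		print terminalRed + "Couldn't open " + filename + "!" + terminalReset
		sys.exit(1)

def toInt(value):
	# convert a string to an int, exiting with an error if it is not an integer
	try:
		return int(value)
	except ValueError:
		print terminalRed + "'" + str(value) + "' is not an integer!" + terminalReset
		sys.exit(1)

def toFlo(value):
	# convert a string to a float, exiting with an error if it is not a number
	try:
		return float(value)
	except ValueError:
		print terminalRed + "'" + str(value) + "' is not a number!" + terminalReset
		sys.exit(1)

def ensureDir(directory):
	# create the directory if it does not exist and return its name
	if not os.path.isdir(directory):
		os.makedirs(directory)
	return directory

def widthAndHeight(fields, filename):
	# read the tissue width and height from the first line of a data file
	if len(fields) < 2:
		print terminalRed + filename + " does not begin with a width and height!" + terminalReset
		sys.exit(1)
	return toInt(fields[0]), toInt(fields[1])
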
def main():
    print 'Reading command-line arguments...'
    args = sys.argv[1:]  # Remove the name of the program from the arguments
    num_args = len(args)
    if num_args == 3:  # There are three arguments, each of which is required
        input_file = shared.openFile(args[0], 'r')  # The input parameter sets
        output_fname = args[1]
        output_file = shared.openFile(output_fname,
                                      'w')  # The output parameter sets
        num_output_params = shared.toInt(
            args[2]
        )  # How many parameters each set should have in the output file
    else:
        usage()

    print 'Converting each parameter set...'
    num_input_params = -1
    for line in input_file:  # For every parameter set
        if len(line) > 1 and line[0] != '#':  # Skip blank lines and comments
            input_set = line.split(',')
            if num_input_params == -1:  # Find the input format based on the first parameter set found
                num_input_params = len(input_set)
            else:
                output_file.write(
                    '\n')  # Print a newline before each non-first set

            # str.replace returns a new string, so assign the result back to strip the newline from the last parameter
            input_set[num_input_params - 1] = input_set[num_input_params - 1].replace('\n', '')

            # Convert the set to the master (88) format
            base_set = ['0'] * 88
            for par in range(num_input_params):
                base_index = set_formats[num_input_params][par]
                base_set[base_index] = input_set[par]

            # Convert the master format to the specified one
            output_set = ['0'] * num_output_params
            for par in range(num_output_params):
                output_index = set_formats[num_output_params][par]
                output_set[par] = base_set[output_index]

            # Write the results to the output file
            output_file.write(output_set[0])
            for par in range(1, num_output_params):
                output_file.write(',' + output_set[par])

    print 'Closing files...'
    input_file.close()
    output_file.close()

    print 'Done. Your newly formatted parameter sets are stored in ' + output_fname
def main():
	# check the given arguments
	if len(sys.argv) < 3:
		usage()
	elif len(sys.argv) == 4:
		if sys.argv[1] == "-c" or sys.argv[1] == "--no-color":
			shared.terminalRed = ""
			shared.terminalReset = ""
			filename1 = sys.argv[2]
			filename2 = sys.argv[3]
		else:
			usage()
	else:
		filename1 = sys.argv[1]
		filename2 = sys.argv[2]
	
	# open input files
	f1 = shared.openFile(filename1, "r")
	f2 = shared.openFile(filename2, "r")
	
	# make a list for each file containing each line of it as an element
	list1 = []
	list2 = []
	for line in f1:
		list1.append(line)
	for line in f2:
		list2.append(line)
	f1.close()
	f2.close()
	
	size = len(list1)
	unique1 = 0
	unique2 = 0

	# count the lines unique to each file
	for line in list1:
		if line not in list2:
			unique1 += 1
	
	for line in list2:
		if line not in list1:
			unique2 += 1
	
	# print the results
	print filename1, "has", unique1, "unique lines."
	print filename2, "has", unique2, "unique lines."
	print "There are", size - unique1, "lines that appear in both files."
def run_sensitivity(sense_args, sim_args, ppn, pbs_count, cluster_name="biomath"):
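	# Run the sensitivity program either locally (cluster_name is None) or by
	# writing a PBS job script and submitting it to the named queue with qsub.
	# (space_split and JOB_NAME are assumed to be defined elsewhere in this script.)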
	if " -a " not in sense_args:
		sense_args += " -a "
	sense_args += sim_args
	
	if cluster_name == None:
		return subprocess.Popen(space_split(sense_args))
	else:
		memory_limit = "3GB"
		disk_limit = "500MB"	
		wall_time = "24:00:00"
		job = shared.openFile("pbs-job-"+str(pbs_count), "w")
		job.write('''
#PBS -N ''' + JOB_NAME + ''' 
#PBS -l nodes=1:ppn='''+str(ppn)+'''
#PBS -l mem=''' + memory_limit + '''
#PBS -l file=''' + disk_limit + '''
#PBS -q ''' + cluster_name + '''
#PBS -j oe
#PBS -o sensitivity_'''+str(pbs_count)+'''.output
#PBS -l walltime='''+wall_time+'''
cd $PBS_O_WORKDIR

''' + sense_args + '''\n''')
		job.close()
		return subprocess.Popen(["qsub", "pbs-job-"+str(pbs_count)])
def run_sensitivity(sense_args,
                    sim_args,
                    ppn,
                    pbs_count,
                    cluster_name="biomath"):
    if " -a " not in sense_args:
        sense_args += " -a "
    sense_args += sim_args

    if cluster_name == None:
        return subprocess.Popen(space_split(sense_args))
    else:
        memory_limit = "3GB"
        disk_limit = "500MB"
        wall_time = "24:00:00"
        job = shared.openFile("pbs-job-" + str(pbs_count), "w")
        job.write('''
#PBS -N ''' + JOB_NAME + ''' 
#PBS -l nodes=1:ppn=''' + str(ppn) + '''
#PBS -l mem=''' + memory_limit + '''
#PBS -l file=''' + disk_limit + '''
#PBS -q ''' + cluster_name + '''
#PBS -j oe
#PBS -o sensitivity_''' + str(pbs_count) + '''.output
#PBS -l walltime=''' + wall_time + '''
cd $PBS_O_WORKDIR

''' + sense_args + '''\n''')
        job.close()
        return subprocess.Popen(["qsub", "pbs-job-" + str(pbs_count)])
def parse_files(file_in):
    # open the input file and ensure the directory exists
    fin = shared.openFile(file_in, "r")

    # split the lines to get data
    data = []
    names = []
    for line in fin:
        #print line
        if names == []:
            names = line.split(',')[1:]
        data += [line.split(',')[:-1]]

    #Number of parameters is the number of lines, minus one for the name line at the top
    params = len(data) - 1
    #Number of sensitivity values is the length of any one line array, minus one for the "PASSED" or "FAILED" message, and minus one for the set number
    features = len(data[1]) - 2
    #Create the sense array that will actually contain the data.
    sense = numpy.zeros(shape=(params, features))

    #Put the sensitivity values into matrix. The +1's take into account that the indexing skips the first item in each loop.
    for i in range(1, params + 1):
        for j in range(1, features + 1):
            num = data[i][j]
            if ("FAILED" in num) or (
                    "PASSED"
                    in num):  #This shouldn't ever be true, but just in case...
                num = 0
            num = num_check(float(num))
            sense[i - 1][j - 1] = num

    # close the file
    fin.close()
    return sense, names
def parse_files(file_in):
	# open the input file and ensure the directory exists
	fin = shared.openFile(file_in, "r")
	
	# split the lines to get data
	data = []
	names = []
	for line in fin:
		#print line
		if names == []:
			names = line.split(',')[1:]
		data += [line.split(',')[:-1] ]
	
	#Number of parameters is the number of lines, minus one for the name line at the top
	params = len(data) - 1 
	#Number of sensitivity values is the length of any one line array, minus one for the "PASSED" or "FAILED" message, and minus one for the set number
	features = len(data[1]) - 2 
	#Create the sense array that will actually contain the data.
	sense = numpy.zeros(shape = (params, features))
	
	#Put the sensitivity values into matrix. The +1's take into account that the indexing skips the first item in each loop.
	for i in range(1, params+1 ):
		for j in range(1, features + 1):
			num = data[i][j]
			if ("FAILED" in num) or ("PASSED" in num): #This shouldn't ever be true, but just in case...
				num = 0
			num = num_check(float(num))
			sense[i-1][j-1] = num

	# close the file
	fin.close()
	return sense, names
def main():
	# check the given arguments
	if len(sys.argv) < 5:
		usage()
	else:
		f = shared.openFile(sys.argv[1], "r")
		directory = sys.argv[2]
		image_name = sys.argv[3]
		step_size = shared.toFlo(sys.argv[4])
	
	print 'Plotting all the cells from ' + sys.argv[1] + '...'
	# split the lines to get data
	data = [line.split() for line in f]
	max_time = len(data) - 1
	
	# calculate the tissue size
	cells_width = shared.toInt(data[0][0])
	cells_height = shared.toInt(data[0][1])
	total_cells = cells_width * cells_height + 1
	
	# create a matrix to store the concentration values we obtain from the file
	cons = numpy.zeros(shape = (max_time, total_cells))
	
	# put the concentration values from the file into the matrix
	for i in range(1, max_time + 1):
		cons[i - 1][0] = shared.toFlo(data[i][0]) * step_size
		for j in range(1, total_cells):
			cons[i - 1][j] = shared.toFlo(data[i][j])
	
	# close the file
	f.close()
	
	# plot colors
	colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
	color = 0
	
	for i in range(0, total_cells,200):
		start = 0
		
		# Adjust the plotting interval for each cell to account for different columns being staggered
		# as they enter the PSM 6 minutes apart from each other
		while cons[start][i] == -1: # -1 stands for no data in the output file
			start += 1
		end = max_time - 1
		while cons[end][i] == -1:
			end -= 1

		if (i % 4 == 0):
			pl.plot(cons[start:end, 0], cons[start:end, i], 'r')
		#elif (i % 4 == 1):
			#pl.plot(cons[start:end, 0], cons[start:end, i], 'g')
		#elif (i % 4 == 2):
			#pl.plot(cons[start:end, 0], cons[start:end, i], 'b')
		#else:
			#pl.plot(cons[start:end, 0], cons[start:end, i], 'c')
	#pl.ylim((-1,100))
	pl.axis([400, 600, 0, 300])
	pl.savefig(directory + "/" + image_name + ".png", format = "png")
	pl.close()
	print 'Done. Your plot is stored in ' + directory + "/" + image_name + ".png"
def main():
	# check the given arguments
	if len(sys.argv) < 5:
		usage()
	else:
		f = shared.openFile(sys.argv[1], "r")
		directory = sys.argv[2]
		image_name = sys.argv[3]
		step_size = shared.toFlo(sys.argv[4])
	
	print 'Plotting all the cells from ' + sys.argv[1] + '...'
	# split the lines to get data
	data = [line.split() for line in f]
	max_time = len(data) - 1
	
	# calculate the tissue size
	cells_width = shared.toInt(data[0][0])
	cells_height = shared.toInt(data[0][1])
	total_cells = cells_width * cells_height + 1
	
	# create a matrix to store the concentration values we obtain from the file
	cons = numpy.zeros(shape = (max_time, total_cells))
	
	# put the concentration values from the file into the matrix
	for i in range(1, max_time + 1):
		cons[i - 1][0] = shared.toFlo(data[i][0]) * step_size
		for j in range(1, total_cells):
			cons[i - 1][j] = shared.toFlo(data[i][j])
	
	# close the file
	f.close()
	
	# plot colors
	colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
	color = 0
	
	for i in range(1, total_cells):
		start = 0
		
		# Adjust the plotting interval for each cell to account for different columns being staggered
		# as they enter the PSM 6 minutes apart from each other
		while cons[start][i] == -1: # -1 stands for no data in the output file
			start += 1
		end = max_time - 1
		while cons[end][i] == -1:
			end -= 1

		if (i % 4 == 0):
			pl.plot(cons[start:end, 0], cons[start:end, i], 'r')
		elif (i % 4 == 1):
			pl.plot(cons[start:end, 0], cons[start:end, i], 'g')
		elif (i % 4 == 2):
			pl.plot(cons[start:end, 0], cons[start:end, i], 'b')
		else:
			pl.plot(cons[start:end, 0], cons[start:end, i], 'c')
	
	pl.savefig(directory + "/" + image_name + ".png", format = "png")
	pl.close()
	print 'Done. Your plot is stored in ' + directory + "/" + image_name + ".png"
def main():
	print 'Reading command-line arguments...'
	args = sys.argv[1:] # Remove the name of the program from the arguments
	num_args = len(args)
	if num_args == 3: # There are three arguments, each of which is required
		input_file = shared.openFile(args[0], 'r') # The input parameter sets
		output_fname = args[1]
		output_file = shared.openFile(output_fname, 'w') # The output parameter sets
		num_output_params = shared.toInt(args[2]) # How many parameters each set should have in the output file
	else:
		usage()
	
	print 'Converting each parameter set...'
	num_input_params = -1
	for line in input_file: # For every parameter set
		if len(line) > 1 and line[0] != '#': # Skip blank lines and comments
			input_set = line.split(',')
			if num_input_params == -1: # Find the input format based on the first parameter set found
				num_input_params = len(input_set)
			else:
				output_file.write('\n') # Print a newline before each non-first set
			
			# str.replace returns a new string, so assign the result back to strip the newline from the last parameter
			input_set[num_input_params - 1] = input_set[num_input_params - 1].replace('\n', '')
			
			# Convert the set to the master (88) format
			base_set = ['0'] * 88
			for par in range(num_input_params):
				base_index = set_formats[num_input_params][par]
				base_set[base_index] = input_set[par]
			
			# Convert the master format to the specified one
			output_set = ['0'] * num_output_params
			for par in range(num_output_params):
				output_index = set_formats[num_output_params][par]
				output_set[par] = base_set[output_index]
			
			# Write the results to the output file
			output_file.write(output_set[0])
			for par in range(1, num_output_params):
				output_file.write(',' + output_set[par])
	
	print 'Closing files...'
	input_file.close()
	output_file.close()
	
	print 'Done. Your newly formatted parameter sets are stored in ' + output_fname
def main():
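	# Copy the parameter set printed on the line after each "Found a good set"
	# message into the output file, dropping everything up to the first comma.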
	if len(sys.argv) != 3:
		usage()
	else:
		input_filename = sys.argv[1]
		output_filename = sys.argv[2]

	input_file = shared.openFile(input_filename, "r")
	output_file = shared.openFile(output_filename, "w")

	lines = [line for line in input_file]
	for i in range(len(lines)):
		if "Found a good set" in lines[i]:
			good_set = lines[i+1][:]
			starting_index = good_set.find(',')
			good_set = good_set[starting_index+1:]
			output_file.write(good_set)

	input_file.close()
	output_file.close()
def main():
    if len(sys.argv) != 3:
        usage()
    else:
        input_filename = sys.argv[1]
        output_filename = sys.argv[2]

    input_file = shared.openFile(input_filename, "r")
    output_file = shared.openFile(output_filename, "w")

    lines = [line for line in input_file]
    for i in range(len(lines)):
        if "Found a good set" in lines[i]:
            good_set = lines[i + 1][:]
            starting_index = good_set.find(',')
            good_set = good_set[starting_index + 1:]
            output_file.write(good_set)

    input_file.close()
    output_file.close()
def readData(filename):
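	# Read a tissue data file: the first line holds the width and height of the
	# tissue, and every following line is stored as a list of its space-separated values.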
	f = shared.openFile(filename, 'r')
	parsedFile = []
	index = 0
	width = height = 0
	for line in f:
		line = line.strip()
		if (index == 0):
			width, height = shared.widthAndHeight(line.split(' '), filename)
			if width < 4 or height < 4 or (width % 2 != 0) or (height % 2 != 0):
				print 'The size of the tissue must be at least 4x4 and its width and height must be even numbers.'
				exit(2)
		else:
			aux = line.split(' ')
			parsedFile.append(aux)
		index += 1
	f.close()
	return width, height, parsedFile
def findMinMax(filename):
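	# Scan a concentrations file (skipping its header line) and return the smallest
	# and largest values found, ignoring the time column of each line.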
	f = shared.openFile(filename, 'r')
	minCon = float('inf')
	maxCon = 0
	first = True
	for line in f:
		if not first:
			lineNums = line.split()
			for i in range(len(lineNums)):
				lineNums[i] = shared.toFlo(lineNums[i])
			num = min(lineNums[1:])
			if (num < minCon):
				minCon = num
			num = max(lineNums[1:])
			if (num > maxCon):
				maxCon = num
		first = False
	f.close()
	return minCon, maxCon
def main():
	# check the given arguments
	with open(sys.argv[1]) as f:
		content = f.readlines()
	#f = shared.openFile(, "r")
	#directory = sys.argv[2]
	'''
	data = [line.split() for line in f]
	max_time = len(data) - 1

	# calculate the tissue size
	cons = numpy.zeros(shape = (max_time, 71))

	# create array for row plotting

	# put the concentration values from the file into the matrix
	for i in range(0, len(data)):
		for j in range(0, 71):
			cons[i][j] = shared.toFlo(data[i][j])
			print cons[i][j]
	'''
	#print content
	wr = shared.openFile("newset.params", "w")
	newline = ""
	needlist = [0,2,3,5,6,8,9,11,12,14,15,17,18,20,21,23,24,29,30,31,33,38,39,40,42,47,48,49,51,53,54,56,57,59,60,62,63,65,66,67,68,69,70]
	#print len(needlist)
	for i in range(0, len(content)):
		#print content[i]
		newline = ""
		newlist = [x.strip() for x in content[i].split(',')]
		#print len(newlist)
		#print len(needlist)
		for j in range(0, len(newlist)):
			if j in needlist:
				if newline == "":
					newline += newlist[j]
				else:
					newline += "," + newlist[j]
		wr.write(newline)
		wr.write("\n")

	wr.close()
def write_bar_data(yvals, errorvals, out_file, ynames=None, xnames=None):
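	# Write bar-chart data as CSV: a header row of parameter names (xnames), then for
	# each feature a row of its values followed by a row of their standard deviations.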
	data_file = shared.openFile(out_file, "w")
	data_file.write("Feature,")
	if xnames != None:
		for name in xnames:
			data_file.write(str(name) + ',')
	else:
		data_file.write("Parameters...")
	data_file.write('\n')
	for feature in range(len(yvals)):
		data_file.write('\n')
		if ynames != None:
			data_file.write(str(ynames[feature]) + ',')
		for param in range(len(yvals[feature])):
			data_file.write(str(yvals[feature][param]) + ',')
		data_file.write('\nstdev:,')
		for param in range(len(errorvals[feature])):
			data_file.write(str(errorvals[feature][param]) + ',')
	data_file.close()
	return
def write_bar_data(yvals, errorvals, out_file, ynames=None, xnames=None):
    data_file = shared.openFile(out_file, "w")
    data_file.write("Feature,")
    if xnames != None:
        for name in xnames:
            data_file.write(str(name) + ',')
    else:
        data_file.write("Parameters...")
    data_file.write('\n')
    for feature in range(len(yvals)):
        data_file.write('\n')
        if ynames != None:
            data_file.write(str(ynames[feature]) + ',')
        for param in range(len(yvals[feature])):
            data_file.write(str(yvals[feature][param]) + ',')
        data_file.write('\nstdev:,')
        for param in range(len(errorvals[feature])):
            data_file.write(str(errorvals[feature][param]) + ',')
    data_file.close()
    return
def main():
	print 'Reading command-line arguments...'
	args = sys.argv[1:]
	if len(args) == 2:
		cons_fname = args[0]
		directory = args[1]
	else:
		usage()
	
	print 'Reading the concentrations file...'
	min_con = float('inf')
	max_con = 0
	cons_data = []
	if cons_fname.endswith('.cons'): # Read ASCII file
		cons_file = shared.openFile(cons_fname, 'r')
		width, height = map(lambda num: shared.toInt(num), cons_file.readline().split(' ')) # The first line contains the width and height
		checkSize(width, height)
		for line in cons_file:
			cons = map(lambda num: shared.toFlo(num), line.split(' ')[1:-1]) # Remove the time step column and newline when taking the concentrations
			for con in cons:
				min_con = min(min_con, con)
				max_con = max(max_con, con)
			cons_data.append(cons)
	elif cons_fname.endswith('.bcons'): # Read binary file
		cons_file = shared.openFile(cons_fname, 'rb') # Read the file as a binary
		# The first two ints are the width and height
		width, = struct.unpack('i', cons_file.read(4))
		height, = struct.unpack('i', cons_file.read(4))
		checkSize(width, height)
		size = width * height
		cons = []
		cons_length = 0
		while True:
			con_str = cons_file.read(4)
			if con_str == '': # While not EOF
				break
			else:
				# There are width * height concentration floats per time step
				con, = struct.unpack('f', con_str)
				min_con = min(min_con, con)
				max_con = max(max_con, con)
				cons.append(con)
				cons_length += 1
				if cons_length == height:
					cons_data.append(cons)
					cons = []
	else:
		usage()
	
	print 'Creating the directory if necessary...'
	directory = shared.ensureDir(directory)
	if (directory[-1] != '/'):
		directory = directory + '/'
	
	print 'Creating snapshots...'
	edge, size = findSizes(width, height) # Configure the hexagon edge and window size based on the grid size
	index = 0
	for line in cons_data:
		if (index % 10 == 0 and index >= 50000):
			plotHexagons(directory, size, index, line, min_con, max_con, edge, width, height)
		index += 1
	
	print 'Done. Your snapshots are stored in ' + directory
def main():
    print 'Reading command line arguments...'
    # check the given arguments
    if len(sys.argv) < 8:
        usage()
    else:
        folder = sys.argv[1]
        parsets = shared.toInt(sys.argv[2])
        image_name = sys.argv[3]
        feature = sys.argv[4]
        ofolder = sys.argv[5]
        post_width = shared.toInt(sys.argv[6])
        excel_name = sys.argv[7]

    num_mutants = 6
    index = 0
    mutants = ["wildtype", "delta", "her1", "her7", "her7her13", "her13"]
    markers = ['o', '^', 's', '*', 'h', 'D']
    colors = ['k', 'b', 'g', 'r', 'c', 'm']

    features = []
    if (feature == "period" or feature == "amplitude"):
        features.append(feature)
    else:
        features.append("period")
        features.append("amplitude")

    for feat in features:
        # Create excel file in which the data used to create the plots will be stored
        excel_file = shared.openFile(
            ofolder + "/" + excel_name + "-" + feat + ".csv", "w")
        print "Plotting ", feat, "..."
        first_avg = 0
        num_first = 0

        for index in range(num_mutants):
            mutant = mutants[index]
            print '    Running ' + mutant + '...'
            marker = markers[index]
            color = colors[index]
            # open the input file
            f = shared.openFile(
                folder + "/" + mutant + "/set_0_" + feat + "_mh1.feats", "r")

            # split the lines to get data
            data = [line.split(",") for line in f]

            # calculate the tissue size
            height = shared.toInt(data[0][0])
            width = shared.toInt(data[0][1])
            xmin = 0
            xmax = 0.9 * width

            buckets = 9  # split the interval into 9 chunks
            chunk = (width - post_width) / (
                buckets - 1)  # the width of the intervals after the posterior

            indexes = [0 for i in range(buckets)]
            for bucket in range(buckets):
                if bucket == 0:
                    indexes[bucket] = post_width / 2
                else:
                    indexes[bucket] = (post_width +
                                       (bucket - 1) * chunk) + (chunk / 2.0)
            averages = [0 for i in range(buckets)]
            num_points = [0 for i in range(buckets)]
            stderr = [0 for i in range(buckets)]

            if mutant == "wildtype":
                excel_file.write("mutant,")
                for index in indexes:
                    excel_file.write(str(index) + ",")
                excel_file.write("\n")

                print '        Averaging the first bucket for the wildtype...'  # all other data points will be averaged to this value
                for parset in range(parsets):
                    # open the input file and ensure the directory exists
                    f = shared.openFile(
                        folder + "/" + mutant + "/set_" + str(parset) + "_" +
                        feat + "_mh1.feats", "r")

                    # split the lines to get data
                    data = [line.split(",") for line in f]
                    lines = len(data)

                    for line in range(1, lines, 2):
                        for col in range(len(data[line]) - 1):
                            pos = shared.toInt(data[line][col])
                            val = shared.toFlo(data[line + 1][col])
                            if pos < post_width:
                                first_avg += val
                                num_first += 1

                first_avg /= num_first

            for parset in range(parsets):
                print '        Normalizing and analyzing data from set ' + str(
                    parset) + '...'
                # open the input file and ensure the directory exists
                f = shared.openFile(
                    folder + "/" + mutant + "/set_" + str(parset) + "_" +
                    feat + "_mh1.feats", "r")

                # split the lines to get data
                data = [line.split(",") for line in f]
                lines = len(data)

                for line in range(1, lines, 2):
                    for col in range(len(data[line]) - 1):
                        pos = shared.toInt(data[line][col])
                        val = shared.toFlo(data[line + 1][col]) / first_avg

                        if pos < post_width:
                            averages[0] += val
                            num_points[0] += 1
                        else:
                            averages[(pos - post_width) / chunk + 1] += val
                            num_points[(pos - post_width) / chunk + 1] += 1

            # ignore the buckets which don't have data
            buckets_with_data = buckets

            for bucket in range(buckets):
                if post_width + (
                    (bucket - 1) * chunk) + chunk - 1 > (0.9 * width):
                    buckets_with_data -= 1
                else:
                    if num_points[bucket] > 0:
                        averages[bucket] /= num_points[bucket]
                    elif feat == "amplitude":
                        averages[bucket] = 0
                    else:
                        buckets_with_data -= 1

            buckets = buckets_with_data

            print '        Calculating standard error...'
            for parset in range(parsets):
                f = shared.openFile(
                    folder + "/" + mutant + "/set_" + str(parset) + "_" +
                    feat + "_mh1.feats", "r")

                data = [line.split(",") for line in f]
                lines = len(data)

                for line in range(1, lines, 2):
                    for col in range(len(data[line]) - 1):
                        pos = shared.toInt(data[line][col])
                        val = shared.toFlo(data[line + 1][col]) / first_avg

                        if pos < post_width:
                            stderr[0] += (val - averages[0])**2
                        else:
                            stderr[(pos - post_width) / chunk + 1] += (
                                val -
                                averages[(pos - post_width) / chunk + 1])**2

            for bucket in range(buckets):
                if (num_points[bucket] > 0):
                    stderr[bucket] = math.sqrt(stderr[bucket] /
                                               num_points[bucket])
                    stderr[bucket] /= math.sqrt(num_points[bucket])
                else:
                    stderr[bucket] = 0

            indexes = indexes[:buckets]
            averages = averages[:buckets]
            stderr = stderr[:buckets]
            # Print the means and standard deviations to the excel_file
            excel_file.write(mutant + ",")
            for average in averages:
                excel_file.write(str(average) + ",")
            excel_file.write("\n,")
            for stder in stderr:
                excel_file.write(str(stder) + ",")
            excel_file.write("\n")

            plt.errorbar(indexes,
                         averages,
                         stderr,
                         fmt='ro',
                         linestyle='-',
                         marker=marker,
                         color=color,
                         label=mutant)
        plt.legend(prop={'size': 8}, loc=2)
        pylab.xlim([xmin, xmax])
        excel_file.close()
        plt.savefig(ofolder + "/" + image_name + "_" + feat + ".png",
                    format="png")
        plt.close()
        print "Done. Your " + feat + " plot is stored in " + ofolder + "/" + image_name + "_" + feat + ".png"
        print "The data behind the plot can be found in " + ofolder + "/" + excel_name + "-" + feat + ".csv"
def main():
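    # Each entry in 'gradients' maps a gradient scenario to the rate indices it
    # rescales; 'end_rate' and 'incs' control how far (and in which direction) the
    # gradient end value is swept, starting from 100.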
    gradients = {
        0: ['28', '29'],
        1: ['37'],
        2: ['41'],
        3: ['34', '35'],
        4: ['38', '39'],
        5: ['4', '5'],
        6: ['34', '35', '37'],
        7: ['38', '39', '41'],
        8: ['4', '5', '7'],
        9: ['28', '32', '15']
    }
    end_rate = [-1, 1001, 1001, 1001, 1001, -1, 1001, 1001, -1, -1]
    incs = [-1, 1, 1, 1, 1, -1, 1, 1, -1, -1]

    print 'Reading command-line arguments...'
    args = sys.argv[1:]  # Remove the name of the program from the arguments
    num_args = len(args)
    num_req_args = 2
    req_args = [
        False
    ] * num_req_args  # If every required argument was given then req_args will be all true
    if num_args >= num_req_args:
        # Arguments with default values
        base_gradients_file = None
        simulation = './simulation'
        sim_arguments = ""

        for arg in range(0, num_args - 1, 2):
            option = args[arg]
            value = args[arg + 1]
            if option == '-s' or option == '--sets':
                sets_fname = value  # Parameter sets file
                req_args[0] = True
            elif option == '-o' or option == '--output-dir':
                odir = value
                req_args[1] = True
            elif option == '-g' or option == '--base-gradients':
                base_gradients_file = shared.openFile(
                    value, 'r'
                )  # Gradients to add to every simulation on top of the changing ones
            elif option == '-S' or option == '--simulation':
                simulation = value  # Filename of the simulation
            elif option == '-a' or option == '--arguments':
                for a in range(
                        arg + 1,
                        num_args):  # Arguments to pass to the simulation
                    sim_arguments += ' ' + args[a]
                break
            else:
                usage()
        for arg in req_args:  # Check to ensure every required argument was entered
            if not arg:
                usage()
    else:
        usage()

    print 'Reading the base gradients file...'
    # Add any given base gradients to a string to be passed to each simulation's gradients file
    if base_gradients_file is None:
        base_gradients = ''
    else:
        base_gradients = ''
        for line in base_gradients_file:
            base_gradients += line

    print 'Start creating gradient files and pbs-jobs for each gradient combination'
    start_point = '9'
    end_point = '49'

    # For every gradient option
    num_gradients = len(gradients)
    jobs_to_submit = []
    for i in range(num_gradients):
        si = str(i)
        for j in range(100, end_rate[i], incs[i]):
            sj = str(j)
            grad_id = '-' + si + '-' + sj
            grad_fname = odir + "/grad" + grad_id + ".gradient"
            scores_fname = odir + "/scores" + grad_id + ".csv"
            subprocess.call(["mkdir", "-p", odir])
            grad_file = shared.openFile(grad_fname, 'w')

            # Write the base gradient to the file
            grad_file.write(base_gradients)
            for rate in gradients[i]:
                grad_file.write(rate + ' (' + start_point + ' 100) (' +
                                end_point + ' ' + sj + ')\n')

            # Create the PBS job file
            pbs_fname = odir + '/pbs-job' + grad_id
            jobs_to_submit.append(pbs_fname)
            pbs_file = shared.openFile(pbs_fname, 'w')
            pbs_file.write("""
#PBS -N gradient-run""" + grad_id + """
#PBS -l nodes=1:ppn=1
#PBS -l mem=500MB
#PBS -l file=500MB
#PBS -q biomath
#PBS -j oe
#PBS -o output.txt
#PBS -l walltime=360:00:00

cd $PBS_O_WORKDIR\n""")

            # Write the simulation call to the PBS job file
            pbs_file.write(simulation + sim_arguments + ' -i ' + sets_fname +
                           ' -r ' + grad_fname + ' -E ' + scores_fname)
            pbs_file.close()
            if j % 50 == 0:
                queue_jobs = get_num_jobs()
                # Add parsing of queue_jobs
                while (queue_jobs > 150):
                    time.sleep(300)
                    queue_jobs = get_num_jobs()
                for job in range(min(100, len(jobs_to_submit))):
                    print("Submitting job " + jobs_to_submit[job])
                    subprocess.call(['qsub', jobs_to_submit[job]])
                jobs_to_submit = jobs_to_submit[100:]
def main():
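    # Plot the average synchronization score across the tissue for each mutant,
    # with standard-error bars, and write the plotted numbers to a CSV file.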
    # check the given arguments
    if len(sys.argv) < 6:
        usage()
    else:
        folder = sys.argv[1]
        parsets = shared.toInt(sys.argv[2])
        ofolder = sys.argv[3]
        image_name = sys.argv[4]
        excel_name = sys.argv[5]

    mutants = ["wildtype", "delta", "her1", "her7", "her7her13", "her13"]
    markers = ['o', '^', 's', '*', 'h', 'D']
    colors = ['k', 'b', 'g', 'r', 'c', 'm']
    num_mutants = 6

    # Create excel file in which the data used to create the plots will be stored
    excel_file = shared.openFile(ofolder + "/" + excel_name + "-sync.csv", "w")

    for index in range(num_mutants):

        mutant = mutants[index]
        marker = markers[index]
        color = colors[index]

        # open the first file to get the height, width and interval
        f = shared.openFile(folder + "/" + mutant + "/set_0_sync_mh1.feats",
                            "r")

        # split the lines to get data
        data = [line.split(",") for line in f]

        # calculate the tissue size
        height = shared.toInt(data[0][0])
        interval = shared.toFlo(data[0][1])
        #split_time = shared.toFlo(data[0][2])
        width = len(data[1]) - 1

        indexes = [0 for i in range(width)]
        averages = [0 for i in range(width)]
        stderr = [0 for i in range(width)]

        for parset in range(parsets):
            f = shared.openFile(
                folder + "/" + mutant + "/set_" + str(parset) +
                "_sync_mh1.feats", "r")

            # split the lines to get data
            data = [line.split(",") for line in f]

            for col in range(width):
                for line in range(1, height + 1):
                    averages[col] += shared.toFlo(data[line][col])

            f.close()

        for col in range(width):
            indexes[col] = (((interval / 2) * col +
                             (interval / 2) * col + interval) / 2) / 6
            averages[col] /= height * parsets

        if mutant == "wildtype":
            excel_file.write("mutant,")
            for index in indexes:
                excel_file.write(str(index) + ",")
            excel_file.write("\n")

        for parset in range(parsets):
            f = shared.openFile(
                folder + "/" + mutant + "/set_" + str(parset) +
                "_sync_mh1.feats", "r")

            data = [line.split(",") for line in f]

            # std error = std deviation / sqrt(num data points)
            for col in range(width):
                for line in range(1, height + 1):
                    stderr[col] += (shared.toFlo(data[line][col]) -
                                    averages[col])**2
                stderr[col] = math.sqrt(stderr[col] / (height * parsets))
                stderr[col] /= math.sqrt(height * parsets)

        # Print the means and standard deviations to the excel_file
        excel_file.write(mutant + ",")
        for average in averages:
            excel_file.write(str(average) + ",")
        excel_file.write("\n,")
        for stder in stderr:
            excel_file.write(str(stder) + ",")
        excel_file.write("\n")

        plt.errorbar(indexes,
                     averages,
                     stderr,
                     fmt='ro',
                     linestyle='-',
                     marker=marker,
                     color=color,
                     label=mutant)
    plt.legend(prop={'size': 8}, loc=3)
    pylab.xlim([0, (width + 1) * (interval / 2) / 6])
    plt.savefig(ofolder + "/" + image_name + ".png", format="png")
    plt.close()
def main():
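	# Plot concentration traces from a simulation output file; plot_style selects
	# what is drawn: one column ("col"), every cell ("all"), a single cell ("cell"),
	# rows of cells at a fixed time step ("row"), or a column tracked over time ("col_t").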
	# check the given arguments
	if len(sys.argv) < 8: # seven arguments are read below, so at least eight argv entries are required
		usage()
	else:
		f = shared.openFile(sys.argv[1], "r")
		directory = sys.argv[2]
		image_name = sys.argv[3]
		step_size = shared.toFlo(sys.argv[4])
		plot_style= sys.argv[5]
		plot_helper1= int(shared.toFlo(sys.argv[6]))
		plot_helper2= int(shared.toFlo(sys.argv[7]))


	print 'Plotting all the cells from ' + sys.argv[1] + '...'
	# split the lines to get data
	data = [line.split() for line in f]
	max_time = len(data) - 1
	
	# calculate the tissue size
	cells_width = shared.toInt(data[0][0])
	cells_height = shared.toInt(data[0][1])
	total_cells = cells_width * cells_height + 1
	#print cells_width
	# create a matrix to store the concentration values we obtain from the file
	cons = numpy.zeros(shape = (max_time, total_cells))
	cons_t = [0] * max_time
	# create array for row plotting
	pos = [0] * 50
	for i in range(0, 49):
		pos[i] = i
	time = [0] * max_time
	for i in range(1, max_time + 1):
		time[i - 1] = i * step_size
	# put the concentration values from the file into the matrix
	for i in range(1, max_time + 1):
		cons[i - 1][0] = shared.toFlo(data[i][0]) * step_size
		for j in range(1, total_cells):
			cons[i - 1][j] = shared.toFlo(data[i][j])
	
	# close the file
	f.close()
	
	# plot colors
	colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
	color = 0
        
        # decide which row/column/cells to plot
        #plot_col = 1
        if (plot_style == "col"):
            startpoint = plot_helper1
            interval = cells_width 
        elif (plot_style == "all"):
            startpoint =1
            interval =1
        elif (plot_style =="cell"):
            startpoint = plot_helper1
            interval = total_cells
        elif (plot_style == "col_t"):
            startpoint = plot_helper1
            interval= cells_width
            start_time = plot_helper2
            
        if (plot_style!= "row" and plot_style!="col_t"):    
            print "not in row"
            for j in range(0,plot_helper2):
	       for i in range(startpoint+j, total_cells, interval):
        
		  start = 0
		
		# Adjust the plotting interval for each cell to account for different columns being staggered
		# as they enter the PSM at intervals of 6 minutes apart frome ach other
	  	  while cons[start][i] == -1: # -1 stands for no data in the output file
		 	    start += 1
		  end = max_time - 1
		  while cons[end][i] == -1:
			 end -= 1;

		  if (i % 4 == 0):
			 pl.plot(cons[start:end, 0], cons[start:end, i], 'r')
		  elif (i % 4 == 1):
			 pl.plot(cons[start:end, 0], cons[start:end, i], 'g')
		  elif (i % 4 == 2):
			 pl.plot(cons[start:end, 0], cons[start:end, i], 'b')
		  else:
			 pl.plot(cons[start:end, 0], cons[start:end, i], 'c')
        
	elif (plot_style == "row"):
            print "in row"
            pl.plot(pos[0:49],cons[plot_helper1, 1:50],'r')
            pl.plot(pos[0:49],cons[plot_helper1, 51:100],'g')
	    pl.plot(pos[0:49],cons[plot_helper1, 101:150],'b')
            pl.plot(pos[0:49],cons[plot_helper1, 151:200],'c')


        elif (plot_style == "col_t"):
            difference=0
            if start_time > 29999:
                difference= start_time-29999
            print "tracking column"
            if (startpoint <=9):
                if start_time<29999:
                    timetodie= 24000
                    timetilappear=0   
                    i=1
                    while i<=29999:
                        #print i
                        #print startpoint
                        #print cons[timetilappear+i,startpoint]
                        cons_t[timetilappear+i]=cons[timetilappear+i,startpoint]
                        i+=1
                    timetilappear=29999
                else:
                    for i in range (start_time):
                        cons_t[i]=0
                    timetilappear=start_time
                    timetodie= (cells_width-1- startpoint)*600-1
            elif (startpoint>9 and startpoint <=49): 
                print ">9"
                timetodie= (cells_width-1- startpoint)*600
                timetilappear= (startpoint +1-10)*600+29999+difference
                #print timetodie
                #print timetilappear
                for i in range (timetilappear):
                    cons_t[i]=0      
            growth=1
            i=1
            while i<=timetodie:
                #print i
                #print growth
                #print startpoint
                #print cons[timetilappear+i,startpoint]
                if (timetilappear+i>= 90000):
                    break
                if (startpoint>= 93000):
                    break
                cons_t[timetilappear+i]=cons[timetilappear+i,startpoint]
                growth+=1
                i+=1
                if (growth %600==0 ):
                    startpoint+=1
                    growth=1
            pl.plot(time[0:max_time], cons_t, 'r')
            
        
        
       
        #new_array=[500,520,540,560,580,600,620,640,660,680,700,720,740,760,780,800,820,840]
        #pl.grid(True)
        #pl.axis([600, 700,0,1100])
        #pl.xticks(new_array)
	pl.savefig(directory + "/" + image_name + ".png", format = "png")
	pl.close()
	print 'Done. Your plot is stored in ' + directory + "/" + image_name + ".png"
def main():
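	# Split the input parameter sets into per-job .params files and submit one PBS
	# job per file and per seed, passing the seed and a score-file name to the simulation.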
	#check the given arguments
	print "Reading command-line arguments..."
	args = sys.argv[1:]
	num_args = len(args)
	req_args = [False] * 6
	num_seeds = 0
	sim_arguments = ""

	if num_args >= 6:
		for arg in range(0, num_args - 1, 2):
			option = args[arg]
			value = args[arg + 1]
			if option == '-i' or option == '--input-file':
				ifile = value
				req_args[0] = True
			elif option == '-n' or option == '--num-params':
				num_params = shared.toInt(value)
				req_args[1] = True
			elif option == '-p' or option == '--pars-per-job':
				pars_per_job = shared.toInt(value)
				req_args[2] = True
			elif option == '-d' or option == '--directory':
				folder = value
				req_args[3] = True
			elif option == '-s' or option == '--simulation':
				simulation = value
				req_args[4] = True
			elif option == '-S' or option == '--seeds':
				num_seeds = int(value)
				req_args[5] = True
			elif option == '-a' or option == '--arguments':
				for a in range(arg + 1, num_args):
					sim_arguments += ' ' + args[a]
				break
			elif option == '-h' or option == '--help':
				usage()
			else:
				usage()
		for arg in req_args:
			if not arg:
				usage()
	else:
		usage()
	
	index = 0
	
	input_file = shared.openFile(ifile, "r")
	shared.ensureDir(folder)
	for parset in range(0, num_params, pars_per_job):
		params = shared.openFile(folder + "/input" + str(index) + ".params", "w")
		for line in range(pars_per_job):
			params.write(input_file.readline())
		params.close()
		index += 1
	
	for seeds in range(num_seeds):
		seed = (seeds + 1) * 1000
		for parset in range(index):
			job = shared.openFile(folder + "/pbs-job-" + str(seed) + "-" + str(parset), 'w')
			job.write('''
#PBS -N robust-test 
#PBS -l nodes=1:ppn=1
#PBS -l mem=500mb
#PBS -l file=300mb
#PBS -q biomath
#PBS -j oe
#PBS -o ''' + folder + '''/output''' + str(seed) + "-" + str(parset) + '''.txt
#PBS -l walltime=06:00:00

cd $PBS_O_WORKDIR

''' + simulation + ' ' + sim_arguments + ' -p ' + str(pars_per_job) + ' -i ' + ifile + ''' -s ''' + str(seed) + " -M 6 -E " + folder + "/scores-" + str(seed) + "-" + str(parset) + ".csv")
			job.close()
			subprocess.call(["qsub", folder + "/pbs-job-" + str(seed) + "-" + str(parset)])
def main():
	print 'Reading command-line arguments...'
	args = sys.argv[1:] # Remove the name of the program from the arguments
	num_args = len(args)
	req_args = [False] * 3 # If every required argument was given then req_args will be all true
	if num_args >= 3:
		# Arguments with default values
		stdevs_away = 2
		round_to = 5
		
		for arg in range(0, num_args - 1, 2):
			option = args[arg]
			value = args[arg + 1]
			if option == '-s' or option == '--sets':
				sets_file = shared.openFile(value, 'r')
				req_args[0] = True
			elif option == '-c' or option == '--current-ranges':
				cur_ranges_file = shared.openFile(value, 'r')
				req_args[1] = True
			elif option == '-n' or option == '--new-ranges':
				new_ranges_fname = value
				new_ranges_file = shared.openFile(new_ranges_fname, 'w')
				req_args[2] = True
			elif option == '-d' or option == '--standard-dev':
				stdevs_away = shared.toInt(value)
			elif option == '-r' or option == '--round-to':
				round_to = shared.toInt(value)
			elif option == '-h' or option == '--help':
				usage()
			else:
				usage()
		for arg in req_args: # Check to ensure every required argument was entered
			if not arg:
				usage()
	else:
		usage()
	
	print 'Reading the parameter sets file...'
	# Parse the sets file to get the list of parameter sets
	sets = []
	for line in sets_file:
		if not(line == '' or line[0] == '#'): # Ignore blank lines and comments
			sets.append(line)
	if len(sets) < 1: # Ensure at least one set was given
		usage()
	
	print 'Reading the current ranges file...'
	# Parse the current ranges file to find the existing ranges
	par_names = []
	cur_ranges = []
	for line in cur_ranges_file:
		line = line.replace('\t', ' ')
		if not(line == '' or line[0] == '#'): # Ignore blank lines and comments
			# Get the human-readable description
			space = line.find(' ')
			if space <= 0:
				parsing_error()
			par_names.append(line[: space])
			line = line[space + 1:] # Skip past the description
			
			# Find the range bounds (bounds-check with len; Python strings are not NUL-terminated)
			start = 0
			while start < len(line) and line[start] == ' ':
				start += 1
			if start == len(line) or line[start] != '[':
				parsing_error()
			end = start + 1
			while end < len(line) and line[end] != ']':
				end += 1
			if end == len(line):
				parsing_error()
			line = line[start + 1 : end]
			bounds = map(shared.toFlo, line.split(',')) # Convert the bounds to floats
			if len(bounds) != 2:
				parsing_error()
			cur_ranges.append(bounds)
	
	print 'Calculating new ranges...'
	# Calculate each parameter's new range
	flo_sets = map(lambda ls: map(shared.toFlo, ls), map(lambda s: s.split(','), sets)) # Convert each parameter set string into an array of floats
	num_sets = len(flo_sets)
	new_ranges = []
	for p in range(len(cur_ranges)): # For every range
		# Get the mean range based on every set
		vals = []
		for s in flo_sets:
			vals.append(s[p])
		mean = sum(vals) / num_sets
		# Calculate the standard deviation from the mean
		stdev_sum = 0
		for f in vals:
			stdev_sum += (f - mean) ** 2
		stdev = math.sqrt(stdev_sum / num_sets)
		# Define new ranges based on the mean and standard deviation that are at least as narrow as the current ranges
		lower_bound = max(cur_ranges[p][0], round(mean - stdev * stdevs_away, round_to))
		upper_bound = min(cur_ranges[p][1], round(mean + stdev * stdevs_away, round_to))
		new_ranges.append([lower_bound, upper_bound])
	
	print 'Writing the new ranges to the specified output file...'
	# Write the parameter ranges to the new ranges file
	for r in range(len(new_ranges)):
		new_ranges_file.write(par_names[r] + ' [' + str(new_ranges[r][0]) + ',' + str(new_ranges[r][1]) + ']\n')
	new_ranges_file.close()
	
	print 'Done. The new ranges are in ' + new_ranges_fname
def main():
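	# Average the concentration traces of several runs (run0.txt, run1.txt, ...) onto
	# a common time grid in 0.1-unit bins, linearly interpolating missing samples,
	# and write the averaged traces to the output file.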
	# check the given arguments
	if len(sys.argv) < 3:
		usage()
	elif len(sys.argv) == 4:
		if sys.argv[1] == "-c" or sys.argv[1] == "--no-color":
			shared.terminalRed = ""
			shared.terminalReset = ""
			directory = sys.argv[2]
			filename = sys.argv[3]
		else:
			usage()
	else:
		directory = sys.argv[1]
		filename = sys.argv[2]
	
	# ensure the directory exists and open the output file
	directory = shared.ensureDir(directory)
	ofile = shared.openFile(filename, "w")
	
	fsize = 0
	filename = directory + "/run0.txt"
	f = shared.openFile(filename)
	width, height = shared.widthAndHeight(f.readline().split(), filename)
	
	for line in f:
		data = line.split()
		fsize = shared.toFlo(data[0])
	avg = [[-1 for i in range(width * height)] for j in range(int(fsize * 10) + 50)]
	end = 0
	f.close()

	for run in range(0, runs): # 'runs' (the number of run files to average) is assumed to be defined elsewhere in this script
		filename = directory + "/run" + str(run)+ ".txt"
		f = open(filename)
		width, height = shared.widthAndHeight(f.readline().split(), filename)

		for line in f:
			lineList = line.split(",")
			temptime = shared.toFlo(lineList[0])
			place = int(temptime * 10)
			if float(place + 1) - (temptime * 10) < (temptime * 10 - float(place)):
				index = place + 1
			else:
				index = place
			if (index > end):
				end = index
			for cell in range(0, width * height):
				if (avg[index][cell] == -1):
					avg[index][cell] = shared.toFlo(lineList[cell + 1])
				else:
					avg[index][cell] += shared.toFlo(lineList[cell + 1])
	
	ofile.write(str(width) + " " + str(height) + "\n")
	
	for cell in range(0, width * height):
		i = 0
		minend = end
		while (i < end):
			if (avg[i + 1][cell] == -1):
				x1 = i
				y1 = avg[i][cell]
				y2 = 0
				x2 = -1
				for k in range(i + 2, end + 1):
					if (avg[k][cell] != -1):
						y2 = avg[k][cell]
						x2 = k
						break
				if (x2 != -1):
					m = (y2 - y1) / (x2 - x1)
					for k in range (i + 1, x2):
						avg[k][cell] = y1 + m * (k - i) # linear interpolation between the surrounding known samples
					i = x2
				else:
					end = i
					if (end < minend):
						minend = end
					break
			else:
				i += 1 	
	
	end = minend
	for i in range(1, end + 1):
		list = avg[i]
		ofile.write(str(float(i) / 10) + " ")
		
		for cell in range(0, width * height):
			ofile.write(str(list[cell] / runs) + " ")
		
		ofile.write("\n")
		index += 1
	ofile.close()
def main():
    #check the given arguments
    print "Reading command-line arguments..."
    args = sys.argv[1:]
    num_args = len(args)
    req_args = [False] * 6
    num_seeds = 0
    sim_arguments = ""

    if num_args >= 6:
        for arg in range(0, num_args - 1, 2):
            option = args[arg]
            value = args[arg + 1]
            if option == '-i' or option == '--input-file':
                ifile = value
                req_args[0] = True
            elif option == '-n' or option == '--num-params':
                num_params = shared.toInt(value)
                req_args[1] = True
            elif option == '-p' or option == '--pars-per-job':
                pars_per_job = shared.toInt(value)
                req_args[2] = True
            elif option == '-d' or option == '--directory':
                folder = value
                req_args[3] = True
            elif option == '-s' or option == '--simulation':
                simulation = value
                req_args[4] = True
            elif option == '-S' or option == '--seeds':
                num_seeds = int(value)
                req_args[5] = True
            elif option == '-a' or option == '--arguments':
                for a in range(arg + 1, num_args):
                    sim_arguments += ' ' + args[a]
                break
            elif option == '-h' or option == '--help':
                usage()
            else:
                usage()
        for arg in req_args:
            if not arg:
                usage()
    else:
        usage()

    index = 0

    input_file = shared.openFile(ifile, "r")
    shared.ensureDir(folder)
    for parset in range(0, num_params, pars_per_job):
        params = shared.openFile(folder + "/input" + str(index) + ".params",
                                 "w")
        for line in range(pars_per_job):
            params.write(input_file.readline())
        params.close()
        index += 1

    for seeds in range(num_seeds):
        seed = (seeds + 1) * 1000
        for parset in range(index):
            job = shared.openFile(
                folder + "/pbs-job-" + str(seed) + "-" + str(parset), 'w')
            job.write('''
#PBS -N robust-test 
#PBS -l nodes=1:ppn=1
#PBS -l mem=500mb
#PBS -l file=300mb
#PBS -q biomath
#PBS -j oe
#PBS -o ''' + folder + '''/output''' + str(seed) + "-" + str(parset) + '''.txt
#PBS -l walltime=06:00:00

cd $PBS_O_WORKDIR

''' + simulation + ' ' + sim_arguments + ' -p ' + str(pars_per_job) + ' -i ' +
                      ifile + ''' -s ''' + str(seed) + " -M 6 -E " + folder +
                      "/scores-" + str(seed) + "-" + str(parset) + ".csv")
            job.close()
            subprocess.call(
                ["qsub", folder + "/pbs-job-" + str(seed) + "-" + str(parset)])
def main():
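    # Plot the concentrations from two simulation output files on one set of axes
    # (the second file's values are doubled, as written), then plot the average over
    # all columns in a separate figure.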
    # check the given arguments
    if len(sys.argv) < 6:
        usage()
    elif len(sys.argv) == 7:
        if sys.argv[1] == "-c" or sys.argv[1] == "--no-color":
            shared.terminalRed = ""
            shared.terminalReset = ""
            filename = sys.argv[2]
            filename2 = sys.argv[3]
            directory = sys.argv[4]
            measuring = sys.argv[5]
            mutation = sys.argv[6]

        else:
            usage()
    else:
        filename = sys.argv[1]
        filename2 = sys.argv[2]  # the second input file is opened below, so it must be read here too
        directory = sys.argv[3]
        measuring = sys.argv[4]
        mutation = sys.argv[5]

    # open the input file and ensure the directory exists
    f = shared.openFile(filename, "r")
    f2 = shared.openFile(filename2, "r")
    directory = shared.ensureDir(directory)

    # split the lines to get data
    data = [line.split() for line in f]
    file_len = len(data) - 1
    max_x = file_len
    f.close()

    data2 = [line.split() for line in f2]
    file_len2 = len(data2) - 1
    max_x2 = file_len2
    f2.close()

    if (max_x == max_x2):
        print "test"
    # number of columns we have in the files
    cn = shared.toInt(data[0][0]) * shared.toInt(data[0][1]) + 1
    cn2 = shared.toInt(data2[0][0]) * shared.toInt(data2[0][1]) + 1

    # create matrices to store the data we obtained from the files
    m2p = numpy.zeros(shape=(max_x, cn + cn2))

    # put the data coming from the files to the matrix
    for i in range(2, file_len):
        for j in range(0, cn + cn2):
            if (j < cn):
                m2p[i][j] = shared.toFlo(data[i][j])

            elif (j == cn):
                print data2[i][j - cn]
            else:
                m2p[i][j] = 2 * shared.toFlo(data2[i][j - cn])

    # plot colors
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    color = 0

    for i in range(1, cn + cn2):
        if (i % 4 == 0):
            pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'r')
        elif (i % 4 == 1):
            pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'g')
        elif (i % 4 == 2):
            pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'b')
        else:
            pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'c')

    pl.title(measuring + " " + mutation + " All Cells")
    pl.savefig(directory + "/" + mutation + "_all.png", format="png")
    pl.close()

    # plot the data
    average = []
    for i in range(0, max_x):
        average.append(float(sum(m2p[i][1:])) / float(len(m2p[i][1:])))

    pl.plot(m2p[0:max_x, 0], average, colors[color])
    if color == len(colors) - 1:
        color = 0
    else:
        color += 1

    pl.title(measuring + " " + mutation + " Average")
    pl.savefig(directory + "/" + mutation + "_avg.png", format="png")
    pl.close()
def main():
    print 'Reading command-line arguments...'
    args = sys.argv[1:]  # Remove the name of the program from the arguments
    num_args = len(args)
    req_args = [
        False
    ] * 3  # If every required argument was given then req_args will be all true
    if num_args >= 3:
        # Arguments with default values
        stdevs_away = 2
        round_to = 5

        for arg in range(0, num_args - 1, 2):
            option = args[arg]
            value = args[arg + 1]
            if option == '-s' or option == '--sets':
                sets_file = shared.openFile(value, 'r')
                req_args[0] = True
            elif option == '-c' or option == '--current-ranges':
                cur_ranges_file = shared.openFile(value, 'r')
                req_args[1] = True
            elif option == '-n' or option == '--new-ranges':
                new_ranges_fname = value
                new_ranges_file = shared.openFile(new_ranges_fname, 'w')
                req_args[2] = True
            elif option == '-d' or option == '--standard-dev':
                stdevs_away = shared.toInt(value)
            elif option == '-r' or option == '--round-to':
                round_to = shared.toInt(value)
            elif option == '-h' or option == '--help':
                usage()
            else:
                usage()
        for arg in req_args:  # Check to ensure every required argument was entered
            if not arg:
                usage()
    else:
        usage()

    print 'Reading the parameter sets file...'
    # Parse the sets file to get the list of parameter sets
    sets = []
    for line in sets_file:
        if not (line == ''
                or line[0] == '#'):  # Ignore blank lines and comments
            sets.append(line)
    if len(sets) < 1:  # Ensure at least one set was given
        usage()

    print 'Reading the current ranges file...'
    # Parse the current ranges file to find the existing ranges
    par_names = []
    cur_ranges = []
    for line in cur_ranges_file:
        line = line.replace('\t', ' ')
        if not (line == ''
                or line[0] == '#'):  # Ignore blank lines and comments
            # Get the human-readable description
            space = line.find(' ')
            if space <= 0:
                parsing_error()
            par_names.append(line[:space])
            line = line[space + 1:]  # Skip past the description

            # Find the range bounds
            start = 0
            while start < len(line) and line[start] == ' ':
                start += 1
            if start == len(line) or line[start] != '[':
                parsing_error()
            end = start + 1
            while end < len(line) and line[end] != ']':
                end += 1
            if end == len(line):
                parsing_error()
            line = line[start + 1:end]
            bounds = map(shared.toFlo,
                         line.split(','))  # Convert the bounds to floats
            if len(bounds) != 2:
                parsing_error()
            cur_ranges.append(bounds)

    print 'Calculating new ranges...'
    # Calculate each parameter's new range
    flo_sets = map(
        lambda ls: map(shared.toFlo, ls),
        map(lambda s: s.split(','),
            sets))  # Convert each parameter set string into an array of floats
    num_sets = len(flo_sets)
    new_ranges = []
    for p in range(len(cur_ranges)):  # For every range
        # Get the mean range based on every set
        vals = []
        for s in flo_sets:
            vals.append(s[p])
        mean = sum(vals) / num_sets
        # Calculate the standard deviation from the mean
        stdev_sum = 0
        for f in vals:
            stdev_sum += (f - mean)**2
        stdev = math.sqrt(stdev_sum / num_sets)
        # Define new ranges based on the mean and standard deviation that are at least as narrow as the current ranges
        lower_bound = max(cur_ranges[p][0],
                          round(mean - stdev * stdevs_away, round_to))
        upper_bound = min(cur_ranges[p][1],
                          round(mean + stdev * stdevs_away, round_to))
        new_ranges.append([lower_bound, upper_bound])

    print 'Writing the new ranges to the specified output file...'
    # Write the parameter ranges to the new ranges file
    for r in range(len(new_ranges)):
        new_ranges_file.write(par_names[r] + ' [' + str(new_ranges[r][0]) +
                              ',' + str(new_ranges[r][1]) + ']\n')
    new_ranges_file.close()

    print 'Done. The new ranges are in ' + new_ranges_fname
def main():
	# check the given arguments
	if len(sys.argv) < 3:
		usage()
	elif len(sys.argv) == 4:
		if sys.argv[1] == "-c" or sys.argv[1] == "--no-color":
			shared.terminalRed = ""
			shared.terminalReset = ""
			filename = sys.argv[2]
			startTime = sys.argv[3]
		else:
			usage()
	else:
		filename = sys.argv[1]
		startTime = sys.argv[2]
	
	# open the input file and ensure 'startTime' is a number
	f = shared.openFile(filename, "r")
	startTime = shared.toFlo(startTime)
	startIndex = 0
	
	# extract the width and height from the concentrations file
	width, height = shared.widthAndHeight(f.readline().split(), filename)
	
	cellCount = width * height
	sums = [0 for i in range(cellCount)]
	cells = []
	
	# for every line in the file, get the sum of the concentration levels as well as each individual value cast as a float, and calculate the starting index based off of the starting time
	for line in f:
		data = line.split()
		curdata = []
		
		if shared.toFlo(data[0]) < startTime:
			startIndex += 1
		
		for cell in range(cellCount):
			sums[cell] += shared.toFlo(data[cell + 1])
			curdata.append(shared.toFlo(data[cell + 1]))
		
		cells.append(curdata)

	# ensure there was data to retrieve
	if len(cells) == 0:
		print shared.terminalRed + "Couldn't get any cell data! Make sure '" + filename + "' is properly formatted. Exit status 3.", shared.terminalReset
		exit(3)
	
	# calculate the mean of each cell
	means = []
	for cell in range(cellCount):
		means.append(sums[cell] / len(cells))
	
	# calculate the total average score
	avgscore = 0.0
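	# for each non-reference cell, compute the Pearson correlation coefficient between its time series (from startIndex on) and cell 0's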
	for cell in range(1, cellCount):
		numerator = 0
		sqr1 = 0
		sqr2 = 0
		for i in range(startIndex, len(cells)):
			tstep = cells[i]
			xi = tstep[0]
			yi = tstep[cell]
			numerator += ((xi - means[0]) * (yi - means[cell]))
			sqr1 += (xi - means[0]) ** 2
			sqr2 += (yi - means[cell]) ** 2
		
		sqr1 = math.sqrt(sqr1)
		sqr2 = math.sqrt(sqr2)
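		# a constant series has zero variance; count it as perfectly synchronized (r = 1)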
		if sqr1 == 0 or sqr2 == 0:
			r = 1
		else:
			r = numerator / (sqr1 * sqr2)
		avgscore += r
	
	# print the synchronization score
	print round(avgscore / (cellCount - 1), 10)
	f.close()
def main():
	# check the given arguments
	if len(sys.argv) < 6:
		usage()
	elif len(sys.argv) == 7:
		if sys.argv[1] == "-c" or sys.argv[1] == "--no-color":
			shared.terminalRed = ""
			shared.terminalReset = ""
			filename = sys.argv[2]
			filename2 = sys.argv[3]
			directory = sys.argv[4]
			measuring = sys.argv[5]
			mutation = sys.argv[6]

		else:
			usage()
	else:
		filename = sys.argv[1]
		filename2 = sys.argv[2]
		directory = sys.argv[3]
		measuring = sys.argv[4]
		mutation = sys.argv[5]
	
	# open the input file and ensure the directory exists
	f = shared.openFile(filename, "r")
	f2 = shared.openFile(filename2, "r")
	directory = shared.ensureDir(directory)
	
	# split the lines to get data
	data = [line.split() for line in f]
	file_len = len(data) - 1
	max_x = file_len
	f.close()
	

	data2 = [line.split() for line in f2]
	file_len2 = len(data2) - 1
	max_x2 = file_len2
	f2.close()

	# sanity check: both files should contain the same number of time steps
	if max_x != max_x2:
		print "Warning: the two input files contain different numbers of time steps"
	# number of columns in each file: one per cell (width * height) plus the time column
	cn = shared.toInt(data[0][0]) * shared.toInt(data[0][1]) + 1
	cn2 = shared.toInt(data2[0][0]) * shared.toInt(data2[0][1]) + 1
	
	# create matrices to store the data we obtained from the files
	m2p = numpy.zeros(shape = (max_x, cn + cn2))
	
	# put the data coming from the files to the matrix
	for i in range(2, file_len):
		for j in range(0, cn + cn2):
			if (j < cn):
				m2p[i][j] = shared.toFlo(data[i][j])
			elif (j == cn):
				print data2[i][j - cn]
			else:
				m2p[i][j] = 2 * shared.toFlo(data2[i][j - cn])
	
	# plot colors
	colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
	color = 0
	
	for i in range(1, cn+cn2):
		if (i % 4 == 0):
			pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'r')
		elif (i % 4 == 1):
			pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'g')
		elif (i % 4 == 2):
			pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'b')
		else:
			pl.plot(m2p[0:max_x, 0], m2p[0:max_x, i], 'c')
	
	pl.title(measuring + " " + mutation + " All Cells")
	pl.savefig(directory + "/" + mutation + "_all.png", format = "png")
	pl.close()
	
	# average the concentrations across all cells at each time step and plot the result
	average = []
	for i in range(0, max_x):
		average.append(float(sum(m2p[i][1:])) / float(len(m2p[i][1:])))
	
	pl.plot(m2p[0:max_x, 0], average, colors[color])
	if color == len(colors) - 1:
		color = 0
	else:
		color += 1
	
	pl.title(measuring + " " + mutation + " Average")
	pl.savefig(directory + "/" + mutation + "_avg.png", format = "png")
	pl.close()
def main():
    print "Reading command-line arguments..."
    args = sys.argv[1:]
    if len(args) == 3:
        cons_fname1 = args[0]
        cons_fname2 = args[1]
        directory = args[2]
    else:
        usage()

    print "Reading concentrations file 1..."
    min_con1 = float("inf")
    max_con1 = 0
    cons_data1 = []
    if cons_fname1.endswith(".cons"):  # Read ASCII file
        cons_file1 = shared.openFile(cons_fname1, "r")
        width, height = map(
            lambda num: shared.toInt(num), cons_file1.readline().split(" ")
        )  # The first line contains the width and height
        checkSize(width, height)
        for line in cons_file1:
            cons = map(
                lambda num: shared.toFlo(num), line.split(" ")[1:-1]
            )  # Remove the time step column and newline when taking the concentrations
            for con in cons:
                min_con1 = min(min_con1, con)
                max_con1 = max(max_con1, con)

            cons_data1.append(cons)
    elif cons_fname1.endswith(".bcons"):  # Read binary file
        cons_file1 = shared.openFile(cons_fname1, "rb")  # Read the file as a binary
        # The first two ints are the width and height
        width, = struct.unpack("i", cons_file1.read(4))
        height, = struct.unpack("i", cons_file1.read(4))
        checkSize(width, height)
        size = width * height
        cons1 = []
        cons_length1 = 0
        while True:
            con_str1 = cons_file1.read(4)
            if con_str1 == "":  # While not EOF
                break
            else:
                # There are width * height concentration floats per time step
                con, = struct.unpack("f", con_str1)
                min_con1 = min(min_con1, con)
                max_con1 = max(max_con1, con)
                cons1.append(con)
                cons_length1 += 1
                if cons_length1 == size:  # a full time step (width * height values) has been read
                    cons_data1.append(cons1)
                    cons1 = []
                    cons_length1 = 0
    else:
        usage()

    print "Reading concentrations file 2..."
    min_con2 = float("inf")
    max_con2 = 0
    cons_data2 = []
    if cons_fname2.endswith(".cons"):  # Read ASCII file
        cons_file2 = shared.openFile(cons_fname2, "r")
        width, height = map(
            lambda num: shared.toInt(num), cons_file2.readline().split(" ")
        )  # The first line contains the width and height
        checkSize(width, height)
        for line in cons_file2:
            cons = map(
                lambda num: shared.toFlo(num), line.split(" ")[1:-1]
            )  # Remove the time step column and newline when taking the concentrations
            for con in cons:
                min_con2 = min(min_con2, con)
                max_con2 = max(max_con2, con)
            cons_data2.append(cons)
    elif cons_fname2.endswith(".bcons"):  # Read binary file
        cons_file2 = shared.openFile(cons_fname2, "rb")  # Read the file as a binary
        # The first two ints are the width and height
        width, = struct.unpack("i", cons_file2.read(4))
        height, = struct.unpack("i", cons_file2.read(4))
        checkSize(width, height)
        size = width * height
        cons2 = []
        cons_length2 = 0
        while True:
            con_str2 = cons_file2.read(4)
            if con_str2 == "":  # While not EOF
                break
            else:
                # There are width * height concentration floats per time step
                con, = struct.unpack("f", con_str2)
                min_con2 = min(min_con2, con)
                max_con2 = max(max_con2, con)
                cons2.append(con)
                cons_length2 += 1
                if cons_length2 == size:  # a full time step (width * height values) has been read
                    cons_data2.append(cons2)
                    cons2 = []
                    cons_length2 = 0
    else:
        usage()

    print "Creating the directory if necessary..."
    directory = shared.ensureDir(directory)
    if directory[-1] != "/":
        directory = directory + "/"

    cons_data = combine_cons(cons_data1, cons_data2, max_con1, min_con1, max_con2, min_con2)

    print "Creating snapshots..."
    edge, size = findSizes(width, height)  # Configure the hexagon edge and window size based on the grid size
    index = 0
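    # only draw a snapshot for every 10th time step once past step 21000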
    for line in cons_data:
        if index % 10 == 0 and index >= 21000:
            plotHexagons(directory, size, index, line, edge, width, height)
        index += 1

    print "Done. Your snapshots are stored in " + directory
def main():
	gradients = {0: ['28', '29'], 1: ['37'], 2: ['41'], 3: ['34', '35'], 4: ['38', '39'], 5: ['4', '5'], 6: ['34', '35', '37'], 7: ['38', '39', '41'], 8: ['4', '5', '7'], 9: ['28', '32', '15']}
	end_rate = [-1, 1001, 1001, 1001, 1001, -1, 1001, 1001, -1, -1]
	incs = [-1, 1, 1, 1, 1, -1, 1, 1, -1, -1]
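	# gradients maps each option to the rate indices it perturbs; for option i the end-point rate is
	# swept from 100 toward end_rate[i] in steps of incs[i] (down to 0 for -1, up to 1000 for 1001)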

	print 'Reading command-line arguments...'
	args = sys.argv[1:] # Remove the name of the program from the arguments
	num_args = len(args)
	num_req_args = 2
	req_args = [False] * num_req_args # If every required argument was given then req_args will be all true
	if num_args >= num_req_args:
		# Arguments with default values
		base_gradients_file = None
		simulation = './simulation'
		sim_arguments = ""

		for arg in range(0, num_args - 1, 2):
			option = args[arg]
			value = args[arg + 1]
			if option == '-s' or option == '--sets':
				sets_fname = value # Parameter sets file
				req_args[0] = True
			elif option == '-o' or option == '--output-dir':
				odir = value
				req_args[1] = True
			elif option == '-g' or option == '--base-gradients':
				base_gradients_file = shared.openFile(value, 'r') # Gradients to add to every simulation on top of the changing ones
			elif option == '-S' or option == '--simulation':
				simulation = value # Filename of the simulation
			elif option == '-a' or option == '--arguments':
				for a in range(arg + 1, num_args): # Arguments to pass to the simulation
					sim_arguments += ' ' + args[a]
				break
			else:
				usage()
		for arg in req_args: # Check to ensure every required argument was entered
			if not arg:
				usage()
	else:
		usage()

	print 'Reading the base gradients file...'
	# Add any given base gradients to a string to be passed to each simulation's gradients file
	base_gradients = ''
	if base_gradients_file is not None:
		for line in base_gradients_file:
			base_gradients += line
	
	print 'Creating gradient files and PBS jobs for each gradient combination...'
	start_point = '9'
	end_point = '49'

	# For every gradient option
	num_gradients = len(gradients)
	jobs_to_submit = []
	for i in range(num_gradients):
		si = str(i)
		for j in range(100, end_rate[i], incs[i]):
			sj = str(j)
			grad_id = '-' + si + '-' + sj
			grad_fname = odir + "/grad" + grad_id + ".gradient"
			scores_fname = odir + "/scores" + grad_id + ".csv"
			subprocess.call(["mkdir", "-p", odir])
			grad_file = shared.openFile(grad_fname, 'w')

			# Write the base gradient to the file
			grad_file.write(base_gradients)
			for rate in gradients[i]:
				grad_file.write(rate + ' (' + start_point + ' 100) (' + end_point + ' ' + sj + ')\n')

			# Create the PBS job file
			pbs_fname = odir + '/pbs-job' + grad_id
			jobs_to_submit.append(pbs_fname)
			pbs_file = shared.openFile(pbs_fname, 'w')
			pbs_file.write("""
#PBS -N gradient-run""" + grad_id + """
#PBS -l nodes=1:ppn=1
#PBS -l mem=500MB
#PBS -l file=500MB
#PBS -q biomath
#PBS -j oe
#PBS -o output.txt
#PBS -l walltime=360:00:00

cd $PBS_O_WORKDIR\n""")
	
			# Write the simulation call to the PBS job file
			pbs_file.write(simulation + sim_arguments + ' -i ' + sets_fname + ' -r ' + grad_fname + ' -E ' + scores_fname)
			pbs_file.close()
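			# throttle submission: every 50th gradient, wait for the queue to drop below 150 jobs,
			# then submit up to 100 of the pending job scripts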
			if j % 50 == 0:
				queue_jobs = get_num_jobs()
				# Add parsing of queue_jobs
				while (queue_jobs > 150):
					time.sleep(300)
					queue_jobs = get_num_jobs()
				for job in range(min(100, len(jobs_to_submit))):
					print ("Submitting job " + jobs_to_submit[job])
					subprocess.call(['qsub', jobs_to_submit[job]])
				jobs_to_submit = jobs_to_submit[100:]
def main():
	print 'Reading command-line arguments...'
	args = sys.argv[1:] # Remove the name of the program from the arguments
	num_args = len(args)
	cons_file = None # Concentrations file
	figure_fname = 'st' # Filename to give the figure minus its extension
	image_format = 'png' # Format in which to save the figure, also serving as its extension
	image_width = 1000 # Width of the image in pixels
	image_height = 250 # Height of the image in pixels
	steps_til_growth = 60000 # Steps before growth starts
	steps_to_split = 600 # Steps each split takes
	initial_width = 10 # Initial width of the PSM in cells
	granularity = 1 # Use each <granularity> time steps of data
	start_step = 0 # Start time step relative to steps_til_growth
	end_step = 60000 # End time step relative to steps_til_growth
	for arg in range(0, num_args - 1, 2):
		option = args[arg]
		value = args[arg + 1]
		if option == '-c' or option == '--cons-file':
			cons_file = shared.openFile(value, 'r')
		elif option == '-f' or option == '--figure-name':
			figure_fname = value
		elif option == '-i' or option == '--image-format':
			image_format = value
		elif option == '-w' or option == '--image-width':
			image_width = shared.toInt(value)
		elif option == '-h' or option == '--image-height':
			image_height = shared.toInt(value)
		elif option == '-G' or option == '--steps-til-growth':
			steps_til_growth = shared.toInt(value)
		elif option == '-S' or option == '--steps-to-split':
			steps_to_split = shared.toInt(value)
		elif option == '-n' or option == '--initial-width':
			initial_width = shared.toInt(value)
		elif option == '-g' or option == '--granularity':
			granularity = shared.toInt(value)
		elif option == '-s' or option == '--start-step':
			start_step = shared.toInt(value)
		elif option == '-e' or option == '--end-step':
			end_step = shared.toInt(value)
		elif option == '--help': # '-h' is already taken by --image-height, so only the long form triggers help
			usage()
		else:
			usage()
	if cons_file is None: # The concentrations file is required
		usage()
	
	print 'Parsing concentrations file...'
	raw_data = [line.split() for line in cons_file] # Split the data into lines and split each line by spaces into an array
	cons_file.close()
	
	print 'Converting data to the appropriate sizes...'
	# Take the width and height from the first line of the file
	psm_width = shared.toInt(raw_data[0][0])
	psm_height = shared.toInt(raw_data[0][1])
	raw_data = raw_data[1 + steps_til_growth:] # Remove all data before growth starts
	# Adjust step sizes for the given granularity
	steps_til_growth /= granularity
	steps_to_split /= granularity
	data = [] # Like raw data, but takes only each <granularity> time steps of data and removes the time steps column
	for line in range(len(raw_data)):
		if line % granularity == 0:
			data.append(raw_data[line])
	total_steps = len(data)
	for row in range(total_steps):
		data[row] = data[row][1:] # Remove the time steps column
	steps_when_full = (psm_width - initial_width) * steps_to_split # When the PSM is done growing
	total_width = psm_width + (total_steps - steps_when_full) / steps_to_split # The width of every cell that exists at any point
	table = [[0 for i in range(total_steps)] for j in range(total_width)] # A table containing the data formatted more closely to what the figure requires
	
	print 'Accounting for cell growth and averaging cell columns...'
	min_con = float('inf')
	max_con = 0
	
	# Fill in the table with all data from when the PSM is growing
	current_width = initial_width
	row_start = current_width - 1
	steps_elapsed = 0
	for column in range(steps_when_full):
		for row in range(current_width):
			avg_con = 0
			cell_x = row_start - row # Posterior cells should be printed on the right
			for cell_y in range(psm_height): # Average each column of cells
				cell_index = cell_y * psm_width + cell_x
				avg_con += shared.toFlo(data[column][cell_index])
			avg_con /= psm_height
			table[row][column] = avg_con
			# Update the minimum and maximum concentrations
			min_con = min(min_con, avg_con)
			max_con = max(max_con, avg_con)
		for row in range(current_width, total_width): # Cells that do not exist yet are marked with -10 so they can be drawn in a neutral color
			table[row][column] = -10
		steps_elapsed += 1
		if steps_elapsed == steps_to_split: # Split the PSM every steps_to_split time steps
			current_width += 1
			row_start += 1 # Adjust because the first cell in data is the new, posterior-most cell
			steps_elapsed = 0
	for column in range(steps_when_full, total_steps):
		for row in range(current_width, total_width):
			table[row][column] = -10
	
	# Fill in the table with all data from when the PSM is done growing
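	# arrested_cells records (row index, last value) for rows that drop out of the PSM window after a split; they are marked with -10 from then on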
	arrested_cells = []
	row_start = psm_width - 1
	row_offset = 0
	for column in range(steps_when_full, total_steps):
		for row in range(psm_width):
			avg_con = 0
			cell_x = (row_start - row) % psm_width # Posterior cells should be printed on the right
			for cell_y in range(psm_height): # Average each column of cells
				cell_index = cell_y * psm_width + cell_x
				avg_con += shared.toFlo(data[column][cell_index])
			avg_con /= psm_height
			table[row + row_offset][column] = avg_con
			# Update the minimum and maximum concentrations
			min_con = min(min_con, avg_con)
			max_con = max(max_con, avg_con)
		for cell in arrested_cells: # Mark each arrested cell with -10 for the rest of time so it is drawn in the neutral color
			table[cell[0]][column] = -10

		steps_elapsed += 1
		if steps_elapsed == steps_to_split: # Split the PSM every steps_to_split time steps
			arrested_cells.append((row_offset, table[row_offset][column]))
			row_offset += 1
			steps_elapsed = 0
	max_con += 1 # pad the maximum so the highest concentration maps below the final shade
	print 'Cropping to the specified time range...'
	start_step /= granularity
	end_step /= granularity
	total_steps = end_step - start_step
	for row in range(total_width):
		table[row] = table[row][start_step:end_step]
	print 'Creating a blank image...'
	im = Image.new('RGB', (image_width, image_height), rgb('FFFFFF')) # Make an image with a blank, white canvas
	draw = ImageDraw.Draw(im) # Get the drawing object
	
	print 'Filling the image with the concentrations...'
	# Find the factors to scale the table data into an image_width by image_height sized figure
	x_factor = shared.toFlo(total_steps) / image_width
	y_factor = shared.toFlo(total_width) / image_height
	# Darker shades indicate higher concentrations
	shades = [rgb('FEB4EF'), rgb('FEB4EF'), rgb('FE5A77'), rgb('FE2D3B'), rgb('FF0000'), rgb('BF0000'), rgb('7F0000'), rgb('3F0000'), rgb('000000'), rgb('FFFFFF')]
	num_shades = len(shades)
	for i in range(image_width):
		x = shared.toInt(i * x_factor)
		for j in range(image_height):
			reverse_j = image_height - j - 1 # In the figure, cell 0 is at the bottom, not top
			y = shared.toInt(reverse_j * y_factor)
			con = table[y][x]
			if con == -10:
				color = rgb('EEE5DE')
			else:
				color = shades[int((con - min_con) / (max_con - min_con) * (num_shades - 1))] # Find the color matching the concentration
			draw.point((i, j), fill = color)
	
	print 'Saving the image...'
	figure_fname_full = figure_fname + '.' + image_format.lower()
	im.save(figure_fname_full, image_format.upper())
	
	print 'Done. Your figure is stored in ' + figure_fname_full
def main():
    print 'Reading command-line arguments...'
    args = sys.argv[1:]
    if len(args) == 3:
        cons_fname1 = args[0]
        cons_fname2 = args[1]
        directory = args[2]
    else:
        usage()

    print 'Reading concentrations file 1...'
    min_con1 = float('inf')
    max_con1 = 0
    cons_data1 = []
    if cons_fname1.endswith('.cons'):  # Read ASCII file
        cons_file1 = shared.openFile(cons_fname1, 'r')
        width, height = map(
            lambda num: shared.toInt(num),
            cons_file1.readline().split(
                ' '))  # The first line contains the width and height
        checkSize(width, height)
        for line in cons_file1:
            cons = map(
                lambda num: shared.toFlo(num),
                line.split(' ')[1:-1]
            )  # Remove the time step column and newline when taking the concentrations
            for con in cons:
                min_con1 = min(min_con1, con)
                max_con1 = max(max_con1, con)

            cons_data1.append(cons)
    elif cons_fname1.endswith('.bcons'):  # Read binary file
        cons_file1 = shared.openFile(cons_fname1,
                                     'rb')  # Read the file as a binary
        # The first two ints are the width and height
        width, = struct.unpack('i', cons_file1.read(4))
        height, = struct.unpack('i', cons_file1.read(4))
        checkSize(width, height)
        size = width * height
        cons1 = []
        cons_length1 = 0
        while True:
            con_str1 = cons_file1.read(4)
            if con_str1 == '':  # While not EOF
                break
            else:
                # There are width * height concentration floats per time step
                con, = struct.unpack('f', con_str1)
                min_con1 = min(min_con1, con)
                max_con1 = max(max_con1, con)
                cons1.append(con)
                cons_length1 += 1
                if cons_length1 == size:  # a full time step (width * height values) has been read
                    cons_data1.append(cons1)
                    cons1 = []
                    cons_length1 = 0
    else:
        usage()

    print 'Reading concentrations file 2...'
    min_con2 = float('inf')
    max_con2 = 0
    cons_data2 = []
    if cons_fname2.endswith('.cons'):  # Read ASCII file
        cons_file2 = shared.openFile(cons_fname2, 'r')
        width, height = map(
            lambda num: shared.toInt(num),
            cons_file2.readline().split(
                ' '))  # The first line contains the width and height
        checkSize(width, height)
        for line in cons_file2:
            cons = map(
                lambda num: shared.toFlo(num),
                line.split(' ')[1:-1]
            )  # Remove the time step column and newline when taking the concentrations
            for con in cons:
                min_con2 = min(min_con2, con)
                max_con2 = max(max_con2, con)
            cons_data2.append(cons)
    elif cons_fname2.endswith('.bcons'):  # Read binary file
        cons_file2 = shared.openFile(cons_fname2,
                                     'rb')  # Read the file as a binary
        # The first two ints are the width and height
        width, = struct.unpack('i', cons_file2.read(4))
        height, = struct.unpack('i', cons_file2.read(4))
        checkSize(width, height)
        size = width * height
        cons2 = []
        cons_length2 = 0
        while True:
            con_str2 = cons_file2.read(4)
            if con_str2 == '':  # While not EOF
                break
            else:
                # There are width * height concentration floats per time step
                con, = struct.unpack('f', con_str2)
                min_con2 = min(min_con2, con)
                max_con2 = max(max_con2, con)
                cons2.append(con)
                cons_length2 += 1
                if cons_length2 == size:  # a full time step (width * height values) has been read
                    cons_data2.append(cons2)
                    cons2 = []
                    cons_length2 = 0
    else:
        usage()

    print 'Creating the directory if necessary...'
    directory = shared.ensureDir(directory)
    if (directory[-1] != '/'):
        directory = directory + '/'

    cons_data = combine_cons(cons_data1, cons_data2, max_con1, min_con1,
                             max_con2, min_con2)

    print 'Creating snapshots...'
    edge, size = findSizes(
        width, height
    )  # Configure the hexagon edge and window size based on the grid size
    index = 0
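    # only draw a snapshot for every 10th time step once past step 21000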
    for line in cons_data:
        if index % 10 == 0 and index >= 21000:
            plotHexagons(directory, size, index, line, edge, width, height)
        index += 1

    print 'Done. Your snapshots are stored in ' + directory
def main():
	print 'Reading command line arguments...'
	# check the given arguments
	if len(sys.argv) < 8:
		usage()
	else:
		folder = sys.argv[1]
		parsets = shared.toInt(sys.argv[2])
		image_name = sys.argv[3]
		feature = sys.argv[4]
		ofolder = sys.argv[5]
		post_width = shared.toInt(sys.argv[6])
		excel_name = sys.argv[7]
		
	num_mutants = 6
	index = 0
	mutants = ["wildtype", "delta", "her1", "her7", "her7her13", "her13"]
	markers = ['o', '^', 's', '*', 'h', 'D']
	colors = ['k', 'b', 'g', 'r', 'c', 'm']
	

	features = []
	if (feature == "period" or feature == "amplitude"):
		features.append(feature)
	else:
		features.append("period")
		features.append("amplitude")
	
	for feat in features:
		# Create excel file in which the data used to create the plots will be stored
		excel_file = shared.openFile(ofolder + "/" + excel_name + "-" + feat + ".csv", "w")
		print "Plotting ", feat, "..."
		first_avg = 0
		num_first = 0
	
		for index in range(num_mutants):
			mutant = mutants[index]
			print '    Running ' + mutant + '...'
			marker = markers[index]
			color = colors[index]
			# open the input file
			f = shared.openFile(folder + "/" + mutant + "/set_0_" + feat + "_mh1.feats", "r")

			# split the lines to get data
			data = [line.split(",") for line in f]
	
			# calculate the tissue size
			height = shared.toInt(data[0][0])
			width = shared.toInt(data[0][1])
			xmin = 0
			xmax = 0.9 * width

			buckets = 9 # split the interval into 9 chunks
			chunk = (width - post_width) / (buckets - 1) # the width of the intervals after the posterior
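			# indexes holds the midpoint cell position of each bucket, used as the x coordinate when plotting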
				
			indexes = [0 for i in range(buckets)]
			for bucket in range(buckets):
				if bucket == 0:
					indexes[bucket] = post_width / 2
				else:
					indexes[bucket] = (post_width + (bucket - 1) * chunk) + (chunk / 2.0)
			averages = [0 for i in range(buckets)]
			num_points = [0 for i in range(buckets)]
			stderr = [0 for i in range(buckets)]

			if mutant == "wildtype":
				excel_file.write("mutant,")
				for idx in indexes:
					excel_file.write(str(idx) + ",")
				excel_file.write("\n")
			
				print '        Averaging the first bucket for the wildtype...' # all other data points will be normalized to this value
				for parset in range(parsets):
					# open the input file and ensure the directory exists
					f = shared.openFile(folder + "/" + mutant + "/set_" + str(parset) + "_" + feat + "_mh1.feats", "r")
	
					# split the lines to get data
					data = [line.split(",") for line in f]
					lines = len(data)
		
					for line in range(1, lines, 2):
						for col in range(len(data[line]) - 1):
							pos = shared.toInt(data[line][col])
							val = shared.toFlo(data[line + 1][col])
							if pos < post_width:
								first_avg += val
								num_first += 1
					
				first_avg /= num_first
	
			for parset in range(parsets):
				print '        Normalizing and analyzing data from set ' + str(parset) + '...'
				# open the input file and ensure the directory exists
				f = shared.openFile(folder + "/" + mutant + "/set_" + str(parset) + "_" + feat + "_mh1.feats", "r")
	
				# split the lines to get data
				data = [line.split(",") for line in f]
				lines = len(data)
		
				for line in range(1, lines, 2):
					for col in range(len(data[line]) - 1):
						pos = shared.toInt(data[line][col])
						val = shared.toFlo(data[line + 1][col]) / first_avg

						if pos < post_width:
							averages[0] += val
							num_points[0] += 1
						else:
							averages[(pos - post_width) / chunk + 1] += val
							num_points[(pos - post_width) / chunk + 1] += 1
		
			# ignore the buckets which don't have data
			buckets_with_data = buckets	

			for bucket in range(buckets):
				if post_width + ((bucket - 1) * chunk) + chunk - 1 > (0.9 * width):
					buckets_with_data -= 1
				else:
					if num_points[bucket] > 0:
						averages[bucket] /= num_points[bucket]
					elif feat == "amplitude":
						averages[bucket] = 0
					else:
						buckets_with_data -= 1

			buckets = buckets_with_data

			print '        Calculating standard error...'
			for parset in range(parsets):
				f = shared.openFile(folder + "/" + mutant + "/set_" + str(parset) + "_" + feat + "_mh1.feats", "r")
		
				data = [line.split(",") for line in f]
				lines = len(data)
		
				for line in range(1, lines, 2):
					for col in range(len(data[line]) - 1):
						pos = shared.toInt(data[line][col])
						val = shared.toFlo(data[line + 1][col]) / first_avg

						if pos < post_width:
							stderr[0] += (val - averages[0]) ** 2
						else:
							stderr[(pos - post_width) / chunk + 1] += (val - averages[(pos - post_width) / chunk + 1]) ** 2
			
			for bucket in range(buckets):
				if (num_points[bucket] > 0):
					stderr[bucket] = math.sqrt(stderr[bucket] / num_points[bucket])
					stderr[bucket] /= math.sqrt(num_points[bucket])
				else:
					stderr[bucket] = 0

			indexes = indexes[:buckets]
			averages = averages[:buckets]
			stderr = stderr[:buckets]
			# Print the means and standard deviations to the excel_file
			excel_file.write(mutant + ",")
			for average in averages:
				excel_file.write(str(average) + ",")
			excel_file.write("\n,")
			for stder in stderr:
				excel_file.write(str(stder) + ",")
			excel_file.write("\n")
			
			plt.errorbar(indexes, averages, stderr, fmt='ro', linestyle='-', marker=marker, color=color, label=mutant)
		plt.legend(prop={'size':8}, loc=2)
		pylab.xlim([xmin, xmax])
		excel_file.close()
		plt.savefig(ofolder + "/" + image_name + "_" + feat + ".png", format = "png")
		plt.close()
		print "Done. Your " + feat + " plot is stored in " + ofolder + "/" + image_name + "_" + feat + ".png"
		print "The data behind the plot can be found in " + ofolder + "/" + excel_name + "-" + feat + ".csv"
def main():
	# check the given arguments
	if len(sys.argv) < 6:
		usage()
	else:
		folder = sys.argv[1]
		parsets = shared.toInt(sys.argv[2])
		ofolder = sys.argv[3]
		image_name = sys.argv[4]
		excel_name = sys.argv[5]

	mutants = ["wildtype", "delta", "her1", "her7", "her7her13", "her13"]
	markers = ['o', '^', 's', '*', 'h', 'D']
	colors = ['k', 'b', 'g', 'r', 'c', 'm']
	num_mutants = 6
	
	# Create excel file in which the data used to create the plots will be stored
	excel_file = shared.openFile(ofolder + "/" + excel_name + "-sync.csv", "w")
	
	for index in range(num_mutants):

		mutant = mutants[index]
		marker = markers[index]
		color = colors[index]
			
		# open the first file to get the height, width and interval
		f = shared.openFile(folder + "/" + mutant + "/set_0_sync_mh1.feats", "r")

		# split the lines to get data
		data = [line.split(",") for line in f]
	
		# calculate the tissue size
		height = shared.toInt(data[0][0])
		interval = shared.toFlo(data[0][1])
		#split_time = shared.toFlo(data[0][2])
		width = len(data[1]) - 1

		indexes = [0 for i in range(width)]
		averages = [0 for i in range(width)]
		stderr = [0 for i in range(width)]
		
		for parset in range(parsets):
			f = shared.openFile(folder + "/" + mutant + "/set_" + str(parset) + "_sync_mh1.feats", "r")
	
			# split the lines to get data
			data = [line.split(",") for line in f]
		
			for col in range(width):
				for line in range(1, height + 1):
					averages[col] += shared.toFlo(data[line][col])
		
			f.close()
				
		for col in range(width):
			# x coordinate: the midpoint of this column's measurement window, scaled by 1/6
			indexes[col] = (((interval / 2) * col + (interval / 2) * col + interval) / 2) / 6
			averages[col] /= height * parsets
			
		if mutant == "wildtype":
			excel_file.write("mutant,")
			for index in indexes:
				excel_file.write(str(index) + ",")
			excel_file.write("\n")

		# accumulate the squared deviations across every parameter set
		for parset in range(parsets):
			f = shared.openFile(folder + "/" + mutant + "/set_" + str(parset) + "_sync_mh1.feats", "r")
		
			data = [line.split(",") for line in f]
		
			for col in range(width):
				for line in range(1, height + 1):
					stderr[col] += (shared.toFlo(data[line][col]) - averages[col]) ** 2
		
		# std error = std deviation / sqrt(num data points)
		for col in range(width):
			stderr[col] = math.sqrt(stderr[col] / (height * parsets))
			stderr[col] /= math.sqrt(height * parsets)

		# Print the means and standard deviations to the excel_file
		excel_file.write(mutant + ",")
		for average in averages:
			excel_file.write(str(average) + ",")
		excel_file.write("\n,")
		for stder in stderr:
			excel_file.write(str(stder) + ",")
		excel_file.write("\n")

		plt.errorbar(indexes, averages, stderr, fmt='ro', linestyle='-', marker=marker, color=color, label=mutant)
	plt.legend(prop={'size':8}, loc=3)
	pylab.xlim([0, (width + 1) * (interval / 2) / 6])
	excel_file.close()
	plt.savefig(ofolder + "/" + image_name + ".png", format = "png")
	plt.close()