# Set up options usage = """usage: %prog [options] output_file data_file """ parser = OptionParser(usage) ## fetch the args (options, args) = parser.parse_args() ## parameter error if len(args) < 2: parser.error("incorrect number of arguments") output_filename = args[0] input_filename = args[1] data = rif.read_intermediate_datafile(input_filename) ## get means control_means = [] treatment_means = [] control_stds = [] treatment_stds = [] for task_name in data[ 'Mean_Median_Task_Oscillation_Amplitude_By_Task__Control'].keys(): control_mean = float( data[ 'Mean_Median_Task_Oscillation_Amplitude_By_Task__Control'][task_name][0] ) treatment_mean = float( data[ 'Mean_Median_Task_Oscillation_Amplitude_By_Task__Treatment'][task_name][0] ) control_means.append( control_mean )
import read_intermediate_files as rif data_structure = rif.read_intermediate_datafile('intermediate_file_format.txt') print str(data_structure) rif.output_to_intermediate_format(data_structure)
# Script fragment (this chunk is truncated inside calculate_means below).
# Takes an output file plus control and treatment intermediate-format files,
# and begins grouping per-sample amplitude records by task name.
# NOTE(review): `OptionParser` and `rif` are imported outside this chunk.
usage = """usage: %prog [options] output_file control_data_file treatment_data_file """
parser = OptionParser(usage)
## fetch the args
(options, args) = parser.parse_args()
## parameter error: require output file plus both data files
if len(args) < 3:
    parser.error("incorrect number of arguments")
output_filename = args[0]
control_filename = args[1]
treatment_filename = args[2]
# Parse both intermediate-format inputs into section dicts.
control_data = rif.read_intermediate_datafile(control_filename)
treatment_data = rif.read_intermediate_datafile(treatment_filename)

def calculate_means(data):
    # Group the per-sample amplitude records under their task name.
    # Keys look like "<taskname>_<replicate>", so everything before the
    # first underscore identifies the task — TODO confirm key format.
    ## calculate means across replicates
    #gather the values
    tasks = {}
    for task_and_replicate in data[
            'Task_Oscillation_Amplitudes_By_Sample_And_Task'].keys():
        taskname = task_and_replicate.split('_')[0]
        if not taskname in tasks:
            tasks[taskname] = []
        # (function body continues beyond this chunk)
usage = """usage: %prog [options] control_amplitudes.txt treatment_amplitudes.txt """ #Permitted types for outfile are png, pdf, ps, eps, and svg""" parser = OptionParser(usage) ## fetch the args (options, args) = parser.parse_args() ## parameter error if len(args) < 2: parser.error("incorrect number of arguments") control_filename = args[0] treatment_filename = args[1] control_data = rif.read_intermediate_datafile(control_filename)['Median_Task_Oscillation_Amplitudes_By_Task'] ## there's only one thing treatment_data = rif.read_intermediate_datafile(treatment_filename)['Median_Task_Oscillation_Amplitudes_By_Task'] ## there's only one thing print 'Mean_Median_Task_Oscillation_Amplitude_By_Task__Control' print "Task_Name,mean,median,std,ste,variance" for task_name in control_data.keys(): control_medians = control_data[task_name] control_medians = [ float(val) for val in control_medians ] median = np.median ( control_medians ) mean = np.mean ( control_medians ) std = np.std ( control_medians ) ste = std / math.sqrt( len ( control_medians ) ) variance = np.var ( control_medians ) print task_name + "," + str(mean) + "," + str(median) + "," + str(std) + "," + str(ste) + "," + str(variance)
filenamebits = file.split('_') ## interpret the filename, which should be in the following format: 33_Andn_Backbone__mann_whitney_u_stats__control_vs_punish_xor ## 1 11 12 backbone_task = translation[filenamebits[1]] fluctuating_task = translation[ filenamebits[12]] # should be the same as [-1] punish_or_nopunish = filenamebits[11] if not backbone_task in data_structure.keys(): data_structure[backbone_task] = {} ## keyed by the fluctuating task if not fluctuating_task in data_structure[backbone_task].keys(): data_structure[backbone_task][fluctuating_task] = { } ## keyed by the punish/nopunish data = rif.read_intermediate_datafile(file) data_structure[backbone_task][fluctuating_task][punish_or_nopunish] = data means = {} pvalues = {} for backbone_task in data_structure.keys(): ## loop through the backbone tasks means[backbone_task] = {} pvalues[backbone_task] = {} for fluctuating_task in data_structure[backbone_task].keys(): for punish_or_nopunish in data_structure[backbone_task][ fluctuating_task].keys(): if len(data_structure[backbone_task][fluctuating_task] [punish_or_nopunish].keys()) > 0:
# Script fragment (this chunk is truncated inside calculate_means below).
# Takes an output file plus control and treatment intermediate-format files,
# and groups per-sample amplitude records by task name.
# NOTE(review): `OptionParser` and `rif` are imported outside this chunk.
usage = """usage: %prog [options] output_file control_data_file treatment_data_file """
parser = OptionParser(usage)
## fetch the args
(options, args) = parser.parse_args()
## parameter error: require output file plus both data files
if len(args) < 3:
    parser.error("incorrect number of arguments")
output_filename = args[0]
control_filename = args[1]
treatment_filename = args[2]
# Parse both intermediate-format inputs into section dicts.
control_data = rif.read_intermediate_datafile(control_filename)
treatment_data = rif.read_intermediate_datafile(treatment_filename)

def calculate_means( data ):
    # Group the per-sample amplitude records under their task name.
    # Keys look like "<taskname>_<replicate>", so everything before the
    # first underscore identifies the task — TODO confirm key format.
    ## calculate means across replicates
    #gather the values
    tasks = {}
    for task_and_replicate in data['Task_Oscillation_Amplitudes_By_Sample_And_Task'].keys():
        taskname = task_and_replicate.split('_')[0]
        if not taskname in tasks:
            tasks[taskname] = []
        # Collect this replicate's record under its task.
        tasks[taskname].append( data['Task_Oscillation_Amplitudes_By_Sample_And_Task'][ task_and_replicate ] )
        # (function body continues beyond this chunk)
# Set up options usage = """usage: %prog [options] output_file data_file """ parser = OptionParser(usage) ## fetch the args (options, args) = parser.parse_args() ## parameter error if len(args) < 2: parser.error("incorrect number of arguments") output_filename = args[0] input_filename = args[1] data = rif.read_intermediate_datafile(input_filename) ## get means control_means = [] treatment_means = [] control_stds = [] treatment_stds = [] for task_name in data[ 'Mean_Median_Task_Oscillation_Amplitude_By_Task__Control'].keys(): control_mean = float( data['Mean_Median_Task_Oscillation_Amplitude_By_Task__Control'] [task_name][0]) treatment_mean = float(
import read_intermediate_files as rif data_structure = rif.read_intermediate_datafile('intermediate_file_format.txt') print str(data_structure) rif.output_to_intermediate_format( data_structure )
        # Fragment from inside a per-file loop (both ends of this chunk are
        # truncated; the leading `continue` belongs to a filter branch whose
        # condition lies outside this chunk).
        # Decodes a Mann-Whitney-U stats filename into (backbone task,
        # fluctuating task, punish/nopunish) and files the parsed data under
        # that triple.
        # NOTE(review): `file`, `translation`, `data_structure`, and `rif` are
        # defined outside this chunk; indentation is reconstructed — verify
        # against the original script.
        continue
filenamebits = file.split('_')
## interpret the filename, which should be in the following format:
##   33_Andn_Backbone__mann_whitney_u_stats__control_vs_punish_xor
##      [1]                                  [11]      [12]
backbone_task = translation[ filenamebits[1] ]
fluctuating_task = translation[ filenamebits[12] ] # should be the same as [-1]
punish_or_nopunish = filenamebits[11]
# Lazily build the nested dict: backbone -> fluctuating -> punish/nopunish.
if not backbone_task in data_structure.keys():
    data_structure[backbone_task] = {} ## keyed by the fluctuating task
if not fluctuating_task in data_structure[backbone_task].keys():
    data_structure[backbone_task][fluctuating_task] = {} ## keyed by the punish/nopunish
data = rif.read_intermediate_datafile(file)
data_structure[backbone_task][fluctuating_task][ punish_or_nopunish ] = data

# Second pass: summarize the nested structure into means and p-values.
means = {}
pvalues = {}
for backbone_task in data_structure.keys(): ## loop through the backbone tasks
    means[ backbone_task ] = {}
    pvalues[ backbone_task ] = {}
    for fluctuating_task in data_structure[ backbone_task ].keys():
        for punish_or_nopunish in data_structure[ backbone_task ][
                fluctuating_task ].keys():
            # Only process entries whose parsed file was non-empty.
            if len(data_structure[ backbone_task ][ fluctuating_task ][
                    punish_or_nopunish ].keys()) > 0:
                # (body continues beyond this chunk)