def get_target_muscle_cell_data(analysis_var, analysis_start_time, sim_time,
                                cell_ref, targets):
    """
    Load a recorded muscle cell voltage trace from file and analyse it to
    produce target values for optimisation.

    Based on: https://github.com/openworm/muscle_model/blob/master/pyramidal_implementation/data_analysis.py

    :param analysis_var: dict of analysis parameters (peak_delta, baseline, ...)
    :param analysis_start_time: time (ms) at which analysis starts
    :param sim_time: simulation duration; NOTE(review): currently unused here,
        kept for interface compatibility with callers
    :param cell_ref: key under which the loaded trace is stored in volts
    :param targets: target metrics passed to NetworkAnalysis.analyse()
    :returns: (target_data, volts, times)
    """
    data_fname = "tune/redacted_data.txt"
    data_dt = 0.0002  # sampling step of the recording, in seconds

    # Load the voltage trace; values in the file are in V, convert to mV.
    # Use a context manager so the handle is always closed (the original
    # left the file open), and avoid shadowing the builtin name 'file'.
    with open(data_fname) as data_file:
        v = [float(line) * 1000 for line in data_file]

    volts = {cell_ref: v}

    # Matching time vector in ms (data_dt is in seconds, hence * 1000).
    times = [float(data_dt * i * 1000) for i in range(len(v))]

    analysis_i = analysis.NetworkAnalysis(volts,
                                          times,
                                          analysis_var,
                                          start_analysis=analysis_start_time,
                                          smooth_data=True,
                                          show_smoothed_data=False,
                                          smoothing_window_len=33)

    target_data = analysis_i.analyse(targets)

    return target_data, volts, times
def test_net_analysis_data(self):
    """Check NetworkAnalysis results against known reference values."""
    print("- test_net_analysis_data()")

    analysis_var = {
        'peak_delta': 0,
        'baseline': 0,
        'dvdt_threshold': 0,
        'peak_threshold': 0
    }

    times, volts0 = self.get_real_data()
    volts = {'v': volts0}

    # Only one max/min detection method is exercised at present.
    for method in [analysis.max_min_simple]:
        print('-------------- Analysing with: %s in NetworkAnalysis' % method)

        net_analysis = analysis.NetworkAnalysis(volts,
                                                times,
                                                analysis_var,
                                                start_analysis=0,
                                                end_analysis=1000,
                                                smooth_data=False,
                                                show_smoothed_data=False)

        extra = [
            'v:value_50', 'v:value_200', 'v:average_200_201',
            'v:average_100_200'
        ]
        analysed = net_analysis.analyse(extra_targets=extra)
        pp.pprint(analysed)

        # Reference values recorded from a known-good run.
        expected = {
            'v:average_100_200': -55.34134969170771,
            'v:average_200_201': -73.48867227722772,
            'v:average_last_1percent': -63.559740088571395,
            'v:average_maximum': 20.332122777777784,
            'v:average_minimum': -78.491198000000011,
            'v:first_spike_time': 108.44,
            'v:interspike_time_covar': 0.019741062134352557,
            'v:max_peak_no': 18,
            'v:maximum': 23.789272,
            'v:mean_spike_frequency': 34.545824019508231,
            'v:min_peak_no': 17,
            'v:minimum': -79.47941999999999,
            'v:peak_decay_exponent': -0.064912249086890028,
            'v:peak_linear_gradient': -0.0020092762353973994,
            'v:spike_broadening': 1.0495985656104889,
            'v:spike_frequency_adaptation': 0.015301587514290829,
            'v:spike_width_adaptation': 0.0078514736435321194,
            'v:trough_decay_exponent': 0.0043242589967644922,
            'v:trough_phase_adaptation': 0.010484189508080874,
            'v:value_200': -75.04318,
            'v:value_50': -62.48935
        }

        for key in analysed:
            self.assertAlmostEqual(analysed[key], expected[key])
def evaluate(self, candidates, args):
    """Simulate every candidate and return a fitness value per candidate."""
    print("\n>>>>> Evaluating: ")
    for cand in candidates:
        print(">>>>> %s" % cand)

    simulations_data = self.controller.run(candidates, self.parameters)

    fitness = []
    for data in simulations_data:
        # Each entry is (times, volts, ...) from the controller run.
        times, volts = data[0], data[1]

        net_analysis = analysis.NetworkAnalysis(
            volts,
            times,
            self.analysis_var,
            start_analysis=self.analysis_start_time,
            end_analysis=self.analysis_end_time)
        net_analysis.analyse(self.targets)

        score = self.evaluate_fitness(net_analysis,
                                      self.targets,
                                      self.weights,
                                      cost_function=normalised_cost_function)
        fitness.append(score)
        print('Fitness: %s\n' % score)

    return fitness
sim_var, show=True) print("Have run individual instance...") peak_threshold = 0 analysis_var = { 'peak_delta': 0, 'baseline': 0, 'dvdt_threshold': 0, 'peak_threshold': peak_threshold } example_run_analysis = analysis.NetworkAnalysis( example_run_v, example_run_t, analysis_var, start_analysis=0, end_analysis=sim_time) analysis = example_run_analysis.analyse() pp.pprint(analysis) analysis = example_run_analysis.analyse(weights0.keys()) pp.pprint(analysis) elif '-osc' in sys.argv: parameters = [ 'neuron_to_muscle_chem_exc_syn_gbase', 'chem_exc_syn_decay',
def _run_all(self):
    """
    Submit all pending simulations to a local Parallel Python (pp) job
    server, then collect each job's traces/events and write the analysis
    into self.report['Simulations'][ref]['analysis'].
    """
    # Parallel Python job server; 'pp' is a py2-era third-party library.
    import pp
    ppservers = ()
    job_server = pp.Server(self.num_parallel_runs, ppservers=ppservers)
    print_v('\n == Running %i jobs across %i local processes\n ' %
            (self.total_todo, job_server.get_ncpus()))

    jobs = []
    job_refs = {}  # job index -> simulation reference
    submitted = 0

    # ---- Submission phase: one pp job per simulation entry.
    for ref in self.report['Simulations']:
        report_here = self.report['Simulations'][ref]
        params = report_here['parameters']
        print_v("---- Submitting %s: %s" % (ref, params))
        # Each job gets its own result subdirectory.
        job_dir = os.path.join(self.result_dir, ref)
        os.mkdir(job_dir)
        # NOTE(review): 'vars' shadows the builtin vars() inside this method.
        vars = (self.runner, submitted + 1, self.total_todo, job_dir, params)
        job = job_server.submit(
            run_instance,
            args=vars,
            modules=('pyneuroml.pynml', 'shutil', 'neuroml', 'neuromllite',
                     'neuromllite.sweep.ParameterSweep', 'neuromllite.utils'))
        jobs.append(job)
        job_refs[len(jobs) - 1] = ref
        submitted += 1

    print_v("Submitted all jobs: %s" % job_refs)

    # ---- Collection phase: calling job() blocks until that job finishes.
    for job_i in range(len(jobs)):
        job = jobs[job_i]
        ref = job_refs[job_i]
        report_here = self.report['Simulations'][ref]
        report_here['analysis'] = OrderedDict()
        params = report_here['parameters']
        print_v("Checking parallel job %i/%i (%s)" % (job_i, len(jobs), ref))
        traces, events = job()
        self.last_network_ran = None

        if len(traces) > 0:
            # Convert times from s to ms; collect voltage ('/v', V -> mV)
            # and rate ('/r', unscaled) traces for analysis.
            times = [t * 1000. for t in traces['t']]
            volts = OrderedDict()
            for tr in traces:
                if tr.endswith('/v'):
                    volts[tr] = [v * 1000. for v in traces[tr]]
                if tr.endswith('/r'):
                    volts[tr] = [r for r in traces[tr]]

            print_v("Analysing %s..." % traces.keys())

            analysis_data = analysis.NetworkAnalysis(
                volts,
                times,
                self.analysis_var,
                start_analysis=0,
                end_analysis=times[-1],
                smooth_data=False,
                show_smoothed_data=False,
                verbose=self.verbose)
            analysed = analysis_data.analyse()

            # Analysis keys are '<trace ref>:<variable>'; regroup per trace.
            for a in sorted(analysed.keys()):
                ref0, var = a.split(':')
                if not ref0 in report_here['analysis']:
                    report_here['analysis'][ref0] = OrderedDict()
                report_here['analysis'][ref0][var] = analysed[a]

        # Event (spike time) lists: derive a mean spike frequency per source.
        for e in sorted(events.keys()):
            x = events[e]
            print_v('Examining event %s: %s -> %s (len: %i)' %
                    (e, x[0] if len(x) > 0 else '-',
                     x[-1] if len(x) > 0 else '-', len(x)))
            ref0 = '%s/spike' % e
            analysed = OrderedDict()
            l = len(x)
            # Frequency = spike count / total simulated time in seconds.
            tmax_si = self._get_sim_duration_ms(params) / 1000.
            f_hz = l / tmax_si
            #print_v('This has %s points in %s sec, so %s Hz'%(l,tmax_si, f_hz))
            analysed["mean_spike_frequency"] = f_hz
            if not ref0 in report_here['analysis']:
                report_here['analysis'][ref0] = OrderedDict()
            report_here['analysis'][ref0] = analysed

        if self.plot_all or self.heatmap_all:
            for y in traces.keys():
                if y != 't':
                    # Population id is the part before '[' (or before '/').
                    pop_id = y.split('[')[0] if '[' in y else y.split(
                        '/')[0]
                    pop = None  # self.last_network_ran.get_child(pop_id, 'populations')
                    if pop:
                        color = [
                            float(c) for c in pop.properties['color'].split()
                        ]
                        #print_v("This trace %s has population %s: %s, so color: %s"%(y,pop_id,pop,color))
                    else:
                        #print_v("This trace %s has population %s: %s which has no color..."%(y,pop_id,pop))
                        color = [1, 0, 0]

                    if self.plot_all:
                        label = '%s (%s)' % (y, params)
                        # Plot in ms / mV.
                        self.ax.plot([t * 1000 for t in traces['t']],
                                     [v * 1000 for v in traces[y]],
                                     label=label)

                    if self.heatmap_all:
                        # Downsample to roughly one point per 0.1 ms.
                        dt = self.sim.dt if not 'dt' in params else params[
                            'dt']
                        downscale = int(0.1 / dt)
                        d = [
                            traces[y][i] * 1000
                            for i in range(len(traces[y]))
                            if i % downscale == 0
                        ]
                        tt = [
                            traces['t'][i] * 1000
                            for i in range(len(traces['t']))
                            if i % downscale == 0
                        ]
                        # NOTE(review): dict.keys() is not subscriptable on
                        # Python 3 — this line assumes Python 2; confirm.
                        param_name = self.vary.keys()[0]
                        pval = get_value_in_si(params[param_name])
                        if self.hm_x == None:
                            self.hm_x = tt
                        print_v(
                            ' == Trace %s (%s) downscaled by factor %i from %i to %i points for heatmap; y value: %s=%s'
                            % (y, ref, downscale, len(
                                traces[y]), len(d), param_name, pval))
                        self.hm_y.append(pval)
                        self.hm_z.append(d)

        print_v("Finished checking parallel job %i/%i (%s)" %
                (job_i, len(jobs), ref))

    job_server.print_stats()
    job_server.destroy()
    print_v("-------------------------------------------")
def simple_network_analysis(
    volts,
    times,
    analysis_var=None,
    start_analysis=0,
    end_analysis=None,
    plot=False,
    show_plot_already=True,
    targets=None,
    extra_targets=None,
    verbose=False,
):
    """
    A utility function to quickly carry out a simple network analysis
    (NetworkAnalysis) of one or more traces.

    :param volts: dict mapping trace reference -> time-dependent variable
        (usually voltage)
    :type volts: dict
    :param times: time-vector shared by all traces
    :type times: iterable
    :param analysis_var: dictionary containing parameters to be used in
        analysis such as delta for peak detection; defaults to all zeros
    :type analysis_var: dict
    :param start_analysis: time t where analysis is to start
    :type start_analysis: float
    :param end_analysis: time in t where analysis is to end (defaults to
        the last time point)
    :type end_analysis: float
    :returns: dictionary of analysis results
    """
    if analysis_var is None:
        analysis_var = {
            "peak_delta": 0,
            "baseline": 0,
            "dvdt_threshold": 0,
            "peak_threshold": 0,
        }

    analysed = analysis.NetworkAnalysis(
        volts,
        times,
        analysis_var,
        start_analysis=start_analysis,
        end_analysis=end_analysis if end_analysis is not None else times[-1],
        smooth_data=False,
        show_smoothed_data=False,
        verbose=verbose,
    )
    analysed.analyse(targets=targets, extra_targets=extra_targets)
    analysis.print_comment_v(pp.pformat(analysed.analysis_results))

    if plot:
        fig = pylab.figure()
        # NOTE(review): fig.canvas.set_window_title was removed in
        # matplotlib >= 3.6; fig.canvas.manager.set_window_title is the
        # modern spelling — confirm the matplotlib version in use.
        fig.canvas.set_window_title(
            "Data analysed (%i traces at %i time points): %s" %
            (len(volts.keys()), len(times), volts.keys()))
        pylab.xlabel("Time (ms)")
        pylab.ylabel("Voltage (mV)")
        pylab.grid("on")

        for vk in volts.keys():
            vs = volts[vk]
            maxmin = analysed.max_min_dictionaries[vk]
            pre = "%s:" % vk

            # Draw a horizontal reference line for each summary metric
            # that the analysis produced for this trace.
            if analysed.analysis_results:
                for metric in ("average_maximum", "maximum",
                               "average_minimum", "minimum"):
                    if pre + metric in analysed.analysis_results:
                        _add_horizontal_line(
                            analysed.analysis_results[pre + metric], times)

            # Mark detected maxima (red) and minima (green).
            if maxmin:
                for mt, mv in zip(maxmin["maxima_times"],
                                  maxmin["maxima_values"]):
                    pylab.plot(mt, mv, "ro")
                for mt, mv in zip(maxmin["minima_times"],
                                  maxmin["minima_values"]):
                    pylab.plot(mt, mv, "go")

            pylab.plot(times, vs)

        if show_plot_already:
            pylab.show()

    return analysed.analysis_results
def run_optimisation(prefix,
                     config,
                     level,
                     parameters,
                     max_constraints,
                     min_constraints,
                     weights,
                     target_data,
                     sim_time=500,
                     dt=0.05,
                     analysis_start_time=0,
                     population_size=20,
                     max_evaluations=20,
                     num_selected=10,
                     num_offspring=20,
                     mutation_rate=0.5,
                     num_elites=1,
                     seed=12345,
                     simulator='jNeuroML',
                     nogui=False):
    """
    Run a GA optimisation of the given parameters against target_data,
    writing a report and plotting script into a fresh run directory.

    :param prefix: prefix for the run reference
    :param config: c302 configuration name (appended to prefix)
    :param level: c302 level ('A', 'B', 'C1', ...); selects peak_threshold
    :param parameters: names of parameters to tune
    :param max_constraints: upper bound per parameter
    :param min_constraints: lower bound per parameter
    :param weights: metric -> weight used for fitness evaluation
    :param target_data: metric -> target value
    :returns: None (results are written to the run directory)
    """
    ref = prefix + config

    run_dir = "NT_%s_%s" % (ref, time.ctime().replace(' ', '_').replace(
        ':', '.'))
    os.mkdir(run_dir)

    my_controller = C302Controller(ref,
                                   level,
                                   config,
                                   sim_time,
                                   dt,
                                   simulator=simulator,
                                   generate_dir=run_dir)

    # Spike detection threshold depends on the model level.
    # Fixed: the original compared strings with 'is', which tests identity,
    # not equality, and is unreliable for string literals.
    peak_threshold = -31 if level == 'A' or level == 'B' else (
        -20 if level == 'C1' else 0)

    analysis_var = {
        'peak_delta': 0,
        'baseline': 0,
        'dvdt_threshold': 0,
        'peak_threshold': peak_threshold
    }

    # Initial guess: midpoint of each parameter's constraint interval.
    sim_var = OrderedDict()
    for param, mx, mn in zip(parameters, max_constraints, min_constraints):
        sim_var[param] = mx / 2 + mn / 2

    #make an evaluator, using automatic target evaluation:
    my_evaluator = evaluators.NetworkEvaluator(
        controller=my_controller,
        analysis_start_time=analysis_start_time,
        analysis_end_time=sim_time,
        parameters=parameters,
        analysis_var=analysis_var,
        weights=weights,
        targets=target_data)

    #make an optimizer
    my_optimizer = optimizers.CustomOptimizerA(
        max_constraints,
        min_constraints,
        my_evaluator,
        population_size=population_size,
        max_evaluations=max_evaluations,
        num_selected=num_selected,
        num_offspring=num_offspring,
        num_elites=num_elites,
        mutation_rate=mutation_rate,
        seeds=None,
        verbose=False)

    start = time.time()
    #run the optimizer
    best_candidate, fitness = my_optimizer.optimize(do_plot=False,
                                                    seed=seed,
                                                    summary_dir=run_dir)
    secs = time.time() - start

    reportj = {}
    info = "Ran %s evaluations (pop: %s) in %f seconds (%f mins)\n\n" % (
        max_evaluations, population_size, secs, secs / 60.0)
    report = "----------------------------------------------------\n\n" + info
    reportj['comment'] = info
    reportj['time'] = secs

    # Re-run the fittest individual and analyse it fully.
    for key, value in zip(parameters, best_candidate):
        sim_var[key] = value

    best_candidate_t, best_candidate_v = my_controller.run_individual(
        sim_var, show=False)
    best_candidate_analysis = analysis.NetworkAnalysis(
        best_candidate_v,
        best_candidate_t,
        analysis_var,
        start_analysis=analysis_start_time,
        end_analysis=sim_time)
    best_cand_analysis_full = best_candidate_analysis.analyse()
    best_cand_analysis = best_candidate_analysis.analyse(weights.keys())

    report += "---------- Best candidate ------------------------------------------\n"
    report += pp.pformat(best_cand_analysis_full) + "\n"
    report += pp.pformat(best_cand_analysis) + "\n\n"
    report += "FITNESS: %f\n\n" % fitness
    print(report)

    reportj['fitness'] = fitness
    reportj['best_cand_analysis_full'] = best_cand_analysis_full
    reportj['best_cand_analysis'] = best_cand_analysis
    reportj['parameters'] = parameters
    reportj['analysis_var'] = analysis_var
    reportj['target_data'] = target_data
    reportj['weights'] = weights
    reportj['analysis_start_time'] = analysis_start_time
    reportj['population_size'] = population_size
    reportj['max_evaluations'] = max_evaluations
    reportj['num_selected'] = num_selected
    reportj['num_offspring'] = num_offspring
    reportj['mutation_rate'] = mutation_rate
    reportj['num_elites'] = num_elites
    reportj['sim_time'] = sim_time
    reportj['dt'] = dt

    # NOTE: pp.pformat produces a Python repr, not strict JSON, despite the
    # .json extension; preserved for compatibility with existing tooling.
    with open("%s/report.json" % run_dir, 'w') as report_file:
        report_file.write(pp.pformat(reportj))

    # Emit a small helper script for plotting the GA's generations.
    with open("%s/plotgens.py" % run_dir, 'w') as plot_file:
        plot_file.write(
            'from neurotune.utils import plot_generation_evolution\nimport os\n'
        )
        plot_file.write('\n')
        plot_file.write('parameters = %s\n' % parameters)
        plot_file.write('\n')
        plot_file.write(
            "curr_dir = os.path.dirname(__file__) if len(os.path.dirname(__file__))>0 else '.'\n"
        )
        plot_file.write(
            "plot_generation_evolution(parameters, individuals_file_name = '%s/ga_individuals.csv'%curr_dir)\n"
        )

    if not nogui:
        # Plot each distinct trace referenced by the weights once.
        added = []
        for wref in weights.keys():
            ref = wref.split(':')[0]
            if not ref in added:
                added.append(ref)
                plt.plot(best_candidate_t,
                         best_candidate_v[ref],
                         label="%s - %i evaluations" % (ref, max_evaluations))
        plt.legend()
        plt.ylim(-80.0, 80.0)
        plt.xlim(0.0, sim_time)
        plt.title("Models")
        plt.xlabel("Time (ms)")
        plt.ylabel("Membrane potential(mV)")
        plt.show()

        utils.plot_generation_evolution(
            sim_var.keys(),
            individuals_file_name='%s/ga_individuals.csv' % run_dir)
def _run_optimisation(a):
    """
    Run a GA optimisation of a NeuroML model driven by the argument
    object/namespace 'a', writing a report and plotting script into a
    fresh run directory and returning the report dict.
    """
    # Command-line values may arrive as strings; parse them into
    # lists/dicts before use.
    if isinstance(a.parameters, str):
        a.parameters = parse_list_arg(a.parameters)
    if isinstance(a.min_constraints, str):
        a.min_constraints = parse_list_arg(a.min_constraints)
    if isinstance(a.max_constraints, str):
        a.max_constraints = parse_list_arg(a.max_constraints)
    if isinstance(a.target_data, str):
        a.target_data = parse_dict_arg(a.target_data)
    if isinstance(a.weights, str):
        a.weights = parse_dict_arg(a.weights)
    if isinstance(a.known_target_values, str):
        a.known_target_values = parse_dict_arg(a.known_target_values)
    if isinstance(a.extra_report_info, str):
        a.extra_report_info = parse_dict_arg(a.extra_report_info)

    # Echo every setting for the log.
    pynml.print_comment_v(
        "====================================================================================="
    )
    pynml.print_comment_v("Starting run_optimisation with: ")
    keys = sorted(a.__dict__.keys())
    for key in keys:
        value = a.__dict__[key]
        pynml.print_comment_v("  %s = %s%s" %
                              (key, ' ' * (30 - len(key)), value))
    pynml.print_comment_v(
        "====================================================================================="
    )

    if a.dry_run:
        pynml.print_comment_v("Dry run; not running optimization...")
        return

    ref = a.prefix

    # Unique run directory stamped with the current time.
    run_dir = "NT_%s_%s" % (ref, time.ctime().replace(' ', '_').replace(
        ':', '.'))
    os.mkdir(run_dir)

    my_controller = NeuroMLController(
        ref,
        a.neuroml_file,
        a.target,
        a.sim_time,
        a.dt,
        simulator=a.simulator,
        generate_dir=run_dir,
        num_parallel_evaluations=a.num_parallel_evaluations,
        cleanup=a.cleanup)

    peak_threshold = 0

    analysis_var = {
        'peak_delta': 0,
        'baseline': 0,
        'dvdt_threshold': 0,
        'peak_threshold': peak_threshold
    }

    sim_var = OrderedDict()

    #make an evaluator, using automatic target evaluation:
    my_evaluator = evaluators.NetworkEvaluator(
        controller=my_controller,
        analysis_start_time=a.analysis_start_time,
        analysis_end_time=a.sim_time,
        parameters=a.parameters,
        analysis_var=analysis_var,
        weights=a.weights,
        targets=a.target_data)

    #make an optimizer
    my_optimizer = optimizers.CustomOptimizerA(
        a.max_constraints,
        a.min_constraints,
        my_evaluator,
        population_size=a.population_size,
        max_evaluations=a.max_evaluations,
        num_selected=a.num_selected,
        num_offspring=a.num_offspring,
        num_elites=a.num_elites,
        mutation_rate=a.mutation_rate,
        seeds=None,
        verbose=a.verbose)

    start = time.time()
    #run the optimizer
    best_candidate, fitness = my_optimizer.optimize(do_plot=False,
                                                    seed=a.seed,
                                                    summary_dir=run_dir)
    secs = time.time() - start

    reportj = {}
    info = "Ran %s evaluations (pop: %s) in %f seconds (%f mins total; %fs per eval)\n\n" % (
        a.max_evaluations, a.population_size, secs, secs / 60.0,
        (secs / a.max_evaluations))
    report = "----------------------------------------------------\n\n" + info
    reportj['comment'] = info
    reportj['time'] = secs

    # Re-run the fittest individual with its tuned parameter values.
    for key, value in zip(a.parameters, best_candidate):
        sim_var[key] = value

    best_candidate_t, best_candidate_v = my_controller.run_individual(
        sim_var, show=False, cleanup=False)

    best_candidate_analysis = analysis.NetworkAnalysis(
        best_candidate_v,
        best_candidate_t,
        analysis_var,
        start_analysis=a.analysis_start_time,
        end_analysis=a.sim_time)

    best_cand_analysis_full = best_candidate_analysis.analyse()
    best_cand_analysis = best_candidate_analysis.analyse(a.weights.keys())

    report += "---------- Best candidate ------------------------------------------\n"
    report += pp.pformat(best_cand_analysis_full) + "\n\n"
    report += "TARGETS: \n"
    report += pp.pformat(a.target_data) + "\n\n"
    report += "TUNED VALUES:\n"
    report += pp.pformat(best_cand_analysis) + "\n\n"
    report += "FITNESS: %f\n\n" % fitness
    report += "FITTEST: %s\n\n" % pp.pformat(dict(sim_var))

    pynml.print_comment_v(report)

    reportj['fitness'] = fitness
    reportj['fittest vars'] = dict(sim_var)
    reportj['best_cand_analysis_full'] = best_cand_analysis_full
    reportj['best_cand_analysis'] = best_cand_analysis
    reportj['parameters'] = a.parameters
    reportj['analysis_var'] = analysis_var
    reportj['target_data'] = a.target_data
    reportj['weights'] = a.weights
    reportj['analysis_start_time'] = a.analysis_start_time
    reportj['population_size'] = a.population_size
    reportj['max_evaluations'] = a.max_evaluations
    reportj['num_selected'] = a.num_selected
    reportj['num_offspring'] = a.num_offspring
    reportj['mutation_rate'] = a.mutation_rate
    reportj['num_elites'] = a.num_elites
    reportj['seed'] = a.seed
    reportj['simulator'] = a.simulator
    reportj['sim_time'] = a.sim_time
    reportj['dt'] = a.dt
    reportj['run_directory'] = run_dir
    reportj['reference'] = ref

    if a.extra_report_info:
        for key in a.extra_report_info:
            reportj[key] = a.extra_report_info[key]

    # NOTE(review): pp.pformat writes a Python repr, not strict JSON,
    # despite the .json filename.
    report_file = open("%s/report.json" % run_dir, 'w')
    report_file.write(pp.pformat(reportj))
    report_file.close()

    # Emit a small helper script for plotting the GA's generations.
    plot_file = open("%s/plotgens.py" % run_dir, 'w')
    plot_file.write(
        'from neurotune.utils import plot_generation_evolution\nimport os\n')
    plot_file.write('\n')
    plot_file.write('parameters = %s\n' % a.parameters)
    plot_file.write('\n')
    plot_file.write(
        "curr_dir = os.path.dirname(__file__) if len(os.path.dirname(__file__))>0 else '.'\n"
    )
    plot_file.write(
        "plot_generation_evolution(parameters, individuals_file_name = '%s/ga_individuals.csv'%curr_dir)\n"
    )
    plot_file.close()

    if not a.nogui:
        added = []
        #print("Plotting saved data from %s which are relevant for targets: %s"%(best_candidate_v.keys(), a.target_data.keys()))
        fig = plt.figure()
        # NOTE(review): fig.canvas.set_window_title was removed in
        # matplotlib >= 3.6; confirm the matplotlib version in use.
        fig.canvas.set_window_title(
            "Simulation of fittest individual from run: %s" % ref)

        # Plot each distinct trace reference once.
        for tref in best_candidate_v.keys(
        ):  ##################a.target_data.keys():
            ref = tref.split(':')[0]
            if not ref in added:
                added.append(ref)
                #pynml.print_comment("  - Adding plot of: %s"%ref)
                plt.plot(best_candidate_t,
                         best_candidate_v[ref],
                         label="%s - %i evaluations" %
                         (ref, a.max_evaluations))

        plt.legend()
        #plt.ylim(-80.0,80.0)
        plt.xlim(0.0, a.sim_time)
        plt.title("Models %s" % a.prefix)
        plt.xlabel("Time (ms)")
        plt.ylabel("Membrane potential(mV)")

        utils.plot_generation_evolution(
            sim_var.keys(),
            individuals_file_name='%s/ga_individuals.csv' % run_dir,
            target_values=a.known_target_values,
            show_plot_already=a.show_plot_already,
            title_prefix=ref)

        if a.show_plot_already:
            plt.show()

    return reportj
else: cont = NeuroMLController( 'TestHH', '../../examples/test_data/HHCellNetwork.net.nml', 'HHCellNetwork', sim_time, dt, 'jNeuroML', 'temp/') sim_vars = OrderedDict([ ('cell:hhcell/channelDensity:naChans/mS_per_cm2', 100), ('cell:hhcell/channelDensity:kChans/mS_per_cm2', 20) ]) t, v = cont.run_individual(sim_vars, show=(not nogui)) from pyelectro import analysis analysis_var = { 'peak_delta': 0, 'baseline': 0, 'dvdt_threshold': 0, 'peak_threshold': 0 } data_analysis = analysis.NetworkAnalysis(v, t, analysis_var, start_analysis=0, end_analysis=sim_time) analysed = data_analysis.analyse() pp.pprint(analysed)
seed=12345) secs = time.time() - start print("----------------------------------------------------\n\n" + "Ran %s evaluations (pop: %s) in %f seconds (%f mins)\n" % (max_evaluations, population_size, secs, secs / 60.0)) for key, value in zip(parameters, best_candidate): sim_var[key] = value best_candidate_t, best_candidate_v = my_controller.run_individual( sim_var, show=False) best_candidate_analysis = analysis.NetworkAnalysis( best_candidate_v, best_candidate_t, analysis_var, start_analysis=analysis_start_time, end_analysis=sim_time) best_candidate_analysis.analyse() #data_plot = plt.plot(t,v[cell_ref], label="Original data") best_candidate_plot = plt.plot(best_candidate_t, best_candidate_v[cell_ref], label="Best model - %i evaluations" % max_evaluations) plt.legend() plt.ylim(-80.0, 80.0) plt.xlim(0.0, 1000.0)
times, volts = swc.run_individual(sim_vars, showPlots, False, prefix="Orig: ") analysis_var = { 'peak_delta': 0, 'baseline': 0, 'dvdt_threshold': 0, 'peak_threshold': 0 } surrogate_analysis = analysis.NetworkAnalysis(volts, times, analysis_var, start_analysis=0, end_analysis=1000) # The output of the analysis will serve as the basis for model optimization: surrogate_targets = surrogate_analysis.analyse() pp = pprint.PrettyPrinter(indent=4) print("Surrogate analysis") pp.pprint(surrogate_targets) weights = { 'wave_2:average_maximum': 1, 'wave_2:average_minimum': 1, 'wave_3:average_maximum': 1, 'wave_3:average_minimum': 1, 'wave_3:mean_spike_frequency': 1
def simple_network_analysis(volts,
                            times,
                            analysis_var=None,
                            start_analysis=0,
                            end_analysis=None,
                            plot=False,
                            show_plot_already=True,
                            targets=None,
                            extra_targets=None,
                            verbose=False):
    """
    A utility function to quickly carry out a simple network analysis
    (NetworkAnalysis) of one or more traces.

    :param volts: dict mapping trace reference -> time-dependent variable
        (usually voltage)
    :param times: time-vector shared by all traces
    :param analysis_var: dict of analysis parameters (peak_delta etc.);
        defaults to all zeros
    :param start_analysis: time t where analysis is to start
    :param end_analysis: time in t where analysis is to end (defaults to
        the last time point)
    :param plot: whether to plot traces with detected maxima/minima
    :param show_plot_already: whether to call pylab.show() after plotting
    :returns: dictionary of analysis results
    """
    # Fixed: use 'is None' instead of '== None' (identity check).
    if analysis_var is None:
        analysis_var = {
            'peak_delta': 0,
            'baseline': 0,
            'dvdt_threshold': 0,
            'peak_threshold': 0
        }

    analysed = analysis.NetworkAnalysis(
        volts,
        times,
        analysis_var,
        start_analysis=start_analysis,
        end_analysis=end_analysis if end_analysis is not None else times[-1],
        smooth_data=False,
        show_smoothed_data=False,
        verbose=verbose)
    analysed.analyse(targets=targets, extra_targets=extra_targets)
    analysis.print_comment_v(pp.pformat(analysed.analysis_results))

    if plot:
        fig = pylab.figure()
        fig.canvas.set_window_title(
            "Data analysed (%i traces at %i time points): %s" %
            (len(volts.keys()), len(times), volts.keys()))
        pylab.xlabel('Time (ms)')
        pylab.ylabel('Voltage (mV)')
        pylab.grid('on')

        for vk in volts.keys():
            vs = volts[vk]
            maxmin = analysed.max_min_dictionaries[vk]
            pre = '%s:' % vk

            # Draw a horizontal reference line for each summary metric
            # present for this trace.
            # Fixed: dict.has_key() was removed in Python 3; use 'in'.
            if analysed.analysis_results:
                for metric in ('average_maximum', 'maximum',
                               'average_minimum', 'minimum'):
                    if pre + metric in analysed.analysis_results:
                        _add_horizontal_line(
                            analysed.analysis_results[pre + metric], times)

            # Mark detected maxima (red) and minima (green).
            if maxmin:
                for i in range(len(maxmin['maxima_times'])):
                    pylab.plot(maxmin['maxima_times'][i],
                               maxmin['maxima_values'][i], 'ro')
                for i in range(len(maxmin['minima_times'])):
                    pylab.plot(maxmin['minima_times'][i],
                               maxmin['minima_values'][i], 'go')

            pylab.plot(times, vs)

        if show_plot_already:
            pylab.show()

    return analysed.analysis_results