def analyse(self, cords, ns=10, absolute=True):
    import pylab
    import pickle
    for (xindex, yindex) in cords:
        if absolute == False:
            # coordinates are given relative to the sheet center
            xindex = self.center_c + xindex
            yindex = self.center_r + yindex
        xcoor, ycoor = self.sheet.matrixidx2sheet(xindex, yindex)
        print "Starting surround analysis for cell with index coords and sheet coords: [%d,%d] [%f,%f]" % (xindex, yindex, xcoor, ycoor)

        c = topo.command.pylabplot.measure_size_response.instance(
            sheet=self.sheet, num_phase=__main__.__dict__.get('NUM_PHASE', 8),
            num_sizes=ns, max_size=__main__.__dict__.get('MAX_SIZE', 1.5),
            coords=[(xcoor, ycoor)], duration=__main__.__dict__.get('duration', 4.0))
        c.duration = __main__.__dict__.get('duration', 4.0)  # was 'duraton' (typo), which silently set an unused attribute
        c(coords=[(xcoor, ycoor)], frequencies=[__main__.__dict__.get('FREQ', 2.4)])

        self.data_dict[(xindex, yindex)] = {}
        self.data_dict[(xindex, yindex)]["ST"] = self.calculate_RF_sizes(xindex, yindex)
        self.plot_size_tunning(xindex, yindex)
        #pylab.show()
        self.data_dict[(xindex, yindex)]["OCT"] = self.perform_orientation_contrast_analysis(
            self.data_dict[(xindex, yindex)]["ST"], xcoor, ycoor, xindex, yindex)
        self.plot_orientation_contrast_tuning(xindex, yindex)

        # checkpoint the accumulated measurements after each cell
        f = open(normalize_path("dict.dat"), 'wb')
        pickle.dump((self.OR, self.OS, self.MR, self.data_dict), f)
        f.close()

    self.plot_average_oct(independent=True)
    self.plot_average_size_tuning_curve(independent=True)

    if False:
        self.lhi = compute_local_homogeneity_index(
            self.sheet.sheet_views['OrientationPreference'].view()[0] * pi, 2.0)
        f = open(normalize_path('lhi2.0.pickle'), 'wb')
        pickle.dump(self.lhi, f)
        f.close()
    else:
        f = open(normalize_path('lhi2.0.pickle'), 'rb')
        self.lhi = pickle.load(f)

    # determine pinwheels and domain centers from the local homogeneity index
    pinwheels = []
    centers = []
    for coords in self.data_dict.keys():
        if self.lhi[coords] < 0.5:
            pinwheels.append(coords)
        if self.lhi[coords] > 0.5:
            centers.append(coords)

    self.plot_average_oct(keys=pinwheels, independent=True, string="pinwheels")
    self.plot_average_oct(keys=centers, independent=True, string="domains")

    pylab.figure()
    pylab.imshow(self.lhi)
    pylab.colorbar()
    release_fig("LHI")

    raster_plots_lc, raster_plots_hc = self.plot_map_feature_to_surround_modulation_feature_correlations(self.lhi, "Local Homogeneity Index")
    self.correlations_figure(raster_plots_lc)

def check_RF_corrleation_vs_connection_weights_correlation():
    _check_RF_corrleation_vs_connection_weights_correlation(
        topo.sim["V1Simple"].projections()["LGNOnAfferent"],
        topo.sim["V1Simple"].projections()["LGNOnAfferent"],
        topo.sim["V1Simple"].projections()["L4EtoE"],
        topo.sim["LGNOn"], topo.sim["V1Simple"], topo.sim["V1Simple"])
    pylab.savefig(normalize_path('RF_connectivity_correlation_EE:' + str(topo.sim.time()) + '.png'))

    _check_RF_corrleation_vs_connection_weights_correlation(
        topo.sim["V1Simple"].projections()["LGNOnAfferent"],
        topo.sim["V1SimpleInh"].projections()["LGNOnAfferentInh"],
        topo.sim["V1SimpleInh"].projections()["L4EtoI"],
        topo.sim["LGNOn"], topo.sim["V1Simple"], topo.sim["V1SimpleInh"])
    pylab.savefig(normalize_path('RF_connectivity_correlation_EI:' + str(topo.sim.time()) + '.png'))

    _check_RF_corrleation_vs_connection_weights_correlation(
        topo.sim["V1SimpleInh"].projections()["LGNOnAfferentInh"],
        topo.sim["V1Simple"].projections()["LGNOnAfferent"],
        topo.sim["V1Simple"].projections()["L4ItoE"],
        topo.sim["LGNOn"], topo.sim["V1SimpleInh"], topo.sim["V1Simple"])
    pylab.savefig(normalize_path('RF_connectivity_correlation_IE:' + str(topo.sim.time()) + '.png'))

    _check_RF_corrleation_vs_connection_weights_correlation(
        topo.sim["V1SimpleInh"].projections()["LGNOnAfferentInh"],
        topo.sim["V1SimpleInh"].projections()["LGNOnAfferentInh"],
        topo.sim["V1SimpleInh"].projections()["L4ItoI"],
        topo.sim["LGNOn"], topo.sim["V1SimpleInh"], topo.sim["V1SimpleInh"])
    pylab.savefig(normalize_path('RF_connectivity_correlation_II:' + str(topo.sim.time()) + '.png'))

def measure_histogram(iterations=1000, sheet_name="V1"):
    import contrib.jacommands

    topo.sim["V1"].plastic = False
    topo.sim.state_push()
    for i in xrange(0, iterations):
        topo.sim.run(1)
        contrib.jacommands.collect_activity(sheet_name)
    topo.sim.state_pop()

    concat_activities = []
    for a in contrib.jacommands.activities:
        # 1-D arrays are joined along their only axis (the original passed
        # axis=1, which is invalid for flattened arrays)
        concat_activities = numpy.concatenate((concat_activities, a.flatten()))

    topo.sim["V1"].plastic = True
    contrib.jacommands.activities = []

    pylab.figure()
    pylab.subplot(111, yscale='log')
    #pylab.subplot(111)
    print shape(concat_activities)

    mu = sum(concat_activities) / len(concat_activities)
    print mu

    # note: 'bins' here holds the per-bin counts returned by hist
    (bins, a, b) = pylab.hist(concat_activities, (numpy.arange(80.0) / 40.0), visible=True)
    pylab.savefig(normalize_path(str(topo.sim.time()) + 'activity_bar_histogram.png'))

    bins_axis = numpy.arange(79.0) / 40.0
    bins = bins * 1.0 / sum(bins)
    print sum(bins)
    exponential = numpy.arange(79, dtype='float32') / 40.0
    # compute the mean of the actual distribution
    #mu=0.024
    pylab.figure()
    pylab.subplot(111, yscale='log')
    print len(bins_axis)
    print len(bins)
    print bins_axis
    print bins
    print numpy.exp(- (1 / mu) * (exponential + 0.025))
    print numpy.exp(- (1 / mu) * (exponential))
    # probability mass of an exponential distribution with mean mu,
    # integrated over each histogram bin of width 0.025
    exponential = - numpy.exp(- (1 / mu) * (exponential + 0.025)) + numpy.exp(- (1 / mu) * (exponential))

    pylab.plot(bins_axis, bins)
    pylab.plot(bins_axis, bins, 'ro')
    pylab.plot(bins_axis, exponential)
    pylab.plot(bins_axis, exponential, 'go')
    pylab.axis(ymin=0.0000000001, ymax=100)
    #pylab.axis("tight")
    print mean(exponential)
    print mean(bins)
    #pylab.show()
    pylab.savefig(normalize_path(str(topo.sim.time()) + 'activity_histogram.png'))
    return bins

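# Usage sketch (not part of the original source): collect an activity
# histogram over a shorter run. Assumes a model with a sheet named "V1" is
# already loaded and contrib.jacommands is importable.
def _example_measure_histogram():
    bins = measure_histogram(iterations=200, sheet_name="V1")
    print "Normalized histogram mass per bin:", bins
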
def specify(self, spec, tid, info):
    spec_path = os.path.join(param.normalize_path(), info['root_directory'], 'specifications')
    if not os.path.isdir(spec_path):
        os.makedirs(spec_path)
    spec_file_path = os.path.join(spec_path, 'spec-%s' % tid)
    prefix = [str(tid), param.normalize_path(info['root_directory'])]
    if self.argorder == []:
        specstr = ' '.join(prefix + spec.values())
    else:
        specstr = ' '.join(prefix + [spec[k] for k in self.argorder])
    with open(spec_file_path, 'w') as specfile:
        specfile.write(specstr)

def run_lhi_informed_analysis(self, max_curves=26, center_size=20, index=None):
    if True:
        self.lhi = compute_local_homogeneity_index(self.OR * pi, __main__.__dict__.get('LHI', 2.0))
        f = open(normalize_path('lhi' + str(__main__.__dict__.get('LHI', 2.0)) + '.pickle'), 'wb')
        pickle.dump(self.lhi, f)
        f.close()
    else:
        f = open(normalize_path('lhi' + str(__main__.__dict__.get('LHI', 2.0)) + '.pickle'), 'rb')
        self.lhi = pickle.load(f)

    lhi_center = self.lhi[self.center_r - center_size:self.center_r + center_size,
                          self.center_c - center_size:self.center_c + center_size]

    steps = []
    r = RandomState(1023)

    if not __main__.__dict__.get('uniform', False):
        pinwheels = r.permutation(numpy.nonzero(numpy.ravel(lhi_center) < __main__.__dict__.get('cutoff', 0.3))[0])
        domains = r.permutation(numpy.nonzero(numpy.ravel(lhi_center) > (1 - __main__.__dict__.get('cutoff', 0.3)))[0])
        assert len(pinwheels) > max_curves / 2
        #s = numpy.argsort(numpy.ravel(lhi_center))
        if index is None:
            # alternate pinwheel and domain units
            for i in xrange(0, max_curves / 2):
                (x, y) = numpy.unravel_index(pinwheels[i], lhi_center.shape)
                steps.append((x + self.center_r - center_size, y + self.center_c - center_size))
                (x, y) = numpy.unravel_index(domains[i], lhi_center.shape)
                steps.append((x + self.center_r - center_size, y + self.center_c - center_size))
        else:
            if (index % 2) == 0:
                (x, y) = numpy.unravel_index(pinwheels[int(index / 2)], lhi_center.shape)
                steps = [(x + self.center_r - center_size, y + self.center_c - center_size)]
            else:
                (x, y) = numpy.unravel_index(domains[int(index / 2)], lhi_center.shape)
                steps = [(x + self.center_r - center_size, y + self.center_c - center_size)]
    else:
        # split the LHI range into ten equal bins and pick units uniformly across them
        bins = []
        for i in xrange(0, 10):
            a = numpy.ravel(lhi_center) >= i * 0.1
            b = numpy.ravel(lhi_center) < (i + 1) * 0.1
            bins.append(r.permutation(numpy.nonzero(numpy.multiply(a, b))[0]))
        (x, y) = numpy.unravel_index(bins[index % 10][int(index / 10)], lhi_center.shape)
        steps = [(x + self.center_r - center_size, y + self.center_c - center_size)]

    #places = r.permutation(numpy.arange(0,len(numpy.ravel(lhi_center)),1))
    #(x,y) = numpy.unravel_index(places[index],lhi_center.shape)
    #steps.append((x+self.center_r-center_size,y+self.center_c-center_size))

    self.analyse(steps, ns=__main__.__dict__.get('number_sizes', 10))

def save_snapshot(snapshot_name=None):
    """
    Save a snapshot of the network's current state.

    The snapshot is saved as a gzip-compressed Python binary pickle.

    As this function uses Python's 'pickle' module, it is subject to
    the same limitations (see the pickle module's documentation) -
    with the notable exception of class attributes. Python does not
    pickle class attributes, but this function stores class
    attributes of any Parameterized class that is declared within the
    topo package. See the param.parameterized.PicklableClassAttributes
    class for more information.
    """
    if not snapshot_name:
        snapshot_name = topo.sim.basename() + ".typ"

    # For now we just search topo, but could do same for other packages.

    # CEBALERT: shouldn't it be topo and param? I guess we already get
    # many classes defined in param because they are imported into
    # topo at some point anyway.
    topoPOclassattrs = PicklableClassAttributes(topo, exclusions=('plotting', 'tests', 'tkgui'),
                                                startup_commands=topo.sim.startup_commands)
    paramPOclassattrs = PicklableClassAttributes(param)
    imagenPOclassattrs = PicklableClassAttributes(imagen)
    numbergenPOclassattrs = PicklableClassAttributes(numbergen)

    from topo.misc.commandline import global_params

    topo.sim.RELEASE = topo.release
    topo.sim.VERSION = topo.version

    to_save = (UnpickleEnvironmentCreator(topo.release, topo.version),
               PickleMain(),
               global_params,
               topoPOclassattrs,
               paramPOclassattrs,
               imagenPOclassattrs,
               numbergenPOclassattrs,
               topo.sim)

    try:
        snapshot_file = gzip.open(normalize_path(snapshot_name), 'wb', compresslevel=5)
    except NameError:
        # gzip module unavailable; fall back to an uncompressed file
        snapshot_file = open(normalize_path(snapshot_name), 'wb')

    pickle.dump(to_save, snapshot_file, 2)
    snapshot_file.close()

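# Usage sketch (not part of the original source; the filename is
# illustrative): save the currently loaded simulation under an explicit
# name, resolved relative to param's output path.
def _example_save_snapshot():
    save_snapshot("lissom_trained.typ")  # writes a gzip-compressed pickle
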
def configure_launch(self, dval):
    """
    Determine the root directory from the launcher to configure the
    analysis object appropriately, save the final state and set up
    the launcher's exit callable.
    """
    (task_launcher, batch_analysis) = dval
    command_template = task_launcher.command_template

    if self.reduce_timestamp is not None:
        return self.reduce_directory(task_launcher)

    arg_specifier = task_launcher.arg_specifier
    analysis_arguments = batch_analysis.analysis_arguments
    specifier_keys = set(arg_specifier.varying_keys() + arg_specifier.constant_keys())
    if not (analysis_arguments <= specifier_keys):
        clashes = analysis_arguments - specifier_keys
        raise Exception("Analysis keys %s not set in the specifier" % list(clashes))

    command_template.analysis_arguments = list(analysis_arguments)
    root_directory = param.normalize_path(task_launcher.root_directory_name())
    batch_analysis.root_directory = root_directory
    task_launcher.script_path = batch_analysis.script_path
    batch_analysis.save()
    if self.skip_reduce or batch_analysis.map_reduces != []:
        task_launcher.reduction_fn = RunBatchAnalysis.reduce_batch(root_directory)
    return True

def generate(plotgroup_names):
    assert topo.sim.name == sim_name
    assert topo.sim['V1'].nominal_density == 8
    assert topo.sim.time() == 100

    for name in plotgroup_names:
        print "* Generating data for plotgroups['%s']" % name
        views = {}
        sheet = topo.sim['V1']
        _reset_views(sheet)
        plotgroups[name]._exec_pre_plot_hooks()
        sheets_views = views[sheet.name] = {}
        if hasattr(sheet, 'sheet_views'):
            sheets_views['sheet_views'] = sheet.sheet_views
        if hasattr(sheet, 'curve_dict'):
            sheets_views['curve_dict'] = sheet.curve_dict

        filename = normalize_path('tests/%s_t%s_%s.data' % (sim_name, topo.sim.timestr(),
                                                            name.replace(' ', '_')))
        print "Saving results to %s" % (filename)
        f = open(filename, 'wb')
        pickle.dump((topo.version, views), f)
        f.close()

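# Usage sketch (hypothetical; valid plotgroup names depend on the loaded
# model): regenerate reference data for two standard plotgroups.
def _example_generate():
    generate(['Activity', 'Orientation Preference'])
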
def initialize_clissom_data(name, **kw):
    out_dir = normalize_path("topo/tests/reference/%s_BaseN%s_BaseRN%s/"
                             % (name, kw['BaseN'], kw['BaseRN']))
    global filename_base
    filename_base = out_dir + "/%s." % name

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
        clissom_support_files = glob.glob("topo/tests/reference/support_clissom/*")
        for f in clissom_support_files:
            os.system("cp %s %s" % (f, out_dir))
        os.system("cp topo/tests/reference/%s.param %s" % (name, out_dir))

        cwd = os.getcwd()
        os.chdir(out_dir)
        os.system("gzip -dc lissom5.gz > lissom5")
        os.system("chmod +x %s" % "lissom5")
        _set_clissom_params(name, out_dir, **kw)
        print "------------------------------------------------------------"
        print "Generating c++ lissom results in %s" % out_dir
        c = _clissomcmd(name)
        print c
        os.system(c)
        print "------------------------------------------------------------"
        os.chdir(cwd)
    else:
        print "Skipping c++ lissom data generation: results already exist in %s" % out_dir

def _setup_launch(self):
    """
    Method to be used by all launchers that prepares the root
    directory and generates basic launch information for command
    templates to use. Prepends some information to the description,
    registers a timestamp and returns a dictionary of useful launch
    information constant across all tasks.
    """
    root_name = self.root_directory_name()
    self.root_directory = param.normalize_path(root_name)

    if not os.path.isdir(self.root_directory):
        os.makedirs(self.root_directory)
    metrics_dir = os.path.join(self.root_directory, 'metrics')
    if not os.path.isdir(metrics_dir) and self.arg_specifier.dynamic:
        os.makedirs(metrics_dir)

    return {'root_directory':    self.root_directory,
            'timestamp':         self.timestamp,
            'varying_keys':      self.arg_specifier.varying_keys(),
            'constant_keys':     self.arg_specifier.constant_keys(),
            'constant_items':    self.arg_specifier.constant_items(),
            'batch_name':        self.batch_name,
            'batch_tag':         self.tag,
            'batch_description': self.description}

def __call__(self):
    """
    Calls the collector specified by the user in the run_batch
    context. Invoked as an analysis function by RunBatchCommand.
    """
    self.collector.interval_hook = topo.sim.run

    topo_time = topo.sim.time()
    filename = '%s%s_%s' % (self._info.batch_name,
                            ('[%s]' % self._info.batch_tag if self._info.batch_tag else ''),
                            topo_time)

    viewtree = Layout()
    viewtree = self.collector(viewtree, times=[topo_time])

    spec_metadata = [(key, self._info.specs[key])
                     for key in self.metadata if '.' not in key]
    path_metadata = [(key, viewtree.items.get(tuple(key.split('.')), float('nan')))
                     for key in self.metadata if '.' in key]

    Pickler.save(viewtree, param.normalize_path(filename),
                 key=dict(spec_metadata + path_metadata + [('time', topo_time)]))

def compare_with_and_without_snapshot_NoSnapshot(script="models/lissom.ty", look_at='V1',
                                                 cortex_density=8, lgn_density=4, retina_density=4,
                                                 dims=['or', 'od', 'dr', 'cr', 'dy', 'sf'],
                                                 dataset="Gaussian", run_for=10, break_at=5):
    data_filename = os.path.split(script)[1] + "_PICKLETEST"

    # we must execute in main because e.g. scheduled events are run in __main__
    # CEBALERT: should set global params
    __main__.__dict__['cortex_density'] = cortex_density
    __main__.__dict__['lgn_density'] = lgn_density
    __main__.__dict__['retina_density'] = retina_density
    __main__.__dict__['dims'] = dims
    __main__.__dict__['dataset'] = dataset
    execfile(script, __main__.__dict__)

    data = {}
    topo.sim.run(break_at)
    data[topo.sim.time()] = copy.deepcopy(topo.sim[look_at].activity)
    topo.sim.run(run_for - break_at)
    data[topo.sim.time()] = copy.deepcopy(topo.sim[look_at].activity)

    data['run_for'] = run_for
    data['break_at'] = break_at
    data['look_at'] = look_at
    data['cortex_density'] = cortex_density
    data['lgn_density'] = lgn_density
    data['retina_density'] = retina_density
    data['dims'] = dims
    data['dataset'] = dataset

    locn = normalize_path(os.path.join("tests", data_filename))
    print "Writing pickle to %s" % locn
    pickle.dump(data, open(locn, 'wb'), 2)

def __call__(self, times=None, **kwargs):
    if topo.sim.time() != times[-1]:
        return None

    for coidx, coord in enumerate(self.unit_list):
        prefix = str(coord) + "_"
        unit = topo.sim[self.sheet].sheet2matrixidx(coord[0], coord[1])
        topo.command.pylabplot.measure_orientation_contrast(
            preference_fn=DSF_MaxValue, curve_parameters=self.c_dict,
            num_phase=self.num_phases, sheet=self.sheet, coords=[coord],
            contrastcenter=self.center_contrast, num_orientation=self.num_orientation,
            sizecenter=self.sizecenter, sizesurround=self.sizesurround,
            thickness=self.thickness, measurement_prefix=prefix)

        for cidx, contrast in enumerate(self.surround_contrasts):
            or_keys = sorted(topo.sim[self.sheet].curve_dict[prefix + 'orientationsurround']
                             ['Contrastsurround = {c}%'.format(c=int(contrast))].keys(),
                             key=lambda x: float(x))
            for oidx, orientation in enumerate(or_keys):
                unit = topo.sim[self.sheet].sheet2matrixidx(coord[0], coord[1])
                # 3x3 neighborhood of matrix units around the measured unit
                units = [(unit[0] + x_offset, unit[1] + y_offset)
                         for x_offset in xrange(-1, 2) for y_offset in xrange(-1, 2)]
                activity = topo.sim[self.sheet].curve_dict[prefix + 'orientationsurround'] \
                    ['Contrastsurround = {c}%'.format(c=int(contrast))][orientation].view()[0]
                for uidx, unit in enumerate(units):
                    self.CSTC[oidx, uidx, cidx, coidx] = activity[unit[0]][unit[1]]

    if self.pickle:
        pkl_file = open(param.normalize_path('CSTC.pkl'), 'wb')
        pickle.dump((self.CSTC, (or_keys, self.unit_list, self.surround_contrasts,
                                 self.sheet, self.unit_list)), pkl_file)
        pkl_file.close()

def save_script_repr(script_name=None):
    """
    Save the current simulation as a Topographica script.

    Generates a script that, if run, would generate a simulation with
    the same architecture as the one currently in memory. This can be
    useful when defining networks in place, so that the same general
    configuration can be recreated later. It also helps when comparing
    two similar networks generated with different scripts, so that the
    corresponding items can be matched rigorously.

    Note that the result of this operation is usually just a starting
    point for further editing, because it will not usually be runnable
    as-is (for instance, some parameters may not have runnable
    representations). Even so, this is usually a good start.
    """
    if not script_name:
        script_name = topo.sim.basename() + "_script_repr.ty"

    header = ("# Generated by Topographica %s on %s\n\n"
              % (topo.release, time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())))
    script = header + topo.sim.script_repr()

    script_file = open(normalize_path(script_name), 'w')
    script_file.write(script)
    script_file.close()  # the original left the file handle open

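# Usage sketch (not part of the original source; the filename is
# illustrative): write the script representation of the current network
# to an explicit file name instead of the simulation basename.
def _example_save_script_repr():
    save_script_repr("my_network_script_repr.ty")
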
def __call__(self, times=None, **kwargs):
    # if analysis_group('b'):
    #     return None
    if self.pickle:
        or_pkl = open(param.normalize_path('ormap_{time}.pkl'.format(time=topo.sim.time())), 'wb')
        pkl_data = {}

    for f in self.frequencies:
        save_plotgroup("Orientation Preference", use_cached_results=False,
                       saver_params={'filename_suffix': '_{0}'.format(f)},
                       pre_plot_hooks=[measure_sine_pref.instance(
                           frequencies=[f], num_phase=self.num_phase,
                           num_orientation=self.num_orientation,
                           preference_fn=DSF_WeightedAverage(value_scale=(0., 1. / math.pi)))])
        if self.pickle:
            for sheet in self.sheets:
                im = topo.sim[sheet].sheet_views['OrientationPreference'].view()[0][0:-1, 0:-1]
                try:
                    polar_or = image.hue_to_polar(im)
                    pwdata = analysisJL.polarmap_contours(polar_or)
                    pws = analysisJL.identify_pinwheels(*pwdata)
                    pwbitsofinformation = type('pwdata', (),
                                               dict(zip(['recontours', 'imcontours',
                                                         'intersections', 'pinwheels'],
                                                        pwdata + (pws,))))
                    pw_results = analysisJL.pinwheel_analysis(im, len(pws), ignore_DC=False)
                    pkl_data['OR_Analysis_{freq}_{sheet}'.format(freq=f, sheet=sheet)] = pw_results
                except:
                    print "OR Pinwheel Analysis failed"
                or_sel = topo.sim[sheet].sheet_views['OrientationSelectivity'].view()[0]
                pkl_data['OrientationPreference_{freq}_{sheet}'.format(freq=f, sheet=sheet)] = im
                pkl_data['OrientationSelectivity_{freq}_{sheet}'.format(freq=f, sheet=sheet)] = or_sel

    if self.pickle:
        pickle.dump(pkl_data, or_pkl)
        or_pkl.close()  # the original never closed the pickle file

def _print_vc_info(filename):
    """Save the version control status of the current code to the specified file."""
    f = None
    try:
        import subprocess
        f = open(normalize_path(filename), 'w')
        f.write("Information about working copy used for batch run\n\n")
        f.write("topo.version=%s\n" % topo.version)
        f.flush()
        vctype, commands = _get_vc_commands()
        for cmd in commands:
            fullcmd = [vctype, cmd] if isinstance(cmd, str) else [vctype] + cmd

            # Note that we do not wait for the process below to finish
            # (by calling e.g. wait() on the Popen object). Although
            # this was probably done unintentionally, for a slow svn
            # connection, it's an advantage. But it does mean the
            # output of each command can appear in the file at any
            # time (i.e. the command outputs appear in the order of
            # finishing, rather than in the order of starting, making
            # it impossible to label the commands).
            subprocess.Popen(fullcmd, stdout=f, stderr=subprocess.STDOUT)
    except:
        print "Unable to retrieve version control information."
    finally:
        if f is not None:  # guard: open() itself may have failed
            f.close()

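# Usage sketch (not part of the original source; the filename is
# illustrative): record the working-copy state alongside a batch run.
def _example_print_vc_info():
    _print_vc_info("vc_info.txt")
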
def __call__(self, output_fn, init_time=0, final_time=None, **params):
    p = ParamOverrides(self, params)

    if final_time is None:
        final_time = topo.sim.time()

    attrs = p.attrib_names if len(p.attrib_names) > 0 else output_fn.attrib_names
    for a in attrs:
        pylab.figure(figsize=(6, 4))
        isint = pylab.isinteractive()
        pylab.ioff()
        pylab.grid(True)
        ylabel = p.ylabel
        pylab.ylabel(a + " " + ylabel)
        pylab.xlabel('Iteration Number')

        coords = p.units if len(p.units) > 0 else output_fn.units
        for coord in coords:
            y_data = [y for (x, y) in output_fn.values[a][coord]]
            x_data = [x for (x, y) in output_fn.values[a][coord]]
            if p.raw == True:
                plot_data = zip(x_data, y_data)
                pylab.save(normalize_path(p.filename + a + '(%.2f, %.2f)' % (coord[0], coord[1])),
                           plot_data, fmt='%.6f', delimiter=',')

            pylab.plot(x_data, y_data, label='Unit (%.2f, %.2f)' % (coord[0], coord[1]))
            (ymin, ymax) = p.ybounds
            pylab.axis(xmin=init_time, xmax=final_time, ymin=ymin, ymax=ymax)

        if isint:
            pylab.ion()
        pylab.legend(loc=0)
        p.title = topo.sim.name + ': ' + a
        p.filename_suffix = a
        self._generate_figure(p)

def _analyse_push_pull_connectivity1(sheet_name, proj_name):
    """
    It assumes orientation preference was already measured.
    """
    projection = topo.sim[sheet_name].projections()[proj_name]
    or_pref = topo.sim[projection.src.name].sheet_views['OrientationPreference'].view()[0] * numpy.pi
    phase_pref = topo.sim[projection.src.name].sheet_views['PhasePreference'].view()[0] * numpy.pi * 2
    or_pref_target = topo.sim[projection.dest.name].sheet_views['OrientationPreference'].view()[0] * numpy.pi
    phase_pref_target = topo.sim[projection.dest.name].sheet_views['PhasePreference'].view()[0] * 2 * numpy.pi

    app = []
    app_or = []
    av1 = []
    av2 = []
    for (i, cf) in enumerate(projection.cfs.flatten()):
        this_or = or_pref.flatten()[i]
        this_phase = phase_pref.flatten()[i]
        ors = cf.input_sheet_slice.submatrix(or_pref).flatten()
        phases = cf.input_sheet_slice.submatrix(phase_pref).flatten()
        weights = numpy.multiply(cf.weights, cf.mask)

        # Let's compute the mean orientation preference of connecting neurons
        z = circ_mean(numpy.array([ors]), weights=numpy.array([weights.flatten()]),
                      axis=1, low=0.0, high=numpy.pi, normalize=False)
        app_or.append(z[0])

        # First let's compute the average phase of neurons which are within
        # 30 degrees of the given neuron
        within_30_degrees = numpy.nonzero((circular_dist(ors, this_or, numpy.pi) < (numpy.pi / 6.0)) * 1.0)[0]
        #if len(within_30_degrees) != 0:
        z = circ_mean(numpy.array([phases]), weights=numpy.array([weights.flatten()]),
                      axis=1, low=0.0, high=numpy.pi * 2, normalize=False)
        app.append(z[0])
        #else:
        #    app.append(0.0)

        # Now let's compare the average connection strength to neurons oriented
        # within 30 degrees and having the same phase (within 60 degrees), with
        # the average connection strength to neurons more than 30 degrees off
        # in orientation
        outside_30_degrees = numpy.nonzero(circular_dist(ors, this_or, numpy.pi) > numpy.pi / 6.0)[0]
        within_30_degrees_and_same_phase = numpy.nonzero(
            numpy.multiply(circular_dist(ors, this_or, numpy.pi) < numpy.pi / 6.0,
                           circular_dist(phases, this_phase, 2 * numpy.pi) < numpy.pi / 3.0))[0]

        if len(outside_30_degrees) != 0:
            av1.append(numpy.mean(weights.flatten()[outside_30_degrees]) / max(len(outside_30_degrees), 1.0))
        else:
            av1.append(0.0)
        if len(within_30_degrees_and_same_phase) != 0:
            av2.append(numpy.mean(weights.flatten()[within_30_degrees_and_same_phase]) / max(len(within_30_degrees_and_same_phase), 1.0))
        else:
            av2.append(0.0)

    import pylab
    pylab.figure()
    pylab.subplot(3, 1, 1)
    pylab.plot(numpy.array(app_or), or_pref_target.flatten(), 'ro')
    pylab.title(proj_name)
    pylab.subplot(3, 1, 2)
    pylab.plot(numpy.array(app), phase_pref_target.flatten(), 'ro')
    pylab.subplot(3, 1, 3)
    pylab.bar(numpy.arange(2), (numpy.mean(av1), numpy.mean(av2)), 0.35, color='b')
    pylab.savefig(normalize_path('PPconnectivity: ' + proj_name + str(topo.sim.time()) + '.png'))

def save_to_disk(self, **params):
    imgs = numpy.array([p.bitmap.image for p in self.plotgroup.plots],
                       dtype=object).reshape(self.plotgroup.proj_plotting_shape)
    img = make_contact_sheet(imgs, (3, 3, 3, 3), 3)
    img.save(normalize_path(self.filename(self.plotgroup.sheet.name + "_" +
                                          self.plotgroup.projection.name, **params)))

def __init__(self, sheet_name="V1Complex", prefix=None):
    if prefix:
        f = open(prefix + 'data_dict.pickle', 'rb')
        (self.OR, self.OS, self.MR, self.data_dict) = pickle.load(f)
        f.close()

        if True:
            self.lhi = compute_local_homogeneity_index(self.OR * pi, __main__.__dict__.get('LHI', 2.0))
            f = open(normalize_path('lhi' + str(__main__.__dict__.get('LHI', 2.0)) + '.pickle'), 'wb')
            pickle.dump(self.lhi, f)
            f.close()
        else:
            f = open(normalize_path('lhi' + str(__main__.__dict__.get('LHI', 2.0)) + '.pickle'), 'rb')
            self.lhi = pickle.load(f)
    else:
        import topo
        self.sheet_name = sheet_name
        self.sheet = topo.sim[sheet_name]

        # Center mask to matrixidx center
        self.center_r, self.center_c = self.sheet.sheet2matrixidx(0, 0)
        self.center_x, self.center_y = self.sheet.matrixidx2sheet(self.center_r, self.center_c)

        from topo.analysis.featureresponses import PatternPresenter
        PatternPresenter.duration = __main__.__dict__.get('duration', 4.0)  #!

        import topo.command.pylabplot
        reload(topo.command.pylabplot)

        FeatureCurveCommand.curve_parameters = [{"contrast": self.low_contrast},
                                                {"contrast": self.high_contrast}]
        FeatureCurveCommand.display = True
        FeatureCurveCommand.sheet = topo.sim[sheet_name]
        SinusoidalMeasureResponseCommand.num_phase = 8
        SinusoidalMeasureResponseCommand.frequencies = [__main__.__dict__.get('FREQ', 2.4)]
        SinusoidalMeasureResponseCommand.scale = self.high_contrast / 100.0
        MeasureResponseCommand.scale = self.high_contrast / 100.0
        FeatureCurveCommand.num_orientation = 12
        FeatureResponses.repetitions = __main__.__dict__.get('repetitions', 1)

        #topo.command.pylabplot.measure_or_tuning_fullfield.instance(sheet=topo.sim["V1Complex"])()
        FeatureCurveCommand.curve_parameters = [{"contrast": self.low_contrast},
                                                {"contrast": self.high_contrast}]

        self.OR = topo.sim["V1Complex"].sheet_views['OrientationPreference'].view()[0]
        self.OS = topo.sim["V1Complex"].sheet_views['OrientationSelectivity'].view()[0]
        self.MR = topo.sim["V1Complex"].sheet_views['ComplexSelectivity'].view()[0]

def release_fig(filename=None):
    import pylab
    pylab.show._needmain = False
    if filename is not None:
        fullname = filename + str(topo.sim.time()) + ".png"
        pylab.savefig(normalize_path(fullname))
    else:
        pylab.show()

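# Usage sketch (not part of the original source): build a figure and hand
# it to release_fig, which saves it with the simulation time appended when
# a filename is given, and shows it interactively otherwise.
def _example_release_fig():
    import pylab
    pylab.figure()
    pylab.plot([0, 1], [0, 1])
    release_fig("DiagonalTest")  # saved as e.g. DiagonalTest<time>.png
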
def plot_ASTC(self):
    f = open(param.normalize_path('iDoG_Fit_%s.txt' % self.sheet), 'w')
    for uidx, unit in enumerate(self.unit_list):
        for cidx, contrast in enumerate(self.contrasts):
            fig = plt.figure()
            plt.title('{sheet}{unit} Contrast:{contrast}% Time:{time} Area Summation Curve'.format(
                sheet=self.sheet, unit=topo.sim[self.sheet].sheet2matrixidx(unit[0], unit[1]),
                contrast=contrast, time=str(float(topo.sim.time()))))
            plt.plot(self.sizes, self.ASTC[:, cidx, uidx], label='Measured Response')
            if self.size_fitted_curve is not None:
                plt.plot(self.sizes, self.size_fitted_curve[:, cidx, uidx],
                         label='Fitted Response - SI: {0:.3f} o_e: {1:.3f} vdeg, o_i: {2:.3f} vdeg'.format(
                             self.size_fitv[2, cidx, uidx], self.size_fitv[0, cidx, uidx],
                             self.size_fitv[1, cidx, uidx]))
            plt.xlabel('Stimulus Diameter (in deg of visual angle)')
            plt.ylabel('Activity')
            lgd = plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1))
            path = param.normalize_path('{sheet}{unit}{contrast}__{time:09.2f}_Size_TC.pdf'.format(
                sheet=self.sheet, unit=str(unit), time=float(topo.sim.time()), contrast=contrast))
            plt.savefig(path, bbox_extra_artists=[lgd], bbox_inches='tight', format='pdf')
            plt.close(fig)
            f.write('{unit},{contrast}: {fit};{estim}\n'.format(
                sheet=self.sheet, unit=str(unit), contrast=contrast,
                fit=str(self.size_fitv[:, cidx, uidx]), estim=self.size_est[:, cidx, uidx]))
    f.close()

def save_script_repr(self):
    script_name = asksaveasfilename(filetypes=SCRIPT_FILETYPES,
                                    initialdir=normalize_path(),
                                    initialfile=topo.sim.basename() + "_script_repr.ty")
    if script_name:
        topo.command.save_script_repr(script_name)
        self.messageBar.response('Script saved to ' + script_name)

def __call__(self, times=None, **kwargs):
    if topo.sim.time() != times[-1]:
        return None

    measure_rfs(sampling_area=self.unit_slice, input_sheet=topo.sim[self.input_sheet],
                sampling_interval=self.sampling_rate,
                pattern_presenter=CoordinatedPatternGenerator(RawRectangle()))

    # Get retina information
    retina = topo.sim[self.input_sheet]
    sheet = topo.sim[self.measurement_sheet]
    left, bottom, right, top = sheet.nominal_bounds.lbrt()
    sheet_density = float(sheet.nominal_density)
    x_units, y_units = sheet.shape
    unit_size = 1.0 / sheet_density
    half_unit_size = (unit_size / 2.0)  # saves repeated calculation.

    # Sample V1
    v1_units = 4
    # note: the original computed np.ceil(y_units-v1_units)/2 for the lower
    # bound, i.e. dividing outside the parentheses, unlike the floor term;
    # the symmetric form below is assumed to be the intent
    sampling_range = (top - half_unit_size - (unit_size * np.floor((y_units - v1_units) / 2)),
                      bottom + half_unit_size + (unit_size * np.ceil((y_units - v1_units) / 2.0)))

    # Create sampling grid
    spacing = np.linspace(sampling_range[0], sampling_range[1], sheet.density)
    X, Y = np.meshgrid(spacing, spacing)
    sheet_coords = zip(X.flatten(), Y.flatten())
    coords = list(set([sheet.sheet2matrixidx(x, y) for (x, y) in sheet_coords]))

    # Filter and sort RFs
    keys = [key for key in retina.sheet_views.keys() if key[0] == 'RFs']
    filtered_keys = sorted(list(set([key for key in keys
                                     if sheet.sheet2matrixidx(key[2], key[3]) in coords])),
                           key=operator.itemgetter(2, 3))
    rfs = np.dstack([topo.sim[self.input_sheet].sheet_views[key].view()[0] for key in filtered_keys])
    coords = np.vstack([(key[2], key[3]) for key in filtered_keys])
    plt.imsave(param.normalize_path('central_rf.png'), rfs[:, :, int(len(rfs[0, 0, :]) / 2)])

    # Pickle and save
    if self.pickle:
        pkl_file = open(param.normalize_path('RFs.pkl'), 'wb')
        pickle.dump((coords, rfs), pkl_file)
        pkl_file.close()

    try:
        img = rf_image(rfs, coords, norm='All')
        img.save(param.normalize_path('V1_RFs_{time}.png'.format(time=topo.sim.time())))
    except:
        pass

def __save_to_png(self):
    plot = self._right_click_info['plot']
    filename = self.plotgroup.filesaver.filename(plot.label(), file_format="png")
    PNG_FILETYPES = [('PNG images', '*.png'), ('All files', '*')]
    snapshot_name = asksaveasfilename(filetypes=PNG_FILETYPES,
                                      initialdir=normalize_path(),
                                      initialfile=filename)
    if snapshot_name:
        plot.bitmap.image.save(snapshot_name)

def plot_SFTC(self):
    """Plot SFTC and fitted DoG curve."""
    f = open(param.normalize_path('DoG_Fit_%s.txt' % self.sheet), 'w')
    for uidx, unit in enumerate(self.unit_list):
        for cidx, contrast in enumerate(self.contrasts):
            fig = plt.figure()
            plt.title('{sheet}{unit} Contrast:{contrast}% Time:{time} SF Tuning Curve'.format(
                sheet=self.sheet, unit=topo.sim[self.sheet].sheet2matrixidx(unit[0], unit[1]),
                contrast=contrast, time=topo.sim.time()))
            plt.plot(self.frequencies, self.SFTC[:, cidx, uidx], label='Measured Response')
            plt.plot(self.frequencies, self.sf_fitted_curve[:, cidx, uidx],
                     label='Fitted Response - SI: {0:.3f} o_e: {1:.3f} vdeg, o_i: {2:.3f} vdeg'.format(
                         self.sf_fitv[2, cidx, uidx], self.sf_fitv[0, cidx, uidx],
                         self.sf_fitv[1, cidx, uidx]))
            plt.xlabel('Spatial Frequency (cyc/deg)')
            plt.ylabel('Activity')
            lgd = plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1))
            path = param.normalize_path('{sheet}{unit}{contrast}__{time:09.2f}_SFTC.pdf'.format(
                sheet=self.sheet, unit=str(unit), time=float(topo.sim.time()), contrast=contrast))
            plt.savefig(path, bbox_extra_artists=[lgd], bbox_inches='tight', format='pdf')
            plt.close(fig)
            f.write('{sheet}{unit}{contrast}: {fit}\n'.format(
                sheet=self.sheet, unit=str(unit), contrast=contrast,
                fit=str(self.sf_fitv[:, cidx, uidx])))
    f.close()

def __call__(self, times=None):
    if topo.sim.time() != times[-1]:
        return None

    if self.sf_measure:
        self.frequencies = np.linspace(0.0, self.sf_max, self.sf_steps)
        self.measure_SFTC()
        try:
            self.fit_DoG()
        except:
            pass
        #self.plot_SFTC()
        if self.pickle:
            pkl_file = open(param.normalize_path('SFTC_%s.pkl' % self.sheet), 'wb')
            pickle.dump((self.SFTC, self.frequencies, self.contrasts, self.unit_list), pkl_file)
            pkl_file.close()

    if self.size_measure:
        self.sizes = np.linspace(0.0, self.size_max, self.size_steps)
        # use each unit's preferred SF if measured, otherwise the fallback value
        self.frequency = [0] * len(self.unit_list)
        for uidx, unit in enumerate(self.unit_list):
            if self.sf_measure:
                self.frequency[uidx] = self.frequencies[self.SFTC[:, -1, uidx].argmax()]
            else:
                self.frequency[uidx] = self.sf_fallback
        self.measure_ASTC()
        if self.pickle:
            pkl_file = open(param.normalize_path('ASTC_%s.pkl' % self.sheet), 'wb')
            pickle.dump((self.ASTC, (self.sizes, self.contrasts, self.unit_list)), pkl_file)
            pkl_file.close()
        try:
            self.fit_iDoG()
        except:
            pass

def __save_to_postscript(self):
    plot = self._right_click_info['plot']
    canvas = self._right_click_info['event'].widget
    filename = self.plotgroup.filesaver.filename(plot.label(), file_format="eps")
    POSTSCRIPT_FILETYPES = [('Encapsulated PostScript images', '*.eps'),
                            ('PostScript images', '*.ps'), ('All files', '*')]
    snapshot_name = asksaveasfilename(filetypes=POSTSCRIPT_FILETYPES,
                                      initialdir=normalize_path(),
                                      initialfile=filename)
    if snapshot_name:
        canvas.postscript(file=snapshot_name)

def activity_stats(times=None, **kwargs):
    if analysis_group('a'):
        return None
    sheets = ['LGNOff', 'LGNOn', 'V1Exc', 'V1Inh']
    f = open(param.normalize_path('mean_activity.txt'), 'a')
    for sheet in sheets:
        mean_activity = np.mean(topo.sim[sheet].activity)
        max_activity = np.max(topo.sim[sheet].activity)
        f.write('{sheet},{time}: Mean {act:.3f} Max {max_act:.3f}\n'.format(
            sheet=sheet, time=topo.sim.time(), act=mean_activity, max_act=max_activity))
    f.close()

def _setup_streams_path(self):
    streams_path = os.path.join(param.normalize_path(), self.root_directory, "streams")
    try:
        os.makedirs(streams_path)
    except:
        pass
    # Waiting till these directories exist (otherwise potential qstat error)
    while not os.path.isdir(streams_path):
        pass
    return streams_path

def analyse_connectivity(sheet_name, proj_name, lhi):
    or_bins = numpy.linspace(0, numpy.pi / 2, 30)
    projection = topo.sim[sheet_name].projections()[proj_name]
    or_pref = topo.sim[projection.src.name].sheet_views['OrientationPreference'].view()[0] * numpy.pi
    or_pref_target = topo.sim[projection.dest.name].sheet_views['OrientationPreference'].view()[0] * numpy.pi

    domain_orientation_connection_strength = numpy.zeros((30, 1))
    pinwheel_orientation_connection_strength = numpy.zeros((30, 1))

    pinwheels = numpy.where(lhi.flatten() < 0.1)[0]
    domains = numpy.where(lhi.flatten() > 0.9)[0]

    for i in pinwheels:
        cf = projection.cfs.flatten()[i]
        this_or = or_pref_target.flatten()[i]
        ors = cf.input_sheet_slice.submatrix(or_pref).flatten()
        weights = numpy.multiply(cf.weights, cf.mask).flatten()
        for j, k in enumerate(numpy.digitize(circular_dist(ors, this_or, numpy.pi), or_bins)):
            #print k  # leftover debug output
            pinwheel_orientation_connection_strength[k - 1] += weights[j]

    for i in domains:
        cf = projection.cfs.flatten()[i]
        this_or = or_pref_target.flatten()[i]
        ors = cf.input_sheet_slice.submatrix(or_pref).flatten()
        weights = numpy.multiply(cf.weights, cf.mask).flatten()
        for j, k in enumerate(numpy.digitize(circular_dist(ors, this_or, numpy.pi), or_bins)):
            domain_orientation_connection_strength[k - 1] += weights[j]

    import pylab
    pylab.figure()
    pylab.subplot(2, 1, 1)
    pylab.hold('on')
    pylab.plot(numpy.linspace(0, numpy.pi / 2, 30, endpoint=False) + numpy.pi / 2 / 60,
               pinwheel_orientation_connection_strength, 'k', linewidth=2.0)
    pylab.plot(numpy.linspace(0, numpy.pi / 2, 30, endpoint=False) + numpy.pi / 2 / 60,
               pinwheel_orientation_connection_strength, 'ko')
    pylab.title(proj_name, fontsize=20)
    pylab.autoscale(tight=True)
    pylab.title('pinwheels')

    pylab.subplot(2, 1, 2)
    pylab.hold('on')
    pylab.plot(numpy.linspace(0, numpy.pi / 2, 30, endpoint=False) + numpy.pi / 2 / 60,
               domain_orientation_connection_strength, 'k', linewidth=2.0)
    pylab.plot(numpy.linspace(0, numpy.pi / 2, 30, endpoint=False) + numpy.pi / 2 / 60,
               domain_orientation_connection_strength, 'ko')
    pylab.title(proj_name, fontsize=20)
    pylab.autoscale(tight=True)
    pylab.title('domains')

    from param import normalize_path
    pylab.savefig(normalize_path('PPconnectivity: ' + proj_name + str(topo.sim.time()) + '.png'))

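# Usage sketch (hypothetical sheet/projection names; assumes an LHI map of
# the same shape as the target sheet, e.g. from
# compute_local_homogeneity_index): histogram connection strength against
# orientation difference, split into pinwheel vs. domain target units.
def _example_analyse_connectivity(lhi):
    analyse_connectivity("V1Complex", "LateralExcitatory", lhi)
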
def _save_parameters(p, filename):
    from topo.misc.commandline import global_params

    g = {'global_params_specified': p,
         'global_params_all': dict(global_params.get_param_values())}

    for d in g.values():
        if 'name' in d:
            del d['name']
        if 'print_level' in d:
            del d['print_level']

    pickle.dump(g, open(normalize_path(filename), 'w'))

def run_script(self):
    """
    Dialog to run a user-selected script.

    The script is exec'd in __main__.__dict__ (i.e. as if it were
    specified on the commandline).
    """
    script = askopenfilename(initialdir=normalize_path(), filetypes=SCRIPT_FILETYPES)
    if script in ('', (), None):
        # (representing the various ways no script was selected in the dialog)
        self.messageBar.response('Run canceled')
    else:
        execfile(script, __main__.__dict__)
        self.messageBar.response('Ran ' + script)
        sim_name_from_filename(script)
        self.title(topo.sim.name)

def reduce_directory(self, task_launcher):
    task_launcher.timestamp = self.reduce_timestamp
    root_directory = param.normalize_path(task_launcher.root_directory_name())
    print "Running reduce in directory %s" % os.path.basename(root_directory)

    log_name = "%s.log" % task_launcher.batch_name
    log_path = os.path.join(root_directory, log_name)
    assert os.path.exists(log_path), 'Cannot find log file %s' % log_name

    with open(log_path, 'r') as log:
        splits = (line.split() for line in log)
        spec_log = [(int(split[0]), json.loads(" ".join(split[1:]))) for split in splits]

    task_launcher.reduction_fn(spec_log, root_directory)
    return False

def unit_bitpattern(times=None, **kwargs):
    if analysis_group('b'):
        return None
    sheets = [('V1Exc', 'LGNOff', 'LGNOffAfferent'), ('V1Exc', 'LGNOn', 'LGNOnAfferent')]
    units = [(0, 0)]
    f = open(param.normalize_path('mean_bitpattern.txt'), 'a')
    for sidx, sheet in enumerate(sheets):
        for uidx, unit in enumerate(units):
            coords = topo.sim[sheet[0]].sheet2matrixidx(unit[0], unit[1])
            shape = topo.sim[sheet[0]].projections()[sheet[2]].cfs[coords[0]][coords[1]].weights.shape
            coords = topo.sim[sheet[1]].sheet2matrixidx(unit[0], unit[1])
            # cut out the CF-shaped patch of input activity centered on the unit
            if shape[0] % 2:
                activity = topo.sim[sheet[1]].activity[
                    coords[0] - np.ceil(shape[0] / 2):coords[0] + np.floor(shape[0] / 2),
                    coords[1] - np.ceil(shape[1] / 2):coords[1] + np.floor(shape[1] / 2)]
            else:
                activity = topo.sim[sheet[1]].activity[
                    coords[0] - np.floor(shape[0] / 2):coords[0] + np.floor(shape[0] / 2),
                    coords[1] - np.floor(shape[1] / 2):coords[1] + np.floor(shape[1] / 2)]
            path = param.normalize_path('{unit}__{time:09.2f}_{sheet}BitPattern.png'.format(
                sheet=sheet[2], unit=str(unit), time=float(topo.sim.time())))
            plt.imsave(path, activity, cmap=plt.cm.gray)
            mean_activity = np.mean(activity)
            f.write('{sheet},{unit},{time}: {act:.3f}\n'.format(
                sheet=sheet[2], unit=unit, time=topo.sim.time(), act=mean_activity))
    f.close()

def extract_log(log_path, dict_type=dict):
    """
    Parses the log file generated by a launcher and returns a
    dictionary with tid keys and specification values.

    Ordering can be maintained by setting dict_type to the
    appropriate constructor. Keys are converted from unicode to
    strings for kwarg use.
    """
    with open(param.normalize_path(log_path), 'r') as log:
        splits = (line.split() for line in log)
        uzipped = ((int(split[0]), json.loads(" ".join(split[1:]))) for split in splits)
        szipped = [(i, dict((str(k), v) for (k, v) in d.items())) for (i, d) in uzipped]
    return dict_type(szipped)

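# Usage sketch (the log filename is illustrative): preserve the tid
# ordering of the launcher log by passing an order-preserving dict
# constructor.
def _example_extract_log():
    from collections import OrderedDict
    specs = extract_log('batch_run.log', dict_type=OrderedDict)
    for tid, spec in specs.items():
        print tid, spec
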
def resume_launch(cls):
    """
    Resumes the execution of the launcher if the environment contains
    LANCET_ANALYSIS_DIR. This information allows the launcher.pickle
    file to be unpickled to resume the launch.
    """
    if "LANCET_ANALYSIS_DIR" not in os.environ:
        return False

    root_path = param.normalize_path(os.environ["LANCET_ANALYSIS_DIR"])
    del os.environ["LANCET_ANALYSIS_DIR"]
    pickle_path = os.path.join(root_path, 'launcher.pickle')
    launcher = pickle.load(open(pickle_path, 'rb'))
    launcher.collate_and_launch()
    return True

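# Usage sketch: resume_launch above takes cls, i.e. it is meant to be
# bound as a classmethod. 'BatchAnalysis' is a hypothetical stand-in for
# the class it actually lives on, and the directory name is illustrative;
# it assumes a launcher.pickle was written there by an earlier launch.
def _example_resume_launch():
    os.environ["LANCET_ANALYSIS_DIR"] = "Output/2013-01-01_my_batch"
    resumed = BatchAnalysis.resume_launch()
    print "Resumed previous launch:", resumed
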
def load_snapshot(self):
    """
    Dialog to load a user-selected snapshot (see
    topo.command.load_snapshot()).
    """
    snapshot_name = askopenfilename(initialdir=normalize_path(), filetypes=SAVED_FILETYPES)
    if snapshot_name in ('', (), None):
        self.messageBar.response('No snapshot loaded.')
    else:
        self.messageBar.dynamicinfo('Loading snapshot (may take some time)...')
        self.update_idletasks()
        topo.command.load_snapshot(snapshot_name)
        self.messageBar.response('Loaded snapshot ' + snapshot_name)
        self.title(topo.sim.name)
        self.auto_refresh()

def _generate_figure(self, p):
    """
    Helper function to display a figure on screen or save to a file.

    p should be a ParamOverrides instance containing the current
    set of parameters.
    """
    pylab.show._needmain = False
    if p.filename is not None:
        # JABALERT: need to reformat this as for other plots
        fullname = p.filename + p.filename_suffix + str(topo.sim.time()) + "." + p.file_format
        pylab.savefig(normalize_path(fullname), dpi=p.file_dpi)
    else:
        self._set_windowtitle(p.title)
        pylab.show()

def save(self):
    """Save the movie frames."""
    filename_pat = self.name.join(self.filename_fmt.split('%n'))
    filename_pat = self.filename_time_fmt.join(filename_pat.split('%t'))
    filename_pat = self.filetype.join(filename_pat.split('%T'))
    filename_pat = normalize_path(filename_pat, prefix=self.filename_prefix)

    dirname = os.path.dirname(filename_pat)
    if not os.access(dirname, os.F_OK):
        os.makedirs(dirname)

    self.verbose('Writing', len(self.frames), 'to files like "%s"' % filename_pat)
    for t, f in zip(self.frame_times, self.frames):
        filename = filename_pat % t
        self.debug("Writing frame", repr(filename))
        f.image.save(filename)

def analyse(self, steps=[], ns=10, offset_x=0, offset_y=0):
    print self.low_contrast
    print self.high_contrast
    #save_plotgroup("Orientation Preference and Complexity")
    #save_plotgroup("Position Preference")
    for (x, y) in steps:
        xindex = self.center_r + offset_x + x
        yindex = self.center_c + offset_y + y
        xcoor, ycoor = self.sheet.matrixidx2sheet(xindex, yindex)

        c = topo.command.pylabplot.measure_size_response.instance(
            sheet=self.sheet, num_phase=__main__.__dict__.get('NUM_PHASE', 8),
            num_sizes=ns, max_size=__main__.__dict__.get('MAX_SIZE', 1.5),
            coords=[(xcoor, ycoor)])
        c.duration = 4.0  #! (was 'duraton', a typo that silently set an unused attribute)
        c(coords=[(xcoor, ycoor)], frequencies=[__main__.__dict__.get('FREQ', 2.4)])

        self.data_dict[(xindex, yindex)] = {}
        self.data_dict[(xindex, yindex)]["ST"] = self.calculate_RF_sizes(xindex, yindex)
        self.plot_size_tunning(xindex, yindex)
        self.data_dict[(xindex, yindex)]["OCT"] = self.perform_orientation_contrast_analysis(
            self.data_dict[(xindex, yindex)]["ST"], xcoor, ycoor, xindex, yindex)
        self.plot_orientation_contrast_tuning(xindex, yindex)
        self.plot_orientation_contrast_tuning_abs(xindex, yindex)

        # checkpoint the accumulated measurements after each cell
        f = open(normalize_path("dict.dat"), 'wb')
        import pickle
        pickle.dump(self.data_dict, f)
        f.close()

    if True:
        self.lhi = compute_local_homogeneity_index(
            self.sheet.sheet_views['OrientationPreference'].view()[0] * pi, 2.0)
        # NOTE: 'prefix' is not defined in this function; it must exist in
        # the enclosing scope for this branch to work
        f = open(prefix + 'lhi2.0.pickle', 'wb')
        pickle.dump(self.lhi, f)
        f.close()
    else:
        f = open(prefix + 'lhi2.0.pickle', 'rb')
        self.lhi = pickle.load(f)

    pylab.figure()
    pylab.imshow(self.lhi)
    pylab.colorbar()
    release_fig("LHI")

    self.plot_map_feature_to_surround_modulation_feature_correlations(self.lhi, "Local Homogeneity Index")

def save_snapshot(self):
    """
    Dialog to save a snapshot (see topo.command.save_snapshot()).

    Adds the file extension .typ if not already present.
    """
    snapshot_name = asksaveasfilename(filetypes=SAVED_FILETYPES,
                                      initialdir=normalize_path(),
                                      initialfile=topo.sim.basename() + ".typ")
    if snapshot_name in ('', (), None):
        self.messageBar.response('No snapshot saved.')
    else:
        if not snapshot_name.endswith('.typ'):
            snapshot_name = snapshot_name + SAVED_FILE_EXTENSION
        self.messageBar.dynamicinfo('Saving snapshot (may take some time)...')
        self.update_idletasks()
        topo.command.save_snapshot(snapshot_name)
        self.messageBar.response('Snapshot saved to ' + snapshot_name)

def phase_preference_scatter_plot(sheet_name, diameter=0.39):
    r = numbergen.UniformRandom(seed=1023)
    preference_map = topo.sim[sheet_name].sheet_views['PhasePreference']
    offset_magnitude = 0.03
    datax = []
    datay = []
    (v, bb) = preference_map.view()

    # sample 66 random pairs of nearby units within the given diameter
    for z in zeros(66):
        x = (r() - 0.5) * 2 * diameter
        y = (r() - 0.5) * 2 * diameter
        rand = r()
        xoff = sin(rand * 2 * pi) * offset_magnitude
        yoff = cos(rand * 2 * pi) * offset_magnitude
        xx = max(min(x + xoff, diameter), -diameter)
        yy = max(min(y + yoff, diameter), -diameter)
        x = max(min(x, diameter), -diameter)
        y = max(min(y, diameter), -diameter)
        [xc1, yc1] = topo.sim[sheet_name].sheet2matrixidx(xx, yy)
        [xc2, yc2] = topo.sim[sheet_name].sheet2matrixidx(x, y)
        if (xc1 == xc2) & (yc1 == yc2):
            continue
        datax = datax + [v[xc1, yc1]]
        datay = datay + [v[xc2, yc2]]

    # convert phases to degrees and wrap the differences into a +/-180 band
    for i in range(0, len(datax)):
        datax[i] = datax[i] * 360
        datay[i] = datay[i] * 360
        if datay[i] > datax[i] + 180:
            datay[i] = datay[i] - 360
        if (datax[i] > 180) & (datay[i] > 180):
            datax[i] = datax[i] - 360
            datay[i] = datay[i] - 360
        if (datax[i] > 180) & (datay[i] < (datax[i] - 180)):
            datax[i] = datax[i] - 360
            #datay[i] = datay[i] - 360

    f = pylab.figure()
    ax = f.add_subplot(111, aspect='equal')
    pylab.plot(datax, datay, 'ro')
    pylab.plot([0, 360], [-180, 180])
    pylab.plot([-180, 180], [0, 360])
    pylab.plot([-180, -180], [360, 360])
    ax.axis([-180, 360, -180, 360])
    pylab.xticks([-180, 0, 180, 360], [-180, 0, 180, 360])
    pylab.yticks([-180, 0, 180, 360], [-180, 0, 180, 360])
    pylab.grid()
    pylab.savefig(normalize_path(str(topo.sim.timestr()) + sheet_name + "_scatter.png"))

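# Usage sketch (assumes phase preference has already been measured for the
# named sheet; "V1Complex" is an illustrative sheet name): produce the
# phase scatter plot for unit pairs within the default diameter.
def _example_phase_preference_scatter_plot():
    phase_preference_scatter_plot("V1Complex", diameter=0.39)
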