def __call__(self,data,colors=None,**params):
    """
    Plot a histogram of the given data in a small (4x2 inch) figure.

    Extra keyword arguments (beyond this object's Parameters) are
    passed through to the underlying hist() call.  If a list of
    colors is supplied, each bar's face color is set from it; when
    len(bars)!=len(colors), any extra bars keep their default color
    and any extra colors are ignored (zip truncates to the shorter).
    """
    p=ParamOverrides(self,params,allow_extra_keywords=True)
    pylab.figure(figsize=(4,2))
    n,bins,bars = pylab.hist(data,**(p.extra_keywords()))

    # Plain loop instead of a list comprehension executed only for
    # its side effects (the built list was discarded).
    if colors:
        for bar,color in zip(bars,colors):
            bar.set_fc(color)

    self._generate_figure(p)
def __call__(self, data, colors=None, **params):
    """
    Plot a histogram of the given data in a small (4x2 inch) figure
    and return the figure.

    Extra keyword arguments (beyond this object's Parameters) are
    passed through to the underlying hist() call.  If a list of
    colors is supplied, each bar's face color is set from it; when
    len(bars)!=len(colors), any extra bars keep their default color
    and any extra colors are ignored (zip truncates to the shorter).
    """
    p = ParamOverrides(self, params, allow_extra_keywords=True)
    fig = plt.figure(figsize=(4, 2))
    n, bins, bars = plt.hist(data, **(p.extra_keywords()))

    # Plain loop instead of a list comprehension executed only for
    # its side effects (the built list was discarded).
    if colors:
        for bar, color in zip(bars, colors):
            bar.set_fc(color)

    self._generate_figure(p)
    return fig
def __init__(self,inherent_features=None,**params):
    """
    If a dataset already and inherently includes certain features, a
    dictionary with feature-name:code-to-access-the-feature pairs
    should be supplied specifying how to select (e.g. from a set of
    images) the appropriate feature value.

    Any extra parameter values supplied here will be passed down to the
    feature_coordinators requested in features_to_vary.
    """
    p=ParamOverrides(self,params,allow_extra_keywords=True)
    super(PatternCoordinator, self).__init__(**p.param_keywords())

    self._feature_params = p.extra_keywords()
    # None sentinel instead of a mutable {} default argument, which
    # would be a single dict shared across all calls/instances.
    self._inherent_features = inherent_features if inherent_features is not None else {}

    # And also, this key must be in feature_coordinators because _inherent_features
    # can have additional features such as i to support multiple images

    # TFALERT: Once spatial frequency (sf) is added, this will
    # cause warnings, because all image datasets will have a
    # spatial frequency inherent feature, but mostly we just
    # ignore that by having only a single size of DoG, which
    # discards all but a narrow range of sf. So the dataset will
    # have sf inherently, but that won't be an error or even
    # worthy of a warning.
    if(len((set(self._inherent_features.keys()) - set(self.features_to_vary)) & set(self.feature_coordinators.keys()))):
        self.warning('Inherent feature present which is not requested in features')

    self._feature_coordinators_to_apply = []
    for feature, feature_coordinator in self.feature_coordinators.iteritems():
        if feature in self.features_to_vary and feature not in self._inherent_features:
            # if it is a list, append each list item individually
            if isinstance(feature_coordinator,list):
                for individual_feature_coordinator in feature_coordinator:
                    self._feature_coordinators_to_apply.append(individual_feature_coordinator)
            else:
                self._feature_coordinators_to_apply.append(feature_coordinator)
def __init__(self, inherent_features=None, **params):
    """
    If a dataset already and inherently includes certain features, a
    list with the inherent feature names should be supplied.

    Any extra parameter values supplied here will be passed down to the
    feature_coordinators requested in features_to_vary.
    """
    p = ParamOverrides(self, params, allow_extra_keywords=True)
    super(PatternCoordinator, self).__init__(**p.param_keywords())

    self._feature_params = p.extra_keywords()
    # None sentinel instead of a mutable [] default argument, which
    # would be a single list shared across all calls/instances.
    self._inherent_features = inherent_features if inherent_features is not None else []

    # TFALERT: Once spatial frequency (sf) is added, this will
    # cause warnings, because all image datasets will have a
    # spatial frequency inherent feature, but mostly we just
    # ignore that by having only a single size of DoG, which
    # discards all but a narrow range of sf. So the dataset will
    # have sf inherently, but that won't be an error or even
    # worthy of a warning.
    if (len(set(self._inherent_features) - set(self.features_to_vary))):
        self.warning(
            'Inherent feature present which is not requested in features')

    self._feature_coordinators_to_apply = []
    for feature, feature_coordinator in self.feature_coordinators.items():
        if feature in self.features_to_vary and feature not in self._inherent_features:
            # if it is a list, append each list item individually
            if isinstance(feature_coordinator, list):
                for individual_feature_coordinator in feature_coordinator:
                    self._feature_coordinators_to_apply.append(
                        individual_feature_coordinator)
            else:
                self._feature_coordinators_to_apply.append(
                    feature_coordinator)
def __init__(self,inherent_features=None,**params):
    """
    If a dataset already and inherently includes certain features, a
    list with the inherent feature names should be supplied.

    Any extra parameter values supplied here will be passed down to the
    feature_coordinators requested in features_to_vary.
    """
    p=ParamOverrides(self,params,allow_extra_keywords=True)
    super(PatternCoordinator, self).__init__(**p.param_keywords())

    self._feature_params = p.extra_keywords()
    # None sentinel instead of a mutable [] default argument, which
    # would be a single list shared across all calls/instances.
    self._inherent_features = inherent_features if inherent_features is not None else []

    # TFALERT: Once spatial frequency (sf) is added, this will
    # cause warnings, because all image datasets will have a
    # spatial frequency inherent feature, but mostly we just
    # ignore that by having only a single size of DoG, which
    # discards all but a narrow range of sf. So the dataset will
    # have sf inherently, but that won't be an error or even
    # worthy of a warning.
    if(len(set(self._inherent_features) - set(self.features_to_vary))):
        self.warning('Inherent feature present which is not requested in features')

    self._feature_coordinators_to_apply = []
    for feature, feature_coordinator in self.feature_coordinators.items():
        if feature in self.features_to_vary and feature not in self._inherent_features:
            # if it is a list, append each list item individually
            if isinstance(feature_coordinator,list):
                for individual_feature_coordinator in feature_coordinator:
                    self._feature_coordinators_to_apply.append(individual_feature_coordinator)
            else:
                self._feature_coordinators_to_apply.append(feature_coordinator)
def __call__(self,script_file,**params_to_override):
    """
    Run the given .ty simulation script as a batch job.

    Creates a timestamped output directory (under output_directory),
    redirects stdout into a .out file there (while still echoing to
    the console), executes the script in __main__'s namespace, runs
    the simulation up to each time in p['times'] (calling analysis_fn
    and saving the script_repr after each segment), and finally
    reports elapsed time plus error/warning counts.

    Extra keyword arguments (beyond this object's Parameters) are
    treated as global parameter values for the script and are also
    encoded into the output directory name.

    NOTE(review): heavy side effects -- rebinds sys.stdout and
    normalize_path.prefix, creates directories, moves files, and may
    call sys.exit(-1) if the output directory already exists.
    """
    p=ParamOverrides(self,params_to_override,allow_extra_keywords=True)
    import os
    import shutil

    # Construct simulation name, etc.
    scriptbase= re.sub('.ty$','',os.path.basename(script_file))
    prefix = ""
    # timestamp==(0,0) is the sentinel for "use the current time"
    if p.timestamp==(0,0):
        prefix += time.strftime(p.name_time_format)
    else:
        prefix += time.strftime(p.name_time_format, p.timestamp)

    prefix += "_" + scriptbase + "_" + p.tag
    simname = prefix

    # Construct parameter-value portion of filename; should do more filtering
    # CBENHANCEMENT: should provide chance for user to specify a
    # function (i.e. make this a function, and have a parameter to
    # allow the function to be overridden).
    # And sort by name by default? Skip ones that aren't different
    # from default, or at least put them at the end?
    prefix += p.dirname_params_filter(p.extra_keywords())

    # Set provided parameter values in main namespace
    from topo.misc.commandline import global_params
    global_params.set_in_context(**p.extra_keywords())

    # Create output directories
    if not os.path.isdir(normalize_path(p['output_directory'])):
        try:
            os.mkdir(normalize_path(p['output_directory']))
        except OSError:
            pass # Catches potential race condition (simultaneous run_batch runs)

    dirname = self._truncate(p,p.dirname_prefix+prefix)
    # normalize_path.prefix is module-global state: all subsequent
    # normalize_path() calls resolve relative to this run's directory.
    normalize_path.prefix = normalize_path(os.path.join(p['output_directory'],dirname))

    if os.path.isdir(normalize_path.prefix):
        print "Batch run: Warning -- directory already exists!"
        print "Run aborted; wait one minute before trying again, or else rename existing directory: \n" + \
              normalize_path.prefix

        sys.exit(-1)
    else:
        os.mkdir(normalize_path.prefix)
        print "Batch run output will be in " + normalize_path.prefix

    if p['vc_info']:
        _print_vc_info(simname+".diffs")

    # Provenance strings, printed into the .out file below
    hostinfo = "Host: " + " ".join(platform.uname())
    topographicalocation = "Topographica: " + os.path.abspath(sys.argv[0])
    topolocation = "topo package: " + os.path.abspath(topo.__file__)
    scriptlocation = "script: " + os.path.abspath(script_file)

    starttime=time.time()
    startnote = "Batch run started at %s." % time.strftime("%a %d %b %Y %H:%M:%S +0000",
                                                           time.gmtime())

    # store a re-runnable copy of the command used to start this batch run
    try:
        # pipes.quote is undocumented, so I'm not sure which
        # versions of python include it (I checked python 2.6 and
        # 2.7 on linux; they both have it).
        import pipes
        quotefn = pipes.quote
    except (ImportError,AttributeError):
        # command will need a human to insert quotes before it can be re-used
        quotefn = lambda x: x

    command_used_to_start = string.join([quotefn(arg) for arg in sys.argv])

    # CBENHANCEMENT: would be nice to separately write out a
    # runnable script that does everything necessary to
    # re-generate results (applies diffs etc).

    # Shadow stdout to a .out file in the output directory, so that
    # print statements will go to both the file and to stdout.
    batch_output = open(normalize_path(simname+".out"),'w')
    batch_output.write(command_used_to_start+"\n")
    sys.stdout = MultiFile(batch_output,sys.stdout)

    print
    print hostinfo
    print topographicalocation
    print topolocation
    print scriptlocation
    print
    print startnote

    from topo.misc.commandline import auto_import_commands
    auto_import_commands()

    # Ensure that saved state includes all parameter values
    from topo.command import save_script_repr
    param.parameterized.script_repr_suppress_defaults=False

    # Save a copy of the script file for reference
    shutil.copy2(script_file, normalize_path.prefix)
    shutil.move(normalize_path(scriptbase+".ty"),
                normalize_path(simname+".ty"))

    # Default case: times is just a number that scales a standard list of times
    times=p['times']
    if not isinstance(times,list):
        times=[t*times for t in [0,50,100,500,1000,2000,3000,4000,5000,10000]]

    # Run script in main
    error_count = 0
    initial_warning_count = param.parameterized.warning_count

    try:
        execfile(script_file,__main__.__dict__) #global_params.context
        global_params.check_for_unused_names()
        if p.save_global_params:
            _save_parameters(p.extra_keywords(),simname+".global_params.pickle")
        print_sizes()
        topo.sim.name=simname

        # Run each segment, doing the analysis and saving the script state each time
        for run_to in times:
            topo.sim.run(run_to - topo.sim.time())
            p['analysis_fn']()
            save_script_repr()
            elapsedtime=time.time()-starttime
            param.Parameterized(name="run_batch").message(
                "Elapsed real time %02d:%02d." % (int(elapsedtime/60),int(elapsedtime%60)))

        if p['snapshot']:
            save_snapshot()

    except:
        # Deliberate catch-all: a batch run must still report its
        # completion summary and restore stdout even if the script fails.
        error_count+=1
        import traceback
        traceback.print_exc(file=sys.stdout)
        sys.stderr.write("Warning -- Error detected: execution halted.\n")

    print "\nBatch run completed at %s." % time.strftime("%a %d %b %Y %H:%M:%S +0000",
                                                         time.gmtime())
    print "There were %d error(s) and %d warning(s)%s." % \
          (error_count,(param.parameterized.warning_count-initial_warning_count),
           ((" (plus %d warning(s) prior to entering run_batch)"%initial_warning_count
             if initial_warning_count>0 else "")))

    # restore stdout
    sys.stdout = sys.__stdout__
    batch_output.close()
def __call__(self, script_file, **params_to_override):
    """
    Run the given .ty simulation script as a batch job.

    Creates a timestamped output directory (under output_directory),
    redirects stdout into a .out file there (while still echoing to
    the console), executes the script in __main__'s namespace, runs
    the simulation up to each time in p['times'] (calling analysis_fn
    and saving the script_repr after each segment), and finally
    reports elapsed time plus error/warning counts.

    Extra keyword arguments (beyond this object's Parameters) are
    treated as global parameter values for the script and are also
    encoded into the output directory name.

    NOTE(review): heavy side effects -- rebinds sys.stdout and
    normalize_path.prefix, creates directories, moves files, and may
    call sys.exit(-1) if the output directory already exists.
    """
    p = ParamOverrides(self, params_to_override, allow_extra_keywords=True)
    import os
    import shutil

    # Construct simulation name, etc.
    scriptbase = re.sub('.ty$', '', os.path.basename(script_file))
    prefix = ""
    # timestamp==(0,0) is the sentinel for "use the current time"
    if p.timestamp == (0, 0):
        prefix += time.strftime(p.name_time_format)
    else:
        prefix += time.strftime(p.name_time_format, p.timestamp)

    prefix += "_" + scriptbase + "_" + p.tag
    simname = prefix

    # Construct parameter-value portion of filename; should do more filtering
    # CBENHANCEMENT: should provide chance for user to specify a
    # function (i.e. make this a function, and have a parameter to
    # allow the function to be overridden).
    # And sort by name by default? Skip ones that aren't different
    # from default, or at least put them at the end?
    prefix += p.dirname_params_filter(p.extra_keywords())

    # Set provided parameter values in main namespace
    from topo.misc.commandline import global_params
    global_params.set_in_context(**p.extra_keywords())

    # Create output directories
    if not os.path.isdir(normalize_path(p['output_directory'])):
        try:
            os.mkdir(normalize_path(p['output_directory']))
        except OSError:
            pass  # Catches potential race condition (simultaneous run_batch runs)

    dirname = self._truncate(p, p.dirname_prefix + prefix)
    # normalize_path.prefix is module-global state: all subsequent
    # normalize_path() calls resolve relative to this run's directory.
    normalize_path.prefix = normalize_path(
        os.path.join(p['output_directory'], dirname))

    if os.path.isdir(normalize_path.prefix):
        print "Batch run: Warning -- directory already exists!"
        print "Run aborted; wait one minute before trying again, or else rename existing directory: \n" + \
              normalize_path.prefix

        sys.exit(-1)
    else:
        os.mkdir(normalize_path.prefix)
        print "Batch run output will be in " + normalize_path.prefix

    if p['vc_info']:
        _print_vc_info(simname + ".diffs")

    # Provenance strings, printed into the .out file below
    hostinfo = "Host: " + " ".join(platform.uname())
    topographicalocation = "Topographica: " + os.path.abspath(sys.argv[0])
    topolocation = "topo package: " + os.path.abspath(topo.__file__)
    scriptlocation = "script: " + os.path.abspath(script_file)

    starttime = time.time()
    startnote = "Batch run started at %s." % time.strftime(
        "%a %d %b %Y %H:%M:%S +0000", time.gmtime())

    # store a re-runnable copy of the command used to start this batch run
    try:
        # pipes.quote is undocumented, so I'm not sure which
        # versions of python include it (I checked python 2.6 and
        # 2.7 on linux; they both have it).
        import pipes
        quotefn = pipes.quote
    except (ImportError, AttributeError):
        # command will need a human to insert quotes before it can be re-used
        quotefn = lambda x: x

    command_used_to_start = string.join([quotefn(arg) for arg in sys.argv])

    # CBENHANCEMENT: would be nice to separately write out a
    # runnable script that does everything necessary to
    # re-generate results (applies diffs etc).

    # Shadow stdout to a .out file in the output directory, so that
    # print statements will go to both the file and to stdout.
    batch_output = open(normalize_path(simname + ".out"), 'w')
    batch_output.write(command_used_to_start + "\n")
    sys.stdout = MultiFile(batch_output, sys.stdout)

    print
    print hostinfo
    print topographicalocation
    print topolocation
    print scriptlocation
    print
    print startnote

    from topo.misc.commandline import auto_import_commands
    auto_import_commands()

    # Ensure that saved state includes all parameter values
    from topo.command import save_script_repr
    param.parameterized.script_repr_suppress_defaults = False

    # Save a copy of the script file for reference
    shutil.copy2(script_file, normalize_path.prefix)
    shutil.move(normalize_path(scriptbase + ".ty"),
                normalize_path(simname + ".ty"))

    # Default case: times is just a number that scales a standard list of times
    times = p['times']
    if not isinstance(times, list):
        times = [
            t * times
            for t in [0, 50, 100, 500, 1000, 2000, 3000, 4000, 5000, 10000]
        ]

    # Run script in main
    error_count = 0
    initial_warning_count = param.parameterized.warning_count

    try:
        execfile(script_file, __main__.__dict__)  #global_params.context
        global_params.check_for_unused_names()
        if p.save_global_params:
            _save_parameters(p.extra_keywords(),
                             simname + ".global_params.pickle")
        print_sizes()
        topo.sim.name = simname

        # Run each segment, doing the analysis and saving the script state each time
        for run_to in times:
            topo.sim.run(run_to - topo.sim.time())
            p['analysis_fn']()
            save_script_repr()
            elapsedtime = time.time() - starttime
            param.Parameterized(name="run_batch").message(
                "Elapsed real time %02d:%02d." %
                (int(elapsedtime / 60), int(elapsedtime % 60)))

        if p['snapshot']:
            save_snapshot()

    except:
        # Deliberate catch-all: a batch run must still report its
        # completion summary and restore stdout even if the script fails.
        error_count += 1
        import traceback
        traceback.print_exc(file=sys.stdout)
        sys.stderr.write("Warning -- Error detected: execution halted.\n")

    print "\nBatch run completed at %s." % time.strftime(
        "%a %d %b %Y %H:%M:%S +0000", time.gmtime())
    print "There were %d error(s) and %d warning(s)%s." % \
          (error_count,(param.parameterized.warning_count-initial_warning_count),
           ((" (plus %d warning(s) prior to entering run_batch)"%initial_warning_count
             if initial_warning_count>0 else "")))

    # restore stdout
    sys.stdout = sys.__stdout__
    batch_output.close()