def n_action(option, opt_str, value, parser):
    args = [arg for arg in sys.argv[1:] if arg != opt_str]
    options, args = parser.parse_args(args)
    sys.argv = ['notebook']
    try:
        # Jupyter/IPython >= 4.0
        from notebook.notebookapp import NotebookApp
        jupyter = True
    except:
        # IPython < 4.0
        from IPython.html.notebookapp import NotebookApp
        jupyter = False

    if jupyter:
        ipython_dir = param.resolve_path('platform/ipython', path_to_file=False)
        os.environ['IPYTHONDIR'] = ipython_dir
        config_dir = param.resolve_path('platform/jupyter', path_to_file=False)
        NotebookApp.config_dir = config_dir
    else:
        if options.Profile is None:
            config_dir = param.resolve_path('platform/ipython/', path_to_file=False)
            NotebookApp.ipython_dir = config_dir
        else:
            NotebookApp.profile = options.Profile

    if options.IP is not None:
        NotebookApp.ip = options.IP
    if options.Port is not None:
        NotebookApp.port = options.Port
    NotebookApp().launch_instance()

    global something_executed
    something_executed = True

def compare_startup_speed_data(script):
    """
    Run and time script with the parameters specified when its
    STARTUPSPEEDDATA file was generated, and check for changes.

    Looks for the STARTUPSPEEDDATA file at
    MACHINETESTSDATADIR/script_name.ty_STARTUPSPEEDDATA. If not found
    there, first generates a new STARTUPSPEEDDATA file at
    MACHINETESTSDATADIR/script_name.ty_STARTUPSPEEDDATA (i.e. to
    generate new data, delete the existing data before running).
    """
    script = script.replace("\\", "\\\\")

    print "Comparing startup speed data for %s" % script
    script_name = os.path.basename(script)

    ensure_path_exists(MACHINETESTSDATADIR)
    data_filename = os.path.join(MACHINETESTSDATADIR, script_name + "_STARTUPSPEEDDATA")

    try:
        locn = resolve_path(data_filename)
    except IOError:
        print "No existing data"
        #_run_in_forked_process(_generate_startup_speed_data,script,data_filename,cortex_density=SPEEDTESTS_CORTEXDENSITY)
        _generate_startup_speed_data(script, data_filename,
                                     cortex_density=SPEEDTESTS_CORTEXDENSITY)
        locn = resolve_path(data_filename)

    print "Reading data from %s" % locn
    speed_data_file = open(locn, 'r')

    try:
        speed_data = pickle.load(speed_data_file)
        print "Data from release=%s, version=%s" % (
            speed_data['versions'] if 'versions' in speed_data else ("unknown", "unknown"))
    except:
        ###############################################################
        ## Support old data files (used to be string in the file rather
        ## than pickle)
        speed_data_file.seek(0)
        speed_data = speed_data_file.readline()
        density, old_time = speed_data.split('=')
        speed_data = {'cortex_density': float(density),
                      'how_long': float(old_time),
                      'args': {}}
        _support_old_args(speed_data['args'])
        ###############################################################

    _setargs(speed_data['args'])
    speed_data_file.close()

    old_time = speed_data['how_long']
    new_time = _time_sim_startup(script)
    percent_change = 100.0 * (new_time - old_time) / old_time

    print "[" + script + ' startup] Before: %2.1f s  Now: %2.1f s  (change=%2.1f s, %2.1f percent)' \
          % (old_time, new_time, new_time - old_time, percent_change)

def compare_with_and_without_snapshot_LoadSnapshot(script="models/lissom.ty"):

    data_filename = os.path.split(script)[1] + "_PICKLETEST"
    snapshot_filename = os.path.split(script)[1] + "_PICKLETEST.typ_"

    locn = resolve_path(os.path.join('tests', data_filename))
    print "Loading pickle from %s" % locn
    try:
        data = pickle.load(open(locn, "rb"))
    except IOError:
        print "\nData file '" + data_filename + "' could not be opened; run _A() first"
        raise

    # retrieve parameters used when script was run
    run_for = data['run_for']
    break_at = data['break_at']
    look_at = data['look_at']

    # CEBALERT: shouldn't need to re-list - should be able to read from data!
    # cortex_density=data['cortex_density']
    # lgn_density=data['lgn_density']
    # retina_density=data['retina_density']
    # dims=data['dims']
    # dataset=data['dataset']

    from topo.command import load_snapshot

    locn = resolve_path(os.path.join('tests', snapshot_filename))
    print "Loading snapshot at %s" % locn

    try:
        load_snapshot(locn)
    except IOError:
        print "\nPickle file '" + snapshot_filename + "' could not be opened; run _B() first."
        raise

    assert topo.sim.time() == break_at
    assert_array_equal(data[topo.sim.time()], topo.sim[look_at].activity,
                       err_msg="\nAt topo.sim.time()=%d" % topo.sim.time())
    print "Match at %s after loading snapshot" % topo.sim.time()

    topo.sim.run(run_for - break_at)

    assert_array_equal(data[topo.sim.time()], topo.sim[look_at].activity,
                       err_msg="\nAt topo.sim.time()=%d" % topo.sim.time())
    print "Match at %s after running loaded snapshot" % topo.sim.time()

def test_vertical_oddimage_evensheet__horizontal_evenimage_evensheet(self):
    """
    Test vertical positioning for even sheet, odd image and
    horizontal positioning for even image, even sheet.
    """
    image_array = np.array(
        [[96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [0., 34., 68., 102., 136., 255., 0., 0.],
         [0., 34., 68., 102., 136., 255., 255., 0.],
         [0., 34., 68., 102., 136., 255., 255., 255.],
         [255., 0., 255., 0., 255., 0., 255., 0.],
         [0., 255., 0., 255., 0., 255., 0., 255.],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909]],
        dtype=np.float)

    image = FileImage(filename=resolve_path('topo/tests/unit/testimage.pgm'),
                      xdensity=8,
                      ydensity=8,
                      bounds=BoundingBox(radius=0.5),
                      output_fns=[])

    ps = image.pattern_sampler
    ps.size_normalization = 'original'
    ps.whole_pattern_output_fns = []

    assert_array_almost_equal(image_array, image())

def test_vertical_oddimage_evensheet__horizontal_evenimage_evensheet(self):
    """
    Test vertical positioning for even sheet, odd image and
    horizontal positioning for even image, even sheet.
    """
    image_array = array(
        [[96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [0., 34., 68., 102., 136., 255., 0., 0.],
         [0., 34., 68., 102., 136., 255., 255., 0.],
         [0., 34., 68., 102., 136., 255., 255., 255.],
         [255., 0., 255., 0., 255., 0., 255., 0.],
         [0., 255., 0., 255., 0., 255., 0., 255.],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909]],
        Float)

    image = FileImage(filename=resolve_path('tests/testimage.pgm'),
                      xdensity=8,
                      ydensity=8,
                      bounds=BoundingBox(radius=0.5),
                      output_fns=[])

    ps = image.pattern_sampler
    ps.size_normalization = 'original'
    ps.whole_pattern_output_fns = []

    assert_array_almost_equal(image_array, image())

def open_location(self, locations):
    """
    Try to open one of the specified locations in a new window of the
    default browser. See webbrowser module for more information.

    locations should be a tuple.
    """
    # CB: could have been a list. This is only here because if locations is set
    # to a string, it will loop over the characters of the string.
    assert isinstance(locations, tuple), "locations must be a tuple."

    for location in locations:
        try:
            existing_location = resolve_path(location)
            webbrowser.open(existing_location, new=2, autoraise=True)
            self.messageBar.response('Opened local file ' + existing_location + ' in browser.')
            return ###
        except:
            pass

    for location in locations:
        if location.startswith('http'):
            try:
                webbrowser.open(location, new=2, autoraise=True)
                self.messageBar.response('Opened remote location ' + location + ' in browser.')
                return ###
            except:
                pass

    self.messageBar.response("Could not open any of %s in a browser." % locations)

def add_static_image(self, name, file_path):
    """
    Construct a static image Plot (e.g. a color key for an Orientation
    Preference map).
    """
    image = Image.open(resolve_path(file_path))
    plot = Plot(image, name=name)
    self._static_plots.append(plot)

def load_snapshot(snapshot_name):
    """
    Load the simulation stored in snapshot_name.
    """
    # unpickling the PicklableClassAttributes() executes startup_commands and
    # sets PO class parameters.

    snapshot_name = param.resolve_path(snapshot_name)

    # If it's not gzipped, open as a normal file.
    try:
        snapshot = gzip.open(snapshot_name, 'r')
        snapshot.read(1)
        snapshot.seek(0)
    except (IOError, NameError):
        snapshot = open(snapshot_name, 'r')

    try:
        pickle.load(snapshot)
    except ImportError:
        # CEBALERT: Support snapshots where the unpickling support
        # (UnpickleEnvironmentCreator) cannot be found because the
        # support itself was moved from topo.command.basic to
        # topo.command.__init__! Was it a bad idea to have the support
        # code loaded via an object?
        sys.modules['topo.command.basic'] = topo.command
        # Could instead set find_global on cPickle.Unpickler (could
        # support all import changes that way, as alternative to what
        # we currently do), but I'm reluctant to mess with cPickle's
        # default way of finding things. (Also it would be specific to
        # cPickle; would be different for pickle.)
        snapshot.seek(0)
        try:
            pickle.load(snapshot)
        except:
            import traceback
            m = """
            Snapshot could not be loaded.

            Please file a support request via topographica.org.

            Loading error:
            %s
            """ % traceback.format_exc()
            param.Parameterized(name="load_snapshot").warning(m)

    snapshot.close()

    # Restore subplotting prefs without worrying if there is a
    # problem (e.g. if topo/analysis/ is not present)
    try:
        from topo.analysis.featureresponses import Subplotting
        Subplotting.restore_subplots()
    except:
        p = param.Parameterized(name="load_snapshot")
        p.message("Unable to restore Subplotting settings")

def basic_save_load_snapshot(self):
    """
    Very basic test to check the activity matrix of a GeneratorSheet
    comes back ok, and that class attributes are pickled.
    """
    assert topo.sim.name == SIM_NAME
    topo.sim['R'] = GeneratorSheet(input_generator=Gaussian(), nominal_density=2)
    topo.sim.run(1)

    R_act = copy.deepcopy(topo.sim['R'].activity)
    Line.x = 12.0
    topo.sim.startup_commands.append("z=99")

    save_snapshot(SNAPSHOT_NAME)

    Line.x = 9.0
    exec "z=88" in __main__.__dict__
    topo.sim['R'].set_input_generator(Line())
    topo.sim.run(1)

    load_snapshot(resolve_path(SNAPSHOT_NAME, search_paths=[normalize_path.prefix]))

    # CEBALERT: should also test that unpickling order is correct
    # (i.e. startup_commands, class attributes, simulation)

    assert_array_equal(R_act, topo.sim['R'].activity)
    self.assertEqual(Line.x, 12.0)
    self.assertEqual(__main__.__dict__['z'], 99)

def n_action(option, opt_str, value, parser):
    from IPython.html.notebookapp import NotebookApp
    sys.argv = ['notebook']
    NotebookApp.ipython_dir = param.resolve_path('platform/ipython', path_to_file=False)
    NotebookApp.profile = 'topo'
    NotebookApp().launch_instance()

    global something_executed
    something_executed = True

def __init__(self, dataset_name, **params):
    """
    dataset_name is the path to a JSON file
    (https://docs.python.org/2/library/json.html) containing a
    description for a dataset.

    Any extra parameter values supplied here will be passed down to the
    feature_coordinators requested in features_to_vary.

    The JSON file should contain the following entries:

    :'name': Name of the dataset
        (string, default=basename(dataset_name))

    :'length': Number of images in the dataset
        (integer, default=number of files in directory of dataset_name minus 1)

    :'description': Description of the dataset
        (string, default="")

    :'source': Citation of paper for which the dataset was created
        (string, default=name)

    :'filename_template': Path to the images with placeholders
        ({placeholder_name}) for inherent features and the image number,
        e.g. "filename_template": "images/image{i}.png"
        (default={current_image}.jpg)

    :'inherent_features': Dictionary specifying how to access inherent
        features; value is used in eval(). Currently, the label of the
        pattern generator ('pattern_label') as well as the image number
        ('current_image') are given as parameters to each callable
        supplied in inherent_features, where current_image varies from 0
        to length-1 and pattern_label is one of the items of
        pattern_labels.
        (python code, default={'i': lambda params: '%02d' % (params['current_image']+1)})

        Example 1: Imagine having images without any inherent features
        named as follows: "images/image01.png", "images/image02.png"
        and so on. Then,

            filename_template: "images/image{i}.png" and
            "inherent_features": "{'i': lambda params: '%02d' % (params['current_image']+1)}"

        This replaces {i} in the template with the current image number + 1.

        Example 2: Imagine having image pairs from a stereo webcam named
        as follows: "images/image01_left.png", "images/image01_right.png"
        and so on. If pattern_labels=['Left','Right'], then

            filename_template: "images/image{i}_{dy}" and
            "inherent_features": "{'i': lambda params: '%02d' % (params['current_image']+1),
                                   'dy': lambda params: 'left' if params['pattern_label']=='Left' else 'right'}"

        Here, additionally {dy} gets replaced by either 'left' if the
        pattern_label is 'Left' or 'right' otherwise.
    """
    filename = param.resolve_path(dataset_name)
    filepath = os.path.dirname(filename)
    dataset = json.loads(open(filename).read())

    self.dataset_name = dataset.get('name', os.path.basename(dataset_name))
    length = len([f for f in os.listdir(filepath)
                  if os.path.isfile(os.path.join(filepath, f))]) - 1
    self.patterns_per_label = dataset.get('length', length)
    self.description = dataset.get('description', "")
    self.filename_template = dataset.get('filename_template', filepath + "/{i}.jpg")
    self.source = dataset.get('source', self.dataset_name)

    inherent_features = (eval(dataset['inherent_features'])
                         if 'inherent_features' in dataset
                         else {'i': lambda params: '%02d' % (params['current_image']+1)})

    super(PatternCoordinatorImages, self).__init__(inherent_features, **params)

def __call__(self, source_file, ns={}, **kwargs):
    from topo.misc.commandline import global_params

    ns = ns if ns else self.ns
    for (key, val) in kwargs.items():
        global_params.exec_in_context('%s=%s' % (key, val))

    source_path = param.resolve_path(source_file)
    code = compile(open(source_path, 'r').read(), "<execution>", "exec")
    exec code in ns  # globals and locals
    self.push(ns)

def test_fit_shortest(self):
    """
    Test that the shorter dimension is made to fit 1.0, while the
    other is scaled by the same factor.
    """
    ### 15 units represent 1.0 in sheet coordinates.
    image_array = np.array(
        [[34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 0., 0.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 0., 0.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 0., 0.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255.],
         [0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255.],
         [0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255.],
         [255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0.],
         [255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0.],
         [255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0.]])

    image = FileImage(filename=resolve_path('topo/tests/unit/testimage.pgm'),
                      xdensity=15,
                      ydensity=15,
                      output_fns=[],
                      bounds=BoundingBox(radius=0.5))

    ps = image.pattern_sampler
    ps.size_normalization = 'fit_shortest'
    ps.whole_pattern_output_fns = []

    assert_array_almost_equal(image_array, image())

def test_fit_shortest(self):
    """
    Test that the shorter dimension is made to fit 1.0, while the
    other is scaled by the same factor.
    """
    ### 15 units represent 1.0 in sheet coordinates.
    image_array = array(
        [[34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 0., 0.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 0., 0.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 0., 0.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [34., 68., 68., 68., 102., 102., 102., 136., 136., 136., 255., 255., 255., 255., 255.],
         [0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255.],
         [0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255.],
         [0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255.],
         [255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0.],
         [255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0.],
         [255., 0., 0., 0., 255., 255., 255., 0., 0., 0., 255., 255., 255., 0., 0.]])

    image = FileImage(filename=resolve_path('tests/testimage.pgm'),
                      xdensity=15,
                      ydensity=15,
                      output_fns=[],
                      bounds=BoundingBox(radius=0.5))

    ps = image.pattern_sampler
    ps.size_normalization = 'fit_shortest'
    ps.whole_pattern_output_fns = []

    assert_array_almost_equal(image_array, image())

def n_action(option, opt_str, value, parser):
    args = [arg for arg in sys.argv[1:] if arg != opt_str]
    options, args = parser.parse_args(args)

    from IPython.html.notebookapp import NotebookApp
    sys.argv = ['notebook']
    NotebookApp.ipython_dir = param.resolve_path('platform/ipython', path_to_file=False)
    NotebookApp.profile = 'topo'
    if options.IP is not None:
        NotebookApp.ip = options.IP
    if options.Port is not None:
        NotebookApp.port = options.Port
    NotebookApp().launch_instance()

    global something_executed
    something_executed = True

def setUp(self):
    """
    Uses topo/tests/unit/testbitmap.jpg in the unit tests directory
    """
    miata = Image.open(resolve_path('topo/tests/unit/testbitmap.jpg'))
    miata = miata.resize((miata.size[0] / 2, miata.size[1] / 2))
    self.rIm, self.gIm, self.bIm = miata.split()
    self.rseq = self.rIm.getdata()
    self.gseq = self.gIm.getdata()
    self.bseq = self.bIm.getdata()
    self.rar = Numeric.array(self.rseq)
    self.gar = Numeric.array(self.gseq)
    self.bar = Numeric.array(self.bseq)
    self.ra = Numeric.reshape(self.rar, miata.size) / 255.0
    self.ga = Numeric.reshape(self.gar, miata.size) / 255.0
    self.ba = Numeric.reshape(self.bar, miata.size) / 255.0

def setUp(self):
    """
    Uses topo/tests/unit/testbitmap.jpg in the unit tests directory
    """
    miata = Image.open(resolve_path('topo/tests/unit/testbitmap.jpg'))
    miata = miata.resize((miata.size[0] / 2, miata.size[1] / 2))
    self.rIm, self.gIm, self.bIm = miata.split()
    self.rseq = self.rIm.getdata()
    self.gseq = self.gIm.getdata()
    self.bseq = self.bIm.getdata()
    self.rar = np.array(self.rseq)
    self.gar = np.array(self.gseq)
    self.bar = np.array(self.bseq)
    self.ra = np.reshape(self.rar, miata.size) / 255.0
    self.ga = np.reshape(self.gar, miata.size) / 255.0
    self.ba = np.reshape(self.bar, miata.size) / 255.0

def load_snapshot(snapshot_name):
    """
    Load the simulation stored in snapshot_name.
    """
    # unpickling the PicklableClassAttributes() executes startup_commands and
    # sets PO class parameters.

    snapshot_name = param.resolve_path(snapshot_name)

    # If it's not gzipped, open as a normal file.
    try:
        snapshot = gzip.open(snapshot_name, 'r')
        snapshot.read(1)
        snapshot.seek(0)
    except (IOError, NameError):
        snapshot = open(snapshot_name, 'r')

    try:
        pickle.load(snapshot)
    except:
        import traceback
        m = """
        Snapshot could not be loaded.

        Please file a support request via topographica.org.

        Loading error:
        %s
        """ % traceback.format_exc()
        param.Parameterized(name="load_snapshot").warning(m)

    snapshot.close()

    # Restore subplotting prefs without worrying if there is a
    # problem (e.g. if topo/analysis/ is not present)
    try:
        from topo.analysis.featureresponses import Subplotting
        Subplotting.restore_subplots()
    except:
        p = param.Parameterized(name="load_snapshot")
        p.message("Unable to restore Subplotting settings")

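# Hedged usage sketch (not from the source; the snapshot filename below is
# purely illustrative). load_snapshot() resolves the given name via
# param.resolve_path and restores the pickled simulation into topo.sim, so a
# typical call would look like:
#
#     from topo.command import load_snapshot
#     load_snapshot('lissom_snapshot.typ')
#     print topo.sim.time()
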
def compare_with_and_without_snapshot_CreateSnapshot(script="models/lissom.ty"):

    data_filename = os.path.split(script)[1] + "_PICKLETEST"

    locn = resolve_path(os.path.join('tests', data_filename))
    print "Loading pickle at %s" % locn
    try:
        data = pickle.load(open(locn, "rb"))
    except IOError:
        print "\nData file '" + data_filename + "' could not be opened; run _A() first."
        raise

    # retrieve parameters used when script was run
    run_for = data['run_for']
    break_at = data['break_at']
    look_at = data['look_at']

    # CEBALERT: shouldn't need to re-list - should be able to read from data!
    cortex_density = data['cortex_density']
    lgn_density = data['lgn_density']
    retina_density = data['retina_density']
    dims = data['dims']
    dataset = data['dataset']

    __main__.__dict__['cortex_density'] = cortex_density
    __main__.__dict__['lgn_density'] = lgn_density
    __main__.__dict__['retina_density'] = retina_density
    __main__.__dict__['dims'] = dims
    __main__.__dict__['dataset'] = dataset

    execfile(script, __main__.__dict__)

    # check we have the same before any pickling
    topo.sim.run(break_at)
    assert_array_equal(data[topo.sim.time()], topo.sim[look_at].activity,
                       err_msg="\nAt topo.sim.time()=%d" % topo.sim.time())

    from topo.command import save_snapshot
    locn = normalize_path(os.path.join('tests', data_filename + '.typ_'))
    print "Saving snapshot to %s" % locn
    save_snapshot(locn)

def __init__(self, dataset_name, **params):
    """
    dataset_name is the path to a folder containing a MANIFEST_json
    (https://docs.python.org/2/library/json.html), which contains a
    description for a dataset. If no MANIFEST_json is present, all image
    files in the specified folder are used.

    Any extra parameter values supplied here will be passed down to the
    feature_coordinators requested in features_to_vary.

    The JSON file can contain any of the following entries; if an entry
    is not present, the default is used:

    :'dataset_name': Name of the dataset (string, default=filepath)

    :'length': Number of images in the dataset
        (integer, default=number of files in directory matching filename_template)

    :'description': Description of the dataset (string, default="")

    :'source': Citation of paper for which the dataset was created
        (string, default=name)

    :'filename_template': Path to the images with placeholders
        ({placeholder_name}) for inherent features and the image number,
        e.g. "filename_template": "images/image{i}.png". The placeholders
        are replaced according to placeholder_mapping. Alternatively,
        glob patterns such as * or ? can be used, e.g.
        "filename_template": "images/*.png"
        (default=path_to_dataset_name/*.*)

    :'placeholder_mapping': Dictionary specifying the replacement of
        placeholders in filename_template; value is used in eval()
        (default={}). Currently, the label of the pattern generator
        ('pattern_label') as well as the image number ('current_image')
        are given as parameters to each callable supplied in
        placeholder_mapping, where current_image varies from 0 to
        length-1 and pattern_label is one of the items of pattern_labels.
        (python code, default={'i': lambda params: '%02d' % (params['current_image']+1)})

    :'inherent_features': Features for which the corresponding
        feature_coordinators should not be applied
        (default=['sf','or','cr'])

        Example 1: Imagine having images without any inherent features
        named as follows: "images/image01.png", "images/image02.png"
        and so on. Then,

            filename_template: "images/image{i}.png" and
            "placeholder_mapping": "{'i': lambda params: '%02d' % (params['current_image']+1)}"

        This replaces {i} in the template with the current image number + 1.

        Example 2: Imagine having image pairs from a stereo webcam named
        as follows: "images/image01_left.png", "images/image01_right.png"
        and so on. If pattern_labels=['Left','Right'], then

            filename_template: "images/image{i}_{dy}" and
            "placeholder_mapping": "{'i': lambda params: '%02d' % (params['current_image']+1),
                                     'dy': lambda params: 'left' if params['pattern_label']=='Left' else 'right'}"

        Here, additionally {dy} gets replaced by either 'left' if the
        pattern_label is 'Left' or 'right' otherwise.

    If the directory does not contain a MANIFEST_json file, the defaults
    are as follows:

    :'filename_template': filepath/*.*, where filepath is the path given
        in dataset_name
    :'patterns_per_label': Number of image files in filepath, where
        filepath is the path given in dataset_name
    :'inherent_features': []
    :'placeholder_mapping': {}
    """
    filepath = param.resolve_path(dataset_name, path_to_file=False)
    self.dataset_name = filepath
    self.filename_template = filepath + "/*.*"
    self.description = ""
    self.source = self.dataset_name
    self.placeholder_mapping = {}
    patterns_per_label = len(glob.glob(self.filename_template))
    inherent_features = ['sf', 'cr']

    try:
        filename = param.resolve_path(dataset_name + '/MANIFEST_json')
        filepath = os.path.dirname(filename)
        dataset = json.loads(open(filename).read())

        self.dataset_name = dataset.get('dataset_name', self.dataset_name)
        self.description = dataset.get('description', self.description)
        self.filename_template = dataset.get('filename_template', self.filename_template)
        patterns_per_label = dataset.get('length', len(glob.glob(self.filename_template)))
        self.source = dataset.get('source', self.source)
        self.placeholder_mapping = (eval(dataset['placeholder_mapping'])
                                    if 'placeholder_mapping' in dataset
                                    else self.placeholder_mapping)
        inherent_features = dataset.get('inherent_features', inherent_features)
    except IOError:
        pass

    if 'patterns_per_label' not in params:
        params['patterns_per_label'] = patterns_per_label

    super(PatternCoordinatorImages, self).__init__(inherent_features, **params)

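# A minimal sketch of a MANIFEST_json file for the constructor above, assuming
# the directory layout of "Example 1" in the docstring. This exact file is not
# part of the source; all values are illustrative.
#
#     {
#         "dataset_name": "example_images",
#         "description": "Two-image example dataset",
#         "source": "hypothetical citation",
#         "length": 2,
#         "filename_template": "images/image{i}.png",
#         "placeholder_mapping": "{'i': lambda params: '%02d' % (params['current_image']+1)}"
#     }
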
def test(plotgroup_names):
    import topo
    assert topo.sim.name == sim_name
    assert topo.sim['V1'].nominal_density == 8
    assert topo.sim.time() == 100

    failing_tests = []
    for name in plotgroup_names:
        print "\n* Testing plotgroups['%s']:" % name

        sheet = topo.sim['V1']
        _reset_views(sheet)
        plotgroups[name]._exec_pre_plot_hooks()

        filename = resolve_path('tests/data_maptests/%s_t%s_%s.data' % (sim_name, topo.sim.timestr(),
                                                                        name.replace(' ', '_')))
        print "Reading previous results from %s" % (filename)
        f = open(filename, 'r')

        try:
            topo_version, previous_views = pickle.load(f)
        ########################################
        except AttributeError:
            # PRALERT: Code to allow loading of old data files after
            # boundingregion was moved to holoviews.
            import sys
            from holoviews.core import boundingregion
            sys.modules['imagen.boundingregion'] = boundingregion

            # CEBALERT: code here just to support old data file. Should
            # generate a new one so it's no longer necessary.
            from topo.misc.legacy import preprocess_state

            import topo.base.boundingregion

            def _boundingregion_not_parameterized(instance, state):
                for a in ['initialized', '_name_param_value', 'nopickle']:
                    if a in state:
                        del state[a]

            preprocess_state(topo.base.boundingregion.BoundingRegion,
                             _boundingregion_not_parameterized)

            f.seek(0)
            topo_version, previous_views = pickle.load(f)
        ########################################

        f.close()

        if 'sheet_views' in previous_views[sheet.name]:
            previous_sheet_views = previous_views[sheet.name]['sheet_views']
            for view_name in previous_sheet_views:
                failing_tests += checkclose(sheet.name + " " + view_name, topo_version,
                                            sheet.views.Maps[view_name].last.data,
                                            previous_sheet_views[view_name].view()[0])

        if 'curve_dict' in previous_views[sheet.name]:
            previous_curve_dicts = previous_views[sheet.name]['curve_dict']
            # CB: need to cleanup var names e.g. val
            time, duration = (topo.sim.time(), 1.0)
            for curve_name in previous_curve_dicts:
                for other_param in previous_curve_dicts[curve_name]:
                    other_param_val = unit_value(other_param)[-1]
                    for val in previous_curve_dicts[curve_name][other_param]:
                        new_curves = sheet.views.Curves[curve_name.capitalize() + "Tuning"]
                        new_curves = new_curves.clone(key_dimensions=[d(values=[])
                                                                      for d in new_curves.key_dimensions])
                        new = new_curves[time, duration,
                                         other_param_val - 0.01:other_param_val + 0.01,
                                         val].values()[0].data
                        old = previous_curve_dicts[curve_name][other_param][val].view()[0]
                        failing_tests += checkclose("%s %s %s %s" % (sheet.name, curve_name, other_param, val),
                                                    topo_version, new, old)

    if failing_tests != []:
        raise AssertionError, "Failed map tests: %s" % (failing_tests)

if len(p.targets) == 0:
    # DEFAULT
    # maptests wouldn't be default except it's caught platform different
    # problems before (there aren't enough unit tests!)
    p.targets = ['unit', 'traintests', 'snapshots', 'gui', 'maptests']

# Allow allsnapshottests as shortcut for snapshots and pickle.
# CEBALERT: should just combine these tests anyway.
if "allsnapshottests" in p.targets:
    index = p.targets.index("allsnapshottests")
    p.targets[index] = "snapshots"
    p.targets.insert(index, "pickle")

# ->params ?
tests_dir = param.resolve_path("topo/tests", path_to_file=False)
scripts_dir = param.resolve_path(".", path_to_file=False)  ### XXX
topographica_script = xvfb + " " + timing_cmd + coverage_cmd + " " + sys.argv[0] + " " + " "

def _runc(cmd):
    print cmd
    return os.system(cmd)

import topo.misc.keyedlist
target = topo.misc.keyedlist.KeyedList()
speedtarget = topo.misc.keyedlist.KeyedList()

# CEBALERT: need to pick which scripts to include for traintests and

def compare_speed_data(script):
    """
    Run and time script with the parameters specified when its
    SPEEDDATA file was generated, and check for changes.

    Looks for the SPEEDDATA file at
    MACHINETESTSDATADIR/script_name.ty_SPEEDDATA. If not found there,
    first generates a new SPEEDDATA file at
    MACHINETESTSDATADIR/script_name.ty_SPEEDDATA (i.e. to generate new
    data, delete the existing data before running).
    """
    print "Comparing speed data for %s" % script
    script_name = os.path.basename(script)
    data_filename = os.path.join(MACHINETESTSDATADIR, script_name + "_SPEEDDATA")

    try:
        locn = resolve_path(data_filename)
    except IOError:
        print "No existing data"
        _run_in_forked_process(_generate_speed_data, script, data_filename,
                               iterations=SPEEDTESTS_ITERATIONS,
                               cortex_density=SPEEDTESTS_CORTEXDENSITY)
        #_generate_speed_data(script,data_filename,iterations=SPEEDTESTS_ITERATIONS,cortex_density=SPEEDTESTS_CORTEXDENSITY)
        locn = resolve_path(data_filename)

    print "Reading data from %s" % locn
    speed_data_file = open(locn, 'r')

    try:
        speed_data = pickle.load(speed_data_file)
        print "Data from release=%s, version=%s" % (
            speed_data['versions'] if 'versions' in speed_data else ("unknown", "unknown"))
    except:
        ###############################################################
        ## Support old data files (used to be string in the file rather
        ## than pickle)
        speed_data_file.seek(0)
        speed_data = speed_data_file.readline()
        iterations, old_time = speed_data.split('=')
        iterations = float(iterations)
        old_time = float(old_time)
        speed_data = {'iterations': iterations,
                      'how_long': old_time,
                      'args': {}}
        ###############################################################

    speed_data_file.close()

    old_time = speed_data['how_long']
    iterations = speed_data['iterations']
    args = speed_data['args']
    _support_old_args(args)
    _setargs(args)

    new_time = _time_sim_run(script, iterations)
    percent_change = 100.0 * (new_time - old_time) / old_time

    print "[" + script + "]" + ' Before: %2.1f s  Now: %2.1f s  (change=%2.1f s, %2.1f percent)' \
          % (old_time, new_time, new_time - old_time, percent_change)

TOPOGRAPHICAHOME = os.path.join(os.path.expanduser("~"), "topographica")

TESTSDATADIR = os.path.join(TOPOGRAPHICAHOME, "tests")
if not os.path.exists(TESTSDATADIR):
    os.makedirs(TESTSDATADIR)

# While training data is usually checked into topo/tests and is the
# same for all machines, speed data is generated by the machine
# running this makefile. Therefore, speed data is stored in a
# machine-specific directory.
MACHINETESTSDATADIR = os.path.join(TESTSDATADIR, socket.gethostname())
if not os.path.exists(MACHINETESTSDATADIR):
    os.makedirs(MACHINETESTSDATADIR)

FIXEDDATADIR = resolve_path("topo/tests", path_to_file=False)


######################################################################################
### Support fns

def _support_old_args(args):
    # support old data files which contain 'default_density', etc
    if 'default_density' in args:
        args['cortex_density'] = args['default_density']
        #del args['default_density']
    if 'default_retina_density' in args:
        args['retina_density'] = args['default_retina_density']
        #del args['default_retina_density']
    if 'default_lgn_density' in args:

def test_script(script, decimal=None):
    """
    Run script with the parameters specified when its DATA file was
    generated, and check for changes.

    Looks for the DATA file at FIXEDDATADIR/script_name.ty_DATA (for
    data checked into SVN). If not found there, looks at
    TESTSDATADIR/script_name.ty_DATA. If also not found there, first
    generates a new DATA file at TESTSDATADIR/script_name.ty_DATA
    (i.e. to generate new data, delete the existing data before running).

    The decimal parameter defines how many decimal points to use when
    testing for array equality. The default of None causes exact matching.
    """
    print "Comparing results for %s" % script
    script_name = os.path.basename(script)

    # CEBALERT: clean up
    data_filename_only = script_name + "_DATA"
    data_filename = os.path.join(TESTSDATADIR, data_filename_only)

    try:
        locn = resolve_path(data_filename_only, search_paths=[FIXEDDATADIR, TESTSDATADIR])
    except IOError:
        print "No existing data"
        _run_in_forked_process(_generate_data, script, data_filename,
                               run_for=RUN_FOR,
                               cortex_density=TRAINTESTS_CORTEXDENSITY,
                               lgn_density=LGN_DENSITY,
                               retina_density=RETINA_DENSITY)
        locn = resolve_path(data_filename)

    print "Reading data from %s" % locn
    data_file = open(locn, 'rb')
    data = pickle.load(data_file)
    print "Data from release=%s, version=%s" % (
        data['versions'] if 'versions' in data else ("unknown", "unknown"))

    # retrieve parameters used when script was run
    run_for = data['run_for']
    look_at = data['look_at']

    ####################################################
    # support very old data files that contain 'density' instead of args['cortex_density']
    if 'args' not in data:
        data['args'] = {'cortex_density': data['density']}

    args = data['args']
    _support_old_args(args)
    ####################################################

    _setargs(args)

    print "Starting '%s'" % script
    execfile(script, __main__.__dict__)

    #########################################################
    time_fmt = topo.sim.timestr
    # support old pickled data (could replace time_fmt(topo.sim.time()) with
    # just topo.sim.timestr() if we didn't need to support old data
    if topo.sim.timestr(run_for[0]) not in data:
        time_fmt = float
    #########################################################

    for time in run_for:
        print "Running for %s iterations" % time
        topo.sim.run(time)
        if decimal is None:
            assert_array_equal(data[time_fmt(topo.sim.time())], topo.sim[look_at].activity,
                               err_msg="\nAt topo.sim.time()=%d, with decimal=%s" % (topo.sim.time(), decimal))
        else:
            assert_array_almost_equal(data[time_fmt(topo.sim.time())], topo.sim[look_at].activity,
                                      decimal,
                                      err_msg="\nAt topo.sim.time()=%d, with decimal=%s" % (topo.sim.time(), decimal))

    result = "Results from " + script + " have not changed."
    if decimal is not None:
        result += " (%d dp)" % (decimal)
    print result + "\n"

xvfb = which('xvfb-run')
if not xvfb:
    print "xvfb-run not found; any GUI components that are run will display windows"
else:
    xvfb = xvfb + " -a"

# Allow allsnapshottests as shortcut for snapshots and pickle.
# CEBALERT: should just combine these tests anyway.
if "allsnapshottests" in p.targets:
    index = p.targets.index("allsnapshottests")
    p.targets[index] = "snapshots"
    p.targets.insert(index, "pickle")

# ->params ?
tests_dir = param.resolve_path("topo/tests", path_to_file=False)
scripts_dir = param.resolve_path(".", path_to_file=False)  ### XXX
topographica_script = xvfb + " " + timing_cmd + " " + sys.argv[0] + " " + " "

def _runc(cmd):
    print cmd
    return os.system(cmd)

from imagen.odict import OrderedDict
target = OrderedDict()
speedtarget = OrderedDict()

# CEBALERT: need to pick which scripts to include for training and
# speedtests and startupspeedtests (see test_script.py).

import Image
import ImageDraw
import ImageFont

from colorsys import hsv_to_rgb

import numpy.oldnumeric as Numeric
import numpy

import param
from param import resolve_path

# CEBALERT: can we just use load_default()? Do we even need TITLE_FONT
# at all?
try:
    import matplotlib
    _vera_path = resolve_path(os.path.join(matplotlib.__file__, 'matplotlib/mpl-data/fonts/ttf/Vera.ttf'))
    TITLE_FONT = ImageFont.truetype(_vera_path, 20)
except:
    TITLE_FONT = ImageFont.load_default()


### JCALERT: To do:
### - Update the test file.
### - Write PaletteBitmap when the Palette class is fixed
### - Get rid of accessing function (copy, show...) (should we really?)


class Bitmap(param.Parameterized):
    """
    Wrapper class for the PIL Image class.

def test_stretch_to_fit(self):
    """
    Test that both image dimensions are made to fit 1.0.
    """
    ### 8 units represent 1.0 in sheet coordinates.
    image_array = np.array(
        [[96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 34., 68., 102., 136., 255., 0., 0., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 34., 68., 102., 136., 255., 0., 0., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 34., 68., 102., 136., 255., 255., 0., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 34., 68., 102., 136., 255., 255., 255., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 34., 68., 102., 136., 255., 255., 255., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 255., 0., 255., 0., 255., 0., 255., 0., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 255., 0., 255., 0., 255., 0., 255., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 255., 0., 255., 0., 255., 0., 255., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909]])

    image = FileImage(filename=resolve_path('topo/tests/unit/testimage.pgm'),
                      xdensity=8,
                      ydensity=8,
                      output_fns=[],
                      bounds=BoundingBox(radius=1.0))

    ps = image.pattern_sampler
    ps.size_normalization = 'stretch_to_fit'
    ps.whole_pattern_output_fns = []

    assert_array_almost_equal(image_array, image())

def test(plotgroup_names):
    import topo
    import param
    assert topo.sim.name == sim_name
    assert topo.sim['V1'].nominal_density == 8
    assert topo.sim.time() == 100

    failing_tests = []
    for name in plotgroup_names:
        print "\n* Testing plotgroups['%s']:" % name

        sheet = topo.sim['V1']
        _reset_views(sheet)
        plotgroups[name]._exec_pre_plot_hooks()

        filename = resolve_path('tests/data_maptests/%s_t%s_%s.data' % (sim_name, topo.sim.timestr(),
                                                                        name.replace(' ', '_')))
        print "Reading previous results from %s" % (filename)
        f = open(filename, 'r')

        try:
            topo_version, previous_views = pickle.load(f)
        ########################################
        except AttributeError:
            # CEBALERT: code here just to support old data file. Should
            # generate a new one so it's no longer necessary.
            from topo.misc.legacy import preprocess_state

            import topo.base.boundingregion

            def _boundingregion_not_parameterized(instance, state):
                for a in ['initialized', '_name_param_value', 'nopickle']:
                    if a in state:
                        del state[a]

            preprocess_state(topo.base.boundingregion.BoundingRegion,
                             _boundingregion_not_parameterized)

            f.seek(0)
            topo_version, previous_views = pickle.load(f)
        ########################################

        f.close()

        if 'sheet_views' in previous_views[sheet.name]:
            previous_sheet_views = previous_views[sheet.name]['sheet_views']
            for view_name in previous_sheet_views:
                failing_tests += checkclose(sheet.name + " " + view_name, topo_version,
                                            sheet.sheet_views[view_name].view()[0],
                                            previous_sheet_views[view_name].view()[0])

        if 'curve_dict' in previous_views[sheet.name]:
            previous_curve_dicts = previous_views[sheet.name]['curve_dict']
            # CB: need to cleanup var names e.g. val
            for curve_name in previous_curve_dicts:
                for other_param in previous_curve_dicts[curve_name]:
                    for val in previous_curve_dicts[curve_name][other_param]:
                        failing_tests += checkclose("%s %s %s %s" % (sheet.name, curve_name, other_param, val),
                                                    topo_version,
                                                    sheet.curve_dict[curve_name][other_param][val].view()[0],
                                                    previous_curve_dicts[curve_name][other_param][val].view()[0])

    if failing_tests != []:
        raise AssertionError, "Failed map tests: %s" % (failing_tests)

import param
from param import resolve_path, normalize_path

import topo

from nose.tools import nottest

# While training data is usually checked into topo/tests and is the
# same for all machines, speed data is generated by the machine
# running this makefile. Therefore, speed data is stored in a
# machine-specific directory.
TOPOGRAPHICAHOME = param.normalize_path.prefix
TESTSDATADIR = os.path.join(TOPOGRAPHICAHOME, "tests")
MACHINETESTSDATADIR = os.path.join(TESTSDATADIR, socket.gethostname())
FIXEDDATADIR = resolve_path("topo/tests/data_traintests", path_to_file=False)


######################################################################################
### Support fns

def ensure_path_exists(path):
    """Force the specified path to exist if it doesn't yet."""
    if not os.path.exists(path):
        os.makedirs(path)


def _support_old_args(args):
    # support old data files which contain 'default_density', etc
    if 'default_density' in args:
        args['cortex_density'] = args['default_density']

import topo

from nose.tools import nottest

import cProfile

# While training data is usually checked into topo/tests and is the
# same for all machines, speed data is generated by the machine
# running this makefile. Therefore, speed data is stored in a
# machine-specific directory.
TOPOGRAPHICAHOME = param.normalize_path.prefix
TESTSDATADIR = os.path.join(TOPOGRAPHICAHOME, "tests")
MACHINETESTSDATADIR = os.path.join(TESTSDATADIR, socket.gethostname())
FIXEDDATADIR = resolve_path("topo/tests/data_traintests", path_to_file=False)
GPUDATADIR = resolve_path("topo/tests/data_gputests", path_to_file=False)


######################################################################################
### Support fns

def ensure_path_exists(path):
    """Force the specified path to exist if it doesn't yet."""
    if not os.path.exists(path):
        os.makedirs(path)


def _support_old_args(args):
    # support old data files which contain 'default_density', etc
    if 'default_density' in args:

import paramtk as tk

from collections import OrderedDict

import topo
from topo.plotting.plotgroup import plotgroups, FeatureCurvePlotGroup
from topo.misc.commandline import sim_name_from_filename
import topo.misc.genexamples
import topo.command
import topo.tkgui

from templateplotgrouppanel import TemplatePlotGroupPanel
from featurecurvepanel import FeatureCurvePanel
from projectionpanel import SheetPanel, CFProjectionPanel, ProjectionActivityPanel, ConnectionFieldsPanel, RFProjectionPanel
from testpattern import TestPattern
from editor import ModelEditor

tk.AppWindow.window_icon_path = resolve_path('tkgui/icons/topo.xbm')

SCRIPT_FILETYPES = [('Topographica scripts', '*.ty'),
                    ('Python scripts', '*.py'),
                    ('All files', '*')]

SAVED_FILE_EXTENSION = '.typ'
SAVED_FILETYPES = [('Topographica saved networks', '*' + SAVED_FILE_EXTENSION),
                   ('All files', '*')]

turl = "http://topographica.org/"
userman = "User_Manual/index.html"
tuts = "Tutorials/index.html"
refman = "Reference_Manual/index.html"
plotman = "User_Manual/plotting.html"

def test_fit_longest(self):
    """
    Check that the longer image dimension is made to fit the default
    dimension of 1.0, while the other is scaled by the same factor.
    """
    ### Twice the default BoundingBox dimensions, image size of 2.0.
    ### In this case, 8 units represent 1.0 in sheet coordinates.
    image_array = array(
        [[96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 0., 0., 0., 0.],
         [0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 0., 0., 0., 0.],
         [0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 255., 255., 0., 0.],
         [0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 255., 255., 0., 0.],
         [0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 255., 255., 255., 255.],
         [0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 255., 255., 255., 255.],
         [255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0.],
         [255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0.],
         [0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255.],
         [0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255.],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909]],
        Float)

    image = FileImage(filename=resolve_path('tests/testimage.pgm'),
                      xdensity=8,
                      ydensity=8,
                      size=2.0,
                      output_fns=[],
                      bounds=BoundingBox(radius=1.0))

    ps = image.pattern_sampler
    ps.size_normalization = 'fit_longest'
    ps.whole_pattern_output_fns = []

    assert_array_almost_equal(image_array, image())

def test(plotgroup_names):
    import topo
    import param
    assert topo.sim.name == sim_name
    assert topo.sim['V1'].nominal_density == 8
    assert topo.sim.time() == 100

    for name in plotgroup_names:
        print "\n* Testing plotgroups['%s']:" % name

        sheet = topo.sim['V1']
        _reset_views(sheet)
        plotgroups[name]._exec_pre_plot_hooks()

        f = open(resolve_path('tests/%s_t%s_%s.data' % (sim_name, topo.sim.timestr(),
                                                        name.replace(' ', '_'))), 'r')

        try:
            topo_version, previous_views = pickle.load(f)
        ########################################
        except AttributeError:
            # CEBALERT: code here just to support old data file. Should
            # generate a new one so it's no longer necessary.
            from topo.misc.legacy import preprocess_state

            import topo.base.boundingregion

            def _boundingregion_not_parameterized(instance, state):
                for a in ['initialized', '_name_param_value', 'nopickle']:
                    if a in state:
                        del state[a]

            preprocess_state(topo.base.boundingregion.BoundingRegion,
                             _boundingregion_not_parameterized)

            f.seek(0)
            topo_version, previous_views = pickle.load(f)
        ########################################

        f.close()

        if 'sheet_views' in previous_views[sheet.name]:
            previous_sheet_views = previous_views[sheet.name]['sheet_views']
            for view_name in previous_sheet_views:
                assert_array_almost_equal(sheet.sheet_views[view_name].view()[0],
                                          previous_sheet_views[view_name].view()[0],
                                          12)
                print '...' + view_name + ' array is unchanged since data was generated (%s)' % topo_version

        if 'curve_dict' in previous_views[sheet.name]:
            previous_curve_dicts = previous_views[sheet.name]['curve_dict']
            # CB: need to clean up var names, e.g. val
            for curve_name in previous_curve_dicts:
                for other_param in previous_curve_dicts[curve_name]:
                    for val in previous_curve_dicts[curve_name][other_param]:
                        assert_array_almost_equal(sheet.curve_dict[curve_name][other_param][val].view()[0],
                                                  previous_curve_dicts[curve_name][other_param][val].view()[0],
                                                  12)
                        print "...%s %s %s array is unchanged since data was generated (%s)" % (curve_name, other_param, val, topo_version)
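# The AttributeError branch above patches old pickles by registering a state
# preprocessor via topo.misc.legacy.preprocess_state(class, callable).  The
# same pattern could be reused for other legacy classes; this is only a
# hedged sketch -- the target class and the stripped attribute name are
# illustrative, not taken from the real legacy-support module.
from topo.misc.legacy import preprocess_state
import topo.base.sheet

def _sheet_drop_stale_attrs(instance, state):
    for a in ['_stale_cache']:   # hypothetical attribute name
        if a in state:
            del state[a]

preprocess_state(topo.base.sheet.Sheet, _sheet_drop_stale_attrs)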
def test_fit_longest(self):
    """
    Check that the longer image dimension is made to fit the default
    dimension of 1.0, while the other is scaled the same.
    """
    ### Twice the default BoundingBox dimensions, image size of 2.0.
    ### In this case, 8 units represent 1.0 in sheet coordinates.
    image_array = np.array(
        [[ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 0., 0., 0., 0.],
         [ 0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 0., 0., 0., 0.],
         [ 0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 255., 255., 0., 0.],
         [ 0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 255., 255., 0., 0.],
         [ 0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 255., 255., 255., 255.],
         [ 0., 0., 34., 34., 68., 68., 102., 102., 136., 136., 255., 255., 255., 255., 255., 255.],
         [ 255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0.],
         [ 255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0.],
         [ 0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255.],
         [ 0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255., 0., 0., 255., 255.],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909]],
        dtype=np.float)

    image = FileImage(filename=resolve_path('topo/tests/unit/testimage.pgm'),
                      xdensity=8, ydensity=8,
                      size=2.0, output_fns=[],
                      bounds=BoundingBox(radius=1.0))

    ps = image.pattern_sampler
    ps.size_normalization = 'fit_longest'
    ps.whole_pattern_output_fns = []

    assert_array_almost_equal(image_array, image())
def test_stretch_to_fit(self):
    """
    Test that both image dimensions are made to fit 1.0.
    """
    ### 8 units represent 1.0 in sheet coordinates.
    image_array = array(
        [[ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 34., 68., 102., 136., 255., 0., 0., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 34., 68., 102., 136., 255., 0., 0., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 34., 68., 102., 136., 255., 255., 0., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 34., 68., 102., 136., 255., 255., 255., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 34., 68., 102., 136., 255., 255., 255., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 255., 0., 255., 0., 255., 0., 255., 0., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 255., 0., 255., 0., 255., 0., 255., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 0., 255., 0., 255., 0., 255., 0., 255., 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909],
         [ 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909, 96.59090909]])

    image = FileImage(filename=resolve_path('tests/testimage.pgm'),
                      xdensity=8, ydensity=8,
                      output_fns=[],
                      bounds=BoundingBox(radius=1.0))

    ps = image.pattern_sampler
    ps.size_normalization = 'stretch_to_fit'
    ps.whole_pattern_output_fns = []

    assert_array_almost_equal(image_array, image())
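# A side-by-side sketch of the two size_normalization modes exercised by the
# tests above.  The import locations for FileImage and BoundingBox are
# assumptions (they are not shown in this excerpt); the image path and
# densities match the tests.
from param import resolve_path
from topo.base.boundingregion import BoundingBox   # assumed import location
from topo.pattern.image import FileImage           # assumed import location

for mode in ['fit_longest', 'stretch_to_fit']:
    image = FileImage(filename=resolve_path('tests/testimage.pgm'),
                      xdensity=8, ydensity=8, output_fns=[],
                      bounds=BoundingBox(radius=1.0))
    image.pattern_sampler.size_normalization = mode
    image.pattern_sampler.whole_pattern_output_fns = []
    # Both modes render onto the same 16x16 sheet; they differ only in how
    # much of the sheet the image covers.
    print mode, image().shape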
def test(plotgroup_names):
    import topo
    assert topo.sim.name == sim_name
    assert topo.sim['V1'].nominal_density == 8
    assert topo.sim.time() == 100

    failing_tests = []
    for name in plotgroup_names:
        print "\n* Testing plotgroups['%s']:" % name

        sheet = topo.sim['V1']
        _reset_views(sheet)
        plotgroups[name]._exec_pre_plot_hooks()

        filename = resolve_path('tests/data_maptests/%s_t%s_%s.data' % (sim_name,
                                                                        topo.sim.timestr(),
                                                                        name.replace(' ', '_')))
        print "Reading previous results from %s" % (filename)
        f = open(filename, 'r')

        try:
            topo_version, previous_views = pickle.load(f)
        ########################################
        except AttributeError:
            # CEBALERT: code here just to support old data file. Should
            # generate a new one so it's no longer necessary.
            from topo.misc.legacy import preprocess_state

            import topo.base.boundingregion

            def _boundingregion_not_parameterized(instance, state):
                for a in ['initialized', '_name_param_value', 'nopickle']:
                    if a in state:
                        del state[a]

            preprocess_state(topo.base.boundingregion.BoundingRegion,
                             _boundingregion_not_parameterized)

            f.seek(0)
            topo_version, previous_views = pickle.load(f)
        ########################################

        f.close()

        if 'sheet_views' in previous_views[sheet.name]:
            previous_sheet_views = previous_views[sheet.name]['sheet_views']
            for view_name in previous_sheet_views:
                failing_tests += checkclose(sheet.name + " " + view_name,
                                            topo_version,
                                            sheet.views.maps[view_name].top.data,
                                            previous_sheet_views[view_name].view()[0])

        if 'curve_dict' in previous_views[sheet.name]:
            previous_curve_dicts = previous_views[sheet.name]['curve_dict']
            # CB: need to clean up var names, e.g. val
            time, duration = (topo.sim.time(), 1.0)
            for curve_name in previous_curve_dicts:
                for other_param in previous_curve_dicts[curve_name]:
                    other_param_val = unit_value(other_param)[-1]
                    for val in previous_curve_dicts[curve_name][other_param]:
                        new_curves = sheet.views.curves[curve_name.capitalize()]
                        new = new_curves[time, duration,
                                         other_param_val - 0.01:other_param_val + 0.01,
                                         val].values()[0].data
                        old = previous_curve_dicts[curve_name][other_param][val].view()[0]
                        failing_tests += checkclose("%s %s %s %s" % (sheet.name, curve_name, other_param, val),
                                                    topo_version, new, old)

    if failing_tests != []:
        raise AssertionError, "Failed map tests: %s" % (failing_tests)
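# test() above accumulates failures through a checkclose helper defined
# elsewhere in this module.  Its real definition is not shown here; a
# plausible minimal version -- assuming it compares two arrays to a fixed
# precision and returns a list containing the label on failure, so that
# 'failing_tests +=' works -- could look like this (hypothetical sketch):
from numpy.testing import assert_array_almost_equal

def checkclose(label, topo_version, new, old, decimal=12):
    try:
        assert_array_almost_equal(new, old, decimal)
    except AssertionError:
        print "...%s array CHANGED since data was generated (%s)" % (label, topo_version)
        return [label]
    print "...%s array is unchanged since data was generated (%s)" % (label, topo_version)
    return []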
def test_script(script, decimal=None):
    """
    Run script with the parameters specified when its DATA file was
    generated, and check for changes.

    Looks for the DATA file at FIXEDDATADIR/script_name.ty_DATA (for
    data checked into SVN). If not found there, looks at
    TESTSDATADIR/script_name.ty_DATA. If also not found there, first
    generates a new DATA file at TESTSDATADIR/script_name.ty_DATA
    (i.e. to generate new data, delete the existing data before
    running).

    The decimal parameter defines how many decimal places to use when
    testing for array equality. The default of None causes exact
    matching.
    """
    print "Comparing results for %s" % script

    script_name = os.path.basename(script)

    # CEBALERT: clean up
    ensure_path_exists(TESTSDATADIR)
    data_filename_only = script_name + "_DATA"
    data_filename = os.path.join(TESTSDATADIR, data_filename_only)

    try:
        locn = resolve_path(data_filename_only,
                            search_paths=[FIXEDDATADIR, TESTSDATADIR])
    except IOError:
        print "No existing data"
        #_run_in_forked_process(_generate_data,script,data_filename,run_for=RUN_FOR,cortex_density=TRAINTESTS_CORTEXDENSITY,lgn_density=LGN_DENSITY, retina_density=RETINA_DENSITY)
        _generate_data(script, data_filename,
                       run_for=RUN_FOR,
                       cortex_density=TRAINTESTS_CORTEXDENSITY,
                       lgn_density=LGN_DENSITY,
                       retina_density=RETINA_DENSITY)
        locn = resolve_path(data_filename)

    print "Reading data from %s" % locn

    data_file = open(locn, 'rb')
    data = pickle.load(data_file)

    print "Data from release=%s, version=%s" % (data['versions'] if 'versions' in data
                                                else ("unknown", "unknown"))

    # retrieve parameters used when script was run
    run_for = data['run_for']
    look_at = data['look_at']

    ####################################################
    # support very old data files that contain 'density'
    # instead of args['cortex_density']
    if 'args' not in data:
        data['args'] = {'cortex_density': data['density']}

    args = data['args']
    _support_old_args(args)
    ####################################################

    _setargs(args)

    print "Starting '%s'" % script
    execfile(script, __main__.__dict__)

    #########################################################
    time_fmt = topo.sim.timestr
    # support old pickled data (could replace time_fmt(topo.sim.time()) with
    # just topo.sim.timestr() if we didn't need to support old data)
    if topo.sim.timestr(run_for[0]) not in data:
        time_fmt = float
    #########################################################

    for time in run_for:
        print "Running for %s iterations" % time
        topo.sim.run(time)

        if decimal is None:
            assert_array_equal(data[time_fmt(topo.sim.time())],
                               topo.sim[look_at].activity,
                               err_msg="\nAt topo.sim.time()=%d, with decimal=%s"
                                       % (topo.sim.time(), decimal))
        else:
            assert_array_almost_equal(data[time_fmt(topo.sim.time())],
                                      topo.sim[look_at].activity,
                                      decimal,
                                      err_msg="\nAt topo.sim.time()=%d, with decimal=%s"
                                              % (topo.sim.time(), decimal))

    result = "Results from " + script + " have not changed."
    if decimal is not None:
        result += " (%d dp)" % (decimal)
    print result + "\n"
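# Typical calls, following the docstring above.  The script path is purely
# illustrative and not part of the test-suite configuration.
test_script('examples/lissom_oo_or.ty', decimal=6)   # compare to 6 decimal places
test_script('examples/lissom_oo_or.ty')              # exact comparison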
import os

import Image
import ImageDraw
import ImageFont

from colorsys import hsv_to_rgb

import numpy as np

import param
from param import resolve_path

# CEBALERT: can we just use load_default()? Do we even need TITLE_FONT
# at all?
try:
    import matplotlib
    _vera_path = resolve_path(os.path.join(matplotlib.__file__,
                                           'matplotlib/mpl-data/fonts/ttf/Vera.ttf'))
    TITLE_FONT = ImageFont.truetype(_vera_path, 20)
except:
    TITLE_FONT = ImageFont.load_default()


### JCALERT: To do:
### - Update the test file.
### - Write PaletteBitmap when the Palette class is fixed
### - Get rid of accessing function (copy, show...) (should we really?)


class Bitmap(param.Parameterized):
    """
    Wrapper class for the PIL Image class.
def compare_startup_speed_data(script):
    """
    Run and time script with the parameters specified when its
    STARTUPSPEEDDATA file was generated, and check for changes.

    Looks for the STARTUPSPEEDDATA file at
    MACHINETESTSDATADIR/script_name.ty_STARTUPSPEEDDATA. If not found
    there, first generates a new STARTUPSPEEDDATA file at
    MACHINETESTSDATADIR/script_name.ty_STARTUPSPEEDDATA (i.e. to
    generate new data, delete the existing data before running).
    """
    script = script.replace("\\", "\\\\")

    print "Comparing startup speed data for %s" % script

    script_name = os.path.basename(script)

    ensure_path_exists(MACHINETESTSDATADIR)
    data_filename = os.path.join(MACHINETESTSDATADIR, script_name + "_STARTUPSPEEDDATA")

    try:
        locn = resolve_path(data_filename)
    except IOError:
        print "No existing data"
        #_run_in_forked_process(_generate_startup_speed_data,script,data_filename,cortex_density=SPEEDTESTS_CORTEXDENSITY)
        _generate_startup_speed_data(script, data_filename,
                                     cortex_density=SPEEDTESTS_CORTEXDENSITY)
        locn = resolve_path(data_filename)

    print "Reading data from %s" % locn

    speed_data_file = open(locn, 'r')

    try:
        speed_data = pickle.load(speed_data_file)
        print "Data from release=%s, version=%s" % (speed_data['versions'] if 'versions' in speed_data
                                                    else ("unknown", "unknown"))
    except:
        ###############################################################
        ## Support old data files (used to be string in the file rather
        ## than pickle)
        speed_data_file.seek(0)
        speed_data = speed_data_file.readline()
        density, old_time = speed_data.split('=')
        speed_data = {'cortex_density': float(density),
                      'how_long': float(old_time),
                      'args': {}}
        _support_old_args(speed_data['args'])
        ###############################################################

    _setargs(speed_data['args'])

    speed_data_file.close()

    old_time = speed_data['how_long']
    new_time = _time_sim_startup(script)

    percent_change = 100.0 * (new_time - old_time) / old_time

    print "[" + script + ' startup] Before: %2.1f s  Now: %2.1f s  (change=%2.1f s, %2.1f percent)' \
          % (old_time, new_time, new_time - old_time, percent_change)
def __init__(self, dataset_name, **params):
    """
    dataset_name is the path to a folder containing a MANIFEST_json
    (https://docs.python.org/2/library/json.html), which contains a
    description of the dataset. If no MANIFEST_json is present, all
    image files in the specified folder are used.

    Any extra parameter values supplied here will be passed down to
    the feature_coordinators requested in features_to_vary.

    The JSON file can contain any of the following entries; if an
    entry is not present, the default is used:

    :'dataset_name': Name of the dataset (string, default=filepath)

    :'length': Number of images in the dataset (integer,
        default=number of files in the directory matching
        filename_template)

    :'description': Description of the dataset (string, default="")

    :'source': Citation of the paper for which the dataset was created
        (string, default=dataset_name)

    :'filename_template': Path to the images with placeholders
        ({placeholder_name}) for inherent features and the image
        number, e.g. "filename_template": "images/image{i}.png". The
        placeholders are replaced according to placeholder_mapping.
        Alternatively, glob patterns such as * or ? can be used, e.g.
        "filename_template": "images/*.png"
        (default=path_to_dataset_name/*.*)

    :'placeholder_mapping': Dictionary specifying the replacement of
        placeholders in filename_template; the value is used in eval()
        (default={}).

    :'inherent_features': Features for which the corresponding
        feature_coordinators should not be applied
        (default=['sf','or','cr'])

    Currently, the label of the pattern generator ('pattern_label') as
    well as the image number ('current_image') are given as parameters
    to each callable supplied in placeholder_mapping, where
    current_image varies from 0 to length-1 and pattern_label is one
    of the items of pattern_labels. (python code,
    default={'i': lambda params: '%02d' % (params['current_image']+1)})

    Example 1: Suppose there are images without any inherent features,
    named as follows: "images/image01.png", "images/image02.png" and
    so on. Then
        filename_template: "images/image{i}.png" and
        "placeholder_mapping":
        "{'i': lambda params: '%02d' % (params['current_image']+1)}"
    This replaces {i} in the template with the current image number + 1.

    Example 2: Suppose there are image pairs from a stereo webcam,
    named as follows: "images/image01_left.png",
    "images/image01_right.png" and so on.
    If pattern_labels=['Left','Right'], then
        filename_template: "images/image{i}_{dy}" and
        "placeholder_mapping":
        "{'i': lambda params: '%02d' % (params['current_image']+1),
          'dy': lambda params: 'left' if params['pattern_label']=='Left' else 'right'}"
    Here, {dy} is additionally replaced by 'left' if the pattern_label
    is 'Left', and by 'right' otherwise.

    If the directory does not contain a MANIFEST_json file, the
    defaults are as follows:

    :'filename_template': filepath/*.*, where filepath is the path
        given in dataset_name

    :'patterns_per_label': Number of image files in filepath, where
        filepath is the path given in dataset_name

    :'inherent_features': []

    :'placeholder_mapping': {}
    """
    filepath = param.resolve_path(dataset_name, path_to_file=False)
    self.dataset_name = filepath
    self.filename_template = filepath + "/*.*"
    self.description = ""
    self.source = self.dataset_name
    self.placeholder_mapping = {}
    patterns_per_label = len(glob.glob(self.filename_template))
    inherent_features = ['sf', 'cr']

    try:
        filename = param.resolve_path(dataset_name + '/MANIFEST_json')
        filepath = os.path.dirname(filename)
        dataset = json.loads(open(filename).read())

        self.dataset_name = dataset.get('dataset_name', self.dataset_name)
        self.description = dataset.get('description', self.description)
        self.filename_template = dataset.get('filename_template',
                                             self.filename_template)
        patterns_per_label = dataset.get('length',
                                         len(glob.glob(self.filename_template)))
        self.source = dataset.get('source', self.source)
        self.placeholder_mapping = (eval(dataset['placeholder_mapping'])
                                    if 'placeholder_mapping' in dataset
                                    else self.placeholder_mapping)
        inherent_features = dataset.get('inherent_features', inherent_features)
    except IOError:
        pass

    if 'patterns_per_label' not in params:
        # Use the value computed above (from the MANIFEST or the number of
        # matching files) rather than the class default.
        params['patterns_per_label'] = patterns_per_label

    super(PatternCoordinatorImages, self).__init__(inherent_features, **params)
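# Illustrative MANIFEST_json, following the entries documented in the
# docstring above.  All field values here are invented for this sketch; the
# placeholder_mapping value is a string because the code eval()s it.
import json

manifest = {
    "dataset_name": "stereo_webcam",
    "description": "Left/right image pairs from a stereo webcam",
    "source": "Example citation (illustrative)",
    "length": 50,
    "filename_template": "images/image{i}_{dy}.png",
    "placeholder_mapping":
        "{'i': lambda params: '%02d' % (params['current_image']+1), "
        "'dy': lambda params: 'left' if params['pattern_label']=='Left' else 'right'}",
    "inherent_features": ["sf", "cr"]
}

with open('MANIFEST_json', 'w') as f:
    json.dump(manifest, f, indent=4)

# The dataset folder containing this file would then be passed as
# dataset_name to PatternCoordinatorImages.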