Esempio n. 1
0
def load_snapshot(snapshot_name):
    """
    Load the simulation stored in snapshot_name.

    Handles both gzip-compressed and plain pickle files.  If the first
    unpickling attempt fails with ImportError, retries after aliasing
    topo.command.basic to topo.command for old snapshots (see CEBALERT
    below).  Loading is performed purely for its side effects.
    """
    # unpickling the PicklableClassAttributes() executes startup_commands and
    # sets PO class parameters.

    snapshot_name = param.resolve_path(snapshot_name)

    # If it's not gzipped, open as a normal file.
    try:
        snapshot = gzip.open(snapshot_name, 'r')
        # Force one read so a non-gzip file raises IOError here rather
        # than during unpickling, then rewind for the real pass.
        snapshot.read(1)
        snapshot.seek(0)
    except (IOError, NameError):
        # NameError also covers the case where the gzip module itself
        # was not importable.
        snapshot = open(snapshot_name, 'r')

    try:
        # The return value is deliberately discarded; unpickling has
        # the side effects described in the docstring.
        pickle.load(snapshot)
    except ImportError:
        # CEBALERT: Support snapshots where the unpickling support
        # (UnpickleEnvironmentCreator) cannot be found because the
        # support itself was moved from topo.command.basic to
        # topo.command.__init__! Was it a bad idea to have the support
        # code loaded via an object?
        sys.modules['topo.command.basic'] = topo.command
        # Could instead set find_global on cPickle.Unpickler (could
        # support all import changes that way, as alternative to what
        # we currently do), but I'm reluctant to mess with cPickle's
        # default way of finding things. (Also it would be specific to
        # cPickle; would be different for pickle.)

        snapshot.seek(0)
        try:
            pickle.load(snapshot)
        except:
            # Deliberate catch-all: a failed load should warn rather
            # than abort, since partial state may still be usable.
            import traceback

            m = """
            Snapshot could not be loaded.

            Please file a support request via topographica.org.

Loading error:
%s
            """ % traceback.format_exc()

            param.Parameterized(name="load_snapshot").warning(m)

    snapshot.close()

    # Restore subplotting prefs without worrying if there is a
    # problem (e.g. if topo/analysis/ is not present)
    try:
        from topo.analysis.featureresponses import Subplotting
        Subplotting.restore_subplots()
    except:
        p = param.Parameterized(name="load_snapshot")
        p.message("Unable to restore Subplotting settings")
Esempio n. 2
0
    def setUp(self):
        """
        Create two TestPO instances whose 'osp' parameters share the
        same list of selectable objects, and a ParametersFrameWithApply
        editing the first instance.
        """
        self.some_pos = [param.Parameterized(), param.Parameterized(),
                         param.Parameterized()]

        self.testpo1 = TestPO()
        self.testpo1.params()['osp'].objects = self.some_pos

        self.testpo2 = TestPO()
        # BUGFIX: this line previously re-assigned testpo1's objects a
        # second time, leaving testpo2's 'osp' parameter with no
        # selectable objects at all.
        self.testpo2.params()['osp'].objects = self.some_pos

        self.toplevel = Tkinter.Toplevel()
        self.f = tk.ParametersFrameWithApply(self.toplevel, self.testpo1)
Esempio n. 3
0
def matrix_hsv_to_rgb(hMapArray, sMapArray, vMapArray):
    """
    Convert matching hue, saturation, and value matrices into red,
    green, and blue matrices.

    The three input matrices must all be the same size and already
    normalized to 1.  They are copied on entry, so there are no
    side effects on the caller's arrays.
    """
    rows, cols = hMapArray.shape

    # Work on copies so the originals are never modified.
    red = array(hMapArray, Float)
    green = array(sMapArray, Float)
    blue = array(vMapArray, Float)

    ## This warning should never be seen: it means the calling code did
    ## not take the precaution of clipping the input matrices.
    if max(red.ravel()) > 1 or max(green.ravel()) > 1 or max(blue.ravel()) > 1:
        param.Parameterized().warning(
            'HSVBitmap inputs exceed 1. Clipping to 1.0')
        if max(red.ravel()) > 0:
            red = clip(red, 0.0, 1.0)
        if max(green.ravel()) > 0:
            green = clip(green, 0.0, 1.0)
        if max(blue.ravel()) > 0:
            blue = clip(blue, 0.0, 1.0)

    # Convert every pixel in place.  Plain nested loops are used here
    # because list comprehensions measured slower for this conversion.
    for row in range(rows):
        for col in range(cols):
            r, g, b = hsv_to_rgb(red[row, col], green[row, col],
                                 blue[row, col])
            red[row, col] = r
            green[row, col] = g
            blue[row, col] = b

    return (red, green, blue)
Esempio n. 4
0
def make_template_plot(channels,
                       sheet_views,
                       density=None,
                       plot_bounding_box=None,
                       normalize='None',
                       name='None',
                       range_=False):
    """
    Factory function for constructing a Plot object whose type is not
    yet known.

    Tries each TemplatePlot subclass in turn and returns the first one
    that yields a usable bitmap (or any plot at all when range_ is
    None, which indicates that only the value range is being computed).
    See TemplatePlot.__init__ for a description of the arguments.
    """
    if _sane_plot_data(channels, sheet_views):
        for plot_class in (SHCPlot, RGBPlot, PalettePlot, MultiOrPlot):
            candidate = plot_class(channels,
                                   sheet_views,
                                   density,
                                   plot_bounding_box,
                                   normalize,
                                   name=name,
                                   range_=range_)
            # range_ is None means we're calculating the range.
            if candidate.bitmap is not None or range_ is None:
                return candidate

    param.Parameterized(name="make_template_plot").verbose(
        'No', name, 'plot constructed for this Sheet')
    return None
Esempio n. 5
0
    def install(snapshot_release, snapshot_version=None):
        """
        Resolve the snapshot's numeric version, then apply external
        patches plus any legacy support that version requires.
        """
        version = _get_version(snapshot_release, snapshot_version)

        param.Parameterized().debug("Snapshot is from release %s (r%s)"%(snapshot_release,version))

        SnapshotSupport.apply_external_patches()
        SnapshotSupport.apply_support(version)
def _get_version(snapshot_release, snapshot_version):
    """
    Map a snapshot's declared release and version onto a single integer
    version, falling back to the release's own version number when the
    declared version cannot be parsed.
    """
    detected = None

    if snapshot_version is not None:
        try:
            # SVN-style strings may carry ":rev" or "M" suffixes.
            candidate = snapshot_version.split(":")[0].split("M")[0]
        except AttributeError:
            # A tuple means the version came from git.
            candidate = "%02d%02d%02d%05d" % snapshot_version

        if candidate:
            try:
                detected = int(candidate)
            except ValueError:
                pass

    if detected is None:
        detected = releases[snapshot_release]
        param.Parameterized().debug(
            "No version could be detected for this snapshot; assuming version of release %s (i.e. %s)."
            % (snapshot_release, detected))

    return detected
Esempio n. 7
0
def test_server_session_info():
    """
    Serve a Markdown pane and verify that state.session_info tracks the
    session through creation, initialization, and destruction.
    """
    with config.set(session_history=-1):
        html = Markdown('# Title')

        server = serve(html, port=5009, threaded=True, show=False)

        # BUGFIX: stop the server in a finally block; previously a
        # failing assertion leaked the running server (and a stale
        # state.curdoc) into subsequent tests.
        try:
            # Wait for server to start
            time.sleep(1)

            requests.get("http://localhost:5009/")

            assert state.session_info['total'] == 1
            assert len(state.session_info['sessions']) == 1
            sid, session = list(state.session_info['sessions'].items())[0]
            assert session['user_agent'].startswith('python-requests')
            assert state.session_info['live'] == 0

            # Fake a session context on the served document so that
            # _init_session registers the session as live.
            doc = list(html._documents.keys())[0]
            session_context = param.Parameterized()
            session_context._document = doc
            session_context.id = sid
            doc._session_context = session_context
            state.curdoc = doc
            state._init_session(None)
            assert state.session_info['live'] == 1
        finally:
            server.stop()
            state.curdoc = None

    html._server_destroy(session_context)
    assert state.session_info['live'] == 0
Esempio n. 8
0
def test_template_session_destroy(document, comm):
    """
    Check that firing the document's session-destroyed callbacks
    releases every bokeh model created by the template's panels.
    """
    tmpl = Template(template)

    slider = FloatSlider()
    layout = Row('A', 'B')

    tmpl.add_panel('A', slider)
    tmpl.add_panel('B', layout)

    tmpl._init_doc(document, comm, notebook=True)
    session_context = param.Parameterized()
    session_context._document = document
    session_context.id = 'Some ID'

    # Before destruction each component is rendered into two models.
    for component in (slider, layout, layout[0], layout[1]):
        assert len(component._models) == 2

    for callback in document.session_destroyed_callbacks:
        callback(session_context)

    # After destruction all models must have been cleaned up.
    for component in (slider, layout, layout[0], layout[1]):
        assert len(component._models) == 0
Esempio n. 9
0
 def find_module(self, fullname, path=None):
     """
     PEP 302 finder hook: hand back a faker loader for the aliased
     module name, or None so the normal import machinery proceeds.
     """
     if fullname != self.fullname:
         return None
     param.Parameterized().message("%s imported as %s"%(self.module.__name__,self.fullname))
     faker = _ModuleFaker(self.module)
     faker.path = path
     return faker
Esempio n. 10
0
    def test_object_selector_parameter(self):
        """
        Test that ObjectSelectorParameter representation works.

        Verifies the string<->object translators in both directions,
        that repacking after extending the range picks up new objects,
        and that a simulated GUI selection updates the attribute.
        """
        some_pos = [
            param.Parameterized(name='cat'),
            param.Parameterized(name='rat'),
            param.Parameterized(name='bat')
        ]
        osp_param = self.f.get_parameter_object('osp')

        osp_param.objects = some_pos
        #self.f.r.default = some_pos[0]

        self.f.pack_param(
            'osp'
        )  # have to pack AFTER populating range for OptionMenu widget to work (see ALERT in tkparameterizedobject.py)

        # (otherwise, could do the following:
        ##         f = SomeFrame(Toplevel())
        ##         f.pack_param('r')
        ##         f.initialize_ranged_parameter('r',
        ##                                       [Parameterized(name='cat'),Parameterized(name='rat'),Parameterized(name='bat')])

        # string -> object translation must recover the exact instances.
        self.assertEqual(self.f.translators['osp'].string2object('cat'),
                         some_pos[0])
        self.assertEqual(self.f.translators['osp'].string2object('rat'),
                         some_pos[1])
        self.assertEqual(self.f.translators['osp'].string2object('bat'),
                         some_pos[2])

        # Extend the range with a new object after the initial pack.
        gnat = param.Parameterized(name='gnat')
        osp_param.objects.append(gnat)

        self.f.unpack_param('osp')
        self.f.pack_param(
            'osp')  # again, note the need to pack after updating range.
        ##         self.f.initialize_ranged_parameter('r',Parameterized)
        self.assertEqual(self.f.translators['osp'].string2object('gnat'), gnat)

        # object -> string translation (reverse direction).
        self.assertEqual(self.f._object2string('osp', some_pos[0]), 'cat')
        self.assertEqual(self.f._object2string('osp', some_pos[1]), 'rat')
        self.assertEqual(self.f._object2string('osp', some_pos[2]), 'bat')

        # Simulate a GUI set
        self.f._tkvars['osp'].set('bat')
        self.assertEqual(self.f.osp, some_pos[2])
Esempio n. 11
0
    def apply_support(version_to_support):
        """
        Apply every registered legacy-support patch whose version is
        newer than the given snapshot version, from oldest to newest.

        version_to_support: integer version of the loaded snapshot.
        """
        global support

        # apply oldest to newest
        # BUGFIX: the previous code sliced the sorted keys with [::-1],
        # iterating newest-first and contradicting the comment above;
        # patches must be applied in ascending version order so later
        # patches can build on earlier ones.
        for version in sorted(support.keys()):
            if version_to_support < version:
                param.Parameterized().message("Applying legacy support for change r%s"%version)
                support[version]()
Esempio n. 12
0
def default_input_sheet():
    """Returns the first GeneratorSheet defined, for use as a default value."""
    candidates = topo.sim.objects(GeneratorSheet).values()
    if len(candidates) < 1:
        raise ValueError("Unable to find a suitable input sheet.")
    chosen = candidates[0]
    if len(candidates) > 1:
        # Tell the user an arbitrary choice was made among several.
        param.Parameterized().message("Using input sheet %s." % chosen.name)
    return chosen
Esempio n. 13
0
 def find_module(self, fullname, path=None):
     """
     PEP 302 finder hook: intercept imports of gmpy (and any of its
     submodules) and return a faker, since gmpy itself is unavailable.
     """
     if not (fullname == 'gmpy' or fullname.startswith('gmpy.')):
         return None
     import param
     param.Parameterized().warning(
         'Module "gmpy" is not available. gmpy.mpq is provided by using fixedpoint.FixedPoint.'
     )
     faker = gmpyFaker()
     faker.path = path
     return faker
Esempio n. 14
0
def default_measureable_sheet():
    """Returns the first sheet for which measure_maps is True (if any), or else the first sheet, for use as a default value."""

    candidates = [s for s in topo.sim.objects(Sheet).values()
                  if hasattr(s, 'measure_maps') and s.measure_maps]
    if len(candidates) < 1:
        # No sheet advertises measure_maps; fall back to every sheet.
        candidates = [s for s in topo.sim.objects(Sheet).values()]
    if len(candidates) < 1:
        raise ValueError("Unable to find a suitable measureable sheet.")
    chosen = candidates[0]
    if len(candidates) > 1:
        param.Parameterized().message("Using sheet %s." % chosen.name)
    return chosen
Esempio n. 15
0
    def test_params(self):
        """Basic tests of params() method."""

        # CB: test not so good because it requires changes if params
        # of PO are changed
        base_params = param.Parameterized.param.params()
        assert 'name' in base_params
        assert len(base_params) in [1, 2]

        ## check for bug where subclass Parameters were not showing up
        ## if params() already called on a super class.
        subclass_params = TestPO.param.params()
        assert 'inst' in subclass_params
        assert 'notinst' in subclass_params

        ## check caching
        assert param.Parameterized.param.params() is param.Parameterized().param.params(), "Results of params() should be cached." # just for performance reasons
Esempio n. 16
0
    def install(snapshot_release, snapshot_version=None):
        """
        Resolve the snapshot's numeric version, then apply external
        patches plus any legacy support that version requires.
        """
        # CEB: I think there's no simple way to tell what "version" of
        # Topographica a snapshot comes from. When you're running
        # Topographica from svn, you can try topo.version, but you'll
        # get things like 11499:11503 or 11499M. If you use git,
        # you'll see "exported". Therefore, we can't always have
        # fine-grained control over what's loaded. We can at least use
        # the release number for coarse-grained control, though.
        version = _get_version(snapshot_release, snapshot_version)

        param.Parameterized().debug("Snapshot is from release %s (r%s)" %
                                    (snapshot_release, version))

        SnapshotSupport.apply_external_patches()
        SnapshotSupport.apply_support(version)
Esempio n. 17
0
def _find_version():
    """
    Return the version tuple, the release number, the git commit, and
    whether reading pickle files is allowed (False if no version
    information available).
    """

    # Assume pickles are allowed until we fail to find version info.
    pickle_allowed = True
    git_output = "v0.0.0-0-"

    (basepath, _) = os.path.split(os.path.abspath(__file__))

    try:
        # Ask git for a human-readable description of this checkout.
        git_process = Popen(["git", "describe"],
                            stdout=PIPE,
                            stderr=PIPE,
                            cwd=basepath)
        git_output = git_process.communicate()[0].strip()
        # Nonzero exit status means "git describe" failed; treat it the
        # same as git being unavailable.
        if git_process.poll():
            raise OSError
    # NOTE(review): Python 2 syntax -- this binds the caught OSError
    # instance to the name CalledProcessError instead of also catching
    # CalledProcessError; apparently deliberate (see pyflakes comment)
    # so the name need not exist on old Python versions.
    except OSError, CalledProcessError:  #pyflakes:ignore (has to do with Python versions for CalledProcessError)
        try:
            # Official releases ship the version in a .release file.
            release_file = open(basepath + "/.release")
            git_output = release_file.read()
            release_file.close()
        except IOError:
            param.Parameterized().warning("""\
Unable to determine the version information for this copy of Topographica.

For an official release, the version information is stored in a file
named topo/.release.  For a development copy checked out from Git, the
version is requested using "git describe".  Neither of these options
was successful, perhaps because Git is not available on this machine.
To work around this problem, either install Git on this machine, or
temporarily use a machine that does have Git and run "topographica
make-release-file", making sure you have write permissions on
Topographica's root directory.

In the meantime, reading and saving snapshots will be disabled,
because version information is necessary for determining how to
interpret saved files.\n\n""")
            # No version info at all: disable snapshot reading/saving.
            pickle_allowed = False
            git_output = "v0.0.0-0-"
Esempio n. 18
0
def _get_version(snapshot_release, snapshot_version):
    """
    Try to determine a single numerical version for use in looking up
    patches in the support dictionary, given a snapshot's declared
    release and version.

    Because of the variety of different version formats that have been
    in use over the different version-control systems over the years,
    it's not always possible to make such a mapping.  E.g. versions
    controlled by SVN would normally return topo.version like 11499,
    which is clear, but also sometimes 11499:11503 or 11499M.
    Versions from git checkouts of svn source would just say
    "exported", while native git versions will have a four-tuple.  If
    nothing else works, the numerical version associated with the
    stated release is used.
    """
    detected = None

    if snapshot_version is not None:
        try:
            # SVN-style strings may carry ":rev" or "M" suffixes.
            candidate = snapshot_version.split(":")[0].split("M")[0]
        except AttributeError:
            # A tuple means the version came from a native git checkout.
            candidate = version_int(snapshot_version)

        # Accept the candidate only if it is non-empty and numeric.
        if candidate:
            try:
                detected = int(candidate)
            except ValueError:
                pass

    if detected is None:
        detected = releases[snapshot_release]
        param.Parameterized().debug(
            "No version could be detected for this snapshot; assuming version of release %s (i.e. %s)."
            % (snapshot_release, detected))

    return detected
Esempio n. 19
0
def _get_version(snapshot_release, snapshot_version):
    """
    Map a snapshot's declared release and version onto a single integer
    version, falling back to the release's own version number when the
    declared version cannot be parsed.

    snapshot_version may be an SVN-style string (possibly carrying
    ":rev" or "M" suffixes) or, for native git snapshots, a tuple.
    """
    found_version = False

    if snapshot_version is not None:
        try:  # to detect the passed version
            snapshot_version = snapshot_version.split(":")[0]
            snapshot_version = snapshot_version.split("M")[0]
        except AttributeError:
            # BUGFIX: tuple versions (from git) previously escaped this
            # function as an uncaught AttributeError; convert them the
            # same way the sibling implementation does.
            snapshot_version = "%02d%02d%02d%05d" % snapshot_version

        if len(snapshot_version) > 0:
            try:
                snapshot_version = int(snapshot_version)
                found_version = True
            except ValueError:
                pass

    if not found_version:
        snapshot_version = releases[snapshot_release]
        param.Parameterized().debug(
            "No version could be detected for this snapshot; assuming version of release %s (i.e. %s)."
            % (snapshot_release, snapshot_version))

    return snapshot_version
Esempio n. 20
0
def exec_startup_files():
    """
    Execute startup files.

    Linux/UNIX/OS X: ~/.topographicarc
    Windows: %USERPROFILE%\\topographica.ini

    (Note: the backslash is escaped so the docstring does not contain
    a literal tab character.)
    """
    # From Bilal: On OS X, ~/Library/Preferences/ is the standard path
    # for user-defined params. The filename format (corresponding to
    # .ini on windows) is org.topographica.plist, where a plist is an
    # XML file. But, many shell-based programs follow the Unix
    # convention, so we should be fine doing that.

    # Linux/UNIX/OS X:
    rcpath = os.path.join(os.path.expanduser("~"), '.topographicarc')
    # Windows (ini is convention, and can be double clicked to edit):
    inipath = os.path.join(os.path.expandvars("$USERPROFILE"),
                           'topographica.ini')

    # Execute whichever startup files exist in the main namespace.
    for startup_file in (rcpath, inipath):
        if os.path.exists(startup_file):
            print "Executing user startup file %s" % (startup_file)
            execfile(startup_file, __main__.__dict__)

    #####
    # CEBALERT: locations we used to use on Windows and OS X. Should
    # remove after 0.9.8.
    # application data on windows
    inipath = os.path.join(os.path.expandvars("$APPDATA"), 'Topographica',
                           'topographica.ini')
    # application support on OS X
    configpath = os.path.join(os.path.expanduser("~"), "Library",
                              "Application Support", 'Topographica',
                              'topographica.config')
    # Old locations are only warned about, never executed.
    for startup_file in (configpath, inipath):
        if os.path.exists(startup_file):
            param.Parameterized().warning(
                "Ignoring %s; location for startup file is %s (UNIX/Linux/Mac OS X) or %s (Windows)."
                % (startup_file, rcpath, inipath))
Esempio n. 21
0
def _tkinter_report_exception(widget):
    """
    Report the current exception on the nearest status bar.

    Walks up the widget hierarchy looking for a 'status' or
    'messageBar' attribute to display the message on, falling back to
    the main GUI's message bar; also warns via param and prints the
    full traceback to the console.
    """
    # BUGFIX: use sys.exc_info() instead of the long-deprecated and
    # non-thread-safe sys.exc_type/sys.exc_value globals (which were
    # removed entirely in Python 3).
    exc, val = sys.exc_info()[:2]
    msg = "(%s) %s"%(exc.__name__,val)
    # If the supplied widget has no master, it's probably the Tk
    # instance. In that case, resort to the 'last-one-set' hack (see
    # CEBALERT "provide a way of allowing other gui components" in
    # topo/param/tk.py).
    if not widget.master:
        widget = tk._last_one_set

    stat = None

    while (widget is not None and widget.master):
        # CEBALERT: should rename all status bars to the same thing
        # (status_bar)
        if hasattr(widget,'status'):
            stat = widget.status
            break
        elif hasattr(widget,'messageBar'):
            stat = widget.messageBar
            break
        widget = widget.master

    if stat is not None:
        stat.error('%s'%msg)
    else:
        topo.guimain.messageBar.error('%s'%msg)

    # BK-NOTE: Default is now to display full trace always. Any user
    # errors should be caught as special exception cases

    # BK-ALERT: Want to raise errors vs print, however this currently crashes ipython.
    #raise

    param.Parameterized().warning(msg)
    import traceback
    traceback.print_exc()
def pattern_present(inputs=None,
                    duration=1.0,
                    plastic=False,
                    overwrite_previous=False,
                    apply_output_fns=True):
    """
    Present the specified test patterns for the specified duration.

    Given a set of input patterns (dictionary of
    GeneratorSheetName:PatternGenerator pairs), installs them into the
    specified GeneratorSheets, runs the simulation for the specified
    length of time, then restores the original patterns and the
    original simulation time.  Thus this input is not considered part
    of the regular simulation, and is usually for testing purposes.

    As a special case, if 'inputs' is just a single pattern, and not
    a dictionary, it is presented to all GeneratorSheets.

    If a simulation is not provided, the active simulation, if one
    exists, is requested.

    If this process is interrupted by the user, the temporary patterns
    may still be installed on the retina.

    If overwrite_previous is true, the given inputs overwrite those
    previously defined.

    If plastic is False, overwrites the existing values of Sheet.plastic
    to disable plasticity, then reenables plasticity.

    In order to see the sequence of values presented, use the back arrow
    history mechanism in the GUI. Note that the GUI's Activity window must
    be open and the display parameter set to true (display=True).
    """
    # BUGFIX: the default used to be a shared mutable dict ({}); a None
    # sentinel avoids the mutable-default-argument pitfall while
    # preserving the old no-inputs behavior.
    if inputs is None:
        inputs = {}

    # ensure EPs get started (if pattern_present is called before the simulation is run())
    topo.sim.run(0.0)

    if not overwrite_previous:
        save_input_generators()

    if not plastic:
        # turn off plasticity everywhere
        for sheet in topo.sim.objects(Sheet).values():
            sheet.override_plasticity_state(new_plasticity_state=False)

    if not apply_output_fns:
        # Temporarily disable output functions on measurable sheets.
        for each in topo.sim.objects(Sheet).values():
            if hasattr(each, 'measure_maps'):
                if each.measure_maps:
                    each.apply_output_fns = False

    # Register the inputs on each input sheet
    generatorsheets = topo.sim.objects(GeneratorSheet)
    if not isinstance(inputs, dict):
        # Single pattern: present it on every GeneratorSheet.
        for g in generatorsheets.values():
            g.set_input_generator(inputs)
    else:
        for each in inputs.keys():
            # 'in' replaces the Python-2-only dict.has_key().
            if each in generatorsheets:
                generatorsheets[each].set_input_generator(inputs[each])
            else:
                param.Parameterized().warning(
                    '%s not a valid Sheet name for pattern_present.' % each)

    topo.sim.event_push()
    # CBENHANCEMENT: would be nice to break this up for visualizing motion
    topo.sim.run(duration)
    topo.sim.event_pop()

    # turn sheets' plasticity and output_fn plasticity back on if we turned it off before

    if not plastic:
        for sheet in topo.sim.objects(Sheet).values():
            sheet.restore_plasticity_state()

    if not apply_output_fns:
        for each in topo.sim.objects(Sheet).values():
            each.apply_output_fns = True

    if not overwrite_previous:
        restore_input_generators()
 def setUp(self):
     """Create a quiet Parameterized instance for the override tests."""
     super(TestParamOverrides, self).setUp()
     self.po = param.Parameterized(name='A',print_level=0)
Esempio n. 24
0
__version__ = "$Revision$"

import param
from topo.base.sheet import Sheet
from topo.misc.ptz import PTZ
from topo.base.arrayutil import array_argmax

import numpy as np
from PIL import Image

try:
    import opencv

except ImportError:
    param.Parameterized().warning(
        "ptztracker.py classes will not be usable; python-opencv is not available."
    )


class PtzTracker(Sheet):
    """
    Given an incoming Activity pattern, find the brightest pixel and
    output an activity pattern where all but this pixel is set to zero.
    Also controls a pan/tilt/zoom camera, instructing it to move so that
    the brightest pixel will be in the center of the sheet.
    """

    dest_ports = ['Activity']
    src_ports = ['Activity']

    ratio = param.Number(
Esempio n. 25
0
Before importing this file, you will probably want to do something
like:

  from matplotlib import rcParams
  rcParams['backend']='TkAgg'

to select a backend, or else select an appropriate one in your
matplotlib.rc file (if any).  There are many backends available for
different GUI or non-GUI uses.
"""

try:
    from matplotlib import pylab as plt
except ImportError:
    import param
    param.Parameterized(name=__name__).warning(
        "Could not import matplotlib; module will not be useable.")
    from topo.command import ImportErrorRaisingFakeModule
    plt = ImportErrorRaisingFakeModule(
        "matplotlib")  # pyflakes:ignore (try/except import)

import param

import numpy as np
from numpy.fft.fftpack import fft2
from numpy.fft.helper import fftshift

import topo
from topo.base.sheet import Sheet
from topo.base.arrayutil import wrap

from topo.plotting.plot import make_template_plot
Esempio n. 26
0
def featuremapper_legacy():
    """
    Legacy support for snapshots saved before 90800300.

    Replaces the removed PatternPresenter class with a stub, registers
    parameter renames and moves for the measurement commands, deletes
    parameters that no longer exist, and converts old sheet_views /
    curve_dict attributes into the holoviews-based views structure.
    """
    # Replace PatternPresenter objects with stub
    import topo.analysis.featureresponses
    import param
    class PatternPresenter(param.Parameterized):
        def __init__(self):
            pass
    topo.analysis.featureresponses.PatternPresenter = PatternPresenter

    # Move parameters and change them if necessary
    duration_lambda = lambda x: param.List(default=[x.default], doc="""
        If non-None, pattern_response_fn.duration will be set to this value.""")
    apply_output_fns_lambda = lambda x: {'apply_output_fns': x.default}

    name_changes = PicklableClassAttributes.param_name_changes

    mrc_name_changes = name_changes.get(
        'topo.analysis.featureresponses.MeasureResponseCommand',{})
    mrc_name_changes.update(
        {'duration': ('durations', duration_lambda)})
    name_changes['topo.analysis.featureresponses.MeasureResponseCommand']=mrc_name_changes

    mrc_moves = PicklableClassAttributes.param_moves.get(
        'topo.analysis.featureresponses.MeasureResponseCommand',{})
    mrc_moves.update({'apply_output_fns':
                          ('topo.analysis.featureresponses.FeatureResponses','cmd_overrides',apply_output_fns_lambda)})
    PicklableClassAttributes.param_moves['topo.analysis.featureresponses.MeasureResponseCommand'] = mrc_moves

    fcc_name_changes = name_changes.get(
        'topo.analysis.featureresponses.FeatureCurveCommand',{})
    fcc_name_changes.update(
        {'duration': ('durations', duration_lambda)})
    name_changes['topo.analysis.featureresponses.FeatureCurveCommand']=fcc_name_changes

    fcc_moves = PicklableClassAttributes.param_moves.get(
        'topo.analysis.featureresponses.FeatureCurveCommand',{})
    fcc_moves.update({'apply_output_fns':
                          ('topo.analysis.featureresponses.FeatureCurves','cmd_overrides',apply_output_fns_lambda)})
    PicklableClassAttributes.param_moves['topo.analysis.featureresponses.FeatureCurveCommand'] = fcc_moves

    ppc_name_changes = name_changes.get(
        'topo.analysis.featureresponses.PatternPresentingCommand',{})
    ppc_name_changes.update(
        {'duration': ('durations', duration_lambda),
         'sheet_views_prefix': 'measurement_prefix'})
    name_changes['topo.analysis.featureresponses.PatternPresentingCommand']=ppc_name_changes

    # Move enable_fullmatrix parameter
    fr_name_changes = name_changes.get(
        'topo.analysis.featureresponses.FeatureResponses',{})
    fr_name_changes.update(
        {'enable_fullmatrix':'store_fullmatrix'})
    name_changes['topo.analysis.featureresponses.FeatureResponses']=fr_name_changes

    # BUGFIX: a second block identical to the enable_fullmatrix update
    # above (mislabelled "Measurement Prefix") was removed here; it
    # repeated the same update verbatim and had no effect.

    # Delete old parameters
    old_cmd_params = ('display', 'pattern_presenter', 'generator_sheets',
                      'input_sheet', 'sheet')
    # BUGFIX: 'FeatureCurves' and 'FeatureMaps' previously mapped to
    # bare parenthesized strings, not one-element tuples (the trailing
    # comma was missing), so their values were strings rather than
    # collections of parameter names.
    param_no_restore = {'MeasureResponseCommand': old_cmd_params,
                        'FeatureCurveCommand': old_cmd_params,
                        'ProjectionSheetMeasurementCommand' : old_cmd_params,
                        'SingleInputResponseCommand': old_cmd_params,
                        'measure_rfs': ('sampling_interval', 'sampling_area'),
                        'unit_tuning_curve': ('x_axis', 'sheet'),
                        'ReverseCorrelation': old_cmd_params,
                        'FeatureCurves': ('post_collect_responses_hook',),
                        'FeatureMaps': ('sheet_views_prefix',),
                        'measure_latency_preference': old_cmd_params,
                        'PositionMeasurementCommand': old_cmd_params,
                        'measure_corner_or_pref': old_cmd_params,
                        'measure_orientation_contrast': old_cmd_params,
                        'PatternPresenter': ('contrast_parameter', 'divisions',
                                             'generator_sheets', 'apply_output_fns',
                                             'duration'),
                        'measure_or_tuning_fullfield': old_cmd_params,
                        'UnitCurveCommand': old_cmd_params,
                        'measure_frequency_preference': old_cmd_params,
                        'SinusoidalMeasureResponseCommand': old_cmd_params,
                        'measure_log_frequency_preference': old_cmd_params}
    PicklableClassAttributes.deleted_params.update(param_no_restore)

    # Convert old sheet_views and curve_dict
    from topo.misc.attrdict import AttrDict
    from topo.base.sheet import Sheet
    from holoviews import Matrix, NdMapping

    def _set_sheet_views(instance, state):
        # Rebuild the holoviews-based 'views' structure on the sheet's
        # simulation from the pickled sheet_views/curve_dict entries.
        if state['simulation'] is None:
            return None
        name = state['_name_param_value']
        if not hasattr(state['simulation'], 'views'):
            state['simulation'].views = AttrDict()
        if name not in state['simulation'].views:
            if hasattr(instance, 'views'):
                state['views'] = instance.views
            else:
                state['views'] = AttrDict()
            state['simulation'].views[name] = state['views']
        views = state['views']
        views['maps'] = AttrDict()
        views['cfs'] = AttrDict()
        views['rfs'] = AttrDict()
        views['curves'] = AttrDict()
        if 'sheet_views' in state:
            svs = state['sheet_views']
            for key, sv in svs.items():
                data, bounds = sv.view()
                new_sv = Matrix(data, bounds)
                metadata = dict(dimension_labels=['Time'])
                metadata_names = ['cyclic_range', 'precedence',
                                  'row_precedence', 'src_name']
                for p in metadata_names:
                    if hasattr(sv, p):
                        metadata[p] = getattr(sv, p)
                state['views'].maps[key] = NdMapping((sv.timestamp, new_sv),
                                                  **metadata)
        if 'curve_dict' in state:
            old_curves = state['curve_dict']
            curves = views['curves']
            for key, value in old_curves.items():
                key = key.capitalize()
                for label, item in value.items():
                    labels = unit_value(label)
                    label_name = labels[0].split(' ')[0]
                    l_val = labels[-1]
                    if key not in views['curves']:
                        curves[key] = NdMapping(dimension_labels=['Time'])
                    for f_val, old_sv in item.items():
                        timestamp = old_sv.timestamp
                        curves[key][timestamp] = NdMapping(dimension_labels=[label_name])
                        if l_val not in curves[key][timestamp].keys():
                            curves[key][timestamp] [l_val] = NdMapping(dimension_labels=[key],
                                                                       label=label,
                                                                       timestamp=old_sv.timestamp)
                        data, bounds = old_sv.view()
                        sv = Matrix(data, bounds)
                        curves[key][timestamp][l_val][f_val] = sv
        state.pop('curve_dict', None)
        state.pop('sheet_views', None)

    preprocess_state(Sheet, _set_sheet_views)

    param.Parameterized().warning('Legacy code does not guarantee all '
        'measurement parameters have been restored. Make sure measurements are '
        'still set up correctly.')
Esempio n. 27
0
import param
import os

from . import TimeSeries, Spectrogram, PowerSpectrum

from numpy import arange, array, ceil, complex64, cos, exp, fft, flipud, \
        float64, floor, hanning, hstack, log, log10, logspace, multiply, \
        nonzero, ones, pi, reshape, shape, size, sqrt, sum, tile, zeros

# Optional dependency: audiolab is only needed by the audio classes below,
# so a missing package produces a warning rather than an ImportError,
# letting the rest of the module be imported normally.
try:
    import scikits.audiolab as audiolab

except ImportError:
    param.Parameterized().warning(
        "audio.py classes will not be usable because scikits.audiolab is not available."
    )


class AudioFile(TimeSeries):
    """
    Requires an audio file in any format accepted by audiolab (wav, aiff, flac).
    """

    time_series = param.Array(precedence=(-1))
    sample_rate = param.Number(precedence=(-1))

    filename = param.Filename(default='sounds/complex/daisy.wav',
                              doc="""
        File path (can be relative to Param's base path) to an audio file. 
        The audio can be in any format accepted by audiolab, e.g. WAV, AIFF, or FLAC."""
Esempio n. 28
0
    def __call__(self, inputs={}, outputs=[], **params_to_override):
        """
        Present the given inputs on the simulation's GeneratorSheets and
        run the simulation for each duration in p.durations.

        inputs may be a single pattern (applied to every GeneratorSheet)
        or a dict mapping GeneratorSheet name -> pattern.  outputs names
        the Sheets/Projections whose activity should be recorded; when
        empty, all Sheets and Projections are used.

        Returns a dict mapping (output_name, time) -> a copy of that
        Sheet's or Projection's activity array; the dict is empty unless
        p.return_responses is set.

        NOTE: the mutable defaults (inputs={}, outputs=[]) are safe here
        because neither argument is mutated -- they are only read or
        rebound -- but callers should still prefer passing explicit values.
        """
        p = ParamOverrides(self, dict(params_to_override, inputs=inputs))
        # Ensure EPs get started (if pattern_response is called before the
        # simulation is run()).
        topo.sim.run(0.0)

        if p.restore_state:
            topo.sim.state_push()

        if not p.overwrite_previous:
            save_input_generators()

        if not p.plastic:
            # Turn off plasticity everywhere so that presenting the test
            # patterns does not modify the network.
            for sheet in topo.sim.objects(Sheet).values():
                sheet.override_plasticity_state(new_plasticity_state=False)

        if not p.apply_output_fns:
            for each in topo.sim.objects(Sheet).values():
                if hasattr(each, 'measure_maps'):
                    if each.measure_maps:
                        each.apply_output_fns = False

        # Register the inputs on each input sheet
        generatorsheets = topo.sim.objects(GeneratorSheet)

        if not isinstance(p.inputs, dict):
            # A single pattern: present it on every GeneratorSheet.
            for g in generatorsheets.values():
                g.set_input_generator(p.inputs)
        else:
            # A dict maps sheet name -> pattern; warn about unknown names.
            # (Was generatorsheets.has_key(each): has_key is deprecated.)
            for each in p.inputs:
                if each in generatorsheets:
                    generatorsheets[each].set_input_generator(p.inputs[each])
                else:
                    param.Parameterized().warning(
                        '%s not a valid Sheet name for pattern_present.' % each)

        if p.restore_events:
            topo.sim.event_push()

        # Convert absolute measurement times into per-segment run lengths.
        durations = np.diff([0] + p.durations)
        projection_dict = dict((conn.name, conn) for conn in topo.sim.connections())
        # Default to recording every Sheet and Projection.
        # (list() is a no-op under Python 2 but keeps this portable.)
        outputs = outputs if outputs else (list(topo.sim.objects(Sheet).keys()) +
                                           list(projection_dict.keys()))

        responses = defaultdict(dict)
        for i, d in enumerate(durations):
            topo.sim.run(d)
            time = p.durations[i]
            if hasattr(topo, 'guimain'):
                # Running under the GUI: refresh the activity displays.
                update_activity(p.install_sheetview)
                topo.guimain.refresh_activity_windows()
            if p.return_responses:

                for output in outputs:
                    if output in topo.sim.objects(Sheet).keys():
                        responses[(output, time)] = topo.sim[output].activity.copy()
                    elif output in projection_dict:
                        responses[(output, time)] = projection_dict[output].activity.copy()

        if p.restore_events:
            topo.sim.event_pop()

        # turn sheets' plasticity and output_fn plasticity back on if we
        # turned it off before
        if not p.plastic:
            for sheet in topo.sim.objects(Sheet).values():
                sheet.restore_plasticity_state()

        if not p.apply_output_fns:
            for each in topo.sim.objects(Sheet).values():
                each.apply_output_fns = True

        if not p.overwrite_previous:
            restore_input_generators()

        if p.restore_state:
            topo.sim.state_pop()

        return responses
Esempio n. 29
0
def version_int(v):
    """
    Convert a version four-tuple to a format that can be used to compare
    version numbers.
    """
    # Zero-pad each field (major, minor, micro: 2 digits; commit count:
    # 5 digits) so that numeric order of the result matches version order.
    packed = "%02d%02d%02d%05d" % v
    return int(packed)

# Package version metadata, derived from the repository state via
# param.Version.  `release` is packed with the same 2/2/2/5-digit scheme
# as version_int() so it can be compared numerically.
__version__ = param.Version(release=(0,9,8), fpath=__file__,
                            commit="3a1c9e8a1", reponame='topographica')
commit  = __version__.commit
version = tuple(list(__version__.release) +[__version__.commit_count])
release = int("%02d%02d%02d%05d" % version)


# Patch for versions of param prior to 10 May 2013
# (ensures param.main exists as a Parameterized instance)
param.main=param.Parameterized(name="main")




# Determine which paths to search for input files
#
# By default, searches in:
# - the current working directory (the default value of param.resolve_path.search_paths),
# - the parent of topo (to get images/, examples/, etc.)
# - topo (for backwards compatibility, e.g. for finding color keys)
#
_package_path = os.path.split(__file__)[0] # location of topo
_root_path = os.path.abspath(os.path.join(_package_path,'..')) # parent of topo
param.resolve_path.search_paths+=[_root_path,_package_path]
Esempio n. 30
0
    def __call__(self, script_file, **params_to_override):
        """
        Run script_file as a batch job: create a uniquely named output
        directory, execute the script in __main__, run the simulation up
        to each time in p['times'] calling p['analysis_fn'] after each
        segment, and mirror all printed output to a .out file.

        Side effects: creates directories, copies/moves files, shadows
        sys.stdout for the duration of the run, and may call sys.exit(-1)
        if the output directory already exists.

        NOTE(review): Python 2 code (print statements, execfile,
        string.join); not runnable under Python 3 without porting.
        """
        p = ParamOverrides(self, params_to_override, allow_extra_keywords=True)

        import os
        import shutil

        # Construct simulation name, etc.
        # Name is <timestamp>_<scriptbase>_<tag>; timestamp==(0,0) means
        # "use the current time".
        scriptbase = re.sub('.ty$', '', os.path.basename(script_file))
        prefix = ""
        if p.timestamp == (0, 0): prefix += time.strftime(p.name_time_format)
        else: prefix += time.strftime(p.name_time_format, p.timestamp)

        prefix += "_" + scriptbase + "_" + p.tag
        simname = prefix

        # Construct parameter-value portion of filename; should do more filtering
        # CBENHANCEMENT: should provide chance for user to specify a
        # function (i.e. make this a function, and have a parameter to
        # allow the function to be overridden).
        # And sort by name by default? Skip ones that aren't different
        # from default, or at least put them at the end?
        prefix += p.dirname_params_filter(p.extra_keywords())

        # Set provided parameter values in main namespace
        from topo.misc.commandline import global_params
        global_params.set_in_context(**p.extra_keywords())

        # Create output directories
        if not os.path.isdir(normalize_path(p['output_directory'])):
            try:
                os.mkdir(normalize_path(p['output_directory']))
            except OSError:
                pass  # Catches potential race condition (simultaneous run_batch runs)

        # All subsequent normalize_path() calls resolve relative to the
        # per-run output directory.
        dirname = self._truncate(p, p.dirname_prefix + prefix)
        normalize_path.prefix = normalize_path(
            os.path.join(p['output_directory'], dirname))

        # Refuse to overwrite an existing run directory (the timestamp in
        # the name makes a retry one minute later produce a fresh name).
        if os.path.isdir(normalize_path.prefix):
            print "Batch run: Warning -- directory already exists!"
            print "Run aborted; wait one minute before trying again, or else rename existing directory: \n" + \
                  normalize_path.prefix

            sys.exit(-1)
        else:
            os.mkdir(normalize_path.prefix)
            print "Batch run output will be in " + normalize_path.prefix

        if p['vc_info']:
            _print_vc_info(simname + ".diffs")

        # Provenance information, printed below once stdout is shadowed.
        hostinfo = "Host: " + " ".join(platform.uname())
        topographicalocation = "Topographica: " + os.path.abspath(sys.argv[0])
        topolocation = "topo package: " + os.path.abspath(topo.__file__)
        scriptlocation = "script: " + os.path.abspath(script_file)

        starttime = time.time()
        startnote = "Batch run started at %s." % time.strftime(
            "%a %d %b %Y %H:%M:%S +0000", time.gmtime())

        # store a re-runnable copy of the command used to start this batch run
        try:
            # pipes.quote is undocumented, so I'm not sure which
            # versions of python include it (I checked python 2.6 and
            # 2.7 on linux; they both have it).
            import pipes
            quotefn = pipes.quote
        except (ImportError, AttributeError):
            # command will need a human to insert quotes before it can be re-used
            quotefn = lambda x: x

        command_used_to_start = string.join([quotefn(arg) for arg in sys.argv])

        # CBENHANCEMENT: would be nice to separately write out a
        # runnable script that does everything necessary to
        # re-generate results (applies diffs etc).

        # Shadow stdout to a .out file in the output directory, so that
        # print statements will go to both the file and to stdout.
        batch_output = open(normalize_path(simname + ".out"), 'w')
        batch_output.write(command_used_to_start + "\n")
        sys.stdout = MultiFile(batch_output, sys.stdout)

        print
        print hostinfo
        print topographicalocation
        print topolocation
        print scriptlocation
        print
        print startnote

        from topo.misc.commandline import auto_import_commands
        auto_import_commands()

        # Ensure that saved state includes all parameter values
        from topo.command import save_script_repr
        param.parameterized.script_repr_suppress_defaults = False

        # Save a copy of the script file for reference
        shutil.copy2(script_file, normalize_path.prefix)
        shutil.move(normalize_path(scriptbase + ".ty"),
                    normalize_path(simname + ".ty"))

        # Default case: times is just a number that scales a standard list of times
        times = p['times']
        if not isinstance(times, list):
            times = [
                t * times for t in
                [0, 50, 100, 500, 1000, 2000, 3000, 4000, 5000, 10000]
            ]

        # Run script in main
        error_count = 0
        initial_warning_count = param.parameterized.warning_count
        try:
            execfile(script_file, __main__.__dict__)  #global_params.context
            global_params.check_for_unused_names()
            if p.save_global_params:
                _save_parameters(p.extra_keywords(),
                                 simname + ".global_params.pickle")
            print_sizes()
            topo.sim.name = simname

            # Run each segment, doing the analysis and saving the script state each time
            for run_to in times:
                topo.sim.run(run_to - topo.sim.time())
                p['analysis_fn']()
                save_script_repr()
                elapsedtime = time.time() - starttime
                param.Parameterized(name="run_batch").message(
                    "Elapsed real time %02d:%02d." %
                    (int(elapsedtime / 60), int(elapsedtime % 60)))

            if p['snapshot']:
                save_snapshot()

        except:
            # Deliberate catch-all: whatever the user's script raises is
            # recorded in the .out file and the run is wrapped up cleanly.
            # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit.
            error_count += 1
            import traceback
            traceback.print_exc(file=sys.stdout)
            sys.stderr.write("Warning -- Error detected: execution halted.\n")

        # Final summary (still mirrored to the .out file).
        print "\nBatch run completed at %s." % time.strftime(
            "%a %d %b %Y %H:%M:%S +0000", time.gmtime())
        print "There were %d error(s) and %d warning(s)%s." % \
              (error_count,(param.parameterized.warning_count-initial_warning_count),
               ((" (plus %d warning(s) prior to entering run_batch)"%initial_warning_count
                 if initial_warning_count>0 else "")))

        # restore stdout
        sys.stdout = sys.__stdout__
        batch_output.close()