Code example #1
0
File: visual_stimulus.py  Project: teogale/mozaik
 def __init__(self, **params):
     """
     Initialize the visual stimulus.

     Delegates parameter handling to ``BaseStimulus`` and sets up the
     caches and flags used when the stimulus is rendered into visual space.
     """
     BaseStimulus.__init__(self, **params)
     self._zoom_cache = {}    # cache of rescaled (zoomed) stimulus arrays
     self.region_cache = {}   # cache of per-region lookups
     self.is_visible = True
     self.transparent = True  # An efficiency flag. It should be set to false by the stimulus if there are no transparent points in it.
     # This will avoid all the code related to transparency which is very expensive.
     # location_x/location_y/size_x/size_y are SNumber stimulus parameters
     # (see the VisualStimulus class declaration) populated from **params.
     self.region = VisualRegion(self.location_x, self.location_y,
                                self.size_x, self.size_y)
     self.first_resolution_mismatch_display = True
Code example #2
0
File: model.py  Project: apdavison/mozaik-contrib
    def __init__(self, sim, num_threads, parameters):
        """
        Build the model: load the retina/LGN and layer-4 components,
        instantiate the sheets, and wire up the afferent, random-lateral
        and sampled-lateral projections.
        """
        Model.__init__(self, sim, num_threads, parameters)

        p = self.parameters

        # Load components
        CortexExcL4 = load_component(p.l4_cortex_exc.component)
        CortexInhL4 = load_component(p.l4_cortex_inh.component)
        RetinaLGN = load_component(p.retina_lgn.component)

        # Build and instrument the network
        self.visual_field = VisualRegion(
            location_x=p.visual_field.centre[0],
            location_y=p.visual_field.centre[1],
            size_x=p.visual_field.size[0],
            size_y=p.visual_field.size[1])
        self.input_layer = RetinaLGN(self, p.retina_lgn.params)
        cortex_exc_l4 = CortexExcL4(self, p.l4_cortex_exc.params)
        cortex_inh_l4 = CortexInhL4(self, p.l4_cortex_inh.params)

        lgn_on = self.input_layer.sheets['X_ON']
        lgn_off = self.input_layer.sheets['X_OFF']

        # Afferent (LGN -> V1 layer 4) projections
        GaborConnector(self, lgn_on, lgn_off, cortex_exc_l4,
                       p.l4_cortex_exc.AfferentConnection, 'V1AffConnection')
        GaborConnector(self, lgn_on, lgn_off, cortex_inh_l4,
                       p.l4_cortex_inh.AfferentConnection, 'V1AffInhConnection')

        # Random lateral layer-4 projections: (name, source, target, params)
        for name, src, tgt, conn in (
            ('V1L4ExcL4ExcConnectionRand', cortex_exc_l4, cortex_exc_l4,
             p.l4_cortex_exc.L4ExcL4ExcConnectionRand),
            ('V1L4ExcL4InhConnectionRand', cortex_exc_l4, cortex_inh_l4,
             p.l4_cortex_exc.L4ExcL4InhConnectionRand),
            ('V1L4InhL4ExcConnectionRand', cortex_inh_l4, cortex_exc_l4,
             p.l4_cortex_inh.L4InhL4ExcConnectionRand),
            ('V1L4InhL4InhConnectionRand', cortex_inh_l4, cortex_inh_l4,
             p.l4_cortex_inh.L4InhL4InhConnectionRand),
        ):
            ModularSingleWeightProbabilisticConnector(
                self, name, src, tgt, conn).connect()

        # Sampled lateral layer-4 projections: (name, source, target, params)
        for name, src, tgt, conn in (
            ('V1L4ExcL4ExcConnection', cortex_exc_l4, cortex_exc_l4,
             p.l4_cortex_exc.L4ExcL4ExcConnection),
            ('V1L4ExcL4InhConnection', cortex_exc_l4, cortex_inh_l4,
             p.l4_cortex_exc.L4ExcL4InhConnection),
            ('V1L4InhL4ExcConnection', cortex_inh_l4, cortex_exc_l4,
             p.l4_cortex_inh.L4InhL4ExcConnection),
            ('V1L4InhL4InhConnection', cortex_inh_l4, cortex_inh_l4,
             p.l4_cortex_inh.L4InhL4InhConnection),
        ):
            ModularSamplingProbabilisticConnector(
                self, name, src, tgt, conn).connect()
Code example #3
0
    def __init__(self, sim, num_threads, parameters):
        """
        Build a retino-thalamic model: an LGN input layer plus a PGN sheet,
        connected in both directions with sampled probabilistic projections.
        """
        Model.__init__(self, sim, num_threads, parameters)

        # Load components
        RetinaLGN = load_component(self.parameters.retina_lgn.component)
        PGN = load_component(self.parameters.pgn.component)

        # Build and instrument the network
        centre = self.parameters.visual_field.centre
        size = self.parameters.visual_field.size
        self.visual_field = VisualRegion(location_x=centre[0],
                                         location_y=centre[1],
                                         size_x=size[0],
                                         size_y=size[1])
        # init layers
        self.input_layer = RetinaLGN(self, self.parameters.retina_lgn.params)
        pgn = PGN(self, self.parameters.pgn.params)

        lgn_on = self.input_layer.sheets['X_ON']
        lgn_off = self.input_layer.sheets['X_OFF']

        ########################################################
        # LGN<->PGN projections: (name, source, target, params)
        projections = (
            ('LGN_PGN_ConnectionOn', lgn_on, pgn,
             self.parameters.pgn.LGN_PGN_ConnectionOn),
            ('LGN_PGN_ConnectionOff', lgn_off, pgn,
             self.parameters.pgn.LGN_PGN_ConnectionOff),
            ('PGN_PGN_Connection', pgn, pgn,
             self.parameters.pgn.PGN_PGN_Connection),
            ('PGN_LGN_ConnectionOn', pgn, lgn_on,
             self.parameters.pgn.PGN_LGN_ConnectionOn),
            ('PGN_LGN_ConnectionOff', pgn, lgn_off,
             self.parameters.pgn.PGN_LGN_ConnectionOff),
        )
        for name, source, target, params in projections:
            ModularSamplingProbabilisticConnector(
                self, name, source, target, params).connect()
Code example #4
0
File: visual_stimulus.py  Project: JoelChavas/mozaik
 def __init__(self, **params):
     """
     Initialize the visual stimulus.

     Delegates parameter handling to ``BaseStimulus`` and sets up the
     caches and flags used when the stimulus is rendered into visual space.
     """
     BaseStimulus.__init__(self, **params)
     self._zoom_cache = {}    # cache of rescaled (zoomed) stimulus arrays
     self.region_cache = {}   # cache of per-region lookups
     self.is_visible = True
     self.transparent = True # An efficiency flag. It should be set to false by the stimulus if there are no transparent points in it.
                             # This will avoid all the code related to transparency which is very expensive.
     # location_x/location_y/size_x/size_y are stimulus parameters
     # populated from **params by the base class.
     self.region = VisualRegion(self.location_x, self.location_y,
                                self.size_x, self.size_y)
     self.first_resolution_mismatch_display=True
Code example #5
0
File: model.py  Project: apdavison/mozaik-contrib
    def __init__(self, sim, num_threads, parameters):
        """
        Minimal model: only the retina/LGN input layer is instantiated;
        no cortical sheets or projections are built.
        """
        Model.__init__(self, sim, num_threads, parameters)

        # Load the retina/LGN component
        RetinaLGN = load_component(self.parameters.sheets.retina_lgn.component)

        # Build and instrument the network
        vf = self.parameters.visual_field
        self.visual_field = VisualRegion(location_x=vf.centre[0],
                                         location_y=vf.centre[1],
                                         size_x=vf.size[0],
                                         size_y=vf.size[1])
        self.input_layer = RetinaLGN(
            self, self.parameters.sheets.retina_lgn.params)
Code example #6
0
    def _calculate_input_currents(self, visual_space, duration):
        """
        Calculate the input currents for all cells.

        Parameters
        ----------
        visual_space : VisualSpace
            The visual space from which the stimulus frames are read.
        duration : float or None
            Stimulation duration (ms). If None, the maximum duration
            offered by the visual space is used.

        Returns
        -------
        tuple
            ``(input_currents, retinal_input)`` where ``input_currents``
            maps each receptive-field type to the list of response currents
            of its locally simulated cells, and ``retinal_input`` is the
            list of stimulus frames that were presented.
        """
        assert isinstance(visual_space, VisualSpace)
        if duration is None:
            duration = visual_space.get_maximum_duration()

        # create population of CellWithReceptiveFields, setting the receptive
        # field centres based on the size/location of self
        logger.debug("Creating population of `CellWithReceptiveField`s")
        input_cells = {}
        for rf_type in self.rf_types:
            input_cells[rf_type] = []
            # only the neurons simulated on this MPI node (_mask_local)
            for i in numpy.nonzero(self.sheets[rf_type].pop._mask_local)[0]:
                cell = CellWithReceptiveField(
                    self.sheets[rf_type].pop.positions[0][i],
                    self.sheets[rf_type].pop.positions[1][i], self.rf[rf_type],
                    self.parameters.gain, visual_space)
                cell.initialize(visual_space.background_luminance, duration)
                input_cells[rf_type].append(cell)

        logger.debug("Processing frames")

        t = 0
        retinal_input = []
        # The viewed region is loop-invariant; build it once rather than
        # re-creating it for every frame.
        visual_region = VisualRegion(location_x=0,
                                     location_y=0,
                                     size_x=self.parameters.size[0],
                                     size_y=self.parameters.size[1])

        while t < duration:
            t = visual_space.update()
            for rf_type in self.rf_types:
                for cell in input_cells[rf_type]:
                    cell.view()
            im = visual_space.view(
                visual_region, pixel_size=self.rf["X_ON"].spatial_resolution)
            retinal_input.append(im)

        input_currents = {}
        for rf_type in self.rf_types:
            input_currents[rf_type] = [
                cell.response_current() for cell in input_cells[rf_type]
            ]
        return (input_currents, retinal_input)
Code example #7
0
File: model.py  Project: apdavison/mozaik-contrib
    def __init__(self, simulator, num_threads, parameters):
        """
        Build a model consisting solely of the retina/LGN input layer,
        and configure which signals are recorded from its ON/OFF sheets.
        """
        Model.__init__(self, simulator, num_threads, parameters)

        RetinaLGN = load_component(self.parameters.sheets.retina_lgn.component)

        # Build and instrument the network
        vf = self.parameters.visual_field
        self.visual_field = VisualRegion(location_x=vf.centre[0],
                                         location_y=vf.centre[1],
                                         size_x=vf.size[0],
                                         size_y=vf.size[1])
        self.input_layer = RetinaLGN(
            self, self.parameters.sheets.retina_lgn.params)

        # Record all spikes, plus membrane potential and excitatory /
        # inhibitory synaptic conductances from neurons 0..59 of each sheet.
        recordings = {
            'spikes': 'all',
            'v': numpy.arange(0, 60, 1),
            'gsyn_exc': numpy.arange(0, 60, 1),
            'gsyn_inh': numpy.arange(0, 60, 1),
        }

        self.input_layer.sheets['X_ON'].to_record = recordings
        self.input_layer.sheets['X_OFF'].to_record = recordings
Code example #8
0
 def __init__(self, x, y, receptive_field, gain_control, visual_space):
     """
     A cell whose response is computed by applying a spatio-temporal
     receptive field to the visual stimulus.

     Parameters
     ----------
     x, y : float
         Position of the receptive-field centre in visual space.
     receptive_field : SpatioTemporalReceptiveField
         The spatio-temporal kernel of the cell.
     gain_control : float
         Gain (nA.m²/cd) converting luminance to current; could imagine
         making this a function of the background luminance.
     visual_space : VisualSpace
         The visual space the cell views.

     Raises
     ------
     Exception
         If the visual space update interval is not an integer multiple
         of the receptive field's temporal resolution.
     """
     self.x = x  # position in space
     self.y = y  #
     self.visual_space = visual_space
     assert isinstance(receptive_field, SpatioTemporalReceptiveField)
     self.receptive_field = receptive_field
     self.gain_control = gain_control
     self.i = 0  # index of the current time step
     self.visual_region = VisualRegion(location_x=self.x,
                                       location_y=self.y,
                                       size_x=self.receptive_field.width,
                                       size_y=self.receptive_field.height)
     # Each visual-space frame must span a whole number of kernel time
     # steps. NOTE(review): `%` on floats is exact only for exactly
     # representable values; a tolerance-based check may be safer here.
     if visual_space.update_interval % self.receptive_field.temporal_resolution != 0:
         # BUGFIX: the original message had the relationship inverted
         # relative to the check above (it claimed the temporal resolution
         # must be a multiple of the update interval).
         errmsg = "The visual space update interval (%g ms) must be an integer multiple of the receptive field temporal resolution (%g ms)" % \
             (visual_space.update_interval, self.receptive_field.temporal_resolution)
         raise Exception(errmsg)
     self.update_factor = int(visual_space.update_interval / self.receptive_field.temporal_resolution)
Code example #9
0
    def __init__(self, sim, num_threads, parameters):
        """
        Build a V1 model with LGN input, layer 4 and (optionally) layer 2/3.

        Which sheets and projections are instantiated depends on three
        model parameters:

        * ``only_afferent`` -- if True, only the afferent LGN->L4
          projections are created (no lateral / intracortical ones);
        * ``l23`` -- if True (and not ``only_afferent``), layer 2/3 sheets
          and their projections are added;
        * ``feedback`` -- if True (with ``l23``), L2/3 -> L4 feedback
          projections are added.
        """
        Model.__init__(self, sim, num_threads, parameters)
        # Load components
        CortexExcL4 = load_component(
            self.parameters.sheets.l4_cortex_exc.component)
        CortexInhL4 = load_component(
            self.parameters.sheets.l4_cortex_inh.component)
        # Layer 2/3 components are only needed when L2/3 is simulated.
        if not self.parameters.only_afferent and self.parameters.l23:
            CortexExcL23 = load_component(
                self.parameters.sheets.l23_cortex_exc.component)
            CortexInhL23 = load_component(
                self.parameters.sheets.l23_cortex_inh.component)

        RetinaLGN = load_component(self.parameters.sheets.retina_lgn.component)

        # Build and instrument the network
        self.visual_field = VisualRegion(
            location_x=self.parameters.visual_field.centre[0],
            location_y=self.parameters.visual_field.centre[1],
            size_x=self.parameters.visual_field.size[0],
            size_y=self.parameters.visual_field.size[1],
        )
        self.input_layer = RetinaLGN(self,
                                     self.parameters.sheets.retina_lgn.params)
        cortex_exc_l4 = CortexExcL4(
            self, self.parameters.sheets.l4_cortex_exc.params)
        cortex_inh_l4 = CortexInhL4(
            self, self.parameters.sheets.l4_cortex_inh.params)

        if not self.parameters.only_afferent and self.parameters.l23:
            cortex_exc_l23 = CortexExcL23(
                self, self.parameters.sheets.l23_cortex_exc.params)
            cortex_inh_l23 = CortexInhL23(
                self, self.parameters.sheets.l23_cortex_inh.params)

        # initialize afferent layer 4 projections
        GaborConnector(
            self,
            self.input_layer.sheets["X_ON"],
            self.input_layer.sheets["X_OFF"],
            cortex_exc_l4,
            self.parameters.sheets.l4_cortex_exc.AfferentConnection,
            "V1AffConnection",
        )
        GaborConnector(
            self,
            self.input_layer.sheets["X_ON"],
            self.input_layer.sheets["X_OFF"],
            cortex_inh_l4,
            self.parameters.sheets.l4_cortex_inh.AfferentConnection,
            "V1AffInhConnection",
        )

        # initialize lateral layer 4 projections
        if not self.parameters.only_afferent:

            # NOTE: the excitatory-source L4 connections use the
            # AnnotationSamplesCount variant of the connector, the
            # inhibitory-source ones use the plain sampling connector.
            ModularSamplingProbabilisticConnectorAnnotationSamplesCount(
                self,
                "V1L4ExcL4ExcConnection",
                cortex_exc_l4,
                cortex_exc_l4,
                self.parameters.sheets.l4_cortex_exc.L4ExcL4ExcConnection,
            ).connect()
            ModularSamplingProbabilisticConnectorAnnotationSamplesCount(
                self,
                "V1L4ExcL4InhConnection",
                cortex_exc_l4,
                cortex_inh_l4,
                self.parameters.sheets.l4_cortex_exc.L4ExcL4InhConnection,
            ).connect()

            ModularSamplingProbabilisticConnector(
                self,
                "V1L4InhL4ExcConnection",
                cortex_inh_l4,
                cortex_exc_l4,
                self.parameters.sheets.l4_cortex_inh.L4InhL4ExcConnection,
            ).connect()
            ModularSamplingProbabilisticConnector(
                self,
                "V1L4InhL4InhConnection",
                cortex_inh_l4,
                cortex_inh_l4,
                self.parameters.sheets.l4_cortex_inh.L4InhL4InhConnection,
            ).connect()

            if self.parameters.l23:

                # initialize afferent layer 4 to layer 2/3 projection
                ModularSamplingProbabilisticConnector(
                    self,
                    "V1L4ExcL23ExcConnection",
                    cortex_exc_l4,
                    cortex_exc_l23,
                    self.parameters.sheets.l23_cortex_exc.
                    L4ExcL23ExcConnection,
                ).connect()
                ModularSamplingProbabilisticConnector(
                    self,
                    "V1L4ExcL23InhConnection",
                    cortex_exc_l4,
                    cortex_inh_l23,
                    self.parameters.sheets.l23_cortex_inh.
                    L4ExcL23InhConnection,
                ).connect()

                # lateral layer 2/3 projections
                ModularSamplingProbabilisticConnector(
                    self,
                    "V1L23ExcL23ExcConnection",
                    cortex_exc_l23,
                    cortex_exc_l23,
                    self.parameters.sheets.l23_cortex_exc.
                    L23ExcL23ExcConnection,
                ).connect()
                ModularSamplingProbabilisticConnector(
                    self,
                    "V1L23ExcL23InhConnection",
                    cortex_exc_l23,
                    cortex_inh_l23,
                    self.parameters.sheets.l23_cortex_exc.
                    L23ExcL23InhConnection,
                ).connect()
                ModularSamplingProbabilisticConnector(
                    self,
                    "V1L23InhL23ExcConnection",
                    cortex_inh_l23,
                    cortex_exc_l23,
                    self.parameters.sheets.l23_cortex_inh.
                    L23InhL23ExcConnection,
                ).connect()
                ModularSamplingProbabilisticConnector(
                    self,
                    "V1L23InhL23InhConnection",
                    cortex_inh_l23,
                    cortex_inh_l23,
                    self.parameters.sheets.l23_cortex_inh.
                    L23InhL23InhConnection,
                ).connect()
                # feedback layer 2/3 -> layer 4 projections
                if self.parameters.feedback:
                    ModularSamplingProbabilisticConnector(
                        self,
                        "V1L23ExcL4ExcConnection",
                        cortex_exc_l23,
                        cortex_exc_l4,
                        self.parameters.sheets.l23_cortex_exc.
                        L23ExcL4ExcConnection,
                    ).connect()
                    ModularSamplingProbabilisticConnector(
                        self,
                        "V1L23ExcL4InhConnection",
                        cortex_exc_l23,
                        cortex_inh_l4,
                        self.parameters.sheets.l23_cortex_exc.
                        L23ExcL4InhConnection,
                    ).connect()
Code example #10
0
    def _calculate_input_currents(self, visual_space, duration):
        """
        Calculate the input currents for all cells.

        Parameters
        ----------
        visual_space : VisualSpace
            The visual space from which the stimulus frames are read.
        duration : float or None
            Stimulation duration (ms). If None, the maximum duration
            offered by the visual space is used.

        Returns
        -------
        tuple
            ``(input_currents, retinal_input)`` where ``input_currents``
            maps each receptive-field type to the list of response currents
            of its locally simulated cells, and ``retinal_input`` is the
            list of presented frames (or ``None`` placeholders when stimulus
            storage is disabled).
        """
        assert isinstance(visual_space, VisualSpace)
        if duration is None:
            duration = visual_space.get_maximum_duration()

        # create population of CellWithReceptiveFields, setting the receptive
        # field centres based on the size/location of self
        logger.debug("Creating population of `CellWithReceptiveField`s")
        input_cells = OrderedDict()
        for rf_type in self.rf_types:
            input_cells[rf_type] = []
            # only the neurons simulated on this MPI node (_mask_local)
            for i in numpy.nonzero(self.sheets[rf_type].pop._mask_local)[0]:
                cell = CellWithReceptiveField(
                    self.sheets[rf_type].pop.positions[0][i],
                    self.sheets[rf_type].pop.positions[1][i], self.rf[rf_type],
                    self.parameters.gain_control, visual_space)
                cell.initialize(visual_space.background_luminance, duration)
                input_cells[rf_type].append(cell)

        logger.debug("Processing frames")

        # BUGFIX: removed an unreachable `if False:` experiment that
        # referenced an un-imported `threading` module, invoked
        # Thread(target=cell.view()) (calling instead of passing the
        # callable) and shadowed the loop variable `t`.

        t = 0
        retinal_input = []

        store_stimuli = self.model.parameters.store_stimuli
        if store_stimuli:
            # Loop-invariant: the viewed region never changes across frames,
            # so build it once instead of once per frame.
            visual_region = VisualRegion(
                location_x=0,
                location_y=0,
                size_x=self.model.visual_field.size_x,
                size_y=self.model.visual_field.size_y)

        while t < duration:
            t = visual_space.update()
            for rf_type in self.rf_types:
                for cell in input_cells[rf_type]:
                    cell.view()

            if store_stimuli:
                im = visual_space.view(
                    visual_region,
                    pixel_size=self.rf["X_ON"].spatial_resolution)
            else:
                im = None
            retinal_input.append(im)

        input_currents = OrderedDict()
        for rf_type in self.rf_types:
            input_currents[rf_type] = [
                cell.response_current() for cell in input_cells[rf_type]
            ]
        return (input_currents, retinal_input)
Code example #11
0
    def __init__(self, sim, num_threads, parameters):
        """
        Build a thalamo-cortical model: an LGN input layer, optionally a
        PGN sheet, and optionally a V1 layer-4 circuit with feedback.

        NOTE(review): the section toggles ``withPGN``, ``withV1``,
        ``withFeedback_CxLGN`` and ``withFeedback_CxPGN`` are read from
        enclosing (module/global) scope, not from ``parameters`` -- confirm
        they are defined before this constructor runs.
        """
        Model.__init__(self, sim, num_threads, parameters)
        # Load components
        LGN = load_component(self.parameters.lgn.component)
        # Instance
        self.input_layer = LGN(self, self.parameters.lgn.params)

        # Build and instrument the network
        self.visual_field = VisualRegion(
            location_x=self.parameters.visual_field.centre[0],
            location_y=self.parameters.visual_field.centre[1],
            size_x=self.parameters.visual_field.size[0],
            size_y=self.parameters.visual_field.size[1]
        )

        # PROJECTIONS
        ########################################################

        # PGN (perigeniculate nucleus) sheet and its connections to/from LGN
        if withPGN:
            # Load components
            PGN = load_component( self.parameters.pgn.component )
            # Instance
            pgn = PGN(self, self.parameters.pgn.params)

            # LGN-PGN
            ModularSamplingProbabilisticConnector(
                self,
                'LGN_PGN_ConnectionOn',                     # name
                self.input_layer.sheets['X_ON'],     # source
                pgn,                                        # target
                self.parameters.pgn.LGN_PGN_ConnectionOn    # params
            ).connect()

            ModularSamplingProbabilisticConnector(
                self,
                'LGN_PGN_ConnectionOff',                    # name
                self.input_layer.sheets['X_OFF'],    # source
                pgn,                                        # target
                self.parameters.pgn.LGN_PGN_ConnectionOff   # params
            ).connect()

            ModularSamplingProbabilisticConnector(
                self,
                'PGN_PGN_Connection',                       # name
                pgn,                                        # source
                pgn,                                        # target
                self.parameters.pgn.PGN_PGN_Connection      # params
            ).connect()

            ModularSamplingProbabilisticConnector(
                self,
                'PGN_LGN_ConnectionOn',                     # name
                pgn,                                        # source
                self.input_layer.sheets['X_ON'],     # target
                self.parameters.pgn.PGN_LGN_ConnectionOn    # params
            ).connect()

            ModularSamplingProbabilisticConnector(
                self,
                'PGN_LGN_ConnectionOff',                    # name
                pgn,                                        # source
                self.input_layer.sheets['X_OFF'],    # target
                self.parameters.pgn.PGN_LGN_ConnectionOff   # params
            ).connect()

        # V1
        if withV1: # CTC
            # Load components
            CortexExcL4 = load_component(self.parameters.l4_cortex_exc.component)
            CortexInhL4 = load_component(self.parameters.l4_cortex_inh.component)
            # Instance
            cortex_exc_l4 = CortexExcL4(self, self.parameters.l4_cortex_exc.params)
            cortex_inh_l4 = CortexInhL4(self, self.parameters.l4_cortex_inh.params)

            # ########################################################
            # THALAMO-CORTICAL
            # initialize afferent layer 4 projections
            GaborConnector(
                self,
                self.input_layer.sheets['X_ON'],
                self.input_layer.sheets['X_OFF'],
                cortex_exc_l4,                                      # target
                self.parameters.l4_cortex_exc.AfferentConnection,   # parameters
                'V1AffConnection'                                   # name
            )

            GaborConnector(
                self,
                self.input_layer.sheets['X_ON'],
                self.input_layer.sheets['X_OFF'],
                cortex_inh_l4,
                self.parameters.l4_cortex_inh.AfferentConnection,
                'V1AffInhConnection'
            )

            # ########################################################
            # CORTICO-CORTICAL
            # random lateral layer 4 projections
            ModularSingleWeightProbabilisticConnector(
                self,
                'V1L4ExcL4ExcConnectionRand',
                cortex_exc_l4,
                cortex_exc_l4,
                self.parameters.l4_cortex_exc.L4ExcL4ExcConnectionRand
            ).connect()

            ModularSingleWeightProbabilisticConnector(
                self,
                'V1L4ExcL4InhConnectionRand',
                cortex_exc_l4,
                cortex_inh_l4,
                self.parameters.l4_cortex_exc.L4ExcL4InhConnectionRand
            ).connect()

            ModularSingleWeightProbabilisticConnector(
                self,
                'V1L4InhL4ExcConnectionRand',
                cortex_inh_l4,
                cortex_exc_l4,
                self.parameters.l4_cortex_inh.L4InhL4ExcConnectionRand
            ).connect()

            ModularSingleWeightProbabilisticConnector(
                self,
                'V1L4InhL4InhConnectionRand',
                cortex_inh_l4,
                cortex_inh_l4,
                self.parameters.l4_cortex_inh.L4InhL4InhConnectionRand
            ).connect()

            # lateral layer 4 projections
            ModularSamplingProbabilisticConnector(
                self,
                'V1L4ExcL4ExcConnection',
                cortex_exc_l4,
                cortex_exc_l4,
                self.parameters.l4_cortex_exc.L4ExcL4ExcConnection
            ).connect()

            ModularSamplingProbabilisticConnector(
                self,
                'V1L4ExcL4InhConnection',
                cortex_exc_l4,
                cortex_inh_l4,
                self.parameters.l4_cortex_exc.L4ExcL4InhConnection
            ).connect()

            ModularSamplingProbabilisticConnector(
                self,
                'V1L4InhL4ExcConnection',
                cortex_inh_l4,
                cortex_exc_l4,
                self.parameters.l4_cortex_inh.L4InhL4ExcConnection
            ).connect()

            ModularSamplingProbabilisticConnector(
                self,
                'V1L4InhL4InhConnection',
                cortex_inh_l4,
                cortex_inh_l4,
                self.parameters.l4_cortex_inh.L4InhL4InhConnection
            ).connect()

            ########################################################
            # CORTICO-THALAMIC
            if withFeedback_CxLGN:
                # NOTE: both the ON and OFF feedback projections use the
                # same EfferentConnection_LGN parameter set.
                ModularSamplingProbabilisticConnector(
                    self,
                    'V1EffConnectionOn',
                    cortex_exc_l4,
                    self.input_layer.sheets['X_ON'],
                    self.parameters.l4_cortex_exc.EfferentConnection_LGN
                ).connect()

                ModularSamplingProbabilisticConnector(
                    self,
                    'V1EffConnectionOff',
                    cortex_exc_l4,
                    self.input_layer.sheets['X_OFF'],
                    self.parameters.l4_cortex_exc.EfferentConnection_LGN
                ).connect()

                # GaborConnector(
                #     self,
                #     self.input_layer.sheets['X_ON'],
                #     self.input_layer.sheets['X_OFF'],
                #     cortex_exc_l4,                                      # source
                #     self.parameters.l4_cortex_exc.EfferentConnection,   # parameters
                #     'V1EffConnection'                                   # name
                # )


            # cortical feedback onto the PGN (requires the PGN sheet)
            if withFeedback_CxPGN and withPGN:
                ModularSamplingProbabilisticConnector(
                    self,
                    'V1EffConnectionPGN',
                    cortex_exc_l4,
                    pgn,
                    self.parameters.l4_cortex_exc.EfferentConnection_PGN
                ).connect()
Code example #12
0
File: visual_stimulus.py  Project: teogale/mozaik
class VisualStimulus(BaseStimulus):
    """
    Abstract base class for visual stimuli.
    
    This class defines all parameters common to all visual stimuli.
    
    This class implements all functions specified by the :class:`mozaik.stimuli.stimulus.BaseStimulus` interface.
    The only function that remains to be implemented by the user whenever creating a new stimulus by subclassing
    this class is the :func:`mozaik.stimuli.stimulus.BaseStimulus.frames` function.
    
    This class also implements functions common to all visual stimuli that are required for it to be compatible 
    with the :class:`mozaik.space.VisualSpace` class.
    """
    background_luminance = SNumber(
        lux,
        doc=
        "Background luminance. Maximum luminance of object allowed is 2*background_luminance"
    )
    density = SNumber(1 / (degrees),
                      doc="The density of stimulus - units per degree")
    location_x = SNumber(degrees,
                         doc="x location of the center of  visual region.")
    location_y = SNumber(degrees,
                         doc="y location of the center of  visual region.")
    size_x = SNumber(degrees,
                     doc="The size of the region in degrees (asimuth).")
    size_y = SNumber(degrees,
                     doc="The size of the region in degrees (elevation).")

    def __init__(self, **params):
        BaseStimulus.__init__(self, **params)
        self._zoom_cache = {}   # maps zoom factor -> rescaled frame (see display())
        self.region_cache = {}  # maps VisualRegion -> precomputed slice bounds
        self.is_visible = True
        # NOTE(review): update()/reset() toggle ``self.visible`` whereas this
        # line initializes ``self.is_visible`` -- looks like a historical
        # inconsistency; confirm which attribute callers read before unifying.
        self.transparent = True  # An efficiency flag. It should be set to False by the stimulus if there are no transparent points in it.
        # This will avoid all the code related to transparency which is very expensive.
        self.region = VisualRegion(self.location_x, self.location_y,
                                   self.size_x, self.size_y)
        # Ensures the expensive-resampling warning in display() fires only once.
        self.first_resolution_mismatch_display = True

    def _calculate_zoom(self, actual_pixel_size, desired_pixel_size):
        """
        Sometimes the interpolation procedure returns a new array that is too
        small due to rounding error. This is a crude attempt to work around that.

        Returns the zoom factor (actual/desired), nudged upward by one ulp for
        each image axis whose scaled size would otherwise round down.
        """
        zoom = actual_pixel_size / desired_pixel_size
        for i in self.img.shape:
            if int(zoom * i) != round(zoom * i):
                zoom *= (1 + 1e-15)
        return zoom

    def display(self, region, pixel_size):
        """
        Render the current frame into *region* at resolution *pixel_size*
        (degrees per pixel) and return the resulting 2D luminance array.

        Pixels of *region* not covered by the stimulus are filled with
        TRANSPARENT (when ``self.transparent``) or the background luminance.
        Slice bounds computed for a region are cached in ``self.region_cache``
        and reused on subsequent calls with the same region.
        """
        assert isinstance(
            region, VisualRegion
        ), "region must be a VisualRegion-descended object. Actually a %s" % type(
            region)
        size_in_pixels = numpy.ceil(
            xy2ij((region.size_x, region.size_y)) /
            float(pixel_size)).astype(int)
        if self.transparent:
            view = TRANSPARENT * numpy.ones(size_in_pixels)
        else:
            view = self.background_luminance * numpy.ones(size_in_pixels)

        if region.overlaps(self.region):
            # ``in`` instead of dict.has_key(), which was removed in Python 3.
            if region not in self.region_cache:
                intersection = region.intersection(self.region)
                assert intersection == self.region.intersection(
                    region
                )  # just a consistency check. Could be removed if necessary for performance.
                # Fractional position/extent of the intersection within the
                # stimulus image ...
                img_relative_left = (intersection.left -
                                     self.region.left) / self.region.width
                img_relative_width = intersection.width / self.region.width
                img_relative_top = (intersection.top -
                                    self.region.bottom) / self.region.height
                img_relative_height = intersection.height / self.region.height
                # ... and within the target view.
                view_relative_left = (intersection.left -
                                      region.left) / region.width
                view_relative_width = intersection.width / region.width
                view_relative_top = (intersection.top -
                                     region.bottom) / region.height
                view_relative_height = intersection.height / region.height

                img_pixel_size = xy2ij(
                    (self.region.size_x, self.region.size_y
                     )) / self.img.shape  # is self.size a tuple or an array?
                assert img_pixel_size[0] == img_pixel_size[1]

                # necessary instead of == comparison due to the floating math rounding errors
                if abs(pixel_size - img_pixel_size[0]) < 0.0001:
                    img = self.img
                else:
                    if self.first_resolution_mismatch_display:
                        logger.warning(
                            "Image pixel size does not match desired size (%g vs. %g) degrees. This is extremely inefficient!!!!!!!!!!!"
                            % (pixel_size, img_pixel_size[0]))
                        logger.warning("Image pixel size %g,%g" %
                                       numpy.shape(self.img))
                        self.first_resolution_mismatch_display = False
                    # note that if the image is much larger than the view region, we might save some
                    # time by not rescaling the whole image, only the part within the view region.
                    zoom = self._calculate_zoom(img_pixel_size[0], pixel_size)
                    if zoom in self._zoom_cache:
                        img = self._zoom_cache[zoom]
                    else:
                        img = interpolation.zoom(self.img, zoom)
                        self._zoom_cache[zoom] = img

                # Convert the fractional coordinates to integer pixel slices:
                # (i, j) index the stimulus image, (k, l) index the view.
                j_start = numpy.round(img_relative_left *
                                      img.shape[1]).astype(int)
                delta_j = numpy.round(img_relative_width *
                                      img.shape[1]).astype(int)
                i_start = img.shape[0] - numpy.round(
                    img_relative_top * img.shape[0]).astype(int)
                delta_i = numpy.round(img_relative_height *
                                      img.shape[0]).astype(int)

                l_start = numpy.round(view_relative_left *
                                      size_in_pixels[1]).astype(int)
                delta_l = numpy.round(view_relative_width *
                                      size_in_pixels[1]).astype(int)
                k_start = size_in_pixels[0] - numpy.round(
                    view_relative_top * size_in_pixels[0]).astype(int)
                delta_k = numpy.round(view_relative_height *
                                      size_in_pixels[0]).astype(int)

                # Unfortunately the above code can give inconsistent results
                # even if the inputs are correct, due to rounding errors;
                # clamp off-by-one disagreements to the smaller extent.
                if abs(delta_j - delta_l) == 1:
                    delta_j = delta_l = min(delta_j, delta_l)
                if abs(delta_i - delta_k) == 1:
                    delta_i = delta_k = min(delta_i, delta_k)

                assert delta_j == delta_l, "delta_j = %g, delta_l = %g" % (
                    delta_j, delta_l)
                assert delta_i == delta_k, "delta_i = %g, delta_k = %g" % (
                    delta_i, delta_k)

                i_stop = i_start + delta_i
                j_stop = j_start + delta_j
                k_stop = k_start + delta_k
                l_stop = l_start + delta_l

                try:
                    self.region_cache[region] = ((k_start, k_stop,
                                                  l_start, l_stop),
                                                 (i_start, i_stop,
                                                  j_start, j_stop))
                    view[k_start:k_stop, l_start:l_stop] = img[i_start:i_stop,
                                                               j_start:j_stop]
                except ValueError:
                    logger.error(
                        "i_start = %d, i_stop = %d, j_start = %d, j_stop = %d"
                        % (i_start, i_stop, j_start, j_stop))
                    logger.error(
                        "k_start = %d, k_stop = %d, l_start = %d, l_stop = %d"
                        % (k_start, k_stop, l_start, l_stop))
                    logger.error("img.shape = %s, view.shape = %s" %
                                 (img.shape, view.shape))
                    logger.error(
                        "img[i_start:i_stop, j_start:j_stop].shape = %s" %
                        str(img[i_start:i_stop, j_start:j_stop].shape))
                    logger.error(
                        "view[k_start:k_stop, l_start:l_stop].shape = %s" %
                        str(view[k_start:k_stop, l_start:l_stop].shape))
                    raise
            else:
                # Cached path: reuse the slice bounds computed above.
                ((k_start, k_stop, l_start, l_stop),
                 (i_start, i_stop, j_start,
                  j_stop)) = self.region_cache[region]
                try:
                    view[k_start:k_stop,
                         l_start:l_stop] = self.img[i_start:i_stop,
                                                    j_start:j_stop]
                except ValueError:
                    # Fixed: the previous handler logged names that only exist
                    # in the uncached branch, raising NameError and masking the
                    # original ValueError.
                    logger.error(
                        "i_start = %d, i_stop = %d, j_start = %d, j_stop = %d"
                        % (i_start, i_stop, j_start, j_stop))
                    logger.error(
                        "k_start = %d, k_stop = %d, l_start = %d, l_stop = %d"
                        % (k_start, k_stop, l_start, l_stop))
                    logger.error("img.shape = %s, view.shape = %s" %
                                 (self.img.shape, view.shape))
                    raise
        return view

    def update(self):
        """
        Sets the current frame to the next frame in the sequence.

        When the frame iterator is exhausted the stimulus is marked invisible.
        The zoom cache is invalidated afterwards because it holds resamplings
        of the previous frame.
        """
        try:
            # next() builtin instead of the Python-2-only .next() method.
            self.img, self.variables = next(self._frames)
        except StopIteration:
            self.visible = False
        else:
            assert self.img.min() >= 0 or self.img.min(
            ) == TRANSPARENT, "frame minimum is less than zero: %g" % self.img.min(
            )
            assert self.img.max(
            ) <= 2 * self.background_luminance, "frame maximum (%g) is greater than the maximum luminance (%g)" % (
                self.img.max(), 2 * self.background_luminance)
        self._zoom_cache = {}

    def reset(self):
        """
        Reset to the first frame in the sequence.
        """
        self.visible = True
        self._frames = self.frames()
        self.update()

    def next_frame(self):
        """For creating movies. Advances one frame and returns it in a list."""
        self.update()
        return [self.img]
# ---- Code example #13 (score: 0) ----
# File: model.py -- Project: aopy/mozaik
    def __init__(self, sim, num_threads, parameters):
        """
        Construct the model: load sheet components, build the visual field
        and the LGN input layer, instantiate the cortical sheets, and wire up
        the afferent, lateral and (optionally) inter-laminar projections.
        """
        Model.__init__(self, sim, num_threads, parameters)

        sheets = self.parameters.sheets
        build_l23 = (not self.parameters.only_afferent) and self.parameters.l23

        # Load components.
        CortexExcL4 = load_component(sheets.l4_cortex_exc.component)
        CortexInhL4 = load_component(sheets.l4_cortex_inh.component)
        if build_l23:
            CortexExcL23 = load_component(sheets.l23_cortex_exc.component)
            CortexInhL23 = load_component(sheets.l23_cortex_inh.component)
        RetinaLGN = load_component(sheets.retina_lgn.component)

        # Build and instrument the network.
        centre = self.parameters.visual_field.centre
        field_size = self.parameters.visual_field.size
        self.visual_field = VisualRegion(location_x=centre[0],
                                         location_y=centre[1],
                                         size_x=field_size[0],
                                         size_y=field_size[1])

        # 'pyNN.spiNNaker' has no attribute 'StepCurrentSource'
        self.input_layer = RetinaLGN(self, sheets.retina_lgn.params)

        # spiNNaker has no attribute EIF_cond_exp_isfa_ista -> Iz
        cortex_exc_l4 = CortexExcL4(self, sheets.l4_cortex_exc.params)
        cortex_inh_l4 = CortexInhL4(self, sheets.l4_cortex_inh.params)

        if build_l23:
            cortex_exc_l23 = CortexExcL23(self, sheets.l23_cortex_exc.params)
            cortex_inh_l23 = CortexInhL23(self, sheets.l23_cortex_inh.params)

        # Initialize afferent layer 4 projections.
        lgn_on = self.input_layer.sheets['X_ON']
        lgn_off = self.input_layer.sheets['X_OFF']
        GaborConnector(self, lgn_on, lgn_off, cortex_exc_l4,
                       sheets.l4_cortex_exc.AfferentConnection,
                       'V1AffConnection')
        GaborConnector(self, lgn_on, lgn_off, cortex_inh_l4,
                       sheets.l4_cortex_inh.AfferentConnection,
                       'V1AffInhConnection')

        if self.parameters.only_afferent:
            return

        def wire(connector_cls, name, source, target, conn_params):
            # Build one modular projection and connect it immediately.
            connector_cls(self, name, source, target, conn_params).connect()

        # Lateral layer 4 projections.
        wire(ModularSamplingProbabilisticConnectorAnnotationSamplesCount,
             'V1L4ExcL4ExcConnection', cortex_exc_l4, cortex_exc_l4,
             sheets.l4_cortex_exc.L4ExcL4ExcConnection)
        wire(ModularSamplingProbabilisticConnectorAnnotationSamplesCount,
             'V1L4ExcL4InhConnection', cortex_exc_l4, cortex_inh_l4,
             sheets.l4_cortex_exc.L4ExcL4InhConnection)
        wire(ModularSamplingProbabilisticConnector,
             'V1L4InhL4ExcConnection', cortex_inh_l4, cortex_exc_l4,
             sheets.l4_cortex_inh.L4InhL4ExcConnection)
        wire(ModularSamplingProbabilisticConnector,
             'V1L4InhL4InhConnection', cortex_inh_l4, cortex_inh_l4,
             sheets.l4_cortex_inh.L4InhL4InhConnection)

        if self.parameters.l23:
            # Afferent layer 4 -> layer 2/3 projections.
            wire(ModularSamplingProbabilisticConnector,
                 'V1L4ExcL23ExcConnection', cortex_exc_l4, cortex_exc_l23,
                 sheets.l23_cortex_exc.L4ExcL23ExcConnection)
            wire(ModularSamplingProbabilisticConnector,
                 'V1L4ExcL23InhConnection', cortex_exc_l4, cortex_inh_l23,
                 sheets.l23_cortex_inh.L4ExcL23InhConnection)
            # Lateral layer 2/3 projections.
            wire(ModularSamplingProbabilisticConnector,
                 'V1L23ExcL23ExcConnection', cortex_exc_l23, cortex_exc_l23,
                 sheets.l23_cortex_exc.L23ExcL23ExcConnection)
            wire(ModularSamplingProbabilisticConnector,
                 'V1L23ExcL23InhConnection', cortex_exc_l23, cortex_inh_l23,
                 sheets.l23_cortex_exc.L23ExcL23InhConnection)
            wire(ModularSamplingProbabilisticConnector,
                 'V1L23InhL23ExcConnection', cortex_inh_l23, cortex_exc_l23,
                 sheets.l23_cortex_inh.L23InhL23ExcConnection)
            wire(ModularSamplingProbabilisticConnector,
                 'V1L23InhL23InhConnection', cortex_inh_l23, cortex_inh_l23,
                 sheets.l23_cortex_inh.L23InhL23InhConnection)
            if self.parameters.feedback:
                # Feedback layer 2/3 -> layer 4 projections.
                wire(ModularSamplingProbabilisticConnector,
                     'V1L23ExcL4ExcConnection', cortex_exc_l23, cortex_exc_l4,
                     sheets.l23_cortex_exc.L23ExcL4ExcConnection)
                wire(ModularSamplingProbabilisticConnector,
                     'V1L23ExcL4InhConnection', cortex_exc_l23, cortex_inh_l4,
                     sheets.l23_cortex_exc.L23ExcL4InhConnection)
# ---- Code example #14 (score: 0) ----
# File: visual_stimulus.py -- Project: JoelChavas/mozaik
class VisualStimulus(BaseStimulus):
    """
    Abstract base class for visual stimuli.
    
    This class defines all parameters common to all visual stimuli.
    
    This class implements all functions specified by the :class:`mozaik.stimuli.stimulus.BaseStimulus` interface.
    The only function that remains to be implemented by the user whenever creating a new stimulus by subclassing
    this class is the :func:`mozaik.stimuli.stimulus.BaseStimulus.frames` function.
    
    This class also implements functions common to all visual stimuli that are required for it to be compatible 
    with the :class:`mozaik.space.VisualSpace` class.
    """
    background_luminance = SNumber(lux, doc="Background luminance. Maximum luminance of object allowed is 2*background_luminance")
    density = SNumber(1/(degrees), doc="The density of stimulus - units per degree")
    location_x = SNumber(degrees, doc="x location of the center of  visual region.")
    location_y = SNumber(degrees, doc="y location of the center of  visual region.")
    size_x = SNumber(degrees, doc="The size of the region in degrees (asimuth).")
    size_y = SNumber(degrees, doc="The size of the region in degrees (elevation).")

    def __init__(self, **params):
        BaseStimulus.__init__(self, **params)
        self._zoom_cache = {}   # maps zoom factor -> rescaled frame (see display())
        self.region_cache = {}  # maps VisualRegion -> precomputed slice bounds
        self.is_visible = True
        # NOTE(review): update()/reset() toggle ``self.visible`` whereas this
        # line initializes ``self.is_visible`` -- looks like a historical
        # inconsistency; confirm which attribute callers read before unifying.
        self.transparent = True  # An efficiency flag. It should be set to False by the stimulus if there are no transparent points in it.
                                 # This will avoid all the code related to transparency which is very expensive.
        self.region = VisualRegion(self.location_x, self.location_y,
                                   self.size_x, self.size_y)
        # Ensures the expensive-resampling warning in display() fires only once.
        self.first_resolution_mismatch_display = True

    def _calculate_zoom(self, actual_pixel_size, desired_pixel_size):
        """
        Sometimes the interpolation procedure returns a new array that is too
        small due to rounding error. This is a crude attempt to work around that.

        Returns the zoom factor (actual/desired), nudged upward by one ulp for
        each image axis whose scaled size would otherwise round down.
        """
        zoom = actual_pixel_size / desired_pixel_size
        for i in self.img.shape:
            if int(zoom * i) != round(zoom * i):
                zoom *= (1 + 1e-15)
        return zoom

    def display(self, region, pixel_size):
        """
        Render the current frame into *region* at resolution *pixel_size*
        (degrees per pixel) and return the resulting 2D luminance array.

        Pixels of *region* not covered by the stimulus are filled with
        TRANSPARENT (when ``self.transparent``) or the background luminance.
        Slice bounds computed for a region are cached in ``self.region_cache``
        and reused on subsequent calls with the same region.
        """
        assert isinstance(region, VisualRegion), "region must be a VisualRegion-descended object. Actually a %s" % type(region)
        size_in_pixels = numpy.ceil(
            xy2ij((region.size_x, region.size_y))
            / float(pixel_size)).astype(int)
        if self.transparent:
            view = TRANSPARENT * numpy.ones(size_in_pixels)
        else:
            view = self.background_luminance * numpy.ones(size_in_pixels)

        if region.overlaps(self.region):
            # ``in`` instead of dict.has_key(), which was removed in Python 3.
            if region not in self.region_cache:
                intersection = region.intersection(self.region)
                assert intersection == self.region.intersection(region)  # just a consistency check. Could be removed if necessary for performance.
                # Fractional position/extent of the intersection within the
                # stimulus image ...
                img_relative_left = (intersection.left - self.region.left) / self.region.width
                img_relative_width = intersection.width / self.region.width
                img_relative_top = (intersection.top - self.region.bottom) / self.region.height
                img_relative_height = intersection.height / self.region.height
                # ... and within the target view.
                view_relative_left = (intersection.left - region.left) / region.width
                view_relative_width = intersection.width / region.width
                view_relative_top = (intersection.top - region.bottom) / region.height
                view_relative_height = intersection.height / region.height

                img_pixel_size = xy2ij((self.region.size_x, self.region.size_y)) / self.img.shape  # is self.size a tuple or an array?
                assert img_pixel_size[0] == img_pixel_size[1]

                # necessary instead of == comparison due to the floating math rounding errors
                if abs(pixel_size - img_pixel_size[0]) < 0.0001:
                    img = self.img
                else:
                    if self.first_resolution_mismatch_display:
                        logger.warning("Image pixel size does not match desired size (%g vs. %g) degrees. This is extremely inefficient!!!!!!!!!!!" % (pixel_size, img_pixel_size[0]))
                        logger.warning("Image pixel size %g,%g" % numpy.shape(self.img))
                        self.first_resolution_mismatch_display = False
                    # note that if the image is much larger than the view region, we might save some
                    # time by not rescaling the whole image, only the part within the view region.
                    zoom = self._calculate_zoom(img_pixel_size[0], pixel_size)
                    if zoom in self._zoom_cache:
                        img = self._zoom_cache[zoom]
                    else:
                        img = interpolation.zoom(self.img, zoom)
                        self._zoom_cache[zoom] = img

                # Convert the fractional coordinates to integer pixel slices:
                # (i, j) index the stimulus image, (k, l) index the view.
                j_start = numpy.round(img_relative_left * img.shape[1]).astype(int)
                delta_j = numpy.round(img_relative_width * img.shape[1]).astype(int)
                i_start = img.shape[0] - numpy.round(img_relative_top * img.shape[0]).astype(int)
                delta_i = numpy.round(img_relative_height * img.shape[0]).astype(int)

                l_start = numpy.round(view_relative_left * size_in_pixels[1]).astype(int)
                delta_l = numpy.round(view_relative_width * size_in_pixels[1]).astype(int)
                k_start = size_in_pixels[0] - numpy.round(view_relative_top * size_in_pixels[0]).astype(int)
                delta_k = numpy.round(view_relative_height * size_in_pixels[0]).astype(int)

                # Unfortunately the above code can give inconsistent results
                # even if the inputs are correct, due to rounding errors;
                # clamp off-by-one disagreements to the smaller extent.
                if abs(delta_j - delta_l) == 1:
                    delta_j = delta_l = min(delta_j, delta_l)
                if abs(delta_i - delta_k) == 1:
                    delta_i = delta_k = min(delta_i, delta_k)

                assert delta_j == delta_l, "delta_j = %g, delta_l = %g" % (delta_j, delta_l)
                assert delta_i == delta_k, "delta_i = %g, delta_k = %g" % (delta_i, delta_k)

                i_stop = i_start + delta_i
                j_stop = j_start + delta_j
                k_stop = k_start + delta_k
                l_stop = l_start + delta_l

                try:
                    self.region_cache[region] = ((k_start, k_stop, l_start, l_stop), (i_start, i_stop, j_start, j_stop))
                    view[k_start:k_stop, l_start:l_stop] = img[i_start:i_stop, j_start:j_stop]
                except ValueError:
                    logger.error("i_start = %d, i_stop = %d, j_start = %d, j_stop = %d" % (i_start, i_stop, j_start, j_stop))
                    logger.error("k_start = %d, k_stop = %d, l_start = %d, l_stop = %d" % (k_start, k_stop, l_start, l_stop))
                    logger.error("img.shape = %s, view.shape = %s" % (img.shape, view.shape))
                    logger.error("img[i_start:i_stop, j_start:j_stop].shape = %s" % str(img[i_start:i_stop, j_start:j_stop].shape))
                    logger.error("view[k_start:k_stop, l_start:l_stop].shape = %s" % str(view[k_start:k_stop, l_start:l_stop].shape))
                    raise
            else:
                # Cached path: reuse the slice bounds computed above.
                ((k_start, k_stop, l_start, l_stop), (i_start, i_stop, j_start, j_stop)) = self.region_cache[region]
                try:
                    view[k_start:k_stop, l_start:l_stop] = self.img[i_start:i_stop, j_start:j_stop]
                except ValueError:
                    # Fixed: the previous handler logged names that only exist
                    # in the uncached branch, raising NameError and masking the
                    # original ValueError.
                    logger.error("i_start = %d, i_stop = %d, j_start = %d, j_stop = %d" % (i_start, i_stop, j_start, j_stop))
                    logger.error("k_start = %d, k_stop = %d, l_start = %d, l_stop = %d" % (k_start, k_stop, l_start, l_stop))
                    logger.error("img.shape = %s, view.shape = %s" % (self.img.shape, view.shape))
                    raise
        return view

    def update(self):
        """
        Sets the current frame to the next frame in the sequence.

        When the frame iterator is exhausted the stimulus is marked invisible.
        The zoom cache is invalidated afterwards because it holds resamplings
        of the previous frame.
        """
        try:
            # next() builtin instead of the Python-2-only .next() method.
            self.img, self.variables = next(self._frames)
        except StopIteration:
            self.visible = False
        else:
            assert self.img.min() >= 0 or self.img.min() == TRANSPARENT, "frame minimum is less than zero: %g" % self.img.min()
            assert self.img.max() <= 2 * self.background_luminance, "frame maximum (%g) is greater than the maximum luminance (%g)" % (self.img.max(), 2 * self.background_luminance)
        self._zoom_cache = {}

    def reset(self):
        """
        Reset to the first frame in the sequence.
        """
        self.visible = True
        self._frames = self.frames()
        self.update()

    def next_frame(self):
        """For creating movies. Advances one frame and returns it in a list."""
        self.update()
        return [self.img]