Example #1
    def test_preference(self):
        
        for i in range(3):
            for j in range(2):
                self.assertAlmostEqual(self.fm1.weighted_average()[i,j], 0.5)
                self.assertAlmostEqual(self.fm2.weighted_average()[i,j], 0.5)


        # To test the update function     
        self.fm1.update(self.a1,0.7)
        self.fm2.update(self.a1,0.7)

        for i in range(3):
            for j in range(2):
                self.assertAlmostEqual(self.fm1.weighted_average()[i,j], 0.6)
                vect_sum = wrap(0,1,arg(exp(0.7*2*pi*1j)+exp(0.5*2*pi*1j))/(2*pi)) 
                self.assertAlmostEqual(self.fm2.weighted_average()[i,j],vect_sum)

        # To test keep_peak=True
        self.fm1.update(self.a1,0.7)
        self.fm2.update(self.a1,0.7)

        for i in range(3):
            for j in range(2):
                self.assertAlmostEqual(self.fm1.weighted_average()[i,j], 0.6)
                vect_sum = wrap(0,1,arg(exp(0.7*2*pi*1j)+exp(0.5*2*pi*1j))/(2*pi))
                self.assertAlmostEqual(self.fm2.weighted_average()[i,j],vect_sum)
                                      
        self.fm1.update(self.a2,0.7)
        self.fm2.update(self.a2,0.7)

        for i in range(3):
            for j in range(2):
                self.assertAlmostEqual(self.fm1.weighted_average()[i,j], 0.65)
                vect_sum = wrap(0,1,arg(3*exp(0.7*2*pi*1j)+exp(0.5*2*pi*1j))/(2*pi))
                self.assertAlmostEqual(self.fm2.weighted_average()[i,j],vect_sum)

        # To test further updates
        
        self.fm1.update(self.a3,0.9)
        self.fm2.update(self.a3,0.9)
        
        for i in range(3):
            self.assertAlmostEqual(self.fm1.weighted_average()[i,0], 0.65)
            self.assertAlmostEqual(self.fm1.weighted_average()[i,1], 0.7)
            vect_sum = wrap(0,1,arg(3*exp(0.7*2*pi*1j)+exp(0.5*2*pi*1j))/(2*pi))
            self.assertAlmostEqual(self.fm2.weighted_average()[i,0],vect_sum)
            vect_sum = wrap(0,1,arg(3*exp(0.7*2*pi*1j)+exp(0.5*2*pi*1j)+exp(0.9*2*pi*1j))/(2*pi))
            self.assertAlmostEqual(self.fm2.weighted_average()[i,1],vect_sum)
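The expected values above rely on two helpers not shown in the snippet:
wrap, which maps a value into a cyclic range, and arg, which returns the
complex argument. Below is a minimal hypothetical sketch of both,
assuming wrap(lower, upper, x) maps x into the half-open range
[lower, upper); the project may define them differently.

# Hypothetical stand-ins for the helpers assumed by the test above.
from cmath import exp, phase, pi

def wrap(lower, upper, x):
    """Map x into the cyclic half-open range [lower, upper)."""
    return lower + (x - lower) % (upper - lower)

arg = phase  # principal complex argument, in (-pi, pi]

# The expected 0.6 is the circular mean of the cyclic values 0.7 and
# 0.5 (period 1), computed exactly as in the test:
vect_sum = wrap(0, 1, arg(exp(0.7*2*pi*1j) + exp(0.5*2*pi*1j)) / (2*pi))
print(vect_sum)  # -> 0.6 (approximately)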
Example #2
def measure_tae():
   print("Measuring initial perception of all orientations...")
   before=test_all_orientations(0.0,0.0)
   pylab.figure(figsize=(5,5))
   keys = list(before.keys())
   vectorplot(degrees(keys), degrees(keys), style="--") # add a dashed reference line
   vectorplot(degrees(list(before.values())), degrees(keys),
              title="Initial perceived values for each orientation")

   print("Adapting to pi/2 gaussian at the center of retina for 90 iterations...")
   for p in ["LateralExcitatory","LateralInhibitory","LGNOnAfferent","LGNOffAfferent"]:
      # Value is just an approximate match to bednar:nc00; not calculated directly
      topo.sim["V1"].projections(p).learning_rate = 0.005

   inputs = [pattern.Gaussian(x=0.0, y=0.0, orientation=pi/2.0,
                              size=0.088388, aspect_ratio=4.66667, scale=1.0)]
   topo.sim['Retina'].input_generator.generators = inputs
   topo.sim.run(90)


   print("Measuring adapted perception of all orientations...")
   after=test_all_orientations(0.0,0.0)
   before_vals = array(list(before.values()))
   after_vals  = array(list(after.values()))
   diff_vals   = before_vals-after_vals # Sign flipped to match conventions

   pylab.figure(figsize=(5,5))
   pylab.axvline(90.0)
   pylab.axhline(0.0)
   vectorplot(wrap(-90.0,90.0,degrees(diff_vals)), degrees(keys),
              title="Difference from initial perceived value for each orientation")
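The final plot wraps the perceived-orientation differences into
[-90, 90) because orientation is cyclic with period 180 degrees. A
small standalone check of that convention, using the same assumed
wrap() semantics as the sketch above:

# Raw differences of 170 and -95 degrees are equivalent, on a
# 180-degree cycle, to -10 and 85 degrees respectively.
from numpy import array

def wrap(lower, upper, x):
    return lower + (x - lower) % (upper - lower)

print(wrap(-90.0, 90.0, array([170.0, -95.0, 45.0])))  # [-10.  85.  45.]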
Example #3
    def __call__(self, data, cyclic_range=1.0, **params):
        p = ParamOverrides(self, params)

        r, c = data.shape
        dx = np.diff(data, 1, axis=1)[0:r - 1, 0:c - 1]
        dy = np.diff(data, 1, axis=0)[0:r - 1, 0:c - 1]

        if cyclic_range is not None: # Wrap into the specified range
            # Convert negative differences to an equivalent positive value
            dx = wrap(0, cyclic_range, dx)
            dy = wrap(0, cyclic_range, dy)
            # Make it increase as the gradient reaches the halfway
            # point of the range, and decrease from there
            dx = 0.5 * cyclic_range - np.abs(dx - 0.5 * cyclic_range)
            dy = 0.5 * cyclic_range - np.abs(dy - 0.5 * cyclic_range)

        return super(gradientplot, self).__call__(np.sqrt(dx*dx + dy*dy), **p)
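A worked trace of the folding step above, for cyclic_range=1.0: a raw
difference of -0.8 wraps to 0.2 and stays 0.2 after folding (the short
way around the cycle), while 0.7 folds down to 0.3. This standalone
sketch assumes the same wrap() semantics as the examples above:

import numpy as np

def wrap(lower, upper, x):
    return lower + (x - lower) % (upper - lower)

dx = np.array([-0.8, 0.7, 0.5])          # raw finite differences
dx = wrap(0, 1.0, dx)                    # -> [0.2, 0.7, 0.5]
dx = 0.5 * 1.0 - np.abs(dx - 0.5 * 1.0)  # fold at the halfway point
print(dx)                                # -> [0.2, 0.3, 0.5]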
Example #5
    def function(self,p):
        """Selects and returns one of the patterns in the list."""
        int_index=int(len(p.generators)*wrap(0,1.0,p.index))
        pg=p.generators[int_index]

        image_array = pg(xdensity=p.xdensity,ydensity=p.ydensity,bounds=p.bounds,
                         x=p.x+p.size*(pg.x*cos(p.orientation)-pg.y*sin(p.orientation)),
                         y=p.y+p.size*(pg.x*sin(p.orientation)+pg.y*cos(p.orientation)),
                         orientation=pg.orientation+p.orientation,size=pg.size*p.size,
                         scale=pg.scale*p.scale,offset=pg.offset+p.offset)

        return image_array
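The selection logic maps a continuous index into [0, 1) with wrap and
scales it by the list length to get an integer position. A hypothetical
toy version of just that step:

def wrap(lower, upper, x):
    return lower + (x - lower) % (upper - lower)

generators = ["g0", "g1", "g2"]
for index in [0.0, 0.34, 0.99, 1.2]:
    print(generators[int(len(generators) * wrap(0, 1.0, index))])
# -> g0, g1, g2, g0  (1.2 wraps around to 0.2)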
Example #6
    def vector_sum(self, d):
        """
        Return the vector sum of the distribution as a tuple (magnitude, avgbinnum).

        Each bin contributes a vector of length equal to its value, at
        a direction corresponding to the bin number.  Specifically,
        the total bin number range is mapped into a direction range
        [0,2pi].

        For a cyclic distribution, the avgbinnum will be a continuous
        measure analogous to the max_value_bin() of the distribution.
        But this quantity has more precision than max_value_bin()
        because it is computed from the entire distribution instead of
        just the peak bin.  However, it is likely to be useful only
        for uniform or very dense sampling; with sparse, non-uniform
        sampling the estimates will be biased significantly by the
        particular samples chosen.

        The avgbinnum is not meaningful when the magnitude is 0,
        because a zero-length vector has no direction.  To find out
        whether such cases occurred, you can compare the value of
        undefined_vals before and after a series of calls to this
        function.

        """
        # vectors are represented in polar form as complex numbers
        h = d._data
        r = list(h.values())  # list() so this works with Py2 lists and Py3 views
        theta = d._bins_to_radians(array(list(h.keys())))
        v_sum = innerproduct(r, exp(theta * 1j))

        magnitude = abs(v_sum)
        direction = arg(v_sum)

        if v_sum == 0:
            d.undefined_vals += 1

        direction_radians = d._radians_to_bins(direction)

        # wrap the direction because arctan2 returns principal values
        wrapped_direction = wrap(d.axis_bounds[0], d.axis_bounds[1],
                                 direction_radians)

        return (magnitude, wrapped_direction)
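A toy run of the vector-sum computation, using plain numpy equivalents
(np.inner for innerproduct, np.angle for arg) on a three-bin
distribution whose bins are already in radians; this is an illustrative
sketch, not the class's actual data layout:

import numpy as np

r = np.array([1.0, 1.0, 2.0])            # bin values (vector lengths)
theta = np.array([0.0, np.pi/2, np.pi])  # bin directions in radians
v_sum = np.inner(r, np.exp(theta * 1j))  # = 1 + 1j - 2 = -1 + 1j
print(abs(v_sum))                        # magnitude: ~1.414
print(np.angle(v_sum))                   # direction: ~2.356 (= 3*pi/4)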
Example #8
def measure_dae(scale=0.6):
   print("Measuring initial perception of all directions...")
   before=test_all_directions(0.0,0.0,scale)
   pylab.figure(figsize=(5,5))
   keys = list(before.keys())
   vectorplot(degrees(keys), degrees(keys), style="--") # add a dashed reference line
   vectorplot(degrees(list(before.values())), degrees(keys),
              title="Initial perceived values for each direction")

   print("Adapting to pi/2 sine grating at the center of retina for 90 iterations...")
   
   for p in ["LateralExcitatory","LateralInhibitory",
             "LGNOnAfferent0","LGNOffAfferent0",
             "LGNOnAfferent1","LGNOffAfferent1",
             "LGNOnAfferent2","LGNOffAfferent2",
             "LGNOnAfferent3","LGNOffAfferent3"]:
      # Value is just an approximate match to bednar:nc00; not calculated directly
      topo.sim["V1"].projections(p).learning_rate = 0.005


##    g = pattern.Gaussian(x=0.0,y=0.0,orientation=pi/2.0,size=0.088388,
##                          aspect_ratio=4.66667,scale=1.0)

   g = pattern.SineGrating(frequency=2.4,phase=0.0,orientation=pi/2,scale=scale)
   for j in range(4):
      topo.sim['Retina%s'%j].set_input_generator(pattern.Sweeper(
         generator=copy.deepcopy(g),
         speed=2.0/24.0,
         step=j))
      
   topo.sim.run(90)

   print("Measuring adapted perception of all directions...")
   after=test_all_directions(0.0,0.0,scale)
   before_vals = array(list(before.values()))
   after_vals  = array(list(after.values()))
   diff_vals   = before_vals-after_vals # Sign flipped to match conventions

   pylab.figure(figsize=(5,5))
   pylab.axvline(180.0)
   pylab.axhline(0.0)
   vectorplot(wrap(-180.0,180.0,degrees(diff_vals)), degrees(keys),
              title="Difference from initial perceived value for each direction")
Example #9
    def test_preference(self):

        for i in range(3):
            for j in range(2):
                self.assertAlmostEqual(weighted_average(self.fm1)[i, j], 0.5)
                self.assertAlmostEqual(weighted_average(self.fm2)[i, j], 0.5)

        # To test the update function
        self.fm1.update(self.a1, 0.7)
        self.fm2.update(self.a1, 0.7)

        for i in range(3):
            for j in range(2):
                self.assertAlmostEqual(weighted_average(self.fm1)[i, j], 0.6)
                vect_sum = wrap(
                    0, 1,
                    arg(exp(0.7 * 2 * pi * 1j) + exp(0.5 * 2 * pi * 1j)) /
                    (2 * pi))
                self.assertAlmostEqual(
                    weighted_average(self.fm2)[i, j], vect_sum)

        # To test the keep_peak=True
        self.fm1.update(self.a1, 0.7)
        self.fm2.update(self.a1, 0.7)

        for i in range(3):
            for j in range(2):
                self.assertAlmostEqual(weighted_average(self.fm1)[i, j], 0.6)
                vect_sum = wrap(
                    0, 1,
                    arg(exp(0.7 * 2 * pi * 1j) + exp(0.5 * 2 * pi * 1j)) /
                    (2 * pi))
                self.assertAlmostEqual(
                    weighted_average(self.fm2)[i, j], vect_sum)

        self.fm1.update(self.a2, 0.7)
        self.fm2.update(self.a2, 0.7)

        for i in range(3):
            for j in range(2):
                self.assertAlmostEqual(weighted_average(self.fm1)[i, j], 0.65)
                vect_sum = wrap(
                    0, 1,
                    arg(3 * exp(0.7 * 2 * pi * 1j) + exp(0.5 * 2 * pi * 1j)) /
                    (2 * pi))
                self.assertAlmostEqual(
                    weighted_average(self.fm2)[i, j], vect_sum)

        # To test further updates

        self.fm1.update(self.a3, 0.9)
        self.fm2.update(self.a3, 0.9)

        for i in range(3):
            self.assertAlmostEqual(weighted_average(self.fm1)[i, 0], 0.65)
            self.assertAlmostEqual(weighted_average(self.fm1)[i, 1], 0.7)
            vect_sum = wrap(
                0, 1,
                arg(3 * exp(0.7 * 2 * pi * 1j) + exp(0.5 * 2 * pi * 1j)) /
                (2 * pi))
            self.assertAlmostEqual(weighted_average(self.fm2)[i, 0], vect_sum)
            vect_sum = wrap(
                0, 1,
                arg(3 * exp(0.7 * 2 * pi * 1j) + exp(0.5 * 2 * pi * 1j) +
                    exp(0.9 * 2 * pi * 1j)) / (2 * pi))
            self.assertAlmostEqual(weighted_average(self.fm2)[i, 1], vect_sum)
Example #10
    def get_current_generator(self):
        """Return the current generator (as specified by self.index)."""
        int_index = int(len(self.generators)*wrap(0,1.0,self.inspect_value('index')))
        return self.generators[int_index]