Example #1
def create_N(_n, _xmax, _ymax):
    _nodes = []
    for i in range(_n):
        _tmpx = pylab.rand()*_xmax
        _tmpy = pylab.rand()*_ymax
        _nodes.append((_tmpx, _tmpy))
    return _nodes
Example #2
	def __init__(self, phase_potrait, network, info=None, position=None):
		self.system = phase_potrait
		self.network = network
		self.CYCLES = 10
		self.info = info
		self.initial_condition = self.system.load_initial_condition(pl.rand(), pl.rand(), pl.rand())

		self.fig = pl.figure('Voltage Traces', figsize=(6, 2), facecolor='#EEEEEE')
		self.ax = self.fig.add_subplot(111, frameon=False, yticks=[])

		self.li_b, = self.ax.plot([], [], 'b-', lw=2.)
		self.li_g, = self.ax.plot([], [], 'g-', lw=2.)
		self.li_r, = self.ax.plot([], [], 'r-', lw=2.)
		self.li_y, = self.ax.plot([], [], 'y-', lw=2.)

		self.ax.set_xlabel(r'time (sec.)', fontsize=20)

		self.ax.set_xticklabels(np.arange(0., 1., 0.1), fontsize=15)
		self.ax.set_yticklabels(np.arange(0., 1., 0.1), fontsize=15)
		
		self.ax.set_xlim(0., 100.)
		self.ax.set_ylim(-0.06-0.18, 0.04)

		self.fig.tight_layout()

		self.key_func_dict = dict(u=traces.increase_cycles, i=traces.decrease_cycles)
		self.fig.canvas.mpl_connect('key_press_event', self.on_key)
		self.fig.canvas.mpl_connect('axes_enter_event', self.focus_in)

		if position is not None:
			try:
				self.fig.canvas.manager.window.wm_geometry(position)
			except:
				pass
Example #3
def check():
    gamma = 0.01
    X = matrix(rand(64,10))
    S = matrix(rand(64,10))
    args = (S, X, gamma)
    x0 = rand(4096,)
    return check_grad(f_l2_wd, g_l2_wd, x0, *args)
Example #4
	def __init__(self, phase_potrait, info=None, position=None):
		win.window.__init__(self, position)
		self.system = phase_potrait
		self.info = info
		self.CYCLES = 8
		self.running = False
		self.pulsed = 0
		self.num_osci = 8
		self.state = np.random.randn(self.num_osci*model.N_EQ1)
		self.initial_condition = self.system.load_initial_condition(pl.rand(), pl.rand())

		self.ax = self.fig.add_subplot(111, frameon=False, yticks=[])

		self.li = [self.ax.plot([], [], 'k-', lw=2.)[0] for i in xrange(self.num_osci)]

		self.ax.set_xlabel(r'time (sec.)', fontsize=20)

		self.ax.set_xticklabels(np.arange(0., 1., 0.1), fontsize=15)
		self.ax.set_yticklabels(np.arange(0., 1., 0.1), fontsize=15)
		
		self.ax.set_xlim(0., 100.)
		self.ax.set_ylim(-1.5-self.num_osci*2, 1.5)

		self.key_func_dict.update(dict(u=traces.increase_cycles, i=traces.decrease_cycles))
		self.fig.canvas.mpl_connect('button_press_event', self.on_click)
		self.fig.canvas.mpl_connect('axes_enter_event', self.focus_in)
Example #5
def normal_test_case():
    '''
    Runs a test case with simulated data from a normal distribution.
    '''
    obs, fa, dur = [], [], []
    for n in range(15):
        d, f, o = make_test_data(
            5, split=min(plt.rand()*50+120, 170),
            intercept=plt.rand()*50 + 225,
            slope1=1 + plt.randn()/0.75, slope2=plt.randn()/.75)
        obs.append(o+n)
        fa.append(f)
        dur.append(d)
        plt.plot(f, d, 'o', alpha=0.1)

    dur, fa, obs = (np.hstack(dur)[:, np.newaxis],
                    np.hstack(fa)[:, np.newaxis],
                    np.hstack(obs)[:, np.newaxis])

    dur_mean = dur.mean()
    dur_std = dur.std()
    dur = (dur-dur_mean)/dur_std

    m = normal_model(dur, fa, obs)
    trace = sample_model(m, 5000)
    predict(trace, 5, 2500, {'mean': dur_mean, 'std': dur_std})
    plt.figure()
    traceplot(trace, 2, 2500)
    return dur, fa, obs, (dur_mean, dur_std), trace
Example #6
	def computeTraces(self, initial_condition=None, plotit=True):

		if initial_condition is None:
			initial_condition = self.system.load_initial_condition(pl.rand(), pl.rand())

		V_i = fh.integrate_three_rk4(
				initial_condition,
				self.network.coupling_strength,
				self.system.dt/float(self.system.stride),
				self.system.N_output(self.CYCLES),
				self.system.stride)

		t = self.system.dt*np.arange(V_i.shape[0])

		if plotit:
			ticks = np.asarray(t[::t.size//10], dtype=int)

			xscale, yscale = t[-1], 2.
			for (i, li) in enumerate([self.li_b, self.li_g, self.li_r]):
				tj, Vj = tl.adjustForPlotting(t, V_i[:, i], ratio=xscale/yscale, threshold=0.05*xscale)
				li.set_data(tj, Vj-i*2)

			self.ax.set_xticks(ticks)
			self.ax.set_xticklabels(ticks)
			self.ax.set_xlim(t[0], t[-1])
			self.fig.canvas.draw()

		return t, V_i
Example #7
	def computeTraces(self, initial_condition=None, plotit=True):

		if initial_condition is None:
			initial_condition = self.system.load_initial_condition(pl.rand(), pl.rand())

		V_i = fh.integrate_three_rk4(
				initial_condition,
				self.network.coupling_strength,
				self.system.dt/float(self.system.stride),
				self.system.N_output(self.CYCLES),
				self.system.stride)

		t = self.system.dt*np.arange(V_i.shape[0])

		if plotit:
			ticks = np.asarray(t[::t.size//10], dtype=int)
			self.li_b.set_data(t, V_i[:, 0])
			self.li_g.set_data(t, V_i[:, 1]-2.)
			self.li_r.set_data(t, V_i[:, 2]-4.)
			self.ax.set_xticks(ticks)
			self.ax.set_xticklabels(ticks)
			self.ax.set_xlim(t[0], t[-1])
			self.fig.canvas.draw()

		return t, V_i
Example #8
	def __init__(self, phase_potrait, network, info=None, position=None):
		win.window.__init__(self, position)
		self.system = phase_potrait
		self.network = network
		self.info = info
		self.CYCLES = 10
		self.initial_condition = self.system.load_initial_condition(pl.rand(), pl.rand())

		self.ax = self.fig.add_subplot(111, frameon=False, yticks=[])

		self.li_b, = self.ax.plot([], [], 'b-', lw=2.)
		self.li_g, = self.ax.plot([], [], 'g-', lw=2.)
		self.li_r, = self.ax.plot([], [], 'r-', lw=2.)

		self.ax.set_xlabel(r'time (sec.)', fontsize=20)

		self.ax.set_xticklabels(np.arange(0., 1., 0.1), fontsize=15)
		self.ax.set_yticklabels(np.arange(0., 1., 0.1), fontsize=15)
		
		self.ax.set_xlim(0., 100.)
		self.ax.set_ylim(-5.5, 1.5)

		#self.fig.tight_layout()

		self.key_func_dict = dict(u=traces.increase_cycles, i=traces.decrease_cycles)
		self.fig.canvas.mpl_connect('key_press_event', self.on_key)
		self.fig.canvas.mpl_connect('axes_enter_event', self.focus_in)
Example #9
	def __init__(self, system, network, info=None, position=None):
		win.window.__init__(self, position)
		self.system = system
		self.network = network
		self.info = info
		self.CYCLES = 8
		self.state = system.load_initial_condition( pl.rand(), pl.rand() )
		self.initial_condition = self.system.load_initial_condition(pl.rand(), pl.rand())
		self.running = False
		self.pulsed = 0

		self.ax = self.fig.add_subplot(111, frameon=False, yticks=[])

		self.li_b, = self.ax.plot([], [], 'b-', lw=1.)
		self.li_g, = self.ax.plot([], [], 'g-', lw=1.)
		self.li_r, = self.ax.plot([], [], 'r-', lw=1.)

		self.ax.set_xlabel(r'time (sec.)', fontsize=20)

		self.ax.set_xticklabels(np.arange(0., 1., 0.1), fontsize=15)
		self.ax.set_yticklabels(np.arange(0., 1., 0.1), fontsize=15)
		
		self.ax.set_xlim(0., 100.)
		self.ax.set_ylim(-0.06-0.12, 0.04)

		self.key_func_dict = dict(u=traces.increase_cycles, i=traces.decrease_cycles)
		self.fig.canvas.mpl_connect('button_press_event', self.on_click)
		self.fig.canvas.mpl_connect('axes_enter_event', self.focus_in)
Example #10
def datagen(N):
    """
    Produces N pairs of training data and desired output;
    each sample of training data contains -1 in its first position,
    this corresponds to the interpretation of the threshold as first
    element of the weight vector
    """

    fun1 = lambda x1,x2: -2*x1**3-x2+.5*x1**2
    fun2 = lambda x1,x2: x1**2*x2+2*x1*x2+1
    fun3 = lambda x1,x2: .5*x1*x2**2+x2**2-2*x1**2
    
    rarr1 = rand(1,N)
    rarr2 = rand(1,N)
    
    teacher = sign(rand(1,N)-.5)
    
    idplus  = (teacher<0)
    idminus = ~idplus
    
    rarr1[idplus] = rarr1[idplus]-1
    
    y1=fun1(rarr1,rarr2)
    y2=fun2(rarr1,rarr2)
    y3=fun3(rarr1,rarr2)
    
    x=transpose(concatenate((-ones((1,N)),y1,y2)))
    
    return x, teacher[0]
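A usage sketch, assuming datagen and the pylab names it relies on (rand, sign, ones, concatenate, transpose) are in scope:

x, teacher = datagen(20)
print(x.shape)         # (20, 3): each row is [-1, fun1(..), fun2(..)]
print(set(teacher))    # labels are +1.0 / -1.0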
Example #11
 def step(self):
     # if not tumbling, pick random number.  If less than RUN_P, move RUN_R in direction of orientation.  else, start tumbling.
     # if tumbling, pick random number.  If greater than TUMBLE_P, rotate by TUMBLE_R.  else, stop tumbling.
     # matplotlib has (0,0) in the upper left - adapt trig accordingly...
             
     if not self.TUMBLE:
         p = rand()
         
         if p < self.run_p:
             self.xy[0] = (self.xy[0] + RUN_R*np.sin(self.th*np.pi/180.))%self.frame_lim
             self.xy[1] = (self.xy[1] - RUN_R*np.cos(self.th*np.pi/180.))%self.frame_lim
         else:
             self.TUMBLE = True
     
     if self.TUMBLE:
         p = rand()
         
         if p > self.tumble_p:
             q = rand()
             if q > 0.5:
                 self.th = (self.th + TUMBLE_R)%360
             else:
                 self.th = (self.th - TUMBLE_R)%360
         else:
             self.TUMBLE = False
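The comments above describe a run-and-tumble update. Below is a minimal self-contained sketch of the same logic written as a plain function; the constants RUN_R, TUMBLE_R, RUN_P, TUMBLE_P and FRAME_LIM are hypothetical stand-ins for the class attributes used in the method:

import numpy as np

# Hypothetical stand-ins for the class attributes / module constants used above.
RUN_R, TUMBLE_R = 1.0, 30.0     # run step length and tumble turn angle (degrees)
RUN_P, TUMBLE_P = 0.8, 0.5      # keep-running / keep-tumbling probabilities
FRAME_LIM = 100.0               # periodic boundary of the frame

def run_and_tumble_step(x, y, th, tumbling):
    # One update of the run-and-tumble logic described in the comments above.
    if not tumbling:
        if np.random.rand() < RUN_P:
            # Run: move RUN_R along the current heading; y decreases "upward"
            # because matplotlib puts (0, 0) in the upper left.
            x = (x + RUN_R * np.sin(np.radians(th))) % FRAME_LIM
            y = (y - RUN_R * np.cos(np.radians(th))) % FRAME_LIM
        else:
            tumbling = True
    if tumbling:
        if np.random.rand() > TUMBLE_P:
            # Tumble: rotate by +/- TUMBLE_R degrees, direction chosen at random.
            th = (th + np.sign(np.random.rand() - 0.5) * TUMBLE_R) % 360
        else:
            tumbling = False
    return x, y, th, tumbling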
Example #12
def test_add_out_of_center_l(plot=False, color=1):
    # use the same data for both experiments:
    num_imgs = 200

    color_mult = [1, 3][color]

    img_res = 15
    padding = 4

    targ_pix = img_res ** 2 * color_mult
    img_pix = (img_res + 2 * padding) ** 2 * color_mult

    inputs = rand(num_imgs, img_pix) < 0.1
    targets = rand(num_imgs, targ_pix) * 0.4

    inputs[1:] *= 0

    # targets = g.rand(num_imgs,targ_pix)

    ans = [None] * 2
    print "a"
    for (i, gx) in enumerate([g, gc]):
        conv.g = gx  # wonderful. This seems to work. At least.
        conv._cu = gx._cudamat

        a = gx.garray(inputs)
        ans[i] = conv.add_out_of_center_l(a, gx.garray(targets), color=color).asarray()
        print "b"

    print abs(ans[0] - ans[1]).max()

    if plot:
        if not color:
            from pylab import show, subplot

            subplot(221)
            show(inputs[0])
            subplot(223)
            show(ans[0][0])
            subplot(224)
            show(ans[1][0])
        else:
            from pylab import show, subplot

            r = img_pix / color_mult
            subplot(331)
            show(inputs[0][:r])

            r = ans[0].shape[1] / 3
            subplot(332)
            show(ans[0][0][:r])
            subplot(334)
            show(ans[1][0][:r])
            subplot(335)
            show(ans[1][0][r : 2 * r])
            subplot(336)
            show(ans[1][0][2 * r : 3 * r])
Example #13
def sim_time(i):
    n = pylab.randint(N_MIN, N_MAX)
    alpha = pylab.rand()
    net = random_network(n)
    r = ne_capacity(net)*((1-MIN_DEMAND)*pylab.rand() + MIN_DEMAND)
    tic = time.clock()
    optimal_stackelberg(net,r,alpha)
    val =  (n,time.clock() - tic)
    print val
    return val
Example #14
def make_2DLinearSeparable_Dataset(n):
  xb = (rand(n)*2-1)/2-0.5
  yb = (rand(n)*2-1)/2+0.5
  xr = (rand(n)*2-1)/2+0.5
  yr = (rand(n)*2-1)/2-0.5
  inputs = []
  for i in range(len(xb)):
    inputs.append([xb[i],yb[i],1])
    inputs.append([xr[i],yr[i],-1])
  return inputs
Example #15
def genererDonnees(n):
    xb=(pl.rand(n)*2-1)/2-0.5
    yb=(pl.rand(n)*2-1)/2+0.5
    xr=(pl.rand(n)*2-1)/2+0.5
    yr=(pl.rand(n)*2-1)/2-0.5
    donnees=[]
    for i in range(len(xb)):
        donnees.append(((xb[i],yb[i]),-1))
        donnees.append(((xr[i],yr[i]),1))
    return donnees
Example #16
def add_scatter():
    ax = fig.add_axes([0.6, 0.125, 0.15, 0.4])
    ax.axesPatch.set_alpha(axalpha)
    N = 40
    volume = 100 * rand(N)
    color = 256 * rand(N)
    darkgray = [0.2] * 3
    plt.scatter(rand(N), rand(N), c=color, s=volume, alpha=0.75, edgecolor=darkgray)
    plt.axis("tight")
    ax.set_yticks([])
    ax.set_xticks([])
Example #17
def genererDonnees(n):
    "Generer un jeu de donnees 2D lineairement separable de taille n"
    xb=(rand(n)*2-1)/2-0.5
    yb=(rand(n)*2-1)/2+0.5
    xr=(rand(n)*2-1)/2+0.5
    yr=(rand(n)*2-1)/2-0.5
    donnees=[]
    for i in range (len(xb)):
        donnees.append(((xb[i],yb[i]),False))
        donnees.append(((xr[i],yr[i]),True))
    return donnees
Example #18
def gen_data(n):
    xb = (rand(n)*2-1)/2-0.5
    yb = (rand(n)*2-1)/2+0.5
    inputs = [[xb[i], yb[i]] for i in xrange(len(xb))]
    targets = [[i] for i in repeat(1, len(xb))]

    xr = (rand(n)*2-1)/2+0.5
    yr = (rand(n)*2-1)/2-0.5
    inputs = inputs + [[xr[i], yr[i]] for i in xrange(len(xr))]
    targets = targets + [[i] for i in repeat(0, len(xr))]

    return np.array(inputs), np.array(targets)
Example #19
def choose_patches(IMAGES, L, batch_size=1000):
    sz = int(sqrt(L))
    imsz = shape(IMAGES)[0]
    num_images = shape(IMAGES)[2]
    BUFF = 4

    X = matrix(zeros([L,batch_size],'d'))
    for i in range(batch_size):
        j = int(floor(num_images * rand()))
        r = sz/2+BUFF+int(floor((imsz-sz-2*BUFF)*rand()))
        c = sz/2+BUFF+int(floor((imsz-sz-2*BUFF)*rand()))
        X[:,i] = reshape(IMAGES[r-sz/2:r+sz/2,c-sz/2:c+sz/2,j],[L,1])
    return X
Example #20
def generateData(m, type_):

    data = []
    if type_ == "own-ns":

        feature_1_c1 = (rand(m)*3-1)/2 - 0.1
        feature_2_c1 = (rand(m)*3-1)/4 + 0.2

        feature_1_c2 = (rand(m)*2-1)/2 + 0.2
        feature_2_c2 = (rand(m)*3-1)/2 - 0.1

        for i in range(m):
            data.append([feature_1_c1[i], feature_2_c1[i], 1])

        for i in range(m):
            data.append([feature_1_c2[i], feature_2_c2[i], -1])

    elif type_ == "own-s":

        feature_1_c1 = (rand(m)*2-1)/2 - 0.6
        feature_2_c1 = (rand(m)*2-1)/2 + 0.6

        feature_1_c2 = (rand(m)*2-1)/2 + 0.6
        feature_2_c2 = (rand(m)*2-1)/2 - 0.6

        for i in range(m):
            data.append([feature_1_c1[i], feature_2_c1[i], 1])

        for i in range(m):
            data.append([feature_1_c2[i], feature_2_c2[i], -1])

    return data
Example #21
def make2DLinearSeparableDataset(n):
 """ 
  generates a 2D linearly separable dataset with n samples. 
  The third element of the sample is the label
 """
 xb = (rand(n)*2-1)/2-0.5
 yb = (rand(n)*2-1)/2+0.5
 xr = (rand(n)*2-1)/2+0.5
 yr = (rand(n)*2-1)/2-0.5
 inputs = []
 for i in range(len(xb)):
  inputs.append([xb[i],yb[i],1])
  inputs.append([xr[i],yr[i],-1])
 return inputs
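A usage sketch, assuming make2DLinearSeparableDataset and pylab's rand as above:

data = make2DLinearSeparableDataset(10)
print(len(data))   # 20 samples, alternating +1 / -1 labels
print(data[0])     # [x, y, 1]  -- a point from the first cluster
print(data[1])     # [x, y, -1] -- a point from the second cluster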
Example #22
def generateData(n):
 """
  generates a 2D linearly separable dataset with n samples.
  The third element of the sample is the label
 """
 xb = (rand(n)*2-1)/2-0.5
 yb = (rand(n)*2-1)/2+0.5
 xr = (rand(n)*2-1)/2+0.5
 yr = (rand(n)*2-1)/2-0.5
 inputs = []
 for i in range(len(xb)):
  inputs.append(((xb[i],yb[i]),0))
  inputs.append(((xr[i],yr[i]),1))
 return inputs
Example #23
 def testActivityMap(self):
     self.spk.dimensions = [5, 10]
     self.spk.activity_map(t_start = 1000, t_stop = 2000, display=pylab.subplot(211), kwargs={'interpolation':'bicubic'})
     positions = pylab.rand(2, 50)
     self.spk.activity_map(float_positions = positions, display=pylab.subplot(212))
     pylab.savefig("Plots/SpikeList_activitymaps.png")
     pylab.close()
Example #24
 def __init__(self):
     '''
     perceptron initialization
     '''
     self.w = rand(2)*2 -1
     self.learningRate = 0.05
     self.test_steps = 10
Example #25
def randomwalk(sf, N=None):
    """
    Generates a randomwalk in a ndarray of given shape sf and length N

    --------------------------------------------------------------------------
    Usage:
    
    Call:  w = randomwalk(sf, N=None)
    
    Input: sf  size of ndarray w
           N   length of randomwalk

    Output: ndarray w containing randomwalk of length N.
            Note, that w is normalized to w.sum() = 1
    --------------------------------------------------------------------------

    Copyright (C) 2011 Michael Hirsch   
    """
    if N is None:
        N = np.floor(np.prod(sf)/100)

    w = np.zeros(sf)
    ndims = len(np.shape(w))
    center = np.ceil(np.array(sf)/2).astype(int)  # integer center index
    w[tuple(center)] = 1.

    loc = center.astype(float)
    for i in np.arange(N):
        loc += ((pylab.rand(ndims)-0.5)*2).round()
        loc = np.clip(loc, 1, np.array(sf).min()-1)
        w[tuple(loc.astype(int))] += 1

    w /= w.sum()
    return w
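A usage sketch matching the docstring above (assumes numpy as np and pylab are imported as in the snippet):

w = randomwalk((32, 32), N=500)
print(w.shape)             # (32, 32)
print(round(w.sum(), 6))   # 1.0 -- normalized as documented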
Example #26
def randu(*shape):
    """Generate uniformly random values in the range (-1,1).
    This can usually be used as a drop-in replacement for `randn`
    resulting in a different distribution for weight initializations.
    Empirically, the choice of randu/randn can make a difference
    for neural network initialization."""
    return 2*rand(*shape)-1
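A quick check of randu; it relies on rand being in scope (e.g. from pylab import rand):

from pylab import rand

x = randu(3, 4)
print(x.shape)                            # (3, 4)
print((x >= -1).all() and (x < 1).all())  # True: values lie in [-1, 1)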
Example #27
def make_fiber(l, a, stepsize=0.5):
    angles = np.random.standard_cauchy(l) * a
    angles[0] += 2 * pi * pylab.rand()
    angles = add.accumulate(angles)
    coss = add.accumulate(cos(angles) * stepsize)
    sins = add.accumulate(sin(angles) * stepsize)
    return array([coss, sins]).transpose(1, 0)
Example #28
 def __init__(self):
     """ perceptron initialization """
     self.w = rand(2) * 2 - 1  # weights
     self.learningRate = 0.01
     self.momentum = 0.8
     self.d = 3072
     self.k = 500  # number of hidden layers
     self.n = 10000
     self.nTest = 2000
     self.W1 = np.random.random_sample(
         (self.d + 1, self.k)
     )  # [[rand(1)*2-1 for x in range(self.k)] for x in range(self.d+1)] #for W10
     self.W2 = np.random.random_sample(
         (self.k + 1, 1)
     )  #[[rand(1)*2-1 for x in range(1)] for x in range(self.k+1)]#for W20
     self.Z1 = np.zeros((self.k + 1, 1))
     self.Z2 = 0
     self.f1 = np.zeros((self.k + 1, 1))
     self.f2 = 0
     self.maxIteration = 10000
     self.globalError = np.zeros((self.maxIteration, 1))
     self.globalTestError = np.zeros((self.maxIteration, 1))
     self.batchsize = 1
     self.dict = cPickle.load(open("cifar_2class_py2.p", "rb"))
     self.shuffledData = np.random.permutation(self.n)
     self.D1 = np.zeros((self.d + 1, self.k))
     self.D2 = np.zeros((self.k + 1, 1))
     self.G1 = np.zeros((self.d + 1, self.k))
     self.G2 = np.zeros((self.k + 1, 1))
     self.dictTraindata = np.zeros((self.n, self.d))
     self.dictTrainLables = np.zeros((self.n, 1))
     self.dictTestdata = np.zeros((self.nTest, self.d))
     self.dictTestLables = np.zeros((self.nTest, 1))
Example #29
 def plotReachSet_norm1(self, NUM, figname):
     fig = p.figure()
     for j in range(n):
         ax = fig.add_subplot(2,2,j+1 , aspect='equal')
         ax.set_xlim(0, 4)
         ax.set_ylim(0, 1)
         ax.set_xlabel('$x_'+str(j+1)+'$')
         ax.set_ylabel('$y_'+str(j+1)+'$')
         for trace in self:
             for i in [int(floor(k*len(trace.T)/NUM)) for k in range(NUM)]:
                 verts = [(trace.x[i][j] + 1/trace.d_norm1[i][2*j], trace.y[i][j]),
                          (trace.x[i][j], trace.y[i][j] - 1/trace.d_norm1[i][2*j+1]),
                          (trace.x[i][j] - 1/trace.d_norm1[i][2*j], trace.y[i][j]),
                          (trace.x[i][j], trace.y[i][j] + 1/trace.d_norm1[i][2*j+1])]
                 # poly = Ellipse((trace.x[i][j],trace.y[i][j]), width=trace.d1[i], height=trace.d2[i], angle=trace.theta[i])
                 poly = Polygon(verts, facecolor='0.8', edgecolor='k')
                 ax.add_artist(poly)
                 poly.set_clip_box(ax.bbox)
                 poly.set_alpha(1)
                 if i==0:
                     poly.set_facecolor('r')
                 else:
                     poly.set_facecolor(p.rand(3))
         #for trace in self:
             #e = Ellipse((trace.x[0][j],trace.y[0][j]), width=trace.d1[0], height=trace.d2[0], angle=trace.theta[0])
             #ax.add_artist(e)
             #e.set_clip_box(ax.bbox)
             #e.set_alpha(1)
             #e.set_facecolor('r')
 #e.set_edgecolor('r')
     p.savefig(figname)
Example #30
def test_matrix_to_grid(plot=False):
    # use the same data for both experiments:
    num_imgs = 200

    img_res = 20
    square_size = 10

    regions_per_square = (img_res / square_size) ** 2

    img_pix = img_res ** 2
    inputs = rand(num_imgs * regions_per_square, square_size ** 2) < 0.1

    ans = [None] * 2
    print "a"
    for (i, gx) in enumerate([g, gc]):
        conv.g = gx  # wonderful. This seems to work. At least.
        conv._cu = gx._cudamat

        a = gx.garray(inputs)
        ans[i] = conv.matrix_to_grid(a, img_res).asarray()
        print "b"

    print abs(ans[0] - ans[1]).max()

    # quite successful, indeed.

    if plot:
        from pylab import show, subplot

        subplot(221)
        show(inputs[0])
        subplot(223)
        show(ans[0][0])
        subplot(224)
        show(ans[1][0])
Example #31
 def test_block_hankel(self):
     """
     Block hankel function.
     """
     y = pl.rand(3, 100)
     Y = sysid.subspace.block_hankel(y, 5)
     self.assertEqual(Y.shape, (15, 95))
Example #32
 def createCellsFixedNum(self):
     ''' Create population cells based on fixed number of cells'''
     cellModelClass = Cell
     cells = []
     seed(f.sim.id32('%d'%(f.cfg['randseed']+self.tags['numCells'])))
     randLocs = rand(self.tags['numCells'], 3)  # create random x,y,z locations
     for icoord, coord in enumerate(['x', 'y', 'z']):
         if coord+'Range' in self.tags:  # if user provided absolute range, convert to normalized
             self.tags[coord+'normRange'] = [point / f.net.params['size'+coord.upper()] for point in self.tags[coord+'Range']]
         if coord+'normRange' in self.tags:  # if normalized range, rescale random locations
             minv = self.tags[coord+'normRange'][0] 
             maxv = self.tags[coord+'normRange'][1] 
             randLocs[:,icoord] = randLocs[:,icoord] * (maxv-minv) + minv  # rescale from [0,1] into [minv,maxv]
     
     for i in xrange(int(f.rank), f.net.params['scale'] * self.tags['numCells'], f.nhosts):
         gid = f.lastGid+i
         self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
         cellTags = {k: v for (k, v) in self.tags.iteritems() if k in f.net.params['popTagsCopiedToCells']}  # copy all pop tags to cell tags, except those that are pop-specific
         cellTags['xnorm'] = randLocs[i,0] # set x location (normalized)
         cellTags['ynorm'] = randLocs[i,1] # set y location (normalized)
         cellTags['znorm'] = randLocs[i,2] # set z location (normalized)
         cellTags['x'] = f.net.params['sizeX'] * randLocs[i,0] # set x location (um)
         cellTags['y'] = f.net.params['sizeY'] * randLocs[i,1] # set y location (um)
         cellTags['z'] = f.net.params['sizeZ'] * randLocs[i,2] # set z location (um)
         if 'propList' not in cellTags: cellTags['propList'] = []  # initalize list of property sets if doesn't exist
         cells.append(cellModelClass(gid, cellTags)) # instantiate Cell object
         if f.cfg['verbose']: print('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, f.net.params['scale'] * self.tags['numCells']-1, gid, self.tags['popLabel'], f.rank))
     f.lastGid = f.lastGid + self.tags['numCells'] 
     return cells
Example #33
    def _shapeStim(self,
                   isi=1,
                   variation=0,
                   width=0.05,
                   weight=10,
                   start=0,
                   finish=1,
                   stimshape='gaussian'):
        from pylab import r_, convolve, shape, exp, zeros, hstack, array, rand

        # Create event times
        timeres = 0.001  # Time resolution = 1 ms = 500 Hz (DJK to CK: 500...?)
        pulselength = 10  # Length of pulse in units of width
        currenttime = 0
        timewindow = finish - start
        allpts = int(timewindow / timeres)
        output = []
        while currenttime < timewindow:
            # Note: The timeres/2 subtraction acts as an eps to avoid later int rounding errors.
            if currenttime >= 0 and currenttime < timewindow - timeres / 2:
                output.append(currenttime)
            currenttime = currenttime + isi + variation * (rand() - 0.5)

        # Create single pulse
        npts = int(pulselength * width / timeres)
        x = (r_[0:npts] - npts / 2 + 1) * timeres
        if stimshape == 'gaussian':
            pulse = exp(-2 * (2 * x / width - 1)**
                        2)  # Offset by 2 standard deviations from start
            pulse = pulse / max(pulse)
        elif stimshape == 'square':
            pulse = zeros(shape(x))
            pulse[int(npts / 2):int(npts / 2) +
                  int(width / timeres)] = 1  # Start exactly on time
        else:
            raise Exception('Stimulus shape "%s" not recognized' % stimshape)

        # Create full stimulus
        events = zeros((allpts))
        events[array(array(output) / timeres, dtype=int)] = 1
        fulloutput = convolve(
            events, pulse, mode='full'
        ) * weight  # Calculate the convolved input signal, scaled by rate
        fulloutput = fulloutput[int(npts / 2 - 1):int(
            -npts / 2
        )]  # Slices out where the convolved pulse train extends before and after sequence of allpts.
        fulltime = (r_[0:allpts] * timeres +
                    start) * 1e3  # Create time vector and convert to ms

        fulltime = hstack(
            (0, fulltime, fulltime[-1] + timeres *
             1e3))  # Create "bookends" so always starts and finishes at zero
        fulloutput = hstack(
            (0, fulloutput,
             0))  # Set weight to zero at either end of the stimulus period
        events = hstack((0, events, 0))  # Ditto
        stimvecs = deepcopy([fulltime, fulloutput,
                             events])  # Combine vectors into a matrix

        return stimvecs
Example #34
def randu(*shape):
    """Generate uniformly random values in the range (-1,1).
    This can usually be used as a drop-in replacement for `randn`
    resulting in a different distribution for weight initializations.
    Empirically, the choice of randu/randn can make a difference
    for neural network initialization."""
    return 2*rand(*shape)-1
Example #35
def main():
    plt.ion()

    fil = FletcherFilter()
    Niter = 12
    logp = plt.zeros((Niter,2))
    for k in range(Niter):
        while True:
            #print k
            p = plt.rand(2)
            if not fil.dominated(p):
                break
        logp[k] = p
        fil.add(p, 0.0, 0.0)
        ff = fil.values[fil.valid]
        ff = plt.r_[[[1e-6,1]], ff[plt.argsort(ff[:,0])], [[1,1e-6]]]
        ww = plt.zeros((ff.shape[0] * 2 - 1, 2))
        ww[::2] = ff
        ww[1::2,0] = ff[1:,0]
        ww[1::2,1] = ff[:-1,1]
        plt.loglog(ww[:,0], ww[:,1], '-')
    plt.loglog(logp[:,0], logp[:,1], 'ys-', lw=2)
    plt.axis([0,1,0,1])
    plt.axis('equal')
    plt.grid()
        
    code.interact()
Example #36
 def __init__(self):

  """ inicialización del perceptron  """

  self.w = rand(2)*2-1 # pesos

  self.tasaAprendizaje = 0.1 
Example #37
def make_fibrous_image(shape,
                       nfibers=300,
                       l=300,
                       a=0.2,
                       stepsize=0.5,
                       limits=(0.1, 1.0),
                       blur=1.0):
    h, w = shape
    lo, hi = limits
    result = zeros(shape)
    for i in range(nfibers):
        v = pylab.rand() * (hi - lo) + lo
        fiber = make_fiber(l, a, stepsize=stepsize)
        y, x = randint(0, h - 1), randint(0, w - 1)
        fiber[:, 0] += y
        fiber[:, 0] = clip(fiber[:, 0], 0, h - .1)
        fiber[:, 1] += x
        fiber[:, 1] = clip(fiber[:, 1], 0, w - .1)
        for y, x in fiber:
            result[int(y), int(x)] = v
    result = ndi.gaussian_filter(result, blur)
    result -= amin(result)
    result /= amax(result)
    result *= (hi - lo)
    result += lo
    return result
Example #38
def test_json():
    sc.heading('Testing JSON read/write functions')

    not_jsonifiable = sc.Blobject(
    )  # Create an object that can't be JSON serialized

    print('Testing jsonifying a NON-jsonifiable object:')
    notjson = sc.jsonify(not_jsonifiable,
                         die=False)  # Will return a string representation
    sc.sanitizejson(not_jsonifiable,
                    die=True)  # Will still not die thanks to jsonpickle

    jsonifiable = sc.objdict().make(keys=['a', 'b'], vals=pl.rand(10))
    json_obj = sc.jsonify(jsonifiable)
    json_str = sc.jsonify(jsonifiable, tostring=True,
                          indent=2)  # kwargs are passed to json.dumps()

    print('Not-a-JSON as sanitized object:')
    print(notjson)
    print('JSON as sanitized object:')
    print(json_obj)
    print('JSON as string:')
    print(json_str)

    return json_str
Example #39
 def norm(pars, noise=0.0, optimum='min', delay=None):
     if delay:
         pl.pause(delay * (0.5 + 0.5 * pl.rand()))  # Add a noticeable delay
     err = pl.linalg.norm(pars)
     err = addnoise(err, noise)
     if optimum == 'max':
         err = -err
     return err
Example #40
    def _sample_posteriors_noloop(self, true_N_A, p_A, N_samples):
        true_N_B = self.N_u - true_N_A
        N_values = pl.shape(true_N_A)[0]
        posteriors = pl.zeros((N_samples, N_values))
        for (i, (t_N_A, t_N_B)) in enumerate(zip(true_N_A, true_N_B)):
            A_probs = pl.ones((N_samples, self.N_u))
            A_probs[:, :t_N_A] *= self.p_uA_given_A
            A_probs[:, t_N_A:] *= self.p_uA_given_B
            B_probs = pl.ones((N_samples, self.N_u))
            B_probs[:, :t_N_A] *= self.p_uB_given_A
            B_probs[:, t_N_A:] *= self.p_uB_given_B

            N_A = pl.sum(A_probs > pl.rand(N_samples, self.N_u), 1)
            N_B = pl.sum(B_probs > pl.rand(N_samples, self.N_u), 1)

            posteriors[:, i] = self._p_A_given_N_A(N_A, p_A, N_B)
        return pl.mean(posteriors, 0)
Example #41
def bounded_gaussian_noise(shape, sigma, maxdelta):
    n, m = shape
    deltas = pylab.rand(2, n, m)
    deltas = ndi.gaussian_filter(deltas, (0, sigma, sigma))
    deltas -= np.amin(deltas)
    deltas /= np.amax(deltas)
    deltas = (2 * deltas - 1) * maxdelta
    return deltas
Example #42
def make_noise_at_scale(shape, scale):
    h, w = shape
    h0, w0 = int(h / scale + 1), int(w / scale + 1)
    data = pylab.rand(h0, w0)
    with np.warnings.catch_warnings():
        np.warnings.simplefilter("ignore")
        result = ndi.zoom(data, scale)
    return result[:h, :w]
Example #43
def boot_p(pc, nsamp, bootstraps=2000):
    """Given a probability value p and sample size n, return us bootstraps
  number of probability values obtained by random resampling based on p."""
    r = pylab.rand(nsamp, bootstraps)
    z = pylab.zeros((nsamp, bootstraps))
    idx = pylab.find(r < pc)
    z.flat[idx] = 1
    booted_p = z.mean(axis=0)
    return booted_p
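A usage sketch; note that pylab.find has been removed from recent matplotlib releases, so this assumes an older matplotlib (or substituting pylab.flatnonzero in boot_p):

booted = boot_p(0.7, nsamp=50, bootstraps=1000)
print(booted.shape)    # (1000,) bootstrap replicates of the observed proportion
print(booted.mean())   # should be close to 0.7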
Example #44
def natural_selection(selected_mate, survival_rate):
    '''
    randomly kill individuals with low survival rate
    killed individuals will have genotype -1
    '''
    r = pylab.rand(len(selected_mate))
    selected_mate[survival_rate[selected_mate] < r] = -1
    #print 'killed',sum(selected_mate == -1),'individuals'
    return selected_mate
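A usage sketch (assumes numpy and pylab as in the snippet); selected_mate holds genotype indices and survival_rate is indexed by genotype:

import numpy as np

survival_rate = np.array([0.9, 0.5, 0.1])     # per-genotype survival probability
selected_mate = np.array([0, 1, 2, 2, 1, 0])  # genotype index of each individual
print(natural_selection(selected_mate, survival_rate))  # low-survival entries tend to become -1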
Example #45
    def __init__(self, eta, max_iterations, dimension):
        """ Construtor do objeto Perceptron, recebe a taxa de
		aprendizado (eta) e o número máximo de iterações
		que o algoritmo pode executar durante um treinamento. """
        self.w = rand(dimension) * 2 - 1  # weights
        self.learningRate = eta
        self.max_iterations = max_iterations
        self.history = [list(self.w)]
        self.dimension = dimension
Example #46
def permute(a):
    """
  Randomly permute the elements in array a
  """
    for n in range(len(a)):
        m = int(pylab.rand() * (len(a) - n)) + n
        t = a[m]
        a[m] = a[n]
        a[n] = t
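A usage sketch of the in-place shuffle (assumes pylab as in the snippet):

a = list(range(10))
permute(a)
print(a)   # the same ten elements in a random order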
Example #47
def funcNbofAttr(P, G):

    endDir = 'G_%.3f_P_%.5f.npy' %(G,P) 
    try:
        pattsx = data2array(dir_prix + '/patterns_' + endDir, mmap_mode="r+")
        pattsA = data2array(dir_priA + '/patterns_' + endDir, mmap_mode="r+")
        return len(pattsx)

    except:
        try:
            pattsx = data2array(dir_prix + '/allPatt_' + endDir)
            pattsA = data2array(dir_priA + '/allPatt_' + endDir)
                    
        except:
            N = data2array(dco).shape[0]
            pattsx = zeros((mmax,N))
            pattsA = zeros((mmax,N))

            conn = {'connAd': dco,
                    'normType': '1'}

            noise = {'colors': None}

            model = {'model': 'HopfieldBasedStatic',
                    'threshold': 'local',
                    'tauT': 0,
                    'P': P,
                    'G': G} 

            out = []

            other = {'init': 'rand',
                    'dens': rand(),} #p2,!!!!!!!!!!!!!!  RAND

            for d in range(mmax):
                eva = main.evaCure(evaCon=conn, evaNoi=noise, evaMod=model, out=out, **other)
                eva.toEquilibrium()
                pattsx[d] = eva.evaMod.x.copy()
                pattsA[d] = eva.evaMod.A.copy()
                 
            array2data(pattsx, dir_prix + '/allPatt_' + endDir)
            array2data(pattsA, dir_priA + '/allPatt_' + endDir)
        
        patts = pattsx  # !!!!!!!!!
        S = sortBy(patts.mean(1) - patts.mean(), inverse=1)[0]
        C1, freq = preClustering(patts[S],                sim_coef=sim_coef, sim_func=similarity_Euclidean)
        C2, freq = preClustering(patts[S][C1], freq=freq, sim_coef=sim_coef, sim_func=fPearsonCorrelation)
        SC, freq = sortBy(freq, inverse=1)

        array2data(pattsx[S][C1][C2][SC], dir_prix + '/patterns_' + endDir)
        array2data(pattsA[S][C1][C2][SC], dir_priA + '/patterns_' + endDir)
        array2data(freq, dir_priT + '/tendances_' + endDir)
        os.system('rm ' + dir_prix + '/allPatt_' + endDir)
        os.system('rm ' + dir_priA + '/allPatt_' + endDir)
    
        return len(pattsx[S][C1][C2][SC])
Example #48
def main():
    zz = nx.zeros([10,10])
    print 'tr(zz)=',trace(zz)
    oo = nx.ones([4,4],nx.Float)
    print 'tr(oo)=',trace(oo)
    aa = rand(128,128)
    print 'tr(aa)=',trace(aa)
    print 'oo:',oo
    in_place_mult(3,oo)
    print '3*oo:',oo
Example #49
def stochastic_equations(last_Y, ts, g, b):
    Y = last_Y

    access_rate = b * (N0 - Y) * Y / N0
    denial_rate = g * Y

    #generate random numbers
    rand1 = pl.rand()
    rand2 = pl.rand()

    #time until either event occurs
    ts = -np.log(rand2) / (denial_rate + access_rate)
    if rand1 < (access_rate / (denial_rate + access_rate)):
        # access, one more informed agent
        Y += 1
    else:
        # denial, one fewer informed agent
        Y -= 1
    return [Y, ts]
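A usage sketch of a single step; N0 (the total population size) is a module-level constant in the original code, so a stand-in value is defined here for illustration (pl and np are assumed imported as in the snippet):

N0 = 1000   # stand-in for the module-level population size
Y, dt = stochastic_equations(last_Y=10, ts=0.0, g=0.1, b=0.5)
print(Y, dt)   # updated informed count and the sampled waiting time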
Example #50
    def _sample_posteriors(self, true_N_A, p_A, N_samples):
        true_N_B = self.N_u - true_N_A
        N_values = pl.shape(true_N_A)[0]
        posteriors = pl.zeros((N_samples, N_values))
        for i in range(N_samples):
            for (j, (t_N_A, t_N_B)) in enumerate(zip(true_N_A, true_N_B)):
                A_given_A = pl.ones(t_N_A) * self.p_uA_given_A
                A_given_B = pl.ones(t_N_B) * self.p_uA_given_B
                A_probs = pl.hstack((A_given_A, A_given_B))
                B_given_A = pl.ones(t_N_A) * self.p_uB_given_A
                B_given_B = pl.ones(t_N_B) * self.p_uB_given_B
                B_probs = pl.hstack((B_given_A, B_given_B))

                N_A = pl.sum(A_probs > pl.rand(self.N_u))
                N_B = pl.sum(B_probs > pl.rand(self.N_u))

                posteriors[i, j] = self._p_A_given_N_A(N_A, N_B)

        return pl.mean(posteriors, 0)
Example #51
def makestim(isi=1,
             variation=0,
             width=0.05,
             weight=10,
             start=0,
             finish=1,
             stimshape='gaussian'):
    from pylab import r_, convolve, shape, exp, zeros, hstack, array, rand

    # Create event times
    timeres = 0.005  # Time resolution = 5 ms = 200 Hz
    pulselength = 10  # Length of pulse in units of width
    currenttime = 0
    timewindow = finish - start
    allpts = int(timewindow / timeres)
    output = []
    while currenttime < timewindow:
        if currenttime >= 0 and currenttime < timewindow:
            output.append(currenttime)
        currenttime = currenttime + isi + variation * (rand() - 0.5)

    # Create single pulse
    npts = min(pulselength * width / timeres,
               allpts)  # Calculate the number of points to use
    x = (r_[0:npts] - npts / 2 + 1) * timeres
    if stimshape == 'gaussian':
        pulse = exp(-(x / width * 2 - 2)**
                    2)  # Offset by 2 standard deviations from start
        pulse = pulse / max(pulse)
    elif stimshape == 'square':
        pulse = zeros(shape(x))
        pulse[int(npts / 2):int(npts / 2) +
              int(width / timeres)] = 1  # Start exactly on time
    else:
        raise Exception('Stimulus shape "%s" not recognized' % stimshape)

# Create full stimulus
    events = zeros((allpts))
    events[array(array(output) / timeres, dtype=int)] = 1
    fulloutput = convolve(
        events, pulse, mode='same'
    ) * weight  # Calculate the convolved input signal, scaled by rate
    fulltime = (r_[0:allpts] * timeres +
                start) * 1e3  # Create time vector and convert to ms
    fulltime = hstack(
        (0, fulltime, fulltime[-1] + timeres *
         1e3))  # Create "bookends" so always starts and finishes at zero
    fulloutput = hstack(
        (0, fulloutput,
         0))  # Set weight to zero at either end of the stimulus period
    events = hstack((0, events, 0))  # Ditto
    stimvecs = [fulltime, fulloutput, events]  # Combine vectors into a matrix

    return stimvecs
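A usage sketch, assuming makestim plus the pylab names it uses (see the import line inside the function):

fulltime, fulloutput, events = makestim(isi=0.1, width=0.05, weight=10, start=0, finish=1)
print(len(fulltime))      # time vector in ms, with zero-valued bookends at both ends
print(fulloutput.max())   # peak of the convolved, weight-scaled stimulus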
Example #52
def sample(W,g,batch_size, vis_gauss=False):
    v,h = W.v, W.h
    V = rand(batch_size,v)
    for gg in range(g):
        H = Rsigmoid(W*V)

        if vis_gauss:
            V = W.T()*H + randn(batch_size,v)
        else:
            V = Rsigmoid(W.T()*H)
    return V,H
Example #53
def bar_graph():
    k = 8
    x = plb.arange(k)
    for z in x:
        y1 = plb.rand(k) * (1 - x / k)
        y2 = plb.rand(k) * (1 - x / k)
        plb.axes([0.075, 0.075, .88, .88])

        plb.bar(x, +y1, facecolor='#9922aa', edgecolor='green')
        plb.bar(x, -y2, facecolor='#ff3366', edgecolor='green')

        for a, b in zip(x, y1):
            plb.text(a+0.41, b+0.08, '%.3f' % b, ha='center', va='bottom')
        for a, b in zip(x, y2):
            plb.text(a+0.41, b+0.08, '%.3f' % b, ha='center', va='top')

        plb.xlim(-.5, k), plb.ylim(-1.12, +1.12)
        plb.grid(True)
        plb.pause(1)
        plb.cla()
Example #54
 def testActivityMap(self):
     self.spk.dimensions = [5, 10]
     self.spk.activity_map(t_start=1000,
                           t_stop=2000,
                           display=pylab.subplot(211),
                           kwargs={'interpolation': 'bicubic'})
     positions = pylab.rand(2, 50)
     self.spk.activity_map(float_positions=positions,
                           display=pylab.subplot(212))
     pylab.savefig("Plots/SpikeList_activitymaps.png")
     pylab.close()
Example #55
 def test_bipartite_matching():
   from pylab import plot, rand
   from numpy import zeros, arange
   N = 10
   a = rand(N,2)
   a[:,0] = arange(N)
   a[:,1] = 0
   b = rand(N+2,2)
   b[:,0] = arange(N+2)
   b[:,1] = 1
   d = zeros((a.shape[0],b.shape[0]))
   for i,ai in enumerate(a):
     for j,bi in enumerate(b):
       d[i,j] = ((ai-bi)**2).sum()
   assignment,cost = bipartite_matching( d )
   print "Matching cost: ", cost.value
   plot(a[:,0],a[:,1],'o')
   plot(b[:,0],b[:,1],'s')
   for i,j in assignment.iteritems():
     plot([ a[i,0], b[j,0] ], [ a[i,1], b[j,1] ],'k--')
   return assignment
Example #56
def knockout_uniformly_at_random(in_fname='noisy_data.csv', out_fname='missing_noisy_data.csv', pct=20.):
    """ replace data.csv y column with uniformly random missing entries

    Parameters
    ----------
    pct : float, percent to knockout
    """
    data = pl.csv2rec(in_fname)
    for i, row in enumerate(data):
        if pl.rand() < pct/100.:
            data[i].y = pl.nan
    pl.rec2csv(data, out_fname)
Example #57
def random_distort(images, maxdelta=2.0, sigma=30.0):
    n, m = images[0].shape
    deltas = pylab.rand(2, n, m)
    deltas = ndi.gaussian_filter(deltas, (0, sigma, sigma))
    deltas -= np.amin(deltas)
    deltas /= np.amax(deltas)
    deltas = (2 * deltas - 1) * maxdelta
    #print np.amin(deltas), np.amax(deltas)
    xy = np.transpose(np.array(np.meshgrid(range(n), range(m))),
                      axes=[0, 2, 1])
    #print(xy.shape, deltas.shape)
    deltas += xy
    return [ndi.map_coordinates(image, deltas, order=1) for image in images]
Example #58
 def plotPT(self, n):
     N = size(self.Asts)
     PLtest = self
     fig = figure()
     ax = fig.add_subplot(1, 1, 1)
     for i in range(n):
         PLtest.set_w(rand(N))
         ax.scatter(PLtest.s(), PLtest.r(), color='black', s=1)
     ax.scatter(self.s(), self.r(), color='red', s=10)
     xlabel('$\sigma$')
     ylabel('r')
     grid()
     show()
Example #59
    def __init__(self, phase_potrait, network, info=None, position=None):
        self.system = phase_potrait
        self.network = network
        self.CYCLES = 10
        self.info = info
        self.initial_condition = self.system.load_initial_condition(
            pl.rand(), pl.rand(), pl.rand())

        self.fig = pl.figure('Voltage Traces',
                             figsize=(6, 2),
                             facecolor='#EEEEEE')
        self.ax = self.fig.add_subplot(111, frameon=False, yticks=[])

        self.li_b, = self.ax.plot([], [], 'b-', lw=2.)
        self.li_g, = self.ax.plot([], [], 'g-', lw=2.)
        self.li_r, = self.ax.plot([], [], 'r-', lw=2.)
        self.li_y, = self.ax.plot([], [], 'y-', lw=2.)

        self.ax.set_xlabel(r'time (sec.)', fontsize=20)

        self.ax.set_xticklabels(np.arange(0., 1., 0.1), fontsize=15)
        self.ax.set_yticklabels(np.arange(0., 1., 0.1), fontsize=15)

        self.ax.set_xlim(0., 100.)
        self.ax.set_ylim(-8.5, 1.5)

        #self.fig.tight_layout()

        self.key_func_dict = dict(u=traces.increase_cycles,
                                  i=traces.decrease_cycles)
        self.fig.canvas.mpl_connect('key_press_event', self.on_key)
        self.fig.canvas.mpl_connect('axes_enter_event', self.focus_in)

        if position is not None:
            try:
                self.fig.canvas.manager.window.wm_geometry(position)
            except:
                pass
Example #60
    def __init__(self, w_=rand(2) * 2 - 1, tasaApren_=0.1):
        """ 
        Metodo constructor del preceptron, 
        inicialza los valores por defecto.

        Parametros:
        w_: array-1d
            Pesos actualizados después del ajuste.
            
        tasaApren_: float
            Tasa de aprendizaje.
        """
        self.w = w_  # Vector w, representa los pesos.
        self.tasaApren = tasaApren_  # Tasa de aprendizaje.