Example #1
File: basic_rbm.py Project: samblasiak/RBM
 def f(self,x,grad):
     s=self
     W,b1,b2=s.unpack(x)
 
     #contrastive divergence with one sampling iteration
     #multiple sampling iterations
 #    for i in xrange(max(1,int(np.log(iter)/np.log(10.)))):
         
     #sample h
     dotW=np.dot(s.data,W)
     eh=sig(dotW+b2[None,:])
     h=rnd.rand(len(s.data),s.H)<eh
     #sample v
     dotH=np.dot(h,W.T)
     v=rnd.rand(len(s.data),s.KK)<sig(dotH+b1[None,:])
     
     dotW1=np.dot(v,W)
     eh1=sig(dotW1+b2[None,:])
         
     dW=np.dot(s.data.T,eh)-np.dot(v.T,eh1)
     db1=np.sum(s.data-v,0)
     db2=np.sum(eh-eh1,0)
     
     s.pack(grad,dW,db1,db2)
     grad/=len(s.data)
     grad-=s.lam*x
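The snippet leans on helpers defined elsewhere in basic_rbm.py. A minimal sketch of what they presumably look like, assuming `sig` is the logistic sigmoid and `unpack` splits the flat parameter vector into W, b1, b2 (the layout below is a guess; only the names appear above):

import numpy as np

def sig(x):
    # logistic sigmoid assumed by the CD-1 gradient above
    return 1.0 / (1.0 + np.exp(-x))

def unpack(self, x):
    # split the flat parameter vector into W (KK x H), b1 (KK,), b2 (H,)
    W = x[:self.KK * self.H].reshape(self.KK, self.H)
    b1 = x[self.KK * self.H:self.KK * (self.H + 1)]
    b2 = x[self.KK * (self.H + 1):]
    return W, b1, b2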
Example #2
def main():
    pylab.ion()
    ind = [0]
    ldft = [0]
    lfft = [0]
    lpfft = [0]

    # plot a graph of DFT vs. FFT timings; the lists only cover sizes up to 2**8
    for i in range(1, 9):
        t_before = time.perf_counter()  # time.clock() was removed in Python 3.8
        dsprocessing.dspDft(rand(2**i).tolist())
        dt = time.perf_counter() - t_before
        ldft.append(dt)
        print("dft ", 2**i, dt)
        #pylab.plot([2**i,], [time.perf_counter()-t_before,])
        t_before = time.perf_counter()
        dsprocessing.dspFft(rand(2**i).tolist())
        dt = time.perf_counter() - t_before
        print("fft ", 2**i, dt)
        lfft.append(dt)
        #pylab.plot([2**i,], [time.perf_counter()-t_before,])
        ind.append(2**i)
        # numpy's fft (via pylab) just to compare
        t_before = time.perf_counter()
        pylab.fft(rand(2**i).tolist())
        dt = time.perf_counter() - t_before
        lpfft.append(dt)

    pylab.plot(ind, ldft)
    pylab.plot(ind, lfft)
    pylab.plot(ind, lpfft)
    pylab.show()
    return [ind, ldft, lfft, lpfft]
Example #3
    def cluster(self, data, n_clusters):

        n, d = shape(data)
        locations = zeros((self.n_particles, n_clusters, d))

        for i in range(self.n_particles):
            for j in range(n_clusters):
                locations[i, j, :] = copy(data[randint(n), :])  # Initialize cluster centers to random datapoints

        bestlocations = copy(locations)
        velocities = zeros((self.n_particles, n_clusters, d))

        bestscores = [score(data, centroids=locations[i, :, :], norm=self.norm) for i in range(self.n_particles)]
        sbestlocation = copy(locations[argmin(bestscores), :, :])
        sbestscore = min(bestscores)

        for i in range(self.n_iterations):
            if i % self.printfreq == 0:
                print "Particle swarm iteration", i, "best score:", sbestscore
            for j in range(self.n_particles):
                r = rand(n_clusters, d)
                s = rand(n_clusters, d)
                velocities[j, :, :] = (self.w * velocities[j, :, :]) + \
                                      (self.c1 * r * (bestlocations[j, :, :] - locations[j, :, :])) + \
                                      (self.c2 * s * (sbestlocation - locations[j, :, :]))
                locations[j, :, :] = locations[j, :, :] + velocities[j, :, :]
                currentscore = score(data, centroids=locations[j, :, :], norm=self.norm)
                if currentscore < bestscores[j]:
                    bestscores[j] = currentscore
                    bestlocations[j, :, :] = locations[j, :, :]
                    if currentscore < sbestscore:
                        sbestscore = currentscore
                        sbestlocation = copy(locations[j, :, :])

        return getlabels(data, centroids=sbestlocation, norm=self.norm)
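A hedged usage sketch, assuming the enclosing class exposes the hyperparameters used above (n_particles, n_iterations, w, c1, c2, norm, printfreq) and that getlabels returns integer cluster assignments; the constructor call is hypothetical:

import numpy as np
from numpy.random import rand

data = rand(200, 2)                      # 200 two-dimensional points
clusterer = PSOClusterer()               # hypothetical; sets n_particles etc.
labels = clusterer.cluster(data, n_clusters=3)
print(np.bincount(labels))               # cluster sizes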
Example #4
def main():
    # Generate synthetic data
    x = 2 * npr.rand(N,D) - 1  # data features, an (N,D) array
    x[:, 0] = 1
    th_true = 10.0 * np.array([0, 1, 1])
    y = np.dot(x, th_true[:, None])[:, 0]
    t = npr.rand(N) > (1 / ( 1 + np.exp(y)))  # data targets, an (N) array of 0s and 1s

    # Obtain joint distributions over z and th
    model = ff.LogisticModel(x, t, th0=th0, y0=y0)

    # Set up step functions
    th = np.random.randn(D) * th0
    z = ff.BrightnessVars(N)
    th_stepper = ff.ThetaStepMH(model.log_p_joint, stepsize)
    z__stepper = ff.zStepMH(model.log_pseudo_lik, q)

    plt.ion()
    ax = plt.figure(figsize=(8, 6)).add_subplot(111)
    while True:
        th = th_stepper.step(th, z)  # Markov transition step for theta
    z  = z__stepper.step(th, z)  # Markov transition step for z
        update_fig(ax, x, y, z, th, t)
        plt.draw()
        plt.pause(0.05)
Example #5
File: server.py Project: CySCA/CySCA2014
def maze(width=81, height=51, complexity=.75, density=.75):
    # Only odd shapes
    shape = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)
    # Adjust complexity and density relative to maze size
    complexity = int(complexity * (5 * (shape[0] + shape[1])))
    density    = int(density * (shape[0] // 2 * shape[1] // 2))
    # Build actual maze
    Z = numpy.zeros(shape, dtype=int)
    # Fill borders
    Z[0, :] = Z[-1, :] = 1
    Z[:, 0] = Z[:, -1] = 1
    # Make isles
    for i in range(density):
        x, y = rand(0, shape[1] // 2) * 2, rand(0, shape[0] // 2) * 2
        Z[y, x] = 1
        for j in range(complexity):
            neighbours = []
            if x > 1:             neighbours.append((y, x - 2))
            if x < shape[1] - 2:  neighbours.append((y, x + 2))
            if y > 1:             neighbours.append((y - 2, x))
            if y < shape[0] - 2:  neighbours.append((y + 2, x))
            if len(neighbours):
                y_,x_ = neighbours[rand(0, len(neighbours) - 1)]
                if Z[y_, x_] == 0:
                    Z[y_, x_] = 1
                    Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
                    x, y = x_, y_
    return Z
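Note that rand here cannot be numpy.random.rand: it is called as rand(low, high) and used as an inclusive integer sampler (the original presumably aliased numpy.random.random_integers). A hedged way to run the function stand-alone:

import numpy
from numpy.random import randint

def rand(lo, hi):
    # inclusive integer sampler; randint(lo, hi + 1) replaces the
    # deprecated numpy.random.random_integers the snippet likely aliased
    return randint(lo, hi + 1)

Z = maze(width=21, height=11)
print('\n'.join(''.join('#' if cell else ' ' for cell in row) for row in Z))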
Example #6
def LSestimateParams(Xs, delta):
#   Implements the estimator from question 2.6, given a sample of
#   X_i and a fixed delta

    Xn = Xs[1:]
    N = len(Xn)
    ts = delta * arange(N)

    def LS_objective(p):
        # unpack params:
        mu = p[0]
        beta = p[1]

        Ys = Xs[0] * exp(-beta * ts) + mu * (1.0 - exp(-beta * ts))
        return Ys - Xn

    #####################################
    # NB: mu and tau are free variables from the enclosing scope,
    # used only to scale the random initial guess
    mb_guess = [mu * rand(),
                1. / tau * rand()]

    from scipy.optimize import leastsq
    mb_hat = leastsq(LS_objective, mb_guess)[0]
    mu_hat, beta_hat = mb_hat[:]

    return [mu_hat, beta_hat]
Example #7
def _compare_nmf(m=300, n=300, k=10):
    from pylab import plot, show, legend, xlabel, ylabel

    W_org = random.rand(m, k)
    H_org = random.rand(n, k)
    A = W_org.dot(H_org.T)

    print('\nComparing NMF algorithms ...\n')

    names = [NMF_MU, NMF_HALS, NMF_ANLS_BLOCKPIVOT,
             NMF_ANLS_AS_NUMPY, NMF_ANLS_AS_GROUP]
    iters = [2000, 1000, 100, 100, 100]
    labels = ['mu', 'hals', 'anls_bp', 'anls_as_numpy', 'anls_as_group']
    styles = ['-x', '-o', '-+', '-s', '-D']

    results = []
    init_val = (random.rand(m, k), random.rand(n, k))

    for i in range(len(names)):
        alg = names[i]()
        results.append(
            alg.run(A, k, init=init_val, max_iter=iters[i], verbose=1))

    for i in range(len(names)):
        his = results[i][2]['his']
        plot(np.cumsum(his['elapsed']), his['rel_error'],
             styles[i], label=labels[i])

    xlabel('time (sec)')
    ylabel('relative error')
    legend()
    show()
Example #8
    def action_callback(self, state):
        """
        Implement this function to learn things and take actions.
        Return 0 if you don't want to jump and 1 if you do.
        """
        # Create tuple from state dictionary, facilitates use in random forest
        st_tuple = self.create_state_tuple(state)

        if not self.fitted:
            new_action = npr.rand() < 0.1
        else:
            # gather new_action in an epsilon greedy manner according to Q estimator
            if npr.rand() > self.eps:
                new_action = npr.rand() < 0.1  # Currently defaults to gliding... may want to adjust
            else:
                new_action = np.argmax(
                    [self.estimator.predict(np.append(st_tuple, a)) for a in range(self.num_actions)]
                )

        # Store new_state and new_action to pass back to SwingyMonkey
        new_state = state

        self.last_action = new_action
        self.last_state = new_state

        return self.last_action
Example #9
File: test_wigner.py Project: partus/qutip
def test_wigner_coherent():
    "wigner: test wigner function calculation for coherent states"
    xvec = linspace(-5.0, 5.0, 100)
    yvec = xvec

    X,Y = meshgrid(xvec, yvec)

    a = X + 1j * Y  # consistent with g=2 option to wigner function

    dx = xvec[1]-xvec[0]
    dy = yvec[1]-yvec[0]

    N = 20
    beta = rand() + rand() * 1.0j
    psi = coherent(N, beta)


    # calculate the wigner function using qutip and analytic formula
    W_qutip = wigner(psi, xvec, yvec, g=2)
    W_analytic = 2/pi * exp(-2*abs(a-beta)**2)

    # check difference
    assert_(sum(abs(W_qutip - W_analytic)**2) < 1e-4)

    # check normalization
    assert_(abs(sum(W_qutip)    * dx * dy - 1.0) < 1e-8)
    assert_(abs(sum(W_analytic) * dx * dy - 1.0) < 1e-8)
Example #10
 def setUp(self):
     self.A = rand(10,8)
     self.b1 = rand(8,1)
     self.b2 = rand(8)
     self.b3 = rand(1,8)
     self.b4 = rand(10)
     self.N = 14
Example #11
 def test_boolean(self):
     a = rand(3,5,8)
     V = rand(5,8)
     g1 = randint(0,5,size=15)
     g2 = randint(0,8,size=15)
     V[g1,g2] = -V[g1,g2]
     assert (array([a[0][V>0],a[1][V>0],a[2][V>0]]) == a[:,V>0]).all()
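The identity holds because a 2-D boolean mask applied to the trailing axes selects the same positions within every leading slice. A self-contained illustration (a sketch, independent of the test class):

import numpy as np
from numpy.random import rand

a = rand(3, 5, 8)
V = rand(5, 8) - 0.5
# the mask collapses the last two axes to the number of True entries
assert a[:, V > 0].shape == (3, (V > 0).sum())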
Example #12
def generate_multimask_test_data(num_masks, num_points, num_features):
    # step 1: generate masks
    found = set()
    all_masks = []
    for i in range(num_masks):
        while True:
            u, = randint(2, size=num_features).nonzero()
            h = u.tobytes()  # tostring() was deprecated in favour of tobytes()
            if h not in found:
                found.add(h)
                all_masks.append(u)
                break
    assert len(all_masks)==num_masks
    # step 2: generate data
    fet = []
    fmask = []
    offsets = []
    unmasked = []
    n = 0
    offsets = [n]
    for i in range(num_points):
        u = all_masks[randint(num_masks)]
        fet.append(rand(len(u)))
        fmask.append(0.5+0.5*rand(len(u)))
        unmasked.append(u)
        n += len(u)
        offsets.append(n)
    assert len(offsets)==num_points+1
    fet = hstack(fet)
    fmask = hstack(fmask)
    unmasked = hstack(unmasked)
    offsets = array(offsets)
    return RawSparseData(full(num_features, 0.5), full(num_features, 1./12), # mean/var of rand()
                         fet, fmask, unmasked, offsets).to_sparse_data()
Example #13
    def test_softmax_reg_loss(self):
        df = DataFrame()
        epsilon = 1e-4
        y_path = ("y/","y/")
        theta_path = ("theta/","theta/")
        X_path = ("X/","X/")

        k = 10
        n,m = 5,8
        df[X_path] = DataFrame.from_matrix(nprand.rand(n,m))
        df[theta_path] = DataFrame.from_matrix(nprand.rand(k,m))
        y = np.zeros((n,k),dtype=bool)
        for i in range(n):
            j = nprand.randint(k)
            y[i,j] = True
        df[y_path] = DataFrame.from_matrix(y)
        reg = 0.0001

        softmax = lambda theta_df: SoftmaxRegression(theta_df, df[X_path], 
                                                df[y_path], reg).f()


        g_central = self.central_diff(softmax,epsilon,df[theta_path])
        g1 = SoftmaxRegression(df[theta_path], df[X_path], df[y_path], reg).g()

        # print g_central
        assert(np.allclose(g_central,g1))

        # Test batch by checking average gradient
        # g2 = np.zeros((k,m))
        # for i in range(n):
        #     g2 += Softmax.g(df[theta_path], df[X_path], df[y_path], reg)
        # g2 /= n
        # assert(np.allclose(g_central,g2))
Example #14
    def sample(self, n=None):
        """
        Return pseudo-random samples from the piecewise linear pdf.
        
        With no argument, a single sample is returned; otherwise the
        requested number is returned as a 1-D array.
        """
        if n is None:
            index = self.popn.sample(1)[0]
            lo, hi = self.nz_intvls[index]
            plo, phi = self.nz_pdf[index]
            if plo==phi:  # handle flat intervals
                # print 'flat:', index, lo, hi, lo + (hi-lo)*rand()
                return lo + (hi-lo)*rand()
            else:
                r = (hi-lo)/(phi-plo)
                return lo - r*plo + r*sqrt(plo**2 + (phi**2 - plo**2)*rand())
                # return self.nz_centers[index]
        else:
            indices = self.popn.sample(n)
            lo, hi = self.nz_intvls[indices,0], self.nz_intvls[indices,1]
            plo, phi = self.nz_pdf[indices,0], self.nz_pdf[indices,1]
            flat = (phi==plo).nonzero()[0]  # id the flat intervals
            r = (hi-lo)/(phi-plo)  # will be NaN for flat intervals
            u = rand(n)
#             if len(flat) != 0:
#                 print plo**2 + (phi**2 - plo**2)*u
#                 print r
#                 print indices
#                 print phi-plo
            vals = lo - r*plo + r*sqrt(plo**2 + (phi**2 - plo**2)*u)
            if len(flat) != 0:
                vals[flat] = lo[flat] + (hi[flat]-lo[flat])*u[flat]
            return vals
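The non-flat branch is inverse-transform sampling for a density that varies linearly from plo at lo to phi at hi: the CDF is quadratic in x, and solving F(x) = u for a uniform u gives the closed form above. A quick hedged check of the formula, independent of the class:

import numpy as np
from numpy.random import rand

lo, hi, plo, phi = 0.0, 1.0, 0.5, 1.5   # linear pdf that integrates to 1
u = rand(100000)
r = (hi - lo) / (phi - plo)
x = lo - r * plo + r * np.sqrt(plo**2 + (phi**2 - plo**2) * u)
print(x.mean())   # should approach the analytic mean 7/12 ~ 0.583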
Example #15
  def testNormalTriad(self):
    """
    Check the correct working of the normalTriad() function.
    """
    phi=array([0.0,pi/2,pi,3*pi/2,0.0,0.0])
    theta=array([0.0,0.0,0.0,0.0,pi/2,-pi/2])
    pExpected=array([[0,-1,0,1,0,0], [1,0,-1,0,1,1], [0,0,0,0,0,0]])
    qExpected=array([[0,0,0,0,-1,1], [0,0,0,0,0,0], [1,1,1,1,0,0]])
    rExpected=array([[1,0,-1,0,0,0], [0,1,0,-1,0,0], [0,0,0,0,1,-1]])
    p, q, r = normalTriad(phi,theta)
    assert_array_almost_equal(pExpected, p, decimal=15)
    assert_array_almost_equal(qExpected, q, decimal=15)
    assert_array_almost_equal(rExpected, r, decimal=15)

    phiRandom = 2.0*pi*rand(10)
    thetaRandom = -pi/2.0+pi*rand(10)
    z=array([0,0,1])
    for phi in phiRandom:
      for theta in thetaRandom:
        p, q, r = normalTriad(phi,theta)
        rExpected=array([cos(phi)*cos(theta), sin(phi)*cos(theta), sin(theta)])
        pExpected=cross(z,rExpected)
        pExpected=pExpected/sqrt(dot(pExpected,pExpected))
        qExpected=cross(rExpected,pExpected)
        assert_array_almost_equal(pExpected, p, decimal=8)
        assert_array_almost_equal(qExpected, q, decimal=8)
        assert_array_almost_equal(rExpected, r, decimal=8)
Example #16
def hc_only_explore_step(rl_config, Q, state, epsilon=0.9):
    rid2rl_actions = rl_config.rl_actions
    id2rl_state = rl_config.rl_state_ids
    rl_state2id = {v: k for k, v in id2rl_state.items()}

    act = -1
    while (act == -1):
        is_greed = rand(1) < epsilon
        if is_greed:
            idx = state.tolist() + [[x for x in range(len(rid2rl_actions.keys()))]]
            act = np.argmax(Q[tuple(idx)])
            val = np.max(Q[tuple(idx)])
            if val <= 0.00000000001:
                is_greed = False

        if not is_greed:
            if rand(1) < 0.9:
                act = randint(0, 7)
            else:
                act = randint(7, len(rid2rl_actions.keys()))

        (next_state, act, isFinished) = rl_config.transition_function(rl_config, state, act, Q)

    reward = rl_config.reward_function(rl_config, state, act, next_state)
    sarsa_state = np.concatenate((state, [act], [reward], next_state))
    length = sarsa_state.shape[0]
    sarsa_state = np.reshape(sarsa_state, (1,length))

    return (next_state, sarsa_state, isFinished)
Example #17
 def write_exam(self,teacher,toughness_level):
     for pupil in self.pupils:
         # will this pupil try to betray and copy?
         # let's decide random-influenced but probability-based
         random_number=rand()
         if random_number < pupil.honesty_level:
             will_try_betrayal=False
         else:
             will_try_betrayal=True
         if will_try_betrayal:
             # copying from whom? imagine random seating and choosing the better of two neighbours
             # need to randomly sample two different ones from the list of pupils in the class
             while True:
                 fellowA=self.pupils[randint(self.size)]
                 fellowB=self.pupils[randint(self.size)]
                 if not (fellowA is fellowB):
                     break
             if fellowA.skill_level > fellowB.skill_level:
                 pupil.write_exam(copy_from_fellow=fellowA, exam_toughness=toughness_level)
             else:
                 pupil.write_exam(copy_from_fellow=fellowB, exam_toughness=toughness_level)
             if rand() < teacher.betrayal_check_probability:
                 betrayal_discovered=True
             else:
                 betrayal_discovered=False
             if betrayal_discovered:
                 # what happens then? grade becomes zero? or skill-level minus 3?
                 # discovery should lead to increased fear and honesty next time
                 raise NotImplementedError('This is up to you.')
             else:
                 # what happens then? grade will be influenced by own and fellow's skill level
             # no discovery should lead to decreased fear and less honesty next time
                 raise NotImplementedError('This is up to you.')
         else:
             pupil.write_exam(exam_toughness=toughness_level)
Example #18
File: mcmc.py Project: andymiller/flecks
def elliptical_slice(xx, chol_Sigma, log_like_fn):
    D  = xx.size

    # Select a random ellipse.
    nu = np.dot(chol_Sigma, npr.randn(D))

    # Select the slice threshold.
    hh = np.log(npr.rand()) + log_like_fn(xx)

    # Randomly center the bracket.
    phi     = npr.rand()*2*np.pi
    phi_max = phi
    phi_min = phi_max - 2*np.pi

    # Loop until acceptance.
    while True:

        # Compute the proposal.
        xx_prop = xx*np.cos(phi) + nu*np.sin(phi)

        # If on the slice, return the proposal.
        if log_like_fn(xx_prop) > hh:
            return xx_prop

        if phi > 0:
            phi_max = phi
        elif phi < 0:
            phi_min = phi
        else:
            raise Exception("Shrank to zero!")

        phi = npr.rand()*(phi_max - phi_min) + phi_min
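A hedged usage sketch (everything below is illustrative, not from the flecks project): elliptical slice sampling targets a posterior with Gaussian prior N(0, Sigma), here paired with a toy Gaussian log-likelihood centered at 1:

import numpy as np
import numpy.random as npr

D = 3
chol_Sigma = np.linalg.cholesky(np.eye(D))
log_like = lambda x: -0.5 * np.sum((x - 1.0) ** 2)   # toy likelihood

xx = np.dot(chol_Sigma, npr.randn(D))   # start from a prior draw
samples = []
for _ in range(1000):
    xx = elliptical_slice(xx, chol_Sigma, log_like)
    samples.append(xx)
print(np.mean(samples, axis=0))   # pulled toward 1 relative to the prior mean 0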
Example #19
def plsa_assym(Nwd,Nz,iteration):
    #dimension order Nw->Nz->Nd
    [Nw,Nd] = np.shape(Nwd)
    b = 1.0/Nw/Nd

    #Initialization
    Pwd    = np.expand_dims(Nwd,1)+b
    Pz_wd  = rand(Nw,Nz,Nd)+b
    Pd     = rand( 1, 1,Nd)+b
    Pz_d   = rand( 1,Nz,Nd)+b
    Pw_z   = rand(Nw,Nz, 1)+b

    #Normalization
    Pwd   /= ksum(ksum(Pwd,0),2)
    Pz_wd /= ksum(Pz_wd,1) 
    Pd    /= ksum(Pd,2) 
    Pz_d  /= ksum(Pz_d,1) 
    Pw_z  /= ksum(Pw_z,0) 
    for i in range(iteration):
        #Expectation
        Pz_wd  = Pw_z * Pz_d * Pd
        Pz_wd /= ksum(Pz_wd,1) 

        #Maximization
        Pwzd = Pwd*Pz_wd
        Pzd  = ksum(Pwzd,0)
        Pwz  = ksum(Pwzd,2)
        Pd   = ksum(Pzd,1)
        Pz   = ksum(Pzd,2)
        Pz_d = Pzd/Pd
        Pw_z = Pwz/Pz
    return Pd, Pz_d, Pw_z, Pwzd
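The routine relies on a ksum helper that is not shown; presumably it is a sum that keeps the reduced axis so the divisions above broadcast correctly. A minimal sketch under that assumption:

import numpy as np

def ksum(A, axis):
    # sum over one axis, keeping it as a singleton so results broadcast back
    return np.sum(A, axis=axis, keepdims=True)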
Example #20
    def getfield(self, catalogue = False):
        r"""Create a simulated cube of point sources.

        Create a pixelised realisation of the sources.

        Parameters
        ----------
        catalogue : boolean, optional
            if true return the population catalogue.

        Returns
        -------
        cube : ndarray
            An array of dimensions (`numf`, `numx`, `numy`)
        """

        c = np.zeros(self._num_array())

        fluxes = self.generate_population(self.x_width*self.y_width)

        freq = self.nu_pixels
        
        sr = self.spectral_realisation(fluxes[:,np.newaxis], freq[np.newaxis,:])
        
        for i in range(sr.shape[0]):
            # Pick random pixel
            x = int(rnd.rand() * self.x_num)
            y = int(rnd.rand() * self.y_num)

            c[:,x,y] += sr[i,:]

        if not catalogue:
            return c
        else:
            return c, fluxes
Example #21
 def generate_batch_multi(num_samples, xobjs=['circle'], yobjs=[0], img_scale=1.0):
     obj_imgs = []
     obj_coords = []
     for obj in xobjs:
         imgs, coords = generate_batch(num_samples, obj_type=obj)
         obj_imgs.append(imgs)
         obj_coords.append(coords)
     seq_len = obj_coords[0].shape[0]
     batch_size = obj_coords[0].shape[1]
     x_imgs = np.zeros(obj_imgs[0].shape)
     y_imgs = np.zeros(obj_imgs[0].shape)
     y_coords = np.zeros(obj_coords[0].shape)
     for o_num in range(len(xobjs)):
         x_imgs = x_imgs + obj_imgs[o_num]
         if o_num in yobjs:
             y_imgs = y_imgs + obj_imgs[o_num]
         mask = npr.rand(seq_len, batch_size) < (1. / (o_num+1))
         mask = mask[:,:,np.newaxis]
         y_coords = (mask * obj_coords[o_num]) + ((1.-mask) * y_coords)
     # rescale coordinates as desired
     y_coords = img_scale * y_coords
     # add noise to image sequences
     pix_mask = npr.rand(*x_imgs.shape) < 0.05
     pix_noise = npr.rand(*x_imgs.shape)
     x_imgs = x_imgs + (pix_mask * pix_noise)
     # clip to 0...0.99
     x_imgs = np.maximum(x_imgs, 0.001)
     x_imgs = np.minimum(x_imgs, 0.999)
     y_imgs = np.maximum(y_imgs, 0.001)
     y_imgs = np.minimum(y_imgs, 0.999)
     return [to_fX(x_imgs), to_fX(y_imgs), to_fX(y_coords)]
Example #22
def test_sgd():
    N_weights = 5
    W0 = 0.1 * npr.randn(N_weights)
    V0 = 0.1 * npr.randn(N_weights)
    N_data = 12
    batch_size = 4
    num_epochs = 3
    batch_idxs = BatchList(N_data, batch_size)
    N_iter = num_epochs * len(batch_idxs)
    alphas = 0.1 * npr.rand(len(batch_idxs) * num_epochs)
    betas = 0.5 + 0.2 * npr.rand(len(batch_idxs) * num_epochs)

    A = npr.randn(N_data, N_weights)

    def loss_fun(W, idxs):
        sub_A = A[idxs, :]
        return np.dot(np.dot(W, np.dot(sub_A.T, sub_A)), W)

    result = sgd(loss_fun, batch_idxs, N_iter, W0, V0, alphas, betas)
    d_x = result['d_x']
    d_v = result['d_v']
    d_alphas = result['d_alphas']
    d_betas = result['d_betas']

    def full_loss(W0, V0, alphas, betas):
        result = sgd(loss_fun, batch_idxs, N_iter, W0, V0, alphas, betas)
        x_final = result['x_final']
        return loss_fun(x_final, batch_idxs.all_idxs)

    d_an = (d_x, d_v, d_alphas, d_betas)
    d_num = nd(full_loss, W0, V0, alphas, betas)
    for i, (an, num) in enumerate(zip(d_an, d_num)):
        assert np.allclose(an, num, rtol=1e-3, atol=1e-4), \
            "Type {0}, diffs are: {1}".format(i, an - num)
Example #23
File: sphvars.py Project: nicrip/misc
    def sample(self,n=1):
        """Sample n instances of a given Fisher distribution."""
        from numpy import exp, arcsin, sqrt, log, pi, array
        from numpy.random import rand
        
        k = self.k
        l = exp(-2 * k)

        R1 = rand(n)
        R2 = rand(n)

        T = 2 * arcsin( sqrt(-log(R1*(1-l)+l)/(2*k)) )
        P = 2 * pi* R2

        # Rotate T and P to desired mean
        #a = self.a
        #b = self.b
        #x, y, z = dcos(T, P)
        #A = array([[cos(a)*cos(b), cos(a)*sin(b), -sin(a)],
        #           [-sin(b), cos(b), 0],
        #           [sin(a)*cos(b), sin(a)*sin(b), cos(a)]])
        #px, py, pz = A.dot(array([x,y,z]).T)
        #theta, phi = pos(px, py, pz)

        return T, P#theta, phi
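A standalone restatement of the sampler above (the class plumbing replaced by a plain variable k; the rotation to the mean direction stays omitted, as in the original):

import numpy as np
from numpy import exp, arcsin, sqrt, log, pi
from numpy.random import rand

k = 10.0                 # concentration; plays the role of self.k
l = exp(-2 * k)
R1, R2 = rand(1000), rand(1000)
T = 2 * arcsin(sqrt(-log(R1 * (1 - l) + l) / (2 * k)))   # colatitude
P = 2 * pi * R2                                          # longitude
print(np.degrees(T).mean())   # concentrates near the pole as k grows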
Example #24
def test_sgd_parser():
    N_weights = 6
    W0 = 0.1 * npr.randn(N_weights)
    N_data = 12
    batch_size = 4
    num_epochs = 4
    batch_idxs = BatchList(N_data, batch_size)

    parser = VectorParser()
    parser.add_shape('first',  [2,])
    parser.add_shape('second', [1,])
    parser.add_shape('third',  [3,])
    N_weight_types = 3

    alphas = 0.1 * npr.rand(len(batch_idxs) * num_epochs, N_weight_types)
    betas = 0.5 + 0.2 * npr.rand(len(batch_idxs) * num_epochs, N_weight_types)
    meta = 0.1 * npr.randn(N_weights*2)

    A = npr.randn(N_data, N_weights)
    def loss_fun(W, meta, i=None):
        idxs = batch_idxs.all_idxs if i is None else batch_idxs[i % len(batch_idxs)]
        sub_A = A[idxs, :]
        return np.dot(np.dot(W + meta[:N_weights] + meta[N_weights:], np.dot(sub_A.T, sub_A)), W)

    def full_loss(params):
        (W0, alphas, betas, meta) = params
        result = sgd_parsed(grad(loss_fun), kylist(W0, alphas, betas, meta), parser)
        return loss_fun(result, meta)

    d_num = nd(full_loss, (W0, alphas, betas, meta))
    d_an_fun = grad(full_loss)
    d_an = d_an_fun([W0, alphas, betas, meta])
    for i, (an, num) in enumerate(zip(d_an, d_num[0])):
        assert np.allclose(an, num, rtol=1e-3, atol=1e-4), \
            "Type {0}, diffs are: {1}".format(i, an - num)
Example #25
    def _smoketest(self, spxlu, check, dtype):
        if np.issubdtype(dtype, np.complexfloating):
            A = self.A + 1j*self.A.T
        else:
            A = self.A

        A = A.astype(dtype)
        lu = spxlu(A)

        # Input shapes
        for k in [None, 1, 2, self.n, self.n+2]:
            msg = "k=%r" % (k,)

            if k is None:
                b = random.rand(self.n)
            else:
                b = random.rand(self.n, k)

            if np.issubdtype(dtype, np.complexfloating):
                b = b + 1j*random.rand(*b.shape)
            b = b.astype(dtype)

            x = lu.solve(b)
            check(A, b, x, msg)

            x = lu.solve(b, 'T')
            check(A.T, b, x, msg)

            x = lu.solve(b, 'H')
            check(A.T.conj(), b, x, msg)
Example #26
def sample_spherical_surface(N_points):
    """
    Randomly sample the sky.
    
    Parameters 
    ----------
    N_points : int
        number of points to sample.
    
    Returns 
    ----------
    coords : list 
        (ra,dec) coordinate pairs in degrees.
    """

    from numpy import random
    from numpy import sin, cos, arccos
    from math import pi

    ran1 = random.rand(N_points) #oversample, to account for box sample  
    ran2 = random.rand(N_points) #oversample, to account for box sample

    ran1 = ran1 * 2.0 * pi #convert to radians
    ran2 = arccos(2.0 * ran2 - 1.0) - 0.5*pi #convert to radians

    ran1 = ran1 * 360.0 / (2.0 * pi) #convert to degrees 
    ran2 = ran2 * 360.0 / (2.0 * pi) #convert to degrees

    ran_ra = ran1
    ran_dec = ran2

    coords = list(zip(ran_ra, ran_dec))  # list() so the documented return type holds on Python 3

    return coords
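A quick hedged sanity check that the draws are uniform on the sphere: sin(dec) of a uniform sky is uniform on [-1, 1], so its mean should be near zero:

import numpy as np

coords = sample_spherical_surface(10000)
ra, dec = np.array(coords).T
print(ra.min(), ra.max())                  # spans [0, 360) degrees
print(np.mean(np.sin(np.radians(dec))))    # ~ 0 for a uniform sky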
Example #27
    def step(self):
        '''
        Computes the new positions of the particles, a step of the algorithm.

        This method updates the velocity given the constants associated with the
        particle and global bests; and then updates the positions accordingly.
        Then, the particle bests and the global best are calculated and stored
        for future use.

        This method has no parameters and returns no values. The particles
        positions can be consulted with the ``particles``, ``pbest`` and
        ``gbest`` properties (see above).
        '''
        f = self.__f
        p = self.__p
        v = self.__v
        v = self.__k * (v + self.cp * rand() * (self.__pbest - p) \
                          + self.cg * rand() * (self.__gbest - p))
        v = select( [ v < self.__vmax ], [ v ], sign(v)*self.__vmax )
        p = p + v
        fg = self.__fgbest
        for i in range(self.__size):
            f0 = f(p[i])
            if (f0 < self.__fpbest[i]):
                self.__pbest[i] = p[i]
                self.__fpbest[i] = f0
            if (f0 < fg):
                self.__gbest = p[i]
                self.__fgbest = f0
        self.__p = p
        self.__v = v
Example #28
    def test_complex_graph(self):
        # easier debugging, start with small dimensions
        x = Variable((5,5))
        cx = conv_nofft(np.array([[1,1,1]])/3, conv_nofft(np.array([[1],[1],[1]])/3, x))
        scx = subsample(cx, (2,2))
        ed = scx - np.reshape(np.arange(3*3), (3,3))
        w = Variable(x.shape + (2,))
        gw = grad(w,2)
        Ew = gw + transpose(gw, (0,1,3,2))
        gx = grad(x,2)
        tgx = pxwise_matrixmult(np.reshape(np.arange(5*5*2*2), (5,5,2,2)), gx)
        e1 = tgx - w
        inshape = (5*5 + 5*5*2,)
        outshape = (3*3 + 5*5*2*2 + 5*5*2,)
        self._generic_check_adjoint(lambda x: (ed,e1,Ew), inshape, outshape, "complex", eps=5e-4)

        # continue with more values
        K1 = np.abs(random.rand(1,5,1))
        K2 = np.abs(random.rand(5,1,1))

        x = Variable((320,240,2))
        cx = conv_nofft(K1,conv_nofft(K2, x))
        scx = subsample(cx, (5,5,1))
        ed = scx - random.rand(64,48,2)

        w = Variable(x.shape + (2,))
        gw = grad(w, 2)
        Ew = gw + transpose(gw, (0,1,2,4,3))
        gx = grad(x,2)
        tgx = pxwise_matrixmult(random.rand(320,240,2,2,2), gx)
        e1 = tgx - w

        inshape = (320*240*2 + 320*240*2*2,)
        outshape = (64*48*2 + 320*240*2*2*2 + 320*240*2*2,)
        self._generic_check_adjoint(lambda x: (ed,e1,Ew), inshape, outshape, "complex2", eps=5e-4)
Example #29
File: optimize.py Project: mmssouza/idsc
 def gera_individuo(self):
  l = []
  l.append(random_integers(self.arg_lim[0][0],self.arg_lim[0][1]))
  l.append(self.arg_lim[1][0]+ (self.arg_lim[1][1] - self.arg_lim[1][0])*rand())
  l.append(self.arg_lim[2][0]+ (self.arg_lim[2][1] - self.arg_lim[2][0])*rand())
  l.append(random_integers(self.arg_lim[3][0],self.arg_lim[3][1]))
  return np.array(l)	
Example #30
def ConnectIzhikevichNetworkLayers(CIJ, NExcitoryLayer, NInhibitoryLayer):
  Dmax = 20 # Maximum Delay
  network = IzNetwork([NExcitoryLayer, NInhibitoryLayer], Dmax)

  NTotalNeurons = NExcitoryLayer + NInhibitoryLayer

  # Set neuron parameters for excitatory layer
  rand = rn.rand(NExcitoryLayer)
  network.layer[0].N = NExcitoryLayer
  network.layer[0].a = 0.02 * np.ones(NExcitoryLayer)
  network.layer[0].b = 0.20 * np.ones(NExcitoryLayer)
  network.layer[0].c = -65 + 15*(rand**2)
  network.layer[0].d = 8 - 6*(rand**2)
  
  ## Factor and delay
  network.layer[0].factor[0] = 17
  network.layer[0].factor[1] = 2
  network.layer[0].delay[0] = rn.randint(1,21,size=[NExcitoryLayer,NExcitoryLayer])
  network.layer[0].delay[1] = np.ones([NExcitoryLayer, NInhibitoryLayer])
 
  ## Connectivity matrix (synaptic weights)
  # layer[i].S[j] is the connectivity matrix from layer j to layer i
  # S(i,j) is the strength of the connection from neuron j to neuron i
  # excitatory-to-excitatory synaptic weights
  network.layer[0].S[0] = CIJ[0]
  # inhibitory-to-excitatory synaptic weights
  network.layer[0].S[1] = CIJ[1]
  # scale the inhibitory-to-excitatory weights by negative random factors
  rand_array = -1 * rn.random(NInhibitoryLayer*NExcitoryLayer).reshape(NExcitoryLayer,NInhibitoryLayer)
  network.layer[0].S[1] = np.multiply(network.layer[0].S[1],rand_array)

  # Set neuron parameters for inhibitory layer
  rand = rn.rand(NInhibitoryLayer)
  network.layer[1].N = NInhibitoryLayer
  network.layer[1].a = 0.02 + 0.08*rand
  network.layer[1].b = 0.25 - 0.05*rand
  network.layer[1].c = -65 * np.ones(NInhibitoryLayer)
  network.layer[1].d = 2 * np.ones(NInhibitoryLayer)
  
  ## Factor and delay
  network.layer[1].factor[0] = 50
  network.layer[1].factor[1] = 1
  network.layer[1].delay[0] = np.ones([NInhibitoryLayer, NExcitoryLayer])
  network.layer[1].delay[1] = np.ones([NInhibitoryLayer, NInhibitoryLayer])
 
  ## Connectivity matrix (synaptic weights)
  # layer[i].S[j] is the connectivity matrix from layer j to layer i
  # S(i,j) is the strength of the connection from neuron j to neuron i
  # excitatory-to-inhibitory synaptic weights
  network.layer[1].S[0] = CIJ[2]
  # inhibitory-to-inhibitory synaptic weights
  network.layer[1].S[1] = CIJ[3]
  # scale the excitatory-to-inhibitory weights by random factors
  rand_array = rn.random(NInhibitoryLayer*NExcitoryLayer).reshape(NInhibitoryLayer,NExcitoryLayer)
  network.layer[1].S[0] = np.multiply(network.layer[1].S[0],rand_array)

  # scale the inhibitory-to-inhibitory weights by negative random factors
  rand_array = -1 * rn.random(NInhibitoryLayer*NInhibitoryLayer).reshape(NInhibitoryLayer,NInhibitoryLayer)
  network.layer[1].S[1] = np.multiply(network.layer[1].S[1],rand_array)
  return network
Example #31
def als(X, rank, **kwargs):
    """
    RESCAL-ALS algorithm to compute the RESCAL tensor factorization.


    Parameters
    ----------
    X : list
        List of frontal slices X_k of the tensor X.
        The shape of each X_k is ('N', 'N').
        X_k's are expected to be instances of scipy.sparse.csr_matrix
    rank : int
        Rank of the factorization
    lmbdaA : float, optional
        Regularization parameter for A factor matrix. 0 by default
    lmbdaR : float, optional
        Regularization parameter for R_k factor matrices. 0 by default
    lmbdaV : float, optional
        Regularization parameter for V_l factor matrices. 0 by default
    attr : list, optional
        List of sparse ('N', 'L_l') attribute matrices. 'L_l' may be different
        for each attribute
    init : string, optional
        Initialization method of the factor matrices. 'nvecs' (default)
        initializes A based on the eigenvectors of X. 'random' initializes
        the factor matrices randomly.
    compute_fit : boolean, optional
        If true, compute the fit of the factorization compared to X.
        For large sparse tensors this should be turned off. None by default.
    maxIter : int, optional
        Maximum number of iterations of the ALS algorithm. 500 by default.
    conv : float, optional
        Stop when residual of factorization is less than conv. 1e-5 by default

    Returns
    -------
    A : ndarray
        array of shape ('N', 'rank') corresponding to the factor matrix A
    R : list
        list of 'M' arrays of shape ('rank', 'rank') corresponding to the
        factor matrices R_k
    fval : float
        function value of the factorization
    itr : int
        number of iterations until convergence
    exectimes : ndarray
        execution times to compute the updates in each iteration

    Examples
    --------
    >>> X1 = csr_matrix(([1,1,1], ([2,1,3], [0,2,3])), shape=(4, 4))
    >>> X2 = csr_matrix(([1,1,1,1], ([0,2,3,3], [0,1,2,3])), shape=(4, 4))
    >>> A, R, fval, iter, exectimes = rescal([X1, X2], 2)

    See
    ---
    For a full description of the algorithm see:
    .. [1] Maximilian Nickel, Volker Tresp, Hans-Peter Kriegel,
        "A Three-Way Model for Collective Learning on Multi-Relational Data",
        ICML 2011, Bellevue, WA, USA

    .. [2] Maximilian Nickel, Volker Tresp, Hans-Peter Kriegel,
        "Factorizing YAGO: Scalable Machine Learning for Linked Data"
        WWW 2012, Lyon, France
    """

    # ------------ init options ----------------------------------------------
    ainit = kwargs.pop('init', _DEF_INIT)
    maxIter = kwargs.pop('maxIter', _DEF_MAXITER)
    conv = kwargs.pop('conv', _DEF_CONV)
    lmbdaA = kwargs.pop('lambda_A', _DEF_LMBDA)
    lmbdaR = kwargs.pop('lambda_R', _DEF_LMBDA)
    lmbdaV = kwargs.pop('lambda_V', _DEF_LMBDA)
    func_compute_fval = kwargs.pop('compute_fval', _DEF_FIT_METHOD)
    orthogonalize = kwargs.pop('orthogonalize', False)
    P = kwargs.pop('attr', _DEF_ATTR)
    dtype = kwargs.pop('dtype', float)  # np.float was removed in NumPy 1.24

    # ------------- check input ----------------------------------------------
    if not len(kwargs) == 0:
        raise ValueError('Unknown keywords (%s)' % (kwargs.keys()))

    # check frontal slices have same size and are matrices
    sz = X[0].shape
    for i in range(len(X)):
        if X[i].ndim != 2:
            raise ValueError('Frontal slices of X must be matrices')
        if X[i].shape != sz:
            raise ValueError('Frontal slices of X must be all of same shape')
        #if not issparse(X[i]):
        #raise ValueError('X[%d] is not a sparse matrix' % i)

    if func_compute_fval is None:
        if orthogonalize:
            func_compute_fval = _compute_fval_orth
        elif prod(X[0].shape) * len(X) > _DEF_NO_FIT:
            _log.warning(
                'For large tensors automatic computation of fit is disabled by default\nTo compute the fit, call rescal.als with "compute_fit=True"\nPlease note that this might cause memory and runtime problems'
            )
            func_compute_fval = None
        else:
            func_compute_fval = _compute_fval

    n = sz[0]
    k = len(X)

    _log.debug('[Config] rank: %d | maxIter: %d | conv: %7.1e | lmbda: %7.1e' %
               (rank, maxIter, conv, lmbdaA))
    _log.debug('[Config] dtype: %s / %s' % (dtype, X[0].dtype))

    # ------- convert X and P to CSR ------------------------------------------
    for i in range(k):
        if issparse(X[i]):
            X[i] = X[i].tocsr()
            X[i].sort_indices()
    for i in range(len(P)):
        if issparse(P[i]):
            P[i] = P[i].tocoo().tocsr()
            P[i].sort_indices()

    # ---------- initialize A ------------------------------------------------
    _log.debug('Initializing A')
    if ainit == 'random':
        A = array(rand(n, rank), dtype=dtype)
    elif ainit == 'nvecs':
        S = csr_matrix((n, n), dtype=dtype)
        for i in range(k):
            S = S + X[i]
            S = S + X[i].T
        _, A = eigsh(csr_matrix(S, dtype=dtype, shape=(n, n)), rank)
        A = array(A, dtype=dtype)
    else:
        raise ValueError('Unknown init option ("%s")' % ainit)

    # ------- initialize R and Z ---------------------------------------------
    R = _updateR(X, A, lmbdaR)
    Z = _updateZ(A, P, lmbdaV)

    # precompute norms of X
    normX = [sum(M.data**2) for M in X]

    #  ------ compute factorization ------------------------------------------
    fit = fitchange = fitold = f = 0
    exectimes = []
    for itr in range(maxIter):
        tic = time.time()
        fitold = fit
        A = _updateA(X, A, R, P, Z, lmbdaA, orthogonalize)
        R = _updateR(X, A, lmbdaR)
        Z = _updateZ(A, P, lmbdaV)

        # compute fit value
        if func_compute_fval is not None:
            fit = func_compute_fval(X, A, R, P, Z, lmbdaA, lmbdaR, lmbdaV,
                                    normX)
        else:
            fit = np.inf  # np.Inf was removed in NumPy 2.0

        fitchange = abs(fitold - fit)

        toc = time.time()
        exectimes.append(toc - tic)

        _log.debug('[%3d] fval: %0.5f | delta: %7.1e | secs: %.5f' %
                   (itr, fit, fitchange, exectimes[-1]))
        if itr > 0 and fitchange < conv:
            break
    return A, R, f, itr + 1, array(exectimes)
Example #32
def linear_shock(contribution_table, shock):
    import numpy.random as NPRD
    table_dim = contribution_table.shape
    ep = NPRD.rand(table_dim[0], table_dim[1], table_dim[2])
    new_table = (1 - shock) * contribution_table + shock * ep
    return new_table
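A hedged usage example: the function blends a 3-D contribution table with uniform noise, so shock=0 returns the table unchanged and shock=1 returns pure noise:

import numpy as np

table = np.full((2, 3, 4), 0.5)
shocked = linear_shock(table, shock=0.2)
print(shocked.shape)                                  # (2, 3, 4)
print(np.allclose(linear_shock(table, 0.0), table))   # True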
Example #33
        fig.drawRadarPoints(xs[s : e], ys[s : e], colors[i])


    fig.setXYLim(0, 2000, 0, 3000)
    fig.showFigure()
'''

import matplotlib.pyplot as plt
import numpy.random as rnd
from matplotlib.patches import Ellipse
import matplotlib.patches

NUM = 250

ells = [
    Ellipse(xy=rnd.rand(2) * 10,
            width=rnd.rand(),
            height=rnd.rand(),
            angle=rnd.rand() * 360) for i in range(NUM)
]
e = Ellipse((0, 0), width=30, height=40, angle=0, facecolor='black')

fig, ax = plt.subplots()
#ax = plt.subplots()#fig.add_subplot(111, aspect='equal')
#for e in ells:
ax.add_patch(e)
#ax.add_artist(e)
#e.set_clip_box(ax.bbox)
#e.set_alpha(rnd.rand())
#e.set_facecolor(rnd.rand(3))
Example #34
    def test_andrews_curves(self, iris):
        from matplotlib import cm

        from pandas.plotting import andrews_curves

        df = iris

        _check_plot_works(andrews_curves, frame=df, class_column="Name")

        rgba = ("#556270", "#4ECDC4", "#C7F464")
        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", color=rgba
        )
        self._check_colors(
            ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10]
        )

        cnames = ["dodgerblue", "aquamarine", "seagreen"]
        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", color=cnames
        )
        self._check_colors(
            ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10]
        )

        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", colormap=cm.jet
        )
        cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
        self._check_colors(
            ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]
        )

        length = 10
        df = DataFrame(
            {
                "A": random.rand(length),
                "B": random.rand(length),
                "C": random.rand(length),
                "Name": ["A"] * length,
            }
        )

        _check_plot_works(andrews_curves, frame=df, class_column="Name")

        rgba = ("#556270", "#4ECDC4", "#C7F464")
        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", color=rgba
        )
        self._check_colors(
            ax.get_lines()[:10], linecolors=rgba, mapping=df["Name"][:10]
        )

        cnames = ["dodgerblue", "aquamarine", "seagreen"]
        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", color=cnames
        )
        self._check_colors(
            ax.get_lines()[:10], linecolors=cnames, mapping=df["Name"][:10]
        )

        ax = _check_plot_works(
            andrews_curves, frame=df, class_column="Name", colormap=cm.jet
        )
        cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())]
        self._check_colors(
            ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]
        )

        colors = ["b", "g", "r"]
        df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors})
        ax = andrews_curves(df, "Name", color=colors)
        handles, labels = ax.get_legend_handles_labels()
        self._check_colors(handles, linecolors=colors)
Example #35
def generate_test_batches(root_path,
                          test_list,
                          net_input_shape,
                          batchSize=1,
                          numSlices=1,
                          subSampAmt=0,
                          stride=1,
                          downSampAmt=1):
    # Create placeholders for testing
    logging.info('\nload_3D_data.generate_test_batches')
    img_batch = np.zeros((np.concatenate(((batchSize, ), net_input_shape))),
                         dtype=np.float32)
    count = 0
    logging.info('\nload_3D_data.generate_test_batches: test_list=%s' %
                 (test_list))
    for i, scan_name in enumerate(test_list):
        try:
            scan_name = scan_name[0]
            path_to_np = join(root_path, 'np_files',
                              basename(scan_name)[:-3] + 'npz')
            with np.load(path_to_np) as data:
                test_img = data['img']
        except Exception:
            logging.info(
                '\nPre-made numpy array not found for {}.\nCreating now...'.
                format(scan_name[:-4]))
            test_img = convert_data_to_numpy(root_path,
                                             scan_name,
                                             no_masks=True)
            if np.array_equal(test_img, np.zeros(1)):
                continue
            else:
                logging.info('\nFinished making npz file.')

        if numSlices == 1:
            subSampAmt = 0
        elif subSampAmt == -1 and numSlices > 1:
            np.random.seed(None)
            subSampAmt = int(rand(1) * (test_img.shape[2] * 0.05))

        indicies = np.arange(
            0, test_img.shape[2] - numSlices * (subSampAmt + 1) + 1, stride)
        for j in indicies:
            if img_batch.ndim == 4:
                img_batch[
                    count, :, :, :] = test_img[:, :, j:j + numSlices *
                                               (subSampAmt + 1):subSampAmt + 1]
            elif img_batch.ndim == 5:
                # Assumes img and mask are single channel. Replace 0 with : if multi-channel.
                img_batch[count, :, :, :,
                          0] = test_img[:, :, j:j + numSlices *
                                        (subSampAmt + 1):subSampAmt + 1]
            else:
                logging.error(
                    'Error this function currently only supports 2D and 3D data.'
                )
                exit(0)

            count += 1
            if count % batchSize == 0:
                count = 0
                yield (img_batch)

    if count != 0:
        yield (img_batch[:count, :, :, :])
Example #36
def generate_val_batches(root_path,
                         val_list,
                         net_input_shape,
                         net,
                         batchSize=1,
                         numSlices=1,
                         subSampAmt=-1,
                         stride=1,
                         downSampAmt=1,
                         shuff=1):
    # Create placeholders for validation
    img_batch = np.zeros((np.concatenate(((batchSize, ), net_input_shape))),
                         dtype=np.float32)
    mask_batch = np.zeros((np.concatenate(((batchSize, ), net_input_shape))),
                          dtype=np.uint8)

    while True:
        if shuff:
            shuffle(val_list)
        count = 0
        for i, scan_name in enumerate(val_list):
            try:
                scan_name = scan_name[0]
                path_to_np = join(root_path, 'np_files',
                                  basename(scan_name)[:-3] + 'npz')
                with np.load(path_to_np) as data:
                    val_img = data['img']
                    val_mask = data['mask']
            except Exception:
                logging.info(
                    '\nPre-made numpy array not found for {}.\nCreating now...'
                    .format(scan_name[:-4]))
                val_img, val_mask = convert_data_to_numpy(root_path, scan_name)
                if np.array_equal(val_img, np.zeros(1)):
                    continue
                else:
                    logging.info('\nFinished making npz file.')

            if numSlices == 1:
                subSampAmt = 0
            elif subSampAmt == -1 and numSlices > 1:
                np.random.seed(None)
                subSampAmt = int(rand(1) * (val_img.shape[2] * 0.05))

            indicies = np.arange(
                0, val_img.shape[2] - numSlices * (subSampAmt + 1) + 1, stride)
            if shuff:
                shuffle(indicies)

            for j in indicies:
                if not np.any(val_mask[:, :, j:j + numSlices *
                                       (subSampAmt + 1):subSampAmt + 1]):
                    continue
                if img_batch.ndim == 4:
                    img_batch[
                        count, :, :, :] = val_img[:, :, j:j + numSlices *
                                                  (subSampAmt + 1):subSampAmt +
                                                  1]
                    mask_batch[count, :, :, :] = val_mask[:, :,
                                                          j:j + numSlices *
                                                          (subSampAmt +
                                                           1):subSampAmt + 1]
                elif img_batch.ndim == 5:
                    # Assumes img and mask are single channel. Replace 0 with : if multi-channel.
                    img_batch[count, :, :, :,
                              0] = val_img[:, :, j:j + numSlices *
                                           (subSampAmt + 1):subSampAmt + 1]
                    mask_batch[count, :, :, :,
                               0] = val_mask[:, :, j:j + numSlices *
                                             (subSampAmt + 1):subSampAmt + 1]
                else:
                    logging.error(
                        'Error this function currently only supports 2D and 3D data.'
                    )
                    exit(0)

                count += 1
                if count % batchSize == 0:
                    count = 0
                    if net.find('caps') != -1:
                        yield ([img_batch, mask_batch],
                               [mask_batch, mask_batch * img_batch])
                    else:
                        yield (img_batch, mask_batch)

        if count != 0:
            if net.find('caps') != -1:
                yield ([img_batch[:count, ...], mask_batch[:count, ...]], [
                    mask_batch[:count, ...],
                    mask_batch[:count, ...] * img_batch[:count, ...]
                ])
            else:
                yield (img_batch[:count, ...], mask_batch[:count, ...])
Example #37
def generate_train_batches(root_path,
                           train_list,
                           net_input_shape,
                           net,
                           batchSize=1,
                           numSlices=1,
                           subSampAmt=-1,
                           stride=1,
                           downSampAmt=1,
                           shuff=1,
                           aug_data=1):
    # Create placeholders for training
    # (img_shape[1], img_shape[2], args.slices)
    img_batch = np.zeros((np.concatenate(((batchSize, ), net_input_shape))),
                         dtype=np.float32)
    mask_batch = np.zeros((np.concatenate(((batchSize, ), net_input_shape))),
                          dtype=np.uint8)

    while True:
        if shuff:
            shuffle(train_list)
        count = 0
        for i, scan_name in enumerate(train_list):
            try:
                scan_name = scan_name[0]
                path_to_np = join(root_path, 'np_files',
                                  basename(scan_name)[:-3] + 'npz')
                logging.info('\npath_to_np=%s' % (path_to_np))
                with np.load(path_to_np) as data:
                    train_img = data['img']
                    train_mask = data['mask']
            except Exception:
                logging.info(
                    '\nPre-made numpy array not found for {}.\nCreating now...'
                    .format(scan_name[:-4]))
                train_img, train_mask = convert_data_to_numpy(
                    root_path, scan_name)
                if np.array_equal(train_img, np.zeros(1)):
                    continue
                else:
                    logging.info('\nFinished making npz file.')

            if numSlices == 1:
                subSampAmt = 0
            elif subSampAmt == -1 and numSlices > 1:
                np.random.seed(None)
                subSampAmt = int(rand(1) * (train_img.shape[2] * 0.05))

            indicies = np.arange(
                0, train_img.shape[2] - numSlices * (subSampAmt + 1) + 1,
                stride)
            if shuff:
                shuffle(indicies)

            for j in indicies:
                if not np.any(train_mask[:, :, j:j + numSlices *
                                         (subSampAmt + 1):subSampAmt + 1]):
                    continue
                if img_batch.ndim == 4:
                    img_batch[count, :, :, :] = train_img[:, :,
                                                          j:j + numSlices *
                                                          (subSampAmt +
                                                           1):subSampAmt + 1]
                    mask_batch[count, :, :, :] = train_mask[:, :,
                                                            j:j + numSlices *
                                                            (subSampAmt +
                                                             1):subSampAmt + 1]
                elif img_batch.ndim == 5:
                    # Assumes img and mask are single channel. Replace 0 with : if multi-channel.
                    img_batch[count, :, :, :,
                              0] = train_img[:, :, j:j + numSlices *
                                             (subSampAmt + 1):subSampAmt + 1]
                    mask_batch[count, :, :, :,
                               0] = train_mask[:, :, j:j + numSlices *
                                               (subSampAmt + 1):subSampAmt + 1]
                else:
                    logging.error(
                        'Error this function currently only supports 2D and 3D data.'
                    )
                    exit(0)

                count += 1
                if count % batchSize == 0:
                    count = 0
                    if aug_data:
                        img_batch, mask_batch = augmentImages(
                            img_batch, mask_batch)
                    if debug:
                        if img_batch.ndim == 4:
                            plt.imshow(np.squeeze(img_batch[0, :, :, 0]),
                                       cmap='gray')
                            plt.imshow(np.squeeze(mask_batch[0, :, :, 0]),
                                       alpha=0.15)
                        elif img_batch.ndim == 5:
                            plt.imshow(np.squeeze(img_batch[0, :, :, 0, 0]),
                                       cmap='gray')
                            plt.imshow(np.squeeze(mask_batch[0, :, :, 0, 0]),
                                       alpha=0.15)
                        plt.savefig(join(root_path, 'logs', 'ex_train.png'),
                                    format='png',
                                    bbox_inches='tight')
                        plt.close()
                    if net.find(
                            'caps'
                    ) != -1:  # if the network is capsule/segcaps structure
                        yield ([img_batch, mask_batch],
                               [mask_batch, mask_batch * img_batch])
                    else:
                        yield (img_batch, mask_batch)

        if count != 0:
            if aug_data:
                img_batch[:count,
                          ...], mask_batch[:count, ...] = augmentImages(
                              img_batch[:count, ...], mask_batch[:count, ...])
            if net.find('caps') != -1:
                yield ([img_batch[:count, ...], mask_batch[:count, ...]], [
                    mask_batch[:count, ...],
                    mask_batch[:count, ...] * img_batch[:count, ...]
                ])
            else:
                yield (img_batch[:count, ...], mask_batch[:count, ...])
Example #38
 def randx(self):
     return rand(self.NLPDim) * (self.UB - self.LB) + self.LB
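The one-liner maps uniform draws on [0, 1) onto the box [LB, UB] componentwise; a standalone equivalent, assuming LB and UB are arrays of length NLPDim:

import numpy as np
from numpy.random import rand

LB, UB = np.array([-1.0, 0.0]), np.array([1.0, 5.0])
x = rand(2) * (UB - LB) + LB   # uniform in [-1, 1) x [0, 5)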
Example #39
def rand_noise(x):
	rad=(npr.rand()*2*math.pi)
	return (math.cos(rad)+1j*math.sin(rad))
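The return value is a uniformly random point on the complex unit circle (the argument x is unused); a quick check of the unit modulus, with the imports the excerpt assumes:

import math
import numpy.random as npr

z = rand_noise(None)
assert abs(abs(z) - 1.0) < 1e-12   # e**(i*theta) has modulus 1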
Example #40
def random_room_builder(
    wav_files: List[str],
    n_mics: int,
    mic_delta: Optional[float] = None,
    fs: float = 16000,
    t60_interval: Tuple[float, float] = (0.150, 0.500),
    room_width_interval: Tuple[float, float] = (6, 10),
    room_height_interval: Tuple[float, float] = (2.8, 4.5),
    source_zone_height: Tuple[float, float] = (1.0, 2.0),
    guard_zone_width: float = 0.5,
    seed: Optional[int] = None,
):
    """
    This function creates a random room within some parameters.

    The microphone array is circular with the distance between neighboring
    elements set to the maximal distance avoiding spatial aliasing.

    Parameters
    ----------
    wav_files: list of numpy.ndarray
        A list of audio signals for each source
    n_mics: int
        The number of microphones in the microphone array
    mic_delta: float, optional
        The distance between neighboring microphones in the array
    fs: float, optional
        The sampling frequency for the simulation
    t60_interval: (float, float), optional
        An interval where to pick the reverberation time
    room_width_interval: (float, float), optional
        An interval where to pick the room horizontal length/width
    room_height_interval: (float, float), optional
        An interval where to pick the room vertical length
    source_zone_height: (float, float), optional
        The vertical interval where sources and microphones are allowed
    guard_zone_width: float
        The minimum distance between a vertical wall and a source/microphone
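    seed: int, optional
        If provided, the numpy RNG state is saved and the generator seeded
        for reproducibility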

    Returns
    -------
    ShoeBox object
        A randomly generated room according to the provided parameters
    float
        The measured T60 reverberation time of the room created
    """

    # save current numpy RNG state and set a known seed
    if seed is not None:
        rng_state = np.random.get_state()
        np.random.seed(seed)

    n_sources = len(wav_files)

    # sanity checks
    assert source_zone_height[0] > 0
    assert source_zone_height[1] < room_height_interval[0]
    assert source_zone_height[0] <= source_zone_height[1]
    assert 2 * guard_zone_width < room_width_interval[1] - room_width_interval[0]

    def random_location(
        room_dim, n, ref_point=None, min_distance=None, max_distance=None
    ):
        """ Helper function to pick a location in the room """

        width = room_dim[0] - 2 * guard_zone_width
        width_intercept = guard_zone_width

        depth = room_dim[1] - 2 * guard_zone_width
        depth_intercept = guard_zone_width

        height = np.diff(source_zone_height)[0]
        height_intercept = source_zone_height[0]

        locs = rand(3, n)
        locs[0, :] = locs[0, :] * width + width_intercept
        locs[1, :] = locs[1, :] * depth + depth_intercept
        locs[2, :] = locs[2, :] * height + height_intercept

        if ref_point is not None:
            # Check condition
            d = np.linalg.norm(locs - ref_point, axis=0)

            if min_distance is not None and max_distance is not None:
                redo = np.where(np.logical_or(d < min_distance, max_distance < d))[0]
            elif min_distance is not None:
                redo = np.where(d < min_distance)[0]
            elif max_distance is not None:
                redo = np.where(d > max_distance)[0]
            else:
                redo = []

            # Recursively call this function on sources to redraw
            if len(redo) > 0:
                locs[:, redo] = random_location(
                    room_dim,
                    len(redo),
                    ref_point=ref_point,
                    min_distance=min_distance,
                    max_distance=max_distance,
                )

        return locs

    c = pra.constants.get("c")

    # Create the room
    # Sometimes the room dimension and required T60 are not compatible, then
    # we just try again with new random values
    retry = True
    while retry:
        try:
            room_dim = np.array(
                [
                    rand() * np.diff(room_width_interval)[0] + room_width_interval[0],
                    rand() * np.diff(room_width_interval)[0] + room_width_interval[0],
                    rand() * np.diff(room_height_interval)[0] + room_height_interval[0],
                ]
            )
            t60 = rand() * np.diff(t60_interval)[0] + t60_interval[0]
            reflection, max_order = inv_sabine(t60, room_dim, c)
            retry = False
        except ValueError:
            pass
    # Create the room based on the random parameters
    room = pra.ShoeBox(room_dim, fs=fs, absorption=1 - reflection, max_order=max_order)

    # The critical distance
    # https://en.wikipedia.org/wiki/Critical_distance
    d_critical = 0.057 * np.sqrt(np.prod(room_dim) / t60)

    # default inter-mic distance is set according to nyquist criterion
    # i.e. 1/2 wavelength corresponding to fs / 2 at given speed of sound
    if mic_delta is None:
        mic_delta = 0.5 * (c / (0.5 * fs))
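        # e.g. with c ~ 343 m/s and fs = 16 kHz: 0.5 * 343 / 8000 ~ 0.021 m,
        # i.e. roughly 2 cm between neighboring elements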

    # the microphone array is uniformly circular with the distance between
    # neighboring elements of mic_delta
    mic_center = random_location(room_dim, 1)
    mic_rotation = rand() * 2 * np.pi
    mic_radius = 0.5 * mic_delta / np.sin(np.pi / n_mics)
    mic_array = pra.MicrophoneArray(
        np.vstack(
            (
                pra.circular_2D_array(
                    mic_center[:2, 0], n_mics, mic_rotation, mic_radius
                ),
                mic_center[2, 0] * np.ones(n_mics),
            )
        ),
        room.fs,
    )
    room.add_microphone_array(mic_array)

    # Now we will get the sources at random
    source_locs = []

    # Choose the target location at least as far as the critical distance
    # Then the other sources, yet one further meter away
    source_locs = random_location(
        room_dim,
        n_sources,
        ref_point=mic_center,
        min_distance=d_critical,
        max_distance=d_critical + 1,
    )

    source_signals = wav_read_center(wav_files, seed=123)

    for s, signal in enumerate(source_signals):
        room.add_source(source_locs[:, s], signal=signal)

    # pre-compute the impulse responses
    room.compute_rir()

    # average the t60 of all the RIRs
    t60_actual = np.mean(
        [
            pra.experimental.measure_rt60(rir, room.fs)
            for mic_list in room.rir
            for rir in mic_list
        ]
    )

    # save the parameters
    room_params = {
        "shoebox_params": {
            "room_dim": room_dim.tolist(),
            "fs": fs,
            "absorption": 1 - reflection,
            "max_order": max_order,
        },
        "mics": mic_array.R.T.tolist(),
        "sources": {"locs": source_locs.T.tolist(), "signals": wav_files},
        "t60": t60_actual.tolist(),
    }

    # restore numpy RNG former state
    if seed is not None:
        np.random.set_state(rng_state)

    return room, room_params
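A minimal usage sketch (hypothetical file names; assumes pyroomacoustics and the helpers above are importable):

wavs = ["speech1.wav", "speech2.wav"]  # hypothetical paths
room, params = random_room_builder(wavs, n_mics=4, seed=0)
room.simulate()
print("measured T60:", params["t60"])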
Example #41
0
        ksp_flow.solve(w.vector(), b_flow, annotate=annotate)

        solve(temp_eq[0] == temp_eq[1], T, temp_bcs, annotate=annotate)
        if t == 0:
            J += 0.5 * assemble(T * T * dx)
        T_.assign(T, annotate=annotate)
        #plot(T)

        t += timestep
    J += 0.5 * assemble(T * T * dx)
    return J, T


if __name__ == "__main__":

    # Run model
    T0_expr = "0.5*(1.0 - x[1]*x[1]) + 0.01*cos(pi*x[0]/l)*sin(pi*x[1]/h)"
    T0 = Expression(T0_expr, l=1.0, h=1.0, degree=1)
    ic = interpolate(T0, X)
    ic.rename("", "InitialCondition")
    J, T = main(ic, annotate=True)

    Jhat = ReducedFunctional(J, Control(ic))
    from numpy.random import rand, seed
    seed(21)
    p = Function(T.function_space())
    p.vector()[:] = 50 * rand(T.function_space().dim())
    results = taylor_to_dict(Jhat, ic, p)
    print(results)
    assert min(results["R1"]["Rate"]) > 1.85
Example #42
0
File: bpsk.py  Project: senrya/tma-project
from numpy import sqrt
from numpy.random import rand, randn
import matplotlib.pyplot as plt

N = 5000000
EbNodB_range = range(0, 11)
itr = len(EbNodB_range)
ber = [None] * itr

for n in range(0, itr):

    EbNodB = EbNodB_range[n]
    EbNo = 10.0**(EbNodB / 10.0)
    x = 2 * (rand(N) >= 0.5) - 1    # random +/-1 BPSK symbols
    noise_std = 1 / sqrt(2 * EbNo)  # noise standard deviation for unit-energy symbols
    y = x + noise_std * randn(N)    # AWGN channel
    y_d = 2 * (y >= 0) - 1          # hard-decision detection
    errors = (x != y_d).sum()
    ber[n] = 1.0 * errors / N

    print("EbNodB:", EbNodB)
    print("Error bits:", errors)
    print("Error probability:", ber[n])

plt.plot(EbNodB_range, ber, 'bo', EbNodB_range, ber, 'k')
plt.axis([0, 10, 1e-6, 0.1])
plt.xscale('linear')
plt.yscale('log')
plt.xlabel('EbNo(dB)')
plt.ylabel('BER')
plt.grid(True)
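As a sanity check, the simulated curve can be compared against the closed-form BPSK error rate P_b = Q(sqrt(2*Eb/N0)) = 0.5*erfc(sqrt(Eb/N0)); a hedged overlay sketch (assumes scipy is available):

from scipy.special import erfc
ber_theory = [0.5 * erfc(sqrt(10.0**(dB / 10.0))) for dB in EbNodB_range]
plt.plot(EbNodB_range, ber_theory, 'r--')
plt.show()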
Example #43
0
import numpy as np
import pandas as pd
from numpy.random import rand

np.random.seed(101)
#Create a pandas DataFrame from a NumPy array
print('pandas DataFrame')
df = pd.DataFrame(rand(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
print(df)

#Add a row to the DataFrame
df.loc['F'] = [0.2345, 0.4567, 0.6789, 0.7890]
print(df)

#Add a row where every element is the same value
df.loc['G'] = '12345'

#Drop a row: without inplace=True this returns a copy and df is unchanged
print('\nDrop row from DataFrame')
df.drop('G', axis=0)
print(df)

print('\nDrop row from DataFrame with inplace=True')
df.drop('G', axis=0, inplace=True)
print(df)
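For completeness, the same method drops a column when axis=1 (a small sketch):

print(df.drop('W', axis=1))  # returns a copy of df without column 'W'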
Example #44
0
def random(size):
    # Unpack the size tuple into rand's positional dimension arguments
    return rand(*size)
Example #45
0
from numpy import random as rand

def random_event(probability=0.5):
    # Bernoulli trial: True with the given probability
    return rand.rand() < probability
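# Vectorized variant: rand.rand(n) < probability gives n independent trials.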
Example #46
0
def solve_via_Newtons_method(f_org,
                             x0,
                             maxStep,
                             grad_f=None,
                             x_tol=10**-6,
                             f_tol=None,
                             maxIt=100,
                             randomPertubationCount=2,
                             debugPrintLevel=0,
                             printF=toStdOut,
                             lineSearchIt=5,
                             record=False):
    '''
    Determine the roots of a non-linear equation using Newton's method.
    '''
    f = SearchAnalyticsWrapper(f_org) if record else f_org
    n = len(x0)
    x = numpy.array(x0)
    x_c = numpy.zeros(n) * numpy.nan
    x_prev = numpy.zeros([
        maxIt + 1, n
    ])  #used to check against cyclic behaviour, for randomPertubationCount
    x_prev[0, :] = x
    if grad_f is None:
        #grad_f = GradientApproximatorForwardDifference(f)
        grad_f = GradientApproximatorCentralDifference(f)
    if lineSearchIt > 0:
        f_ls = lambda x: norm(f(x))
    for i in range(maxIt):
        b = numpy.array(-f(x))
        singleEq = b.shape == () or b.shape == (1, )
        if debugPrintLevel > 0:
            printF('it %02i: norm(prev. step) %1.1e norm(f(x))  %1.1e' %
                   (i, norm(x_c), norm(-b)))
        if debugPrintLevel > 1:
            printF('  x    %s' % x)
            printF('  f(x) %s' % (-b))
        if norm(x_c) <= x_tol:
            break
        if f_tol is not None:
            if singleEq and abs(b) < f_tol:
                break
            elif not singleEq and all(abs(b) < f_tol):
                break
        if not isinstance(grad_f, GradientApproximatorForwardDifference):
            A = grad_f(x)
        else:
            A = grad_f(x, f0=-b)
        if len(A.shape) == 1:  #singleEq
            A = numpy.array([A])
            b = numpy.array([b])
        try:
            x_c, residuals, rank, s = numpy.linalg.lstsq(A, b)
        except ValueError as e:
            printF(
                '  solve_via_Newtons_method numpy.linalg.lstsq failed: %s.  Setting x_c = x'
                % str(e))
            x_c = x
        if debugPrintLevel > 1:
            if singleEq:
                printF('  grad_f : %s' % A)
            else:
                printF('  grad_f :')
                prettyPrintArray(A, printF, '    ')
            printF('  x_c    %s' % x_c)
        r = abs(x_c / maxStep)
        if r.max() > 1:
            x_c = x_c / r.max()
        if lineSearchIt > 0:
            #x_next = goldenSectionSearch( f_ls, x, norm(b), x_c, lineSearchIt, lineSearchIt_x0, debugPrintLevel, printF )
            x_next = quadraticLineSearch(f_ls,
                                         x,
                                         norm(b),
                                         x_c,
                                         lineSearchIt,
                                         debugPrintLevel - 2,
                                         printF,
                                         tol_x=x_tol)
            x_c = x_next - x
        x = x + x_c
        if randomPertubationCount > 0:  # then perturb to avoid lock-up [i.e. the jam which occurs when trying to solve an axis-direction constraint]
            distances = ((x_prev[:i + 1, :] - x)**2).sum(axis=1)
            #print(distances)
            if any(distances <= x_tol):
                if debugPrintLevel > 0:
                    printF(
                        ' any(distances < x_tol) therefore randomPertubation...'
                    )
                x_p = (0.5 -
                       rand(n)) * numpy.array(maxStep) * (1 - i * 1.0 / maxIt)
                x = x + x_p
                x_c = x_c + x_p
                randomPertubationCount = randomPertubationCount - 1
            x_prev[i + 1, :] = x  # store the new iterate (row 0 keeps x0)
    return x
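A minimal usage sketch (hypothetical linear test problem; assumes the helpers above are importable). Newton's method solves x0 + x1 = 3, x0 - x1 = 1 in a couple of iterations:

def f_demo(x):
    return numpy.array([x[0] + x[1] - 3.0, x[0] - x[1] - 1.0])

def grad_demo(x):
    return numpy.array([[1.0, 1.0], [1.0, -1.0]])

x_root = solve_via_Newtons_method(f_demo, [0.0, 0.0], maxStep=[5.0, 5.0],
                                  grad_f=grad_demo, lineSearchIt=0,
                                  printF=print)
# x_root should be close to [2.0, 1.0]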
Example #47
0
    def action_callback(self, state):
        '''Decide whether to jump (True) or do nothing (False) for the current state.'''
        if self.last_state is None:
            new_action = npr.rand() < 0.1
        else:
            '''
            # 0. Running mean estimates of Gravity, Speed and Impulse
            '''
            #trim running list if it is too long
            if len(self.speedList) > self.estlen:
                self.speedList.pop(0)
            if len(self.graviList) > self.estlen:
                self.graviList.pop(0)
            if len(self.impulList) > self.estlen:
                self.impulList.pop(0)

            # estimate speed using distance to tree.
            # (Using old estimate during transition period)
            if state["tree"]["dist"] > 0 and self.last_state["tree"]["dist"] > 0:
                self.speedList.append(
                    self.last_state["tree"]["dist"] - state["tree"]["dist"]
                )

            # estimate gravity using velocity
            # (Using old estimate if previous action is 1)
            if self.last_action is False:
                self.graviList.append(
                    state["monkey"]["vel"] - self.last_state["monkey"]["vel"]
                )
            else:
                self.impulList.append(state["monkey"]["vel"])

            speed = np.mean(self.speedList)
            gravity = np.mean(self.graviList)
            impulse = np.mean(self.impulList)
            '''
            print str(speed) + "\t" + str(gravity) + "\t" + str(impulse) +\
                    "\t" + str(state["tree"]["dist"]) +\
                  "\t" + str(state["tree"]["dist"]/speed)
            '''

            '''
            # 2. Identify state, then update Q matrix and learning time
            '''
            if speed is None:
                speed = 25
            idx_x_old, idx_y_old, idx_p_old, idx_v_old = \
                self.state_index(self.last_state, speed)
            idx_x_new, idx_y_new, idx_p_new, idx_v_new = \
                self.state_index(state, speed)

            '''
            print str(idx_x) + ":" + str(state_x) + "\t" + \
                  str(idx_p) + ":" + str(state_p)
            '''
            # update state and learn time
            a_old = int(self.last_action)
            Q_old = self.Q[idx_x_old, idx_y_old, idx_p_old, idx_v_old, a_old]
            R_new = self.last_reward
            Q_max = np.max(self.Q[idx_x_new, idx_y_new, idx_p_new, idx_v_new])
            Q_new = Q_old + self.learn * (R_new + self.disct * Q_max - Q_old)

            self.Q[idx_x_old, idx_y_old, idx_p_old, idx_v_old, a_old] = Q_new

            self.learnTime[idx_x_old, idx_y_old, idx_p_old, idx_v_old] += 1

            '''
            # 3. select optimal policy
            '''
            # epsilon greedy
            k = float(self.learnTime[idx_x_new, idx_y_new, idx_p_new, idx_v_new])
            if k == 0:
                #random action if haven't learned this state before
                new_action = (npr.rand() < 0.1)
            else:
                decision_epsilon = \
                    np.argmax(npr.multinomial(1, [1 - 1.0 / (2 * k), 1.0 / (2 * k)]))
                decision_optimal = \
                    np.argmax(self.Q[idx_x_new, idx_y_new, idx_p_new, idx_v_new])
                new_action_num = np.abs(decision_epsilon - decision_optimal)
                new_action = bool(new_action_num)

            print "(" + str(int(idx_x_new)) + "\t" + str(int(idx_y_new)) + "\t" + \
                  str(int(idx_p_new)) + "\t" + str(int(idx_v_new)) + ")" + "\t" + \
                    str(k) +\
                    "\t" + "Action:" + str(new_action) + " \t" +\
                    str(round(self.Q[idx_x_new, idx_y_new, idx_p_new,
                                     idx_v_new, new_action],   3)) +\
                    " vs. " +\
                    str(round(self.Q[idx_x_new, idx_y_new, idx_p_new,
                                     idx_v_new, 1-new_action], 3))

        self.last_action = new_action
        self.last_state  = state

        """
        '''
        4. Random action Backup
        '''
        new_action = npr.rand() < 0.1
        new_state  = state

        self.last_action = new_action
        self.last_state  = new_state

        """

        return self.last_action
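The multinomial trick above flips the greedy binary action with probability 1/(2k); a hedged standalone equivalent (names are placeholders):

import numpy.random as npr

def epsilon_flip(greedy_action, k):
    # With probability 1/(2k), take the opposite of the greedy binary action;
    # exploration decays as the state is visited more often.
    flip = npr.rand() < 1.0 / (2.0 * k)
    return bool(greedy_action) != flip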
Example #48
0
# Add variables
od_in = copy.copy(oceandatasets['MITgcm_rect_nc'])
od_in = od_in.subsample.cutout(timeRange=od_in.dataset['time'].isel(time=0))

# Add random values
varNeeded = [
    'Temp', 'S', 'HFacC', 'HFacW', 'HFacS', 'rAz', 'rA', 'rAw', 'rAs', 'dyC',
    'dxC', 'dxF', 'dyF', 'dxG', 'dyG', 'dxV', 'dyU', 'drF', 'drC', 'U', 'V',
    'W', 'Depth'
]
ds_dict = {}
for name, dimList in MITgcmVarDims.items():
    if name not in varNeeded: continue
    dimSize = [len(od_in.dataset[dim]) for dim in dimList]
    ds_dict[name] = xr.DataArray(rand(*dimSize), dims=dimList)
ds_in = xr.Dataset(ds_dict)
od_in = od_in.merge_into_oceandataset(ds_in)


@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize("subs", [None, 'survey', 'mooring'])
@pytest.mark.parametrize("colorName", [None, 'Temp', 'U'])
@pytest.mark.parametrize("meanAxes", [None, 'Z'])
def test_TS_diagram(subs, meanAxes, colorName):

    if subs is None:
        od2plot = od_in

    elif subs in ['survey', 'mooring']:
        # Get random coords
Example #49
0
import os
import sys
import numpy as np
from numpy import random as rand

# Uniform distribution over [0, 1)
a = rand.rand(3)  # array([ 0.3482032 ,  0.8529326 ,  0.29806286])
"""array([[ 0.16264963,  0.47026467,  0.63152363],
        [ 0.16287412,  0.24696563,  0.71448236]]) """
a = rand.rand(2, 3)

a = rand.randn(2, 3)  # 2x3 array of standard normal samples

# randint(low[, high, size])
a = rand.randint(3, 6)  # a single integer from 3 to 5; 6 is excluded
"""
array([[6, 6],
       [3, 2],
       [5, 3]])
"""
a = rand.randint(1, 10, size=[3, 2])
"""
Each entry is in [0, 1), a continuous uniform distribution. The practical
difference from rand() is only the signature: random_sample takes a single
size tuple, while rand takes separate dimension arguments.
array([[ 0.66246962,  0.03469166,  0.9865776 ],
       [ 0.47714682,  0.87002386,  0.98806874]])
"""
a = rand.random_sample(size=(2, 3))

# sklearn test data
import sklearn as sklearn
import matplotlib.pyplot as plt
Example #50
0
def main():
    """Generate fixture data."""
    n = np.round(rand(1000) * 100.0)
    p = rand(1000)
    gen(n, p, "data.json")
Example #51
0
def _jointly_reduce_data(data1, data2, chunksize):
    # Select the same random subset of at most `chunksize` rows from both arrays
    lnvox = data1.shape[0]
    aux = np.argsort(rand(lnvox))[:int(np.minimum(chunksize, lnvox))]
    rdata1 = data1[aux]
    rdata2 = data2[aux]
    return rdata1, rdata2
Example #52
0
def force_training(target, rlms_start, rlms_stop):
    """NETWORK PARAMETERS"""
    dt = 0.00005
    N = 2000  # Number of neurons
    tref = 0.002  # Refractory time constant in seconds
    tm = 0.01  # Membrane time constant
    vreset = -65  # Voltage reset
    vpeak = -40  # Voltage peak
    td = 0.02  # Synaptic decay time constant
    tr = 0.002  # Synaptic rise time constant
    p = 0.1  # Set the network sparsity
    BIAS = vpeak  # Set the BIAS current, can help decrease/increase firing rates.
    g = 0.04  # Factor of fixed weight matrix
    nt = target.shape[0]
    """RLMS PARAMETERS"""
    m = 2
    alpha = dt * 0.1  # Sets the rate of weight change
    Pinv = np.eye(
        N) * alpha  # Initialize the correlation weight matrix for RLMS
    # RECB = np.zeros((nt, 5))  # Storage matrix for some synaptic weights
    # The initial weight matrix with fixed random weights
    BPhi = np.zeros(
        (N, m))  # The initial matrix that will be learned by FORCE method
    w = normalize_weights(N, p, g)

    step = 30  # Interval of RLMS in indices
    Q = 10
    E = (2 * rand(N, m) - 1) * Q
    """PREINITIALIZE STORAGE"""
    IPSC = np.zeros(N)  # Post synaptic current storage variable
    hm = np.zeros(N)  # Storage variable for filtered firing rates
    r = np.zeros(N)  # Second storage variable for filtered rates
    hr = np.zeros(N)  # Third variable for filtered rates
    JD = 0 * IPSC  # Storage variable required for each spike time
    z = np.zeros(m)  # Initialize the approximant
    err = np.zeros(m)
    tlast = np.zeros((N))  # This vector is used to set the refractory times
    v = vreset + rand(N) * (30 - vreset)  # Initialize neuron voltage
    #REC2 = np.zeros((nt, 5))
    REC = np.zeros((nt, 5))
    current = np.zeros((nt, m))
    """START INTEGRATION LOOP"""
    for i in range(0, nt, 1):
        # print(i)
        inp = IPSC + E @ z + BIAS  # Total input current

        # Voltage equation with refractory period
        dv = (dt * i > tlast + tref) * (-v + inp) / tm
        v = v + dt * dv

        index = np.argwhere(find_spikes(
            v, vpeak))[:, 0]  # Find the neurons that have spiked

        # Store spike times, and get the weight matrix column sum of spikers
        if len(index) > 0:
            # Compute the increase in current due to spiking
            JD = w[:, index].sum(axis=1)
        else:
            JD = 0 * IPSC

        # Used to set the refractory period of LIF neurons
        tlast = tlast + (dt * i - tlast) * find_spikes(v, vpeak)

        IPSC = IPSC * np.exp(-dt / tr) + hm * dt

        # Integrate the current
        hm = hm * np.exp(-dt / td) + JD * (len(index) > 0) / (tr * td)

        r = r * np.exp(-dt / tr) + hr * dt
        hr = hr * np.exp(-dt / td) + find_spikes(v, vpeak) / (tr * td)

        # Implement RLMS with the FORCE method
        z = BPhi.T @ r  # Approximant
        err = z - target[i]  # Error
        # Only adjust at an interval given by step
        if np.mod(i, step) == 1:
            # Only adjust between rlmst_start and rlms_stop
            if (i > rlms_start) and (i < rlms_stop):
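                # Standard recursive least squares (RLS) update:
                #   BPhi <- BPhi - (Pinv r) err^T
                #   Pinv <- Pinv - (Pinv r)(Pinv r)^T / (1 + r^T Pinv r)
                # so Pinv tracks the inverse correlation matrix of the rates r.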
                cd = Pinv @ r
                BPhi = BPhi - (
                    cd.reshape(cd.shape[0], 1) @ err.reshape(1, err.shape[0]))
                Pinv = Pinv - (cd.reshape(cd.shape[0], 1) @ cd.reshape(
                    1, cd.shape[0])) / (1 + (r @ cd))

        v = v + (30 - v) * find_spikes(v, vpeak)

        REC[i, :] = v[0:5]  # Record the first five neuron voltages

        # Reset with spike time interpolant implemented
        v = v + (vreset - v) * find_spikes(v, vpeak)
        current[i] = z
        #RECB[i, :] = BPhi[0:5]
        #REC2[i, :] = r[0:5]

    return current, REC
Example #53
0
from numpy import arange
from numpy.random import rand
from matplotlib import cm
from matplotlib.pyplot import figure, show


def gbar(ax, x, y, width=0.5, bottom=0):
    X = [[.6, .6], [.7, .7]]
    for left, top in zip(x, y):
        right = left + width
        ax.imshow(X,
                  interpolation='bicubic',
                  cmap=cm.Blues,
                  extent=(left, right, bottom, top),
                  alpha=1)


fig = figure()

xmin, xmax = xlim = 0, 10
ymin, ymax = ylim = 0, 1
ax = fig.add_subplot(111, xlim=xlim, ylim=ylim, autoscale_on=False)
X = [[.6, .6], [.7, .7]]

ax.imshow(X,
          interpolation='bicubic',
          cmap=cm.copper,
          extent=(xmin, xmax, ymin, ymax),
          alpha=1)

N = 10
x = arange(N) + 0.25
y = rand(N)
gbar(ax, x, y, width=0.7)
ax.set_aspect('auto')  # 'normal' is not a valid aspect in current matplotlib
show()
Example #54
0
def beacon_decision_default(selected_minters, percentage):
    # Default function: simply commit to a checkpoint once a majority agrees

    agreement_reached = False
    proposed_checkpoint = None
    num_selected = len(selected_minters)
    #the number of nodes that need to agree. Make sure it is an odd number.
    agreed_required = percentage
    if agreed_required % 2 == 0:
        agreed_required += 1

    #this matrix holds information on which minters agree
    match_matrix = np.zeros([num_selected, num_selected])

    #check all the checkpoints between the minters
    for counter1, s1 in enumerate(selected_minters):
        for counter2, s2 in enumerate(selected_minters):
            if s1 != s2:
                if s1.proposed_checkpoint == s2.proposed_checkpoint:
                    match_matrix[counter1, counter2] = 1
                else:
                    #set1=set(s1.proposed_checkpoint.transactions)
                    #set2=set(s2.proposed_checkpoint.transactions)

                    #if set1.issubset(set2) or set2.issubset(set1):
                    #   match_matrix[counter1,counter2]=1
                    pass

    num_matches = match_matrix.sum(axis=1) / len(match_matrix) * 1.0
    for i in range(num_selected):
        if num_matches[i] >= agreed_required:
            proposed_checkpoint = selected_minters[i].proposed_checkpoint
            agreement_reached = True
            break

    speeds = []
    ids = []
    for s in selected_minters:
        speeds.append(s.speed)
        ids.append(s.unique_id)

    success_df = pd.DataFrame({
        'id': ids,
        'num_matches': num_matches,
        'speed': speeds
    })

    #keep only potential winners
    winners = success_df['num_matches'] >= percentage
    success_df['could_be_winners'] = winners
    success_df = success_df[success_df['could_be_winners'] == True]

    required_num = int(np.ceil(len(selected_minters) * percentage))
    if len(success_df) >= required_num:
        success_df.sort_values('speed', ascending=False, inplace=True)
        #choose the minters that sent the message fast based on speed
        success_df.speed = success_df.speed / (success_df.speed.max() +
                                               2 * np.abs(np.random.randn()))
        success_df['winner'] = False

        #write a while loop to find the winners!
        index = 0
        patience = 0
        while success_df.winner.sum() < required_num:
            print('trying to find winners iteration number ' + str(index))
            if rand() < success_df.speed.values[index]:
                # .ix was removed from pandas; use positional indexing instead
                success_df.iloc[index, success_df.columns.get_loc('winner')] = True
            index += 1
            if index >= len(success_df) - 1:
                index = 0

            patience += 1
            #if this process cannot generate winners randomly, then all of the minters are the winners
            #this is an arbitrary rule so that the simulation does not get stuck
            if patience > len(selected_minters) * 10:
                success_df['winner'] = True

    return agreement_reached, proposed_checkpoint
Example #55
0
    from numpy.random import rand

    ### for plotting
    if make_plots:
        from matplotlib.pyplot import figure, plot, minorticks_on, show, xlabel, ylabel, close
        from matplotlib.pyplot import gca, subplots_adjust, legend, savefig, title, tight_layout

    ### for saving data
    try_mkdir(tmm_spectra_dir)

    for angle_index in range(len(angle_list)):

        angle = angle_list[angle_index]

        T_list = spectra[:, angle_index *
                         2] + noise * (rand(len(lamda_list)) - 0.5)
        R_list = spectra[:, angle_index * 2 +
                         1] + noise * (rand(len(lamda_list)) - 0.5)
        A_list = 1.0 - T_list - R_list

        ##### save the spectra to file, then plot
        file_name = tmm_spectra_dir + '%i_deg_spectra.txt' % angle
        savetxt(file_name,
                array([lamda_list, T_list, R_list, A_list]).T,
                header='lamda T R A')  # savetxt prepends '# ' to the header itself

        if make_plots:
            figure(figsize=(3.2, 2.5), dpi=220.0 * 2 /
                   3)  # put your screen dpi in here to get in-print size

            plot(lamda_list,
Example #56
0
def _optim_hparcel(feature,
                   domain,
                   graphs,
                   nb_parcel,
                   lamb=1.,
                   dmax=10.,
                   niter=5,
                   initial_mask=None,
                   chunksize=1.e5,
                   verbose=0):
    """ Core function of the heirrachical parcellation procedure.

    Parameters
    ----------
    feature: list of subject-related feature arrays
    Pa : parcellation instance that is updated
    graphs: graph that represents the topology of the parcellation
    anat_coord: array of shape (nvox,3) space defining set of coordinates
    nb_parcel: int
               the number of desired parcels
    lamb=1.0: parameter to weight position
              and feature impact on the algorithm
    dmax = 10: locality parameter (in the space of anat_coord)
              to limit the search volume (saves CPU)
    chunksize = int, optional
    niter = 5: number of iterations in the algorithm
    verbose=0: verbosity level

    Returns
    -------
    U: list of arrays of length nsubj
       subject-dependent parcellations
    Proto_anat: array of shape (nvox) labelling of the common space
                (template parcellation)
    """
    nb_subj = len(feature)

    # a1. perform a rough clustering of the data to make prototype
    indiv_coord = np.array(
        [domain.coord[initial_mask[:, s] > -1] for s in range(nb_subj)])
    reduced_anat, reduced_feature = _reduce_and_concatenate(
        indiv_coord, feature, chunksize)

    _, labs, _ = kmeans(reduced_feature, nb_parcel, Labels=None, maxiter=10)
    proto_anat = [
        np.mean(reduced_anat[labs == k], 0) for k in range(nb_parcel)
    ]
    proto_anat = np.array(proto_anat)
    proto = [np.mean(reduced_feature[labs == k], 0) for k in range(nb_parcel)]
    proto = np.array(proto)

    # a2. topological model of the parcellation
    # group-level part
    spatial_proto = Field(nb_parcel)
    spatial_proto.set_field(proto_anat)
    spatial_proto.voronoi_diagram(proto_anat, domain.coord)
    spatial_proto.set_gaussian(proto_anat)
    spatial_proto.normalize()

    for git in range(niter):
        LP = []
        LPA = []
        U = []
        Energy = 0
        for s in range(nb_subj):
            # b.subject-specific instances of the model
            # b.0 subject-specific information
            Fs = feature[s]
            lac = indiv_coord[s]
            target = proto_anat.copy()
            lseeds = np.zeros(nb_parcel, int)  # np.int was removed from numpy
            aux = np.argsort(rand(nb_parcel))  # visit parcels in random order
            toto = np.zeros(lac.shape[0])  # marks points chosen as parcel seeds
            for j in range(nb_parcel):
                # b.1 speed-up :only take a small ball
                i = aux[j]
                dx = lac - target[i]
                iz = np.nonzero(np.sum(dx**2, 1) < dmax**2)
                iz = np.reshape(iz, np.size(iz))
                if np.size(iz) == 0:
                    iz = np.array([np.argmin(np.sum(dx**2, 1))])

                # b.2: anatomical constraints
                lanat = np.reshape(lac[iz],
                                   (np.size(iz), domain.coord.shape[1]))
                pot = np.zeros(np.size(iz))
                JM, rmin = _exclusion_map(i, spatial_proto, target, lanat)
                pot[JM < 0] = np.inf
                pot[JM >= 0] = -JM[JM >= 0]

                # b.3: add feature discrepancy
                df = Fs[iz] - proto[i]
                df = np.reshape(df, (np.size(iz), proto.shape[1]))
                pot += lamb * np.sum(df**2, 1)

                # b.4: solution
                if np.sum(np.isinf(pot)) == np.size(pot):
                    pot = np.sum(dx[iz]**2, 1)

                sol = iz[np.argmin(pot)]
                target[i] = lac[sol]
                lseeds[i] = sol
                toto[sol] = 1

            if verbose > 1:
                jm = _field_gradient_jac(spatial_proto, target)
                print(jm.min(), jm.max(), np.sum(toto > 0))

            # c.subject-specific parcellation
            g = graphs[s]
            f = Field(g.V, g.edges, g.weights, Fs)
            U.append(f.constrained_voronoi(lseeds))

            Energy += np.sum((Fs - proto[U[-1]]) ** 2) / \
                np.sum(initial_mask[:, s] > - 1)
            # recompute the prototypes
            # (average in subject s)
            lproto = [np.mean(Fs[U[-1] == k], 0) for k in range(nb_parcel)]
            lproto = np.array(lproto)
            lproto_anat = np.array(
                [np.mean(lac[U[-1] == k], 0) for k in range(nb_parcel)])

            LP.append(lproto)
            LPA.append(lproto_anat)

        # recompute the prototypes across subjects
        proto_mem = proto.copy()
        proto = np.mean(np.array(LP), 0)
        proto_anat = np.mean(np.array(LPA), 0)
        displ = np.sqrt(np.sum((proto_mem - proto)**2, 1).max())
        if verbose:
            print('energy', Energy, 'displacement', displ)

        # recompute the topological model
        spatial_proto.set_field(proto_anat)
        spatial_proto.voronoi_diagram(proto_anat, domain.coord)
        spatial_proto.set_gaussian(proto_anat)
        spatial_proto.normalize()

        if displ < 1.e-4 * dmax:
            break
    return U, proto_anat
Example #57
0
import numpy as np
from numpy.random import randn, rand
import matplotlib.pyplot as plt

plt.clf()
plt.cla()
plt.close()

mu = 0.0001
n = 3
w = rand(n)
x = np.zeros(n)
epochs = 300

x1 = np.array([7, 9, 11.5, 14, 18, 25, 35.5], dtype=float).T  # weight
x2 = np.array([0.4, 0.75, 1.5, 2.5, 4.5, 7.5, 10.5], dtype=float).T  # average age
yr = np.array([4.0, 5.0, 7.0, 9.0, 10.0, 14.0, 20.0], dtype=float).T  # dose
yn = np.zeros(np.size(yr)).T
e = np.zeros(np.size(yr)).T

# sum of squared errors (SSE)
SSE = np.zeros(epochs)

for epoch in range(0, epochs):
    for k in range(0, np.size(yr)):  # loop over every training sample k
        x = np.array([1, x1[k], x2[k]]).T  # build the input vector for sample k
        yn[k] = 0
        for i in range(len(w)):  # note: the source had len(w) - 1, which skipped the last weight
            yn[k] = yn[k] + w[i] * x[i]
        e[k] = yr[k] - yn[k]
        # w = mu * e[k] * x + w  # sample-by-sample weight update (online learning)
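# The commented-out line above is the standard LMS update, w <- w + mu*e[k]*x,
# applied sample by sample (online learning); SSE[epoch] would accumulate e[k]**2.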
Example #58
0
from numpy import argsort, floor, int64, vstack
from numpy.random import rand


def subsample(dataset, ratio):
    # Keep the first n_sample rows of a random permutation of the dataset
    n_sample = int64(floor(len(dataset) * ratio))
    idx_w = rand(dataset.shape[0])  # one random sort key per row
    idx_s = argsort(idx_w)          # random permutation of row indices
    sample = vstack([dataset[idx_s[x], :] for x in range(n_sample)])
    return sample
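# e.g. subsample(rand(100, 3), 0.25) returns 25 randomly chosen rows.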
Example #59
0
import numpy as np

arr[::2,::2]  # assumes arr was defined earlier
arr1=np.arange(0,80,10)
l=[1,2,3,5,6,7] 
a=np.array(l)  
l1=[[1,2,3],[11,12,13],[21,22,23]] 
a1=np.array(l1)  
a2=np.arange(0,11,2) 
a3=np.zeros(3) 
a4=np.ones((3,2))  
a5=np.linspace(0,20,50)
a6=a5.reshape(10,5)
a6.shape
aa=np.full((3,4),12) 
from numpy import random
random.randint(4,56)
arand=random.rand(3,5)
arn=random.randn(4,4)
a6=np.eye(4) 
a7=np.random.rand(5) #uniform dist
a8=np.random.randn(2)
a_7=np.random.rand(5,4)
arr1=np.random.randint(5,100,4)
arr1.max() 
arr1.min() 
a1.max()
arr1.sum()
a1.sum()
a1.sum(axis=0)
"""
array([33, 36, 39])
"""
#index locations of max and min
Example #60
0
File: SwingyMonkey.py  Project: awasay/p4
    def game_loop(self):
        '''This is called every game tick.  You call this in a loop
        until it returns False, which means you hit a tree trunk, fell
        off the bottom of the screen, or jumped off the top of the
        screen.  It calls the action and reward callbacks.'''

        # Render the background.
        self.screen.blit(self.background_img, (self.iter, 0))
        if self.iter < self.background_img.get_width() - self.screen_width:
            self.screen.blit(self.background_img,
                             (self.iter + self.background_img.get_width(), 0))

        # Perhaps generate a new tree.
        if self.next_tree <= 0:
            self.next_tree = self.tree_img.get_width() * 5 + int(
                npr.geometric(1.0 / self.tree_mean))
            self.trees.append({
                'x':
                self.screen_width + 1,
                'y':
                int((0.3 + npr.rand() * 0.65) *
                    (self.screen_height - self.tree_gap)),
                's':
                False
            })
        # Process input events.
        for event in pg.event.get():
            if event.type == pg.QUIT:
                sys.exit()
            elif self.action_fn is None and event.type == pg.KEYDOWN:
                self.vel = npr.poisson(self.impulse)
                self.hook = self.screen_width

        # Perhaps take an action via the callback.
        if self.action_fn is not None and self.action_fn(self.get_state()):
            self.vel = npr.poisson(self.impulse)
            self.hook = self.screen_width

        # Eliminate trees that have moved off the screen.
        self.trees = list(
            filter(lambda x: x['x'] > -self.tree_img.get_width(),
                   self.trees))

        # Monkey dynamics
        self.monkey_loc -= self.vel
        self.vel -= self.gravity

        # Current monkey bounds.
        monkey_top = self.monkey_loc - self.monkey_img.get_height() / 2
        monkey_bot = self.monkey_loc + self.monkey_img.get_height() / 2

        # Move trees to the left, render and compute collision.
        self.next_tree -= self.horz_speed
        edge_hit = False
        tree_hit = False
        pass_tree = False
        for tree in self.trees:
            tree['x'] -= self.horz_speed

            # Render tree.
            self.screen.blit(self.tree_img, (tree['x'], self.tree_offset))

            # Render gap in tree.
            self.screen.blit(self.background_img, (tree['x'], tree['y']),
                             (tree['x'] - self.iter, tree['y'],
                              self.tree_img.get_width(), self.tree_gap))
            if self.iter < self.background_img.get_width() - self.screen_width:
                self.screen.blit(
                    self.background_img, (tree['x'], tree['y']),
                    (tree['x'] - (self.iter + self.background_img.get_width()),
                     tree['y'], self.tree_img.get_width(), self.tree_gap))

            #trunk_left  = tree['x'] + 215
            trunk_left = tree['x']
            #trunk_right = tree['x'] + 290
            trunk_right = tree['x'] + self.tree_img.get_width()
            trunk_top = tree['y']
            trunk_bot = tree['y'] + self.tree_gap

            # Compute collision.
            if (((trunk_left <
                  (self.monkey_left + 15)) and (trunk_right >
                                                (self.monkey_left + 15)))
                    or ((trunk_left < self.monkey_right) and
                        (trunk_right > self.monkey_right))):
                #pg.draw.rect(self.screen, (255,0,0), (trunk_left, trunk_top, trunk_right-trunk_left, trunk_bot-trunk_top), 1)
                #pg.draw.rect(self.screen, (255,0,0), (self.monkey_left+15, monkey_top, self.monkey_img.get_width()-15, monkey_bot-monkey_top), 1)
                if (monkey_top < trunk_top) or (monkey_bot > trunk_bot):
                    tree_hit = True

            # Keep score.
            if not tree['s'] and (self.monkey_left + 15) > trunk_right:
                tree['s'] = True
                self.score += 1
                pass_tree = True
                if self.sound:
                    self.blop_snd.play()

        # Monkey swings down on a vine.
        if self.vel < 0:
            pg.draw.line(self.screen, (92, 64, 51),
                         (self.screen_width / 2 + 20, self.monkey_loc - 25),
                         (self.hook, 0), 4)

        # Render the monkey.
        self.screen.blit(self.monkey_img, (self.monkey_left, monkey_top))

        # Fail on hitting top or bottom.
        if monkey_bot > self.screen_height or monkey_top < 0:
            edge_hit = True

        # Render the score
        score_text = self.font.render("Score: %d" % (self.score), 1,
                                      (230, 40, 40))
        self.screen.blit(score_text, score_text.get_rect())

        if self.text is not None:
            text = self.font.render(self.text, 1, (230, 40, 40))
            textpos = text.get_rect()
            self.screen.blit(
                text,
                (self.screen_width - textpos[2], 0, textpos[2], textpos[3]))

        # Render the display.
        pg.display.update()

        # If failed, play sound and exit.  Also, assign rewards.
        if edge_hit:
            if self.sound:
                ch = self.screech_snd.play()
                while ch.get_busy():
                    pg.time.delay(500)
            if self.reward_fn is not None:
                self.reward_fn(self.edge_penalty)
            if self.action_fn is not None:
                self.action_fn(self.get_state())
            return False
        if tree_hit:
            if self.sound:
                ch = self.screech_snd.play()
                while ch.get_busy():
                    pg.time.delay(500)
            if self.reward_fn is not None:
                self.reward_fn(self.tree_penalty)
            if self.action_fn is not None:
                self.action_fn(self.get_state())
            return False

        if self.reward_fn is not None:
            if pass_tree:
                self.reward_fn(self.tree_reward)
            else:
                self.reward_fn(0.0)

        # Wait just a bit.
        pg.time.delay(self.tick_length)

        # Move things.
        self.hook -= self.horz_speed
        self.iter -= self.horz_speed
        if self.iter < -self.background_img.get_width():
            self.iter += self.background_img.get_width()

        return True