def __init__(self,
                 num_inputs,
                 num_outputs,
                 activation_fn="tanh",
                 initial_wt_max=0.01,
                 weights=None,
                 bias=None):
        self.activation_fn = activation_fn
        self.num_outputs = num_outputs
        self.num_inputs = num_inputs

        if weights is None:
            weights = get_array(
                matlib.rand((num_outputs, num_inputs)) * 2 *
                initial_wt_max) - initial_wt_max

        if bias is None:
            if self.activation_fn == "relu":
                # enforce positive
                bias = np.ones((num_outputs, 1)) * initial_wt_max
            else:
                bias = get_array(
                    matlib.rand((num_outputs, 1)) * 2 *
                    initial_wt_max) - initial_wt_max

        self.initial_wt_max = initial_wt_max
        self.weights = weights
        self.bias = bias

        # Force creation of best weights/bias
        self.save_state()

        assert self.num_inputs == self.weights.shape[1]
        assert self.num_outputs == self.weights.shape[0]
        assert self.num_outputs == self.bias.shape[0]
Example #3
def test_random_matrix():
    n = 2
    k = 10
    A = -matlib.rand(n, n)
    x0 = 1e3 * matlib.rand(n).T
    a = matlib.zeros(n).T
    T = 1e-2

    for i in range(n):
        A[i, i] *= k
    print("A\n", A)

    x = compute_x_T(A, a, x0, T)
    print('x(0)=         ', x0.T)
    print('x=            ', x.T)

    # compute approximate x
    x_a = matlib.zeros(n).T
    for i in range(n):
        Ai = A[i:i + 1, i:i + 1]
        x_a[i, 0] = expm(T * Ai) * x0[i, 0]
    print('approximate x=', x_a.T)

    a = A * x0
    for i in range(n):
        Ai = A[i:i + 1, i:i + 1]
        ai = a[i, 0] - Ai * x0[i, 0]
        xi = compute_x_T(Ai, ai, x0[i, 0], T)
        x_a[i, 0] = xi[0, 0]
    print('approximate x=', x_a.T)
Example #4
def alternating_min(T=1000):
    B = np.matrix(rand(n, r))
    C = np.matrix(rand(r, n))
    eta_C = 0.0001
    eta_B = 0.0001
    err = []
    for t in range(T):
        B -= eta_B * grad_B(B, C)
        C -= eta_C * grad_C(B, C)
        err.append(loss(B, C))
    return B, C, err
Example #5
    def __init__(self, num_inputs, num_hidden, learning_rate = 0.1,
                 activation_fns = ("relu", "tanh"),
                 initial_wt_max = 0.01, weight_decay = 0.0, desired_sparsity = 0.05, sparsity_wt = 0.00,
                 w1_b1 = None, w2_b2 = None):
        '''
        num_inputs = number of inputs / outputs
        num_hidden = size of the hidden layer
        activation_fns = activation functions to use ("sigmoid" | "tanh" | "linear" | "relu")
        initial_wt_max = the initial weights will be set to random weights in the range -initial_wt_max to +initial_wt_max
        weight_decay = a regularization term to stop over-fitting. Only turn on if the network converges too fast or overfits the data

        w1_b1 is a tuple of weight matrix 1 and bias 1
        w2_b2 is a tuple of weight matrix 2 and bias 2
            This allows weight sharing between networks
        '''
        """ Properties """
        self.learning_rate = learning_rate
        self.activation_fns = activation_fns
        self.num_inputs = num_inputs
        self.num_hidden = num_hidden

        """ An auto-encoder """
        num_outputs = num_inputs
        self.num_outputs = num_outputs
        self.initial_wt_max = initial_wt_max
        self.weight_decay = weight_decay
        self.desired_sparsity = desired_sparsity
        self.sparsity_wt = sparsity_wt
        """ END Properties """
        
        if w1_b1 is None:
            self.w1 = matlib.rand((num_hidden, num_inputs)) * initial_wt_max
            self.b1 = matlib.rand((1,num_hidden)) * initial_wt_max
        else:
            self.w1 = w1_b1[0]
            self.b1 = w1_b1[1]
        
        assert self.w1.shape == (num_hidden, num_inputs)
        assert self.b1.shape == (1, num_hidden)
        
        if w2_b2 is None:
            self.w2 = matlib.rand((num_outputs, num_hidden)) * initial_wt_max
            self.b2 = matlib.rand((1, num_outputs)) * initial_wt_max
        else:
            self.w2 = w2_b2[0]
            self.b2 = w2_b2[1]
        
        assert self.w2.shape == (num_outputs, num_hidden)
        assert self.b2.shape == (1, num_outputs)

Example #6
    def __init__(self, hidden_layer_neurons, alpha, max_error, max_epoch_amount,  _lambda=1):
        self.alpha = alpha
        self._lambda = _lambda
        self.maxError = max_error
        self.maxEpochAmount = max_epoch_amount
        self.hiddenLayerNeurons = hidden_layer_neurons
        self.outputLayerNeurons = 4
        self.hiddenLayerWeights = m.rand(hidden_layer_neurons, 24)
        self.outputLayerWeights = m.rand(self.outputLayerNeurons, hidden_layer_neurons)

        self.trainingSet = trainingSet.trainingSet
        self.answerSet = trainingSet.answerSet

        self.hiddenLayerBias = m.rand(self.hiddenLayerNeurons, 1)
        self.outputLayerBias = m.rand(self.outputLayerNeurons, 1)
Example #7
    def __init__(self, inpDim, hidDim):
        self.inpDim = inpDim  #number of input neurons (and output neurons)
        self.hidDim = hidDim  #number of hidden neurons

        self.inp = zeros((self.inpDim, 1))  #vector holding current input
        self.out = zeros((self.hidDim, 1))  #output neurons
        self.g = zeros((self.hidDim, 1))  #neural activity before non-linearity
        self.h = zeros((self.hidDim, 1))  #hidden neuron activation
        self.a = ones((self.hidDim, 1))  #slopes of activation functions
        self.b = -3 * ones((self.hidDim, 1))  #biases of activation functions
        scale = 0.025

        self.W = scale * (
            2 * rand((self.inpDim, self.hidDim)) - 0.5 * ones(
                (self.inpDim, self.hidDim))
        ) + scale  #shared network weights, i.e. used to compute hidden layer activations and estimated outputs
        print(self.W.shape)

        self.lrateRO = 0.01  # learning rate for synaptic plasticity of the read-out layer (RO)
        self.regRO = 0.0002  # numerical regularization constant
        self.decayP = 0  # decay factor for positive weights [0..1]
        self.decayN = 1  # decay factor for negative weights [0..1]

        self.lrateIP = 0.001  # learning rate for intrinsic plasticity (IP)
        self.meanIP = 0.2
Example #8
def test_kfilter_nonstationary():
    T, model = setup_nonstationary()
    U = [mb.rand(2, 1) for t in range(T)]
    Xo, Y = model.simulate(U)
    X, P, K, XPred, PPred = model.kfilter(Y, U)
    for (x, xo, p) in zip(X, Xo, P):
        assert within_dist(x, p, xo)
Example #9
def evaluate_em(dimR, base, eVal, X, ax2):

    threshold = 1e-4
    N = X.shape[0]
    K = X.shape[1]
    xMean = np.mean(X, axis=1).reshape(3, 1)  # mean
    W = ml.rand(N, dimR)  # initial projection
    Z = W.T.dot(X - xMean)  # latent reconstruction
    sigma_2 = sum(sum(np.power((W.dot(Z) - X), 2)).T) / (N * dimR)  # variance
    sigma_2 = sigma_2[0, 0]
    M = W.T.dot(W) + sigma_2 * np.ones([dimR, dimR])  # note: textbook PPCA uses sigma_2 * np.eye(dimR)

    iteration = 1
    oldProbX = 1
    flag = True
    while flag:
        # E-step
        EZ = np.linalg.inv(M).dot(W.T.dot(X - xMean))  # expectation of the latent variable
        EZZ = sigma_2 * np.linalg.inv(M) + EZ.dot(EZ.T)

        # M-step
        W = N * (X - xMean).dot(EZ.T).dot(N * np.linalg.inv(EZZ))
        sigma_2 = (np.power(np.linalg.norm(X - xMean), 2) + np.trace(EZZ.T.dot(W.T.dot(W))) +
                   2 * (sum(sum(EZ.T.dot(W.T.dot(X - xMean))).T))) / (N * dimR)
        sigma_2 = sigma_2[0, 0]
        M = W.T.dot(W) + sigma_2 * np.ones([dimR, dimR])  # refresh M for the next E-step

        iteration += 1
        invM = np.linalg.inv(M)
        probX = N * K + K * (N * np.log(sigma_2) + np.trace(invM)
                             - np.log(np.linalg.det(invM))) + np.trace(EZ.T.dot(EZ))
        err = abs(1 - probX / oldProbX)
        oldProbX = probX
        if iteration > 8 and err < threshold:
            flag = False

    # transformation matrix to the principal space
    Cov = W.dot(W.T) + sigma_2 * np.ones([W.shape[0], W.shape[0]])
    [base1, eVal, order] = compute_eig(Cov, dimR)

    # [base1, eVal, order] = compute_eig(np.cov(W.T.dot(X - xMean)), dimR)
    eVec = base1[:, :dimR]
    W = base1.dot(W)
    Z = W.T.dot(X - xMean)

    # project the training data
    projX = base1[:, 0:dimR].T.dot(X)
    xMean = np.mean(X, axis=1).reshape(3, 1)
    tmp = xMean * np.ones(X.shape[1] * 3).reshape(3, X.shape[1])
    bias = base1[:, dimR:].T.dot(tmp)
    projX = base1[:, 0:dimR].dot(projX) + base1[:, dimR:].dot(bias)  # projected data

    # visualize the projected data (first half red, second half blue)
    half = Z.shape[1] // 2  # integer division for indexing under Python 3
    ax2.plot(projX[0, :half], projX[1, :half], projX[2, :half], 'ro')
    ax2.plot(projX[0, half:], projX[1, half:], projX[2, half:], 'bo')

    print("The number of EM steps iterated: " + str(iteration))
    return [base1, Z]
Example #10
 def generate_testdata(self):
     """ lets try to generate plista-like campaign data in the form of a dictionary
     input_vector =
     { 'browser' : 'firefox',
     'os' : 'linux',
     'publisher' : 'ksta' }
     output_vector = { 'campaign' : 'kia' }
     """

     input_vector = { 'browser' : 'firefox', 'os' : 'linux', 'publisher' : 'ksta' }

     possible_browsers = ['firefox', 'ie', 'opera', 'mobile']
     possible_os = ['windows', 'linux', 'iOS']
     possible_publishers = ['ksta', 'spiegel', 'golem', 'heise']

     vector_set = [possible_browsers, possible_os, possible_publishers]

     random_vector = []

     for i in range(5):
         vector = []
         for x in vector_set:
             additive = 0.0
             # float() unwraps the 1x1 matrix returned by rand(1) so round() works
             index = int(round(float(rand(1) + additive) * (len(x) - 1), 0))
             vector.append(x[index])
         random_vector.append(vector)

         print('\n')

     print(random_vector)
Example #11
File: Ann.py Project: kod3r/Ann
 def init_architecture(self, s):
     self.L = 1 + self.n_h + 1  # Total number of layers
     # len(s) == 0 is true when we did not define a hidden layer architecture explicitly
     self.s = []
     if (len(s) == 0):
         '''Calculate number of neurons in each layer (using own heuristic)'''
         self.s = [0] * self.L  # Will hold number of neurons (including hidden) in each layer
         for l in range(0, self.L):
             if (l == 0):
                 self.s[l] = int(self.n_i + 1)  # Inputs plus 1 bias
             if (l > 0 and l < self.L - 1):
                 '''Grow number of hidden neurons logarithmically after 10'''
                 if (self.n_i <= 10):
                     self.s[l] = int(self.n_i + 1) 
                 else:
                     self.s[l] = int(math.floor(10 * math.log10(self.n_i)) + 1)
             if (l == self.L - 1):
                 self.s[l] = int(self.n_o + 1)  # Adding bias to output layer (for convenience) 
     else:
         self.s.append(int(self.n_i + 1))  # Inputs plus 1 bias
         self.s.extend(s)  # Add the defined hidden layer architecture (1 neuron per layer will be treated as bias)
         self.s.append(int(self.n_o + 1))  # Adding bias to output layer (for convenience)
     
     '''Initialize all neuron weights randomly between -1 and 1'''
     self.Thetas = []  # Will hold L-1 matrices
     for l in range(0, self.L - 1):
         '''Neuron activations vector in layer l is shaped (s[l], 1)'''
         '''Number of unbiased neurons in next layer is s[l+1]-1'''
         shape = (self.s[l + 1] - 1, self.s[l])  # Note: we do not compute the activation for the bias neuron in next layer
         Theta = np.ones(shape) - 2 * mp.rand(shape)  # Matrix of 1s minus twice the matrix of random weights between 0 and 1
         self.Thetas.append(Theta)
Example #12
def test_kfilter_nonstationary():
    T, model = setup_nonstationary()
    U = [mb.rand(2, 1) for t in range(T)]
    Xo, Y = model.simulate(U)
    X, P, K, XPred, PPred = model.kfilter(Y, U)
    for (x, xo, p) in zip(X, Xo, P):
        assert within_dist(x, p, xo)
Example #13
 def __init__(self, nbrOfCars, dx_dy, intial_point, start_points,destination_dot_radius_ratio ):
     self.nbrOfCars = nbrOfCars
     self.dx_dy = dx_dy
     self.intial_point = intial_point
     self.start_points = start_points
     self.start_headings = numpy.array((180 * rand(1, self.nbrOfCars) - 90) * math.pi / 180).reshape(-1,).tolist()[0]
     self.start_steerAngles = [0]  # is it a bunch of zeros?
     self.destination_dot_radius_ratio = destination_dot_radius_ratio
Example #14
 def get_random_from_matrix() -> numpy.flatiter:
     """
     Construct matrix of 25 by 25 random numbers
     and return its diagonal as an array
     :rtype: numpy.flatiter
     """
     mat = rand((25, 25))
     return mat.diagonal().flat
Example #15
def test_kfilter():
    model = setup()
    T = 100
    U = [mb.rand(2, 1) for t in range(T)]
    Xo, Y = model.simulate(U)
    X, P, K, XPred, PPred = model.kfilter(Y, U)
    for (x, xo, p) in zip(X, Xo, P):
        assert within_dist(x, p, xo)
Example #16
def test_rtssmoother():
    model = setup()
    T = 100
    U = [mb.rand(2, 1) for t in range(T)]
    Xo, Y = model.simulate(U)
    X, P, K, M = model.rtssmooth(Y, U)
    for (x, xo, p) in zip(X, Xo, P):
        assert within_dist(x, p, xo)
Example #17
File: autoencoder.py Project: ali01/qjam
  def __init__(self, trainingSet, outputFile):
    self.trainingSet_ = trainingSet
    self.outputFile_ = outputFile

    # initialize weights
    self.weights1_ = matlib.rand(self.HIDDEN, self.FEATURES)
    self.weights1_ = self.weights1_ / matlib.sqrt(self.FEATURES)

    self.weights2_ = matlib.rand(self.FEATURES, self.HIDDEN)
    self.weights2_ = self.weights2_ / matlib.sqrt(self.HIDDEN)

    # initialize bias
    self.bias1_ = matlib.zeros((self.HIDDEN, ))
    self.bias2_ = matlib.zeros((self.FEATURES, ))

    # initialize rho estimate vector
    self.rho_est_ = matlib.zeros((self.HIDDEN, )).T
Example #18
def test_kfilter():
    model = setup()
    T = 100
    U = [mb.rand(2, 1) for t in range(T)]
    Xo, Y = model.simulate(U)
    X, P, K, XPred, PPred = model.kfilter(Y, U)
    for (x, xo, p) in zip(X, Xo, P):
        assert within_dist(x, p, xo)
Example #19
def test_rtssmoother():
    model = setup()
    T = 100
    U = [mb.rand(2, 1) for t in range(T)]
    Xo, Y = model.simulate(U)
    X, P, K, M = model.rtssmooth(Y, U)
    for (x, xo, p) in zip(X, Xo, P):
        assert within_dist(x, p, xo)
Example #20
    def init_subpopulations(self):
        # Main excitatory subpopulation
        self.e_size=int(self.params.network_group_size*.8)
        self.group_e=self.subgroup(self.e_size)
        self.group_e.C=self.pyr_params.C
        self.group_e.gL=self.pyr_params.gL
        self.group_e._refractory_time=self.pyr_params.refractory

        # Main inhibitory subpopulation
        self.i_size=int(self.params.network_group_size*.2)
        self.group_i=self.subgroup(self.i_size)
        self.group_i.C=self.inh_params.C
        self.group_i.gL=self.inh_params.gL
        self.group_i._refractory_time=self.inh_params.refractory

        # Input-specific sub-subpopulations
        self.groups_e=[]
        for i in range(self.params.num_groups):
            subgroup_e=self.group_e.subgroup(int(self.params.f*self.e_size))
            self.groups_e.append(subgroup_e)
        self.ns_e=self.group_e.subgroup(self.e_size-(self.params.num_groups*int(self.params.f*self.e_size)))

        # Initialize state variables
        self.vm = self.params.EL+randn(self.params.network_group_size)*mV
        self.group_e.g_ampa_b = rand(self.e_size)*self.pyr_params.w_ampa_ext_correct*2.0
        self.group_e.g_nmda = rand(self.e_size)*self.pyr_params.w_nmda*2.0
        self.group_e.g_gaba_a = rand(self.e_size)*self.pyr_params.w_gaba*2.0
        self.group_i.g_ampa_r = rand(self.i_size)*self.inh_params.w_ampa_rec*2.0
        self.group_i.g_ampa_b = rand(self.i_size)*self.inh_params.w_ampa_bak*2.0
        self.group_i.g_nmda = rand(self.i_size)*self.inh_params.w_nmda*2.0
        self.group_i.g_gaba_a = rand(self.i_size)*self.inh_params.w_gaba*2.0
Example #21
    def __init_learner(self, outputFile):

        self.outputFile_ = outputFile

        # initialize weights
        self.weights1_ = matlib.rand(self.HIDDEN, self.FEATURES)
        self.weights1_ = self.weights1_ / matlib.sqrt(self.FEATURES)

        self.weights2_ = matlib.rand(self.FEATURES, self.HIDDEN)
        self.weights2_ = self.weights2_ / matlib.sqrt(self.HIDDEN)

        # initialize bias
        self.bias1_ = matlib.zeros((self.HIDDEN,))
        self.bias2_ = matlib.zeros((self.FEATURES,))

        # initialize rho estimate vector
        self.rho_est_ = matlib.zeros((self.HIDDEN,)).T
        self.errors = []
Example #22
    def __init_learner(self, outputFile):

        self.outputFile_ = outputFile

        # initialize weights
        self.weights1_ = matlib.rand(self.HIDDEN, self.FEATURES)
        self.weights1_ = self.weights1_ / matlib.sqrt(self.FEATURES)

        self.weights2_ = matlib.rand(self.FEATURES, self.HIDDEN)
        self.weights2_ = self.weights2_ / matlib.sqrt(self.HIDDEN)

        # initialize bias
        self.bias1_ = matlib.zeros((self.HIDDEN, ))
        self.bias2_ = matlib.zeros((self.FEATURES, ))

        # initialize rho estimate vector
        self.rho_est_ = matlib.zeros((self.HIDDEN, )).T
        self.errors = []
Example #23
 def getMfeature(self):
     mata = npmatrix.empty((3, 4))  # uninitialized data
     mata = npmatrix.zeros((3, 4))  # zeros
     mata = npmatrix.ones((3, 4))  # ones
     mata = npmatrix.eye(3)  # ones along the diagonal
     mata = npmatrix.eye(3, 5)  # ones along the diagonal
     mata = npmatrix.identity(3)  # identity (square) matrix
     mata = npmatrix.rand(3, 7)  # random data
     mata = npmatrix.ones((3, 1))  # ones
     print(mata)
     print(mata.shape)
     print(mata.dtype)
Example #24
def mate(p, q, cross=0.6):
    # Performs genetic algorithm mating on two polynomials
    #   Favors p's chromosome if cross is large, agnostic iff cross == 0.5
    # p, q :: (len q == len p) => [Float]
    # cross :: (0 < cross < 1) => Float
    assert len(p) == len(q)
    assert (0 < cross) & (cross < 1)
    babysChromosomes = np.array(matlib.rand(len(p)) < cross)
    babysChromosomes = np.ndarray.flatten(babysChromosomes)
    assert babysChromosomes.dtype == 'bool'
    baby = np.zeros(len(p))
    baby[babysChromosomes] = p[babysChromosomes]
    baby[np.invert(babysChromosomes)] = q[np.invert(babysChromosomes)]
    return baby
Example #25
  def testNormalizedCorrelationHorzShift2D_SameArray_AllAlgorithms(self):    
    nrow = 251
    ncol = 250
    maxLag = 3
    nloop = 4
    x1 = rand(ncol, nrow)
    x2 = deepcopy(x1)

    t1a = timeit.default_timer()  # time.clock() was removed in Python 3.8
    for loop in range(0, nloop):
      arrayA = normalizedCorrelationHorzShift2D(x1, x2, maxLag, Algorithm.BRUTE_FORCE)
    t2a = timeit.default_timer()

    t1b = timeit.default_timer()
    for loop in range(0, nloop):
      arrayB = normalizedCorrelationHorzShift2D(x1, x2, maxLag, Algorithm.INNER_1D)
    t2b = timeit.default_timer()

    t1c = timeit.default_timer()
    for loop in range(0, nloop):
      arrayC = normalizedCorrelationHorzShift2D(x1, x2, maxLag, Algorithm.INNER_2D)
    t2c = timeit.default_timer()
    
    timeA = (t2a - t1a)
    timeB = (t2b - t1b)
    timeC = (t2c - t1c)
    
    print("\ntestSame_NormalizedCorrelationHorzShift2D")
    print("  Algorithm.BRUTE_FORCE -- time; %.6f sec -- ratio; %12.6f" % (timeA, (timeA / timeA)))
    print("  Algorithm.INNER_1D    -- time; %.6f sec -- ratio; %12.6f" % (timeB, (timeA / timeB)))
    print("  Algorithm.INNER_2D    -- time; %.6f sec -- ratio; %12.6f" % (timeC, (timeA / timeC)))

    print()
    for shift in range(-maxLag, maxLag+1):
        print("shift,A,B,C; %+3d %+12.6f %+12.6f %+12.6f" % (shift, arrayA[shift + maxLag], arrayB[shift + maxLag], arrayC[shift + maxLag]))
    assert arrayA[maxLag] == 1
    assert arrayB[maxLag] == 1
    assert arrayC[maxLag] == 1
    arrayA = list(arrayA)
    arrayB = list(arrayB)
    arrayC = list(arrayC)
    maxIndexA = arrayA.index(max(arrayA)) - maxLag
    maxIndexB = arrayB.index(max(arrayB)) - maxLag
    maxIndexC = arrayC.index(max(arrayC)) - maxLag

    print()
    print("Index of maximum A,B,C", maxIndexA, maxIndexB, maxIndexC)
    print("Correlation at maximum A,B,C", 
          arrayA[maxIndexA + maxLag], arrayB[maxIndexB + maxLag], arrayC[maxIndexC + maxLag])
Example #26
def trust_region_bfgs(T=1000, delta_hat=1, delta_0=0.1, eta=0.00001):
    xk = np.matrix(rand(2 * n * r, 1))
    Bk = np.matrix(np.identity(2 * n * r))
    delta_k = delta_0
    err = []
    for i in range(T):
        f_k = f(xk)
        g_k = grad(xk)

        pks = -delta_k * g_k / norm(g_k, 2)
        gBg = (g_k.T * Bk * g_k)[0, 0]
        if gBg <= 0:
            tau_k = 1
        else:
            tau_k = min(1, norm(g_k, 2)**3 / (delta_k * gBg))
        pk = tau_k * pks

        # Update radius
        rou_k = -(f_k - f(xk + pk)) / (g_k.T * pk + 0.5 * pk.T * Bk * pk)
        rou_k = rou_k[0, 0]
        if rou_k < 0.0001:
            delta_k *= 0.25
        else:
            if rou_k > 0.75 and norm(pk, 2) == delta_k:
                delta_k = min(2 * delta_k, delta_hat)
            else:
                delta_k = delta_k

        if rou_k > eta:
            xkp1 = xk + pk
            # Update Bk
            sk = xkp1 - xk
            yk = grad(xkp1) - g_k
            Bk += (yk * yk.T) / (yk.T * sk) - Bk * sk * sk.T * Bk / (sk.T *
                                                                     Bk * sk)
            xk = xkp1
        else:
            xk = xk

        err.append(f(xk))

    return xk, err
Example #27
    def init_subpopulations(self):
        # Main excitatory subpopulation
        self.e_size = int(self.params.network_group_size * .8)
        self.group_e = self.subgroup(self.e_size)
        self.group_e.C = self.pyr_params.C
        self.group_e.gL = self.pyr_params.gL
        self.group_e._refractory_time = self.pyr_params.refractory

        # Main inhibitory subpopulation
        self.i_size = int(self.params.network_group_size * .2)
        self.group_i = self.subgroup(self.i_size)
        self.group_i.C = self.inh_params.C
        self.group_i.gL = self.inh_params.gL
        self.group_i._refractory_time = self.inh_params.refractory

        # Input-specific sub-subpopulations
        self.groups_e = []
        for i in range(self.params.num_groups):
            subgroup_e = self.group_e.subgroup(int(self.params.f *
                                                   self.e_size))
            self.groups_e.append(subgroup_e)

        # Initialize state variables
        self.vm = self.params.EL + randn(self.params.network_group_size) * mV
        self.group_e.g_ampa_b = rand(
            self.e_size) * self.pyr_params.w_ampa_ext_correct * 2.0
        self.group_e.g_nmda = rand(self.e_size) * self.pyr_params.w_nmda * 2.0
        self.group_e.g_gaba_a = rand(
            self.e_size) * self.pyr_params.w_gaba * 2.0
        self.group_i.g_ampa_r = rand(
            self.i_size) * self.inh_params.w_ampa_rec * 2.0
        self.group_i.g_ampa_b = rand(
            self.i_size) * self.inh_params.w_ampa_ext * 2.0
        #self.group_i.g_nmda = self.inh_params.w_nmda*100.0+10.0*nS*randn(self.i_size)
        self.group_i.g_nmda = rand(self.i_size) * self.inh_params.w_nmda * 2.0
        self.group_i.g_gaba_a = rand(
            self.i_size) * self.inh_params.w_gaba * 2.0
Example #28
def array_maxtrix_exercise():

    print('===part of numpy array===\n')
    '''
    2. Create a one-dimensional array a initialized to [4, 5, 6]. (1) Print the type of a;
    (2) print the size of each of a's dimensions (shape);
    (3) print the first element of a (value 4).
    '''
    a = np.array([4, 5, 6])
    print(type(a), a.shape, a[0])

    '''
    3. Create a two-dimensional array b initialized to [[4, 5, 6], [1, 2, 3]]. (1) Print the size of each dimension (shape);
    (2) print the three elements b(0,0), b(0,1), b(1,1) (values 4, 5, 2).
    '''
    b = np.array([[4, 5, 6], [1, 2, 3]])
    print(type(b), b.shape)
    print(b[0][0], b[0][1], b[1][1])

    '''
    4. (1) Create a 3x3 all-zeros matrix a of integer type (hint: dtype=int);
    (2) create a 4x5 all-ones matrix b;
    (3) create a 4x4 identity matrix c;
    (4) create a 3x2 matrix d of random numbers.
    '''
    print('\n===part of numpy matlib===\n')
    c = mat.zeros((3, 3), dtype=int)
    print(c)

    d = mat.ones((4, 5), dtype=int)
    print(d)

    e = mat.identity(n=4, dtype=int)
    e1 = mat.eye(n=4, M=5, k=0, dtype=int)
    print(e)
    print(e1)

    f = mat.rand((3, 2))
    print(f)

    '''
    5. Create an array a (value [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]).
    (1) Print a; (2) print the elements at subscripts (2,3) and (0,0).
    '''
    print('\n===part of array and matlib mix operation===\n')
    a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
    print(a)
    print(a[2][3], a[0][0])

    '''
    6. Put rows 0-1, columns 2-3 of the array a from the previous exercise into b (reuse a directly; no need to rebuild it).
    (1) Print b; (2) print the element (0,0) of b.
    '''
    b = a[0:2, 2:4]
    print(b, b[0][0])

    '''
    7. Put all the elements of the last two rows of the array a from exercise 5 into c (hint: a[1:, :]).
    (1) Print c; (2) print the last element of the first row of c (hint: use -1 for the last element).
    '''
    c = a[1:, :]
    print(c)
    print(c[0][-1])

    '''
    8. Create an array a initialized to [[1, 2], [3, 4], [5, 6]];
    print the three elements at (0,0), (1,1), (2,0) (hint: use print(a[[0, 1, 2], [0, 1, 0]])).
    '''
    a = np.array([[1, 2], [3, 4], [5, 6]])
    print(a[[0, 1, 2], [0, 1, 0]])

    '''
    9. Create a matrix a initialized to [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]; print the elements at (0,0), (1,2), (2,0), (3,1)
    (hint: use b = np.array([0, 2, 0, 1]) and print(a[np.arange(4), b]))
    '''
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
    b = np.array([0, 2, 0, 1])
    print(a[np.arange(4), b])
    '''
    10. Add 10 to each of the four elements printed in exercise 9, then print matrix a again (hint: a[np.arange(4), b] += 10).
    '''
    a[np.arange(4), b] += 10
    print(a)

    print("======= end of exercise1 ===== \n")
Example #29
# -*- coding: utf-8 -*-
"""
    NumPy matrix library
    The NumPy package includes a matrix library, numpy.matlib. Functions in this module return matrix objects instead of ndarrays.
"""
import numpy.matlib as mt
import numpy as np

if __name__ == '__main__':
    # numpy.matlib.empty(shape, dtype, order) returns a new matrix without initializing its entries
    print(mt.empty((2, 2)))
    # numpy.matlib.zeros() returns a matrix filled with zeros.
    print(mt.zeros((3, 3)))
    # numpy.matlib.ones() returns a matrix filled with ones.
    print(mt.ones((4, 4)))
    # numpy.matlib.eye(n, M, k, dtype) returns a matrix with ones on a diagonal and zeros elsewhere.
    print(mt.eye(n=5, M=5, k=0, dtype=float))
    # numpy.matlib.identity() returns the identity matrix of the given size: a square matrix with ones on the main diagonal.
    print(mt.identity(5))
    # numpy.matlib.rand() returns a matrix of the given size filled with random values.
    print(mt.rand(3, 3))
    # A matrix is always two-dimensional, while an ndarray is an n-dimensional array. The two objects are interchangeable.
    m = np.matrix('1,2;3,4')
    print(m)
    print(type(m))
    print(np.asarray(m))
Example #30
File: np_sc_calc.py Project: y4su0/inapy
# np_sc_calc.py: NumPy and SciPy
import numpy as np  # NumPy
import numpy.matlib as npmat
import scipy.special as scspf  # the SciPy.special package

# relerr: relative error
from tktools import relerr  # tktools.py: personal helper functions

n = 10

# seed the random number generator
np.random.seed(n)

# n random numbers
a = npmat.rand(n)

print('a = ', a)

# cube root
sc_c = scspf.cbrt(a)  # SciPy.special
np_c = np.cbrt(a)  # NumPy

# max and min of the relative error
relerr_vec = relerr(sc_c, np_c)
print('max(reldiff(sc_c, np_c)) = ', np.max(relerr_vec))
print('min(reldiff(sc_c, np_c)) = ', np.min(relerr_vec))

# expm1(a) = exp(a) - 1
sc_c = scspf.expm1(a)
np_c = np.exp(a) - 1
Example #31
print(nm.empty((3, 3)))

#matlib zeros function
print(
    "\n\nprinting the matrix with initializing the values with value \"0\" using zero function:"
)
print(nm.zeros((3, 3), dtype=int))

#matlib ones function
print(
    "\n\nprinting the matrix with initializing the values with value \"1\" using ones function:"
)
print(nm.ones((3, 4), dtype=int))

#matlib eye function
print("\n\nprinting the matrix with diagonal elements values equals to 1:")
print(nm.eye(n=4, k=0, dtype=int, M=5))

#matlib identity function
print(
    "\n\nprinting the identity matrix is the one with diagonal elements initializes to 1 and all other elements to zero:"
)
print(nm.identity(
    5,
    dtype=int,
))

#matlib random function
print("\n\nprinting the matrix with random values:")
print(nm.rand((5, 5)))
Example #32
File: matrix.py Project: mjfairch/test
import numpy as np
import numpy.matlib as mat
import numpy.linalg as linalg
import time
import sys

runs = 10
n = 1000
A = mat.rand((n,n))
b = mat.rand((n,1))
total_time = 0
for i in range(0,runs):
    start = time.perf_counter()
    x = linalg.solve(A,b)
    stop = time.perf_counter()
    print('.', end="",flush=True)
    total_time += (stop-start)
print()
print(runs, 'solves of Ax=b for A of size',n,'took',total_time,'seconds',
      'for an average time of',total_time/runs,'seconds per solve.')

total_time = 0
for i in range(0,runs):
    start = time.perf_counter()
    U,s,V = linalg.svd(A)
    stop = time.perf_counter()
    print('.', end="",flush=True)
    total_time += (stop-start)
print()
print(runs, 'SVD factorizations of A of size',n,'took',total_time,'seconds',
      'for an average time of',total_time/runs,'seconds per factorization.')
Example #33
from scapy.all import *

import matplotlib.pyplot as plt
from numpy.matlib import rand

# matplotlib.get_backend()
# plt.ion()
a = rand(100)
b = rand(100)

ans, unans = sr(IP(dst="www.bbc.co.uk") / TCP(sport=[RandShort()] * 1000), timeout=1)

# plt.show(ans.plot(lambda x: x[1].id))

# plt.show(ans.scatter(lambda x: x[1].id))

plt.scatter(a, b)
plt.show()
Example #34
        plt.annotate(str(ch), xy=(ch + 1, p + .5), va='center')

    # customize ticks
    ticks = plt.yticks(pos + .5, city)
    xt = plt.xticks()[0]
    plt.xticks(xt, [' '] * len(xt))

    # minimize chartjunk
    remove_border(left=False, bottom=False)
    plt.grid(axis='x', color='white', linestyle='-')

    # set plot limits
    plt.ylim(pos.max(), pos.min() - 1)
    plt.xlim(0, 30)

    df2 = DataFrame(rand(10, 4), columns=['a', 'b', 'c', 'd'])
    df2.plot(kind='bar')

    df2.plot(kind='bar',
             stacked=True)  # proportional distribution of the values

    df2.plot(kind='barh', stacked=True)  # display horizontally

    ##########################################################

    years = np.arange(2004, 2009)
    heights = np.random.random(years.shape) * 7000 + 3000

    box_colors = brewer2mpl.get_map('Set1', 'qualitative', 5).mpl_colors

    plt.bar(years - .4, heights, color=box_colors)
Example #35
 def assign_account_number(self):
     self.account_number = round(float(rand(1)) * 100000, 0)
     print(str('Your account number is {}').format(self.account_number))
Example #36
def test_suite_1():
    n1 = 100
    n2 = 100
    n = n1+n2
    d = 5
    eta = .1
    degree = 3
    iterations = 1
    results = mat.zeros((8,5)) 
    times = mat.zeros((1,5))
    sigma = 2
    # 1st col is non-kernelized
    # 2nd col is poly-kernel 

    for itr in range(iterations):
        X = mat.randn(n1,d)
        Phi_X = poly.phi(X, degree)

        D0 = X + mat.rand(n2,d) / 1000
        # Verify identity K(X,X) = 1
        D1 = mat.randn(n2,d) 
        # How does kernel perform iid data
        D2 = mat.rand(n2,d)
         # Uniform rather than normal distribution
        D3 = mat.randn(n2,d) * 2 + 2
        # Linear transformation
        D4 = mat.power(mat.randn(n2,d) + 1 ,3) 
        #Non-linear transformation
        D5 = mat.power(X+1,3) 
        #non-linear transformation of the D0 dataset;
        D6 = mat.rand(n2,d)/100 + mat.eye(n2,d) 
        #Totally different data - should have low similarity
        D7 = mat.rand(n2,d)/100 + mat.eye(n2,d)*5 
        # Scaled version of D7

        Data = [D0, D1, D2, D3, D4, D5, D6, D7]


        for idx in range(8):
            D = Data[idx]
            start = time.time()
            results[idx, 0] += nk_bhatta(X, D, 0)
            nk = time.time()
            emp = time.time()
            results[idx, 1] += Bhattacharrya(X,D,gaussk(sigma),eta,5)
            e5 = time.time()
            results[idx, 2] += Bhattacharrya(X,D,gaussk(sigma),eta,15)
            e15 = time.time()
            results[idx, 3] += Bhattacharrya(X,D,gaussk(sigma),eta,25)
            e25 = time.time()
            nktime = nk-start
            emptime = emp-nk
            e5time = e5-emp
            e15time = e15-e5
            e25time = e25-e15
            print "nk: {:.1f}, emp: {:.1f}, e5: {:.1f}, e15: {:.1f}, e25: {:.1f}".format(nktime, emptime, e5time, e15time, e25time)
            times[0,0]+= nktime
            times[0,4]+= emptime
            times[0,1]+= e5time
            times[0,2]+= e15time
            times[0,3]+= e25time
Example #37
# -*- encoding: utf-8 -*-
"""
4.6.1 Creating matrices
"""

import numpy as np
import numpy.matlib as mat

print(np.mat([[1, 2, 3], [4, 5, 6]], dtype=int))  # create a matrix from a list
print(np.mat(np.arange(6).reshape((2, 3))))  # create a matrix from an array
print(np.mat('1 4 7; 2 5 8; 3 6 9'))  # create a matrix from a MATLAB-style string

print(mat.zeros((2, 3)))  # all-zeros matrix
print(mat.ones((2, 3)))  # all-ones matrix
print(mat.eye(3))  # identity matrix
print(mat.empty((2, 3)))  # uninitialized matrix
print(mat.rand((2, 3)))  # matrix of random numbers in [0, 1)
print(mat.randn((2, 3)))  # matrix of Gaussian (normal) samples, mean 0 and variance 1
Example #38
import numpy as np
from matplotlib.pyplot import plot, show
from numpy.matlib import rand

from proj_pocos.well_stimulation.fracking.productivity_index import PseudoSteadyFlow

re = 6400
rw = 1.

k = float(rand(1))
h = float(rand(1))
B = float(rand(1))
mu = float(rand(1))

j_no_skin = PseudoSteadyFlow(k, h, B, mu, re, rw, skin=0.0).productivity_index

skins = np.linspace(-5, 25, 100)
folds_of_increase = []
for s in skins:

    j = PseudoSteadyFlow(k, h, B, mu, re, rw, skin=s).productivity_index

    folds_of_increase.append(j / j_no_skin)

plot(skins, folds_of_increase)
show()
Example #39
def print_error(x_exact, x_approx):
    print("Approximation error: ",
          np.max(np.abs(x_exact - x_approx).A1 / np.abs(x_exact).A1))


if __name__ == '__main__':
    import time
    N_TESTS = 1000
    T = 0.001
    dt = 2e-3
    n = 16 * 3 * 2
    n2 = int(n / 2)
    stiffness = 1e5
    damping = 1e2
    x0 = matlib.rand((n, 1))
    a = matlib.rand((n, 1))
    U = matlib.rand((n2, n2))
    Upsilon = U * U.T
    K = matlib.eye(n2) * stiffness
    B = matlib.eye(n2) * damping
    A = matlib.block([[matlib.zeros((n2, n2)),
                       matlib.eye(n2)], [-Upsilon * K, -Upsilon * B]])
    #A  = matlib.rand((n, n))

    # print("x(0) is:", x0.T)
    # print("a is:   ", a.T)
    print("State size n:", n)
    print("Eigenvalues of A:", np.sort_complex(eigvals(A)).T)
    print("")
Example #40
    previousNbrOfNeurons = settings_obj.NetworkArch[i]

Chromosomes = [] #population size  * chromosome length
# for i in range(GA.populationSize):
#     l = []
#     for j in range(GA.chromosomeLength):
#         l.append(0)
#     Chromosomes.append(l)

# initialized to zero
Chromosomes_Fitness = [] # population size
for i in range(GA.populationSize):
    Chromosomes_Fitness.append(0)

BestFitness_perGeneration = [] #number of max generations
AvgFitness_perGeneration = [] #number of max generations
for i in range(GA.nbrOfGenerations_max):
    BestFitness_perGeneration.append(-1) # initialize with -1
    AvgFitness_perGeneration.append(-1) # initialize with -1

#Initializing chromosomes weights range with values from -1 to 1
for pop in range(GA.populationSize):
    l = numpy.array(GA.weightsRange * (2 * rand(1, GA.chromosomeLength) - 1)).reshape(-1,).tolist()# -1<value<1
    Chromosomes.append(l)

settings_obj.collison_value()

MoveCars(env, settings_obj.nbrOfTimeStepsToTimeout, GA, settings_obj.dt,sensor, car, settings_obj.num,
         settings_obj.smallXYVariance, Chromosomes_Fitness, Chromosomes, settings_obj.NetworkArch, settings_obj.unipolarBipolarSelector, settings_obj.collision_distance)

Example #41
File: 2.py Project: ehivan24/Pandas
def step():
    return sign(rand(1) - .5)
Example #42
input_conn_names = ['ee_input']
for name in input_connection_names:
    print('create connections between', name[0], 'and', name[1])
    for connType in input_conn_names:
        connName = name[0] + connType[0] + name[1] + connType[1]
        weightMatrix = get_matrix_from_file(weight_path + connName + '.npy')
        print(weightMatrix.shape)
        synapses_XeAe = Synapses(input_groups[connName[0:2]],
                                 neuron_groups[connName[2:4]],
                                 model=eqs_stdp_ee,
                                 on_pre=eqs_stdp_pre_ee,
                                 on_post=eqs_stdp_post_ee,
                                 method='linear')
        synapses[connName] = synapses_XeAe
        synapses[connName].connect()
        synapses[connName].delay = rand() * delay['ee_input']
        synapses[connName].w = weightMatrix.flatten()

#------------------------------------------------------------------------------
# run the simulation and set inputs
#------------------------------------------------------------------------------
fig_num = 1
input_weight_monitor, fig_weights = plot_2d_input_weights()
#fig_num += 1

input_groups['Xe'].rates = 0 * Hz

defaultclock.dt = 0.5 * ms

run(0 * second)
j = 0
Example #43
# @Desc    : matrices
import numpy as np
import numpy.matlib as ml

print("empty")
print(ml.empty((3, 3), dtype=int, order='F'))
print(ml.empty((3, 3), dtype=int, order='C'))

print("\nzeros")
print(ml.zeros((3, 3), dtype=int, order='C'))

print("\nones")
print(ml.ones((3, 3), dtype=int, order='C'))

print("\neye")
print(ml.eye(3, dtype=int, order='C'))

print("\nidentity")
print(ml.identity(3, dtype=int))

print ("\nrand")
print(ml.rand(2, 3))

print ("\nmatrix")
a = np.arange(12).reshape(3, 4)
mr = ml.matrix(a)
print (a)
print (mr)
print (type(a))
print (type(mr))
Example #44
        print(Sm)    # to get some insight before everything dies
        try:
            from scipy import linalg as sl
            evals = sl.eigvals(Cmat, Sm)
            evals.sort()        # in-place!
        except Exception:      # we give up, and place -1's in the return 1d-array
            evals = ones(n).A1 * (-1)
    # default axis in cumsum works here:
    return evals.cumsum() 


#############################################################
# test cases:
if __name__ == '__main__':
    from numpy.matlib import rand
    data = rand((100, 3))
    print(getDeterministics(100, 'ctl', 0.3).shape)
    print(getDeterministics(100, 'c', 5).shape)
    print(getDeterministics(100, 'ctlsi', 80).shape)
    print(getDeterministics(100, 'qmtl', 0.1).shape)
    print(autocovar(data, 10))
    print(autocovar(data, 5, True))
    print(longrunvar(data))
    # the following could raise exceptions due to non-pos-def matrices
    print(commontrendstest(data))
    print(commontrendstest(data, 2, 't', 0.3))
    print(commontrendstest(data, 4, '2a', 0.3))
    print(commontrendstest(data, 3, '1', 0.3))
    try:
        print(commontrendstest(data, 5, '2', 0.8))
    except Exception:
        print('5 lags failed')
    # check loop for sorting evals:
Example #45
# The head of this snippet was truncated; matrix_multiply_jit is presumably the same
# loop as matrix_multiply below compiled with numba, reconstructed here as an assumption:
from numba import jit
from numpy import zeros, float64
from numpy.matlib import rand

@jit
def matrix_multiply_jit(A, B):
    m, n = A.shape
    n, r = B.shape
    C = zeros((m, r), float64)
    for i in range(m):
        for j in range(r):
            acc = 0
            for k in range(n):
                acc += A[i, k] * B[k, j]
            C[i, j] = acc
    return C


def matrix_multiply(A, B):
    m, n = A.shape
    n, r = B.shape
    C = zeros((m, r), float64)
    for i in range(m):
        for j in range(r):
            acc = 0
            for k in range(n):
                acc += A[i, k] * B[k, j]
            C[i, j] = acc
    return C


n = 1000
A = rand(n, n)
B = rand(n, n)

from time import time
t1 = time()
matrix_multiply_jit(A, B)
t2 = time()
print(t2 - t1)
matrix_multiply(A, B)
t3 = time()
print(t3 - t2)
print((t3 - t2) / (t2 - t1))
Example #46
def rand_file_name(ext):
    from numpy import *
    import numpy.matlib as npmlib
    index = int(floor((1000000*npmlib.rand((1,1)))))
    return 'file'+str(index)+ext