Example #1
from numpy import sqrt, iscomplexobj
from numpy.random import randn

def awgn(input_signal, snr_dB, rate=1.0):
    """ Additive White Gaussian Noise (AWGN) Channel.

    Parameters
    ----------
    input_signal : 1D ndarray of floats
        Input signal to the channel.

    snr_dB : float 
        Output SNR required in dB. 
    
    rate : float
        Rate of the a FEC code used if any, otherwise 1.

    Returns
    -------
    output_signal : 1D ndarray of floats 
        Output signal from the channel with the specified SNR.
    """

    avg_energy = sum(input_signal * input_signal)/len(input_signal)
    snr_linear = 10**(snr_dB/10.0)
    noise_variance = avg_energy/(2*rate*snr_linear)
    
    if iscomplexobj(input_signal):
        # Complex input: independent Gaussian noise on the real and imaginary parts.
        noise = sqrt(noise_variance) * (randn(len(input_signal)) + 1j * randn(len(input_signal)))
    else:
        noise = sqrt(2*noise_variance) * randn(len(input_signal))
    
    output_signal = input_signal + noise
    
    return output_signal 
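A quick usage sketch for the channel above (my addition, not part of the original snippet): push unit-energy BPSK symbols through at 10 dB and verify the measured SNR.

import numpy as np

np.random.seed(0)
bpsk = 2.0 * np.random.randint(0, 2, 100000) - 1.0   # +/-1 symbols, unit average energy
rx = awgn(bpsk, snr_dB=10.0, rate=1.0)
noise_power = np.mean((rx - bpsk) ** 2)
print("measured SNR [dB]:", 10 * np.log10(np.mean(bpsk ** 2) / noise_power))  # close to 10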
Example #2
 def setUp(self):
     self.ntime = 2048
     self.nfreq = 10
     self.data = sp.zeros((self.ntime, 4, self.nfreq))
     self.n_bins_cal = 64
     # Set channel dependant gain.
     self.level = 0.1*(self.nfreq + sp.arange(self.nfreq))
     # Add noise.
     self.data[:,:,:] += (0.1 * self.level
                          * rand.randn(self.ntime, 4, self.nfreq))
     # Add DC level.
     self.dc = 10 * self.level
     self.data += self.dc
     # First cal transition.
     self.first_trans = rand.randint(0, self.n_bins_cal // 2)
     # The following randomly assigns self.neg to -1 or 1.
     self.neg = 0
     while not self.neg: self.neg = rand.randint(-1, 2)
     # First upward edge:
     if self.neg == 1:
         self.offset = self.first_trans
     else:
         self.offset = self.first_trans + self.n_bins_cal // 2
         self.data[:,0,:] += self.level
     for ii in range(self.ntime//self.n_bins_cal) :
         s = slice(self.first_trans + ii*self.n_bins_cal, self.first_trans +
                   (2*ii+1)*self.n_bins_cal//2)
         self.data[s, 0, :] += self.neg * self.level
     # Transition values and locations.
     self.t_slice = slice(self.first_trans, sys.maxsize, self.n_bins_cal//2)
     self.t_vals = 0.5 + 0.1 * rand.randn(2*self.ntime//self.n_bins_cal,
                                          self.nfreq)
     self.t_vals *= - self.level
Example #3
    def setUp(self):
        self.X = X = tf.Variable(tf.zeros([0]))
        self.cost = tf.exp(tf.reduce_sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(n).astype(float32) * 1e-3
        A = self.A = rnd.randn(n).astype(float32) * 1e-3

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (column vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'right multiply' H by A
        self.correct_hess = np.array(Amat.dot(H)).squeeze()

        self.backend = TensorflowBackend()
Example #4
    def noisy_gf(self, noise):
        # Init Gf using Flat Descriptor
        g = GfImFreq(indices = [0,1], beta = 1., n_points = 1000)
        H = np.array([[1.0, 0.1j],[-0.1j, 2.0]])
        g << inverse(iOmega_n - H)

        # Generate gt and add noise
        gt = make_gf_from_fourier(g)
        np.random.seed(666)
        gt.data[:] = gt.data + noise * ( randn(*np.shape(gt.data)) + 1j * randn(*np.shape(gt.data)) )

        # Resymmetrize gt
        gt.data[:,0,1] = gt.data[:,1,0].conjugate()

        # Fourier transform to Matsubara and back given the high-frequency information
        tail, err = g.fit_tail()
        gw = make_gf_from_fourier(gt, g.mesh, tail)
        gt2 = make_gf_from_fourier(gw, gt.mesh, tail)

        # Check that magnitude of error is of order noise
        err = np.max(np.abs(gt.data - gt2.data))
        # print "noise %.3e  err %.3e"%(noise, err)
        self.assertTrue(err < 10 * noise)

        # We can also go through a legendre basis to filter the noise
        gl = fit_legendre(gt)
        gw_from_leg = g.copy()
        gw_from_leg << LegendreToMatsubara(gl)
        gt3 = make_gf_from_fourier(gw_from_leg, len(gt.mesh))
        err = np.max(np.abs(gt.data - gt3.data))
        # print "noise %.3e  err %.3e"%(noise, err)
        self.assertTrue(err < 10 * noise)
Example #5
    def setUp(self):
        self.X = X = tf.Variable(tf.zeros([0]))
        self.cost = tf.exp(tf.reduce_sum(X**2))

        m = self.m = 10
        n = self.n = 15

        Y = self.Y = rnd.randn(m, n).astype(float32) * 1e-3
        A = self.A = rnd.randn(m, n).astype(float32) * 1e-3

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian tensor H (4th order)
        Y1 = Y.reshape(m, n, 1, 1)
        Y2 = Y.reshape(1, 1, m, n)

        # Create an m x n x m x n array with diag[i,j,k,l] == 1 iff
        # (i == k and j == l), this is a 'diagonal' tensor.
        diag = np.eye(m * n).reshape(m, n, m, n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, m, n)

        self.correct_hess = np.sum(H * Atensor, axis=(2, 3))
        self.backend = TensorflowBackend()
Example #6
def make_KL_slip(fault,num_modes,eigenvals,V,mean_slip,max_slip,lognormal=True,maxiter=5):
    '''
    Make slip map using num_modes
    '''
    from numpy import sqrt,exp
    from numpy.random import randn
    
    iterations=0
    success=False
    while True:
        #Generate random numbers
        if len(fault)>num_modes:
            z = randn(num_modes) 
        else: #if fewer faults than requested modes then use all modes
            z = randn(len(fault)) 
        KL_slip = mean_slip.copy()  # start with the mean slip
        # add in the terms in the K-L expansion:
        for k in range(len(z)):
            KL_slip += z[k] * sqrt(eigenvals[k]) * V[:,k]
        # exponentiate for lognormal:
        if lognormal==True:
            KL_slip = exp(KL_slip)
        #Check if max_slip condition is met, if so then you're done
        if KL_slip.max()<=max_slip:
            success=True
            break
        iterations+=1
        if iterations>maxiter:
            print('... ... ... improper eigenvalues, recalculating...')
            break
    return KL_slip,success
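A hedged driver for make_KL_slip() above (the 1-D geometry and exponential correlation are my own illustration, not from the original code): build the eigen-decomposition the function expects and draw one slip realization.

import numpy as np

nfaults = 50
dist = np.abs(np.subtract.outer(np.arange(nfaults), np.arange(nfaults)))  # toy 1-D subfault spacing
C = np.exp(-dist / 10.0)                       # assumed exponential slip correlation
eigenvals, V = np.linalg.eigh(C)
order = np.argsort(eigenvals)[::-1]            # strongest modes first
eigenvals, V = eigenvals[order], V[:, order]
mean_slip = np.full(nfaults, 1.0)              # log-mean slip since lognormal=True
slip, success = make_KL_slip(np.arange(nfaults), num_modes=10, eigenvals=eigenvals,
                             V=V, mean_slip=mean_slip, max_slip=60.0)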
Example #7
    def setUp(self):
        self.X = X = tf.Variable(tf.zeros([0]))
        self.cost = tf.exp(tf.reduce_sum(X**2))

        n1 = self.n1 = 3
        n2 = self.n2 = 4
        n3 = self.n3 = 5

        Y = self.Y = rnd.randn(n1, n2, n3).astype(float32) * 1e-3
        A = self.A = rnd.randn(n1, n2, n3).astype(float32) * 1e-3

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian tensor H (6th order)
        Y1 = Y.reshape(n1, n2, n3, 1, 1, 1)
        Y2 = Y.reshape(1, 1, 1, n1, n2, n3)

        # Create an n1 x n2 x n3 x n1 x n2 x n3 diagonal tensor
        diag = np.eye(n1 * n2 * n3).reshape(n1, n2, n3, n1, n2, n3)

        H = np.exp(np.sum(Y ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, 1, n1, n2, n3)

        self.correct_hess = np.sum(H * Atensor, axis=(3, 4, 5))

        self.backend = TensorflowBackend()
Example #8
def test_concatenate1(n=10):
    x1 = nr.randn(n, 2) 
    x2 = nr.randn(n, 2) 
    G1 = knn(x1, 5)
    G2 = knn(x2, 5) 
    G = concatenate_graphs(G1, G2)
    assert_true(G.cc().max() > 0)
Example #9
def fastica_defl(X, nIC=None, guess=None,
             nonlinfn = pow3nonlin,
             termtol = 5e-7, maxiters = 2e3):
    nPC, siglen = X.shape
    nIC = nIC or nPC-1
    guess = guess or randn(nPC,nIC)

    if _orth_loaded:
        guess = orth(guess)

    B = zeros(guess.shape, np.float64)

    errvec = []
    icc = 0
    while icc < nIC:
        w = randn(nPC,1) - 0.5
        w -= dot(dot(B, transp(B)), w)
        w /= norm(w)

        wprev = zeros(w.shape)
        for i in range(int(maxiters) + 1):
            w -= dot(dot(B, transp(B)), w)
            w /= norm(w)
            #wprev = w.copy()
            if (norm(w-wprev) < termtol) or (norm(w + wprev) < termtol):
                B[:,icc]  = transp(w)
                icc += 1
                break
            wprev = w.copy()
    return B.real, errvec
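As written, the deflation loop above orthogonalizes and renormalizes w but never applies a contrast-function update (nonlinfn and X are not used inside the loop), so it cannot actually separate components. A minimal sketch of the cube-nonlinearity fixed-point step such a loop would normally apply, assuming w is an (nPC, 1) column and X is (nPC, siglen); this is my reconstruction, not the original pow3nonlin API.

import numpy as np

def fastica_pow3_step(w, X):
    # One FastICA fixed-point update with g(u) = u**3, g'(u) = 3*u**2:
    # w <- E[x * g(w.T x)] - E[g'(w.T x)] * w, then renormalize.
    wtx = w.T.dot(X)                                   # (1, siglen) projections
    w_new = X.dot((wtx ** 3).T) / X.shape[1] - 3.0 * np.mean(wtx ** 2) * w
    return w_new / np.linalg.norm(w_new)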
Example #10
def noise():
    import matplotlib.pyplot as plt
    import numpy as np

    from numpy.random import randn

    # Make plot with vertical (default) colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)

    data = np.clip(randn(250, 250), -1, 1)

    cax = ax.imshow(data, interpolation='nearest')
    ax.set_title('Gaussian noise with vertical colorbar')

    # Add colorbar, make sure to specify tick locations to match desired ticklabels
    cbar = fig.colorbar(cax, ticks=[-1, 0, 1])
    cbar.ax.set_yticklabels(['< -1', '0', '> 1'])# vertically oriented colorbar

    # Make plot with horizontal colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)

    data = np.clip(randn(250, 250), -1, 1)

    cax = ax.imshow(data, interpolation='nearest')
    ax.set_title('Gaussian noise with horizontal colorbar')

    cbar = fig.colorbar(cax, ticks=[-1, 0, 1], orientation='horizontal')
    cbar.ax.set_xticklabels(['Low', 'Medium', 'High'])# horizontal colorbar

    return "Noise with a color bar"
Example #11
def run(oiter):
    # ----- Variable for this run -----
    log_alpha_0 = all_log_alpha_0[oiter]

    print "Running job {0} on {1}".format(oiter + 1, socket.gethostname())
    train_images, train_labels, _, _, _ = load_data()
    train_images = train_images[:N_data, :]
    train_labels = train_labels[:N_data, :]
    batch_idxs = BatchList(N_data, batch_size)
    iter_per_epoch = len(batch_idxs)
    N_weights, _, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)
    def indexed_loss_fun(w, idxs):
        return loss_fun(w, X=train_images[idxs], T=train_labels[idxs])

    V0 = npr.randn(N_weights) * velocity_scale
    losses = []
    d_losses = []
    alpha_0 = np.exp(log_alpha_0)
    for N_iters in all_N_iters:
        alphas = np.full(N_iters, alpha_0)
        betas = np.full(N_iters, beta_0)
        npr.seed(1)
        W0 = npr.randn(N_weights) * np.exp(log_param_scale)
        results = sgd(indexed_loss_fun, batch_idxs, N_iters, W0, V0, alphas, betas)
        losses.append(results['loss_final'])
        d_losses.append(d_log_loss(alpha_0, results['d_alphas']))

    return losses, d_losses
Example #12
    def test_concat_series_axis1(self):
        ts = tm.makeTimeSeries()

        pieces = [ts[:-2], ts[2:], ts[2:-2]]

        result = concat(pieces, axis=1)
        expected = DataFrame(pieces).T
        assert_frame_equal(result, expected)

        result = concat(pieces, keys=['A', 'B', 'C'], axis=1)
        expected = DataFrame(pieces, index=['A', 'B', 'C']).T
        assert_frame_equal(result, expected)

        # preserve series names, #2489
        s = Series(randn(5), name='A')
        s2 = Series(randn(5), name='B')

        result = concat([s, s2], axis=1)
        expected = DataFrame({'A': s, 'B': s2})
        assert_frame_equal(result, expected)

        s2.name = None
        result = concat([s, s2], axis=1)
        self.assertTrue(np.array_equal(
            result.columns, Index(['A', 0], dtype='object')))

        # must reindex, #2603
        s = Series(randn(3), index=['c', 'a', 'b'], name='A')
        s2 = Series(randn(4), index=['d', 'a', 'b', 'c'], name='B')
        result = concat([s, s2], axis=1)
        expected = DataFrame({'A': s, 'B': s2})
        assert_frame_equal(result, expected)
Example #13
    def test_concat_series_axis1_same_names_ignore_index(self):
        dates = date_range('01-Jan-2013', '01-Jan-2014', freq='MS')[0:-1]
        s1 = Series(randn(len(dates)), index=dates, name='value')
        s2 = Series(randn(len(dates)), index=dates, name='value')

        result = concat([s1, s2], axis=1, ignore_index=True)
        self.assertTrue(np.array_equal(result.columns, [0, 1]))
Example #14
    def setUp(self):
        self.X = X = T.vector()
        self.cost = T.exp(T.sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(n)
        A = self.A = rnd.randn(n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (column vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'right multiply' H by A
        self.correct_hess = np.array(Amat.dot(H)).squeeze()

        self.backend = TheanoBackend()
Example #15
    def setUp(self):
        self.X = X = T.tensor3()
        self.cost = T.exp(T.sum(X**2))

        n1 = self.n1 = 3
        n2 = self.n2 = 4
        n3 = self.n3 = 5

        Y = self.Y = rnd.randn(n1, n2, n3)
        A = self.A = rnd.randn(n1, n2, n3)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian tensor H (6th order)
        Y1 = Y.reshape(n1, n2, n3, 1, 1, 1)
        Y2 = Y.reshape(1, 1, 1, n1, n2, n3)

        # Create an n1 x n2 x n3 x n1 x n2 x n3 diagonal tensor
        diag = np.eye(n1 * n2 * n3).reshape(n1, n2, n3, n1, n2, n3)

        H = np.exp(np.sum(Y ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, 1, n1, n2, n3)

        self.correct_hess = np.sum(H * Atensor, axis=(3, 4, 5))

        self.backend = TheanoBackend()
Example #16
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
    c1 = 10e8
    c2 = 0.0
    offset = np.ones(2)
    X = randn(2, 2)
    Y = randn(2)
    assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
Example #17
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
    c1 = 0.0
    c2 = 10e8
    offset = np.ones(2)
    X = randn(2, 2)
    Y = randn(2)
    assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
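The two tests above follow from the closed form of the penalized least-squares problem. A hedged sketch (my own reconstruction, not lifelines' actual utils.ridge_regression): minimizing ||Xb - Y||^2 + c1*||b||^2 + c2*||b - offset||^2 gives b = (X'X + (c1 + c2)I)^-1 (X'Y + c2*offset), so an extreme c1 drives b toward the zero vector and an extreme c2 drives b toward offset.

import numpy as np
from numpy.random import randn

def ridge_closed_form(X, Y, c1, c2, offset):
    d = X.shape[1]
    A = X.T.dot(X) + (c1 + c2) * np.eye(d)         # normal equations with both penalties
    return np.linalg.solve(A, X.T.dot(Y) + c2 * offset)

X, Y, offset = randn(2, 2), randn(2), np.ones(2)
print(np.linalg.norm(ridge_closed_form(X, Y, 10e8, 0.0, offset)))            # ~0
print(np.linalg.norm(ridge_closed_form(X, Y, 0.0, 10e8, offset) - offset))   # ~0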
Example #18
 def test_contructor_W_and_biasx_dim_not_matching(self):
     d = 10  # number of visible units
     h = 3  # number of hidden units
     W = randn(h, d)
     biasx = randn(d + 1)
     biash = randn(h)
     self.assertRaises(ValueError, InfluenceCombination, W, biasx, biash)
Example #19
def test_distribution():
  # Instantiate the class:
  sm = SoftMax()

  numClasses = 2 # k
  inputSize = 3 # dimensionality
  numCases = 5 # n
  inputData = randn(inputSize,numCases)
  #inputData = ones((inputSize, numCases))
  labels = randint(numClasses,size=(numCases,1))
  #labels = vint(ones((numCases,1)))
  labels[0] = 0

  ## Randomly initialize theta
  theta = 0.005 * randn(numClasses,inputSize)

  # Test the distribution:
  # print sm.softmaxDist(theta, inputData)

  # Another way of doing this:
  y_str_lst=['0','1']
  theta = np.reshape(theta,(-1,1))
  qx = np.reshape(inputData,(-1,1))
  # print theta.T
  # print qx
  print(np.dot(theta.T, qx))
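For reference, a hedged stand-alone version of the distribution sm.softmaxDist presumably computes (my own sketch, not the SoftMax class used above): a column-wise softmax of the class scores theta.dot(inputData).

import numpy as np

def softmax_dist(theta, data):
    scores = np.dot(theta, data)                    # (numClasses, numCases) class scores
    scores -= scores.max(axis=0, keepdims=True)     # stabilise the exponentials
    expd = np.exp(scores)
    return expd / expd.sum(axis=0, keepdims=True)   # each column sums to 1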
Example #20
 def test_contructor_wrong_biasx_type_none(self):
     d = 10  # number of visible units
     h = 3  # number of hidden units
     W = randn(h, d)
     biasx = None
     biash = randn(h)
     self.assertRaises(TypeError, InfluenceCombination, W, biasx, biash)
Example #21
 def test_contructor_wrong_biasx_dim(self):
     d = 10  # number of visible units
     h = 3  # number of hidden units
     W = randn(h, d)
     biasx = randn(d, 1)
     biash = randn(h)
     self.assertRaises(ValueError, InfluenceCombination, W, biasx, biash)
Example #22
    def __init__(self, x_len, h_lens, w_scale, peek=True):
        self.x_len = x_len
        self.h_num = len(h_lens)
        self.peek = peek

        # set up perceptron layers

        # hyperparams
        self.clip_mag = 5
        self.rollback = 100
        self.freq = 100
        self.step_size = 1e-1
        self.padd = self.rollback

        # sample params
        self.sample_len = 1000
        self.text = False

        self.h = {}
        self.h[0] = Perceptron(x_len, x_len, h_lens[0], w_scale, False)
        for i in range(1, self.h_num):
            if peek:
                self.h[i] = Perceptron(x_len, h_lens[i-1], h_lens[i], w_scale, True)
            else:
                self.h[i] = Perceptron(x_len, h_lens[i-1], h_lens[i], w_scale, False)

        self.wi = w_scale * nr.randn(x_len, h_lens[-1])
        self.wb = w_scale * nr.randn(x_len, 1)
        self.y = np.zeros((x_len, 1))
        self.p = np.zeros((x_len, 1))

        self.grad_reset()
        self.mem_reset()
Example #23
def test_sgd():
    N_weights = 5
    W0 = 0.1 * npr.randn(N_weights)
    V0 = 0.1 * npr.randn(N_weights)
    N_data = 12
    batch_size = 4
    num_epochs = 3
    batch_idxs = BatchList(N_data, batch_size)
    N_iter = num_epochs * len(batch_idxs)
    alphas = 0.1 * npr.rand(len(batch_idxs) * num_epochs)
    betas = 0.5 + 0.2 * npr.rand(len(batch_idxs) * num_epochs)

    A = npr.randn(N_data, N_weights)

    def loss_fun(W, idxs):
        sub_A = A[idxs, :]
        return np.dot(np.dot(W, np.dot(sub_A.T, sub_A)), W)

    result = sgd(loss_fun, batch_idxs, N_iter, W0, V0, alphas, betas)
    d_x = result['d_x']
    d_v = result['d_v']
    d_alphas = result['d_alphas']
    d_betas = result['d_betas']

    def full_loss(W0, V0, alphas, betas):
        result = sgd(loss_fun, batch_idxs, N_iter, W0, V0, alphas, betas)
        x_final = result['x_final']
        return loss_fun(x_final, batch_idxs.all_idxs)

    d_an = (d_x, d_v, d_alphas, d_betas)
    d_num = nd(full_loss, W0, V0, alphas, betas)
    for i, (an, num) in enumerate(zip(d_an, d_num)):
        assert np.allclose(an, num, rtol=1e-3, atol=1e-4), \
            "Type {0}, diffs are: {1}".format(i, an - num)
Example #24
def test_sgd_parser():
    N_weights = 6
    W0 = 0.1 * npr.randn(N_weights)
    N_data = 12
    batch_size = 4
    num_epochs = 4
    batch_idxs = BatchList(N_data, batch_size)

    parser = VectorParser()
    parser.add_shape('first',  [2,])
    parser.add_shape('second', [1,])
    parser.add_shape('third',  [3,])
    N_weight_types = 3

    alphas = 0.1 * npr.rand(len(batch_idxs) * num_epochs, N_weight_types)
    betas = 0.5 + 0.2 * npr.rand(len(batch_idxs) * num_epochs, N_weight_types)
    meta = 0.1 * npr.randn(N_weights*2)

    A = npr.randn(N_data, N_weights)
    def loss_fun(W, meta, i=None):
        idxs = batch_idxs.all_idxs if i is None else batch_idxs[i % len(batch_idxs)]
        sub_A = A[idxs, :]
        return np.dot(np.dot(W + meta[:N_weights] + meta[N_weights:], np.dot(sub_A.T, sub_A)), W)

    def full_loss(params):
        (W0, alphas, betas, meta) = params
        result = sgd_parsed(grad(loss_fun), kylist(W0, alphas, betas, meta), parser)
        return loss_fun(result, meta)

    d_num = nd(full_loss, (W0, alphas, betas, meta))
    d_an_fun = grad(full_loss)
    d_an = d_an_fun([W0, alphas, betas, meta])
    for i, (an, num) in enumerate(zip(d_an, d_num[0])):
        assert np.allclose(an, num, rtol=1e-3, atol=1e-4), \
            "Type {0}, diffs are: {1}".format(i, an - num)
Example #25
    def setUpClass(self):
        task_map = {
            '12345p': {
                'modeltype': 'Python',
                'classname': 'CustomModel',
                'modelsource': '''
import numpy as np
class CustomModel(object):
    def fit(self, X, Y):
        return self
    def predict(self, X):
        return np.ones(len(X))
                '''
            }
        }

        self.python_vertex = { 'task_list': ['USERTASK id=12345p'], 'task_map': task_map, 'id':'a11b31','stored_files':{} }
        self.p_xdata = pandas.DataFrame({'x1':randn(350), 'x2':randn(350)})
        self.p_ydata = pandas.DataFrame({'y':randn(350)+2000})

        self.Z = Partition(350, total_size=350)
        self.Z.set(max_folds=0, max_reps=5)

        self.data = OutputData('_fit_and_act',
            {'X':self.p_xdata,'Y':self.p_ydata,'Z':self.Z, 'method':'predict'},
            {'vertex_index':1,'pid':'123','qid':'456'})

        self.input_dir = '/tmp/input'
        self.output_dir = '/tmp/output'
Example #26
def test_predict(eng):
	X = randn(10, 2)
	y = fromarray(randn(10, 4).T, engine=eng)

	truth = asarray(predict_models(LR, X, y))
	predictions = LinearRegression().fit(X, y).predict(X).toarray()
	assert allclose(truth, predictions)
Example #27
def run():
    train_images, train_labels, _, _, _ = load_data()
    train_images = train_images[:N_data, :]
    train_labels = train_labels[:N_data, :]
    batch_idxs = BatchList(N_data, batch_size)
    iter_per_epoch = len(batch_idxs)
    N_weights, _, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)
    def indexed_loss_fun(w, idxs):
        return loss_fun(w, X=train_images[idxs], T=train_labels[idxs])

    log_alphas = np.full(N_iters, log_alpha_0)
    betas      = np.full(N_iters, beta_0)
    npr.seed(1)
    V0 = npr.randn(N_weights) * velocity_scale
    W0 = npr.randn(N_weights) * np.exp(log_param_scale)
    output = []
    for i in range(N_meta_iter):
        print "Meta iteration {0}".format(i)
        results = sgd(indexed_loss_fun, batch_idxs, N_iters,
                      W0, V0, np.exp(log_alphas), betas, record_learning_curve=True)
        learning_curve = results['learning_curve']
        d_log_alphas = np.exp(log_alphas) * results['d_alphas']
        output.append((learning_curve, log_alphas, d_log_alphas))
        log_alphas = log_alphas - meta_alpha * d_log_alphas

    return output
Example #28
def run():
    train_images, train_labels, _, _, _ = load_data(normalize=True)
    train_images = train_images[:N_real_data, :]
    train_labels = train_labels[:N_real_data, :]
    batch_idxs = BatchList(N_fake_data, batch_size)
    parser, _, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg, return_parser=True)
    N_weights = parser.N

    fake_data = npr.randn(*(train_images[:N_fake_data, :].shape)) * init_fake_data_scale
    fake_labels = one_hot(np.array(range(N_fake_data)) % N_classes, N_classes)  # One of each.

    def indexed_loss_fun(x, meta_params, idxs):   # To be optimized by SGD.
        return loss_fun(x, X=meta_params[idxs], T=fake_labels[idxs])
    def meta_loss_fun(x):                         # To be optimized in the outer loop.
        return loss_fun(x, X=train_images, T=train_labels)
    log_alphas = np.full(N_iters, log_alpha_0)
    betas      = np.full(N_iters, beta_0)
    npr.seed(0)
    v0 = npr.randn(N_weights) * velocity_scale
    x0 = npr.randn(N_weights) * np.exp(log_param_scale)

    output = []
    for i in range(N_meta_iter):
        results = sgd2(indexed_loss_fun, meta_loss_fun, batch_idxs, N_iters,
                       x0, v0, np.exp(log_alphas), betas, fake_data)
        learning_curve = results['learning_curve']
        validation_loss = results['M_final']
        output.append((learning_curve, validation_loss, fake_data))
        fake_data -= results['dMd_meta'] * data_stepsize   # Update data with one gradient step.
        print "Meta iteration {0} Valiation loss {1}".format(i, validation_loss)
    return output
Example #29
def test_score(eng):
	X = randn(10, 2)
	y = fromarray(randn(10, 4).T, engine=eng)

	truth = asarray(score_models(LR, X, y))
	scores = LinearRegression().fit(X, y).score(X, y).toarray()
	assert allclose(truth, scores)
Example #30
    def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
                                 has_time_rule=True, preserve_nan=True):
        result = func(self.arr)

        assert_almost_equal(result[10],
                            static_comp(self.arr[:11]))

        if preserve_nan:
            assert(np.isnan(result[self._nan_locs]).all())

        arr = randn(50)

        if has_min_periods:
            result = func(arr, min_periods=30)
            assert(np.isnan(result[:29]).all())
            assert_almost_equal(result[-1], static_comp(arr[:50]))

            # min_periods is working correctly
            result = func(arr, min_periods=15)
            self.assertTrue(np.isnan(result[13]))
            self.assertFalse(np.isnan(result[14]))

            arr2 = randn(20)
            result = func(arr2, min_periods=5)
            self.assertTrue(isnull(result[3]))
            self.assertTrue(notnull(result[4]))

            # min_periods=0
            result0 = func(arr, min_periods=0)
            result1 = func(arr, min_periods=1)
            assert_almost_equal(result0, result1)
        else:
            result = func(arr)
            assert_almost_equal(result[-1], static_comp(arr[:50]))
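For orientation, the min_periods behaviour this helper checks can be reproduced with the modern pandas expanding API; a hedged equivalent (my own illustration, the helper itself predates that API):

import numpy as np
import pandas as pd

arr = np.random.randn(50)
result = pd.Series(arr).expanding(min_periods=30).mean()
assert np.isnan(result.iloc[:29]).all()          # fewer than 30 observations -> NaN
assert np.isclose(result.iloc[-1], arr.mean())   # last value uses the full window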
Example #31
print('Accessing and manipulating arrays with slices')
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
print(arr[1:6])  # print elements arr[1] through arr[5]
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(arr[:2])  # print rows 0 and 1
print(arr[:2, 1:])  # print columns 1 and 2 of rows 0 and 1
print(arr[:, :1])  # print every element of the first column
arr[:2, 1:] = 0  # set rows 0-1, columns 1-2 to 0
print(arr)

# boolean_indexing
import numpy.random as np_random
print('Using a boolean array as an index')
name_arr = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
rnd_arr = np_random.randn(7, 4)  # random 7x4 array
print(rnd_arr)
print(name_arr == 'Bob')  # boolean array: True where the element equals 'Bob', otherwise False
print(rnd_arr[name_arr == 'Bob'])  # use the boolean array to select rows; prints the rows where the mask is True
print(rnd_arr[name_arr == 'Bob', :2])  # additionally restrict which columns are printed
print(rnd_arr[~(name_arr == 'Bob')])  # negate the boolean mask
mask_arr = (name_arr == 'Bob') | (name_arr == 'Will')  # combine masks with logical operators
print(rnd_arr[mask_arr])
rnd_arr[name_arr != 'Joe'] = 7  # select rows with the boolean mask, then set every element of those rows to 7
print(rnd_arr)

# fancy_indexing
print('Fancy indexing: using integer arrays as indices')
arr = np.empty((8, 4))
for i in range(8):
    arr[i] = i  # every element of a row equals its row number
print(arr[[4, 3, 0, 6]])  # select rows 4, 3, 0 and 6, in that order
Example #32
# Shapiro-Wilk normality test

from numpy.random import seed
from numpy.random import randn
from scipy.stats import shapiro

seed(1)

data = 5 * randn(100) + 50

stat, p_value = shapiro(data)

print("statistic=%.3f, p_value=%.3f" % (stat, p_value))

alpha = 0.05

if p_value <= alpha:
	print('sample does not seem like Gaussian, Reject H0 (H1)')
else:
	print('sample seems like Gaussian, Fail to Reject H0 (H0)')
Example #33
def generate_latent_points(latent_dim, n_samples):
    x_input = randn(latent_dim * n_samples)
    x_input = x_input.reshape(n_samples, latent_dim)
    return x_input
Example #34
def make_simple_trace(bbfile='grism.fits',outname='grismtrace',ybox=None,xbox=None,noisemaxfact=0.05,alph=1.0,Q=1.0,rotate=False,resize=None):

    go=pyfits.open(bbfile)
    
    
    redshift=go['BROADBAND'].header['REDSHIFT']
    wfc3_pix_as=0.13
    g141_nm_per_pix=4.65
    
    min_lam=1.075
    max_lam=1.700
    
    hdu=go['CAMERA0-BROADBAND-NONSCATTER']
    cube=hdu.data #L_lambda units! 
    #cube=np.flipud(cube) ; print(cube.shape)
    
    fil=go['FILTERS']
    lamb=fil.data['lambda_eff']*1.0e6
    flux=fil.data['L_lambda_eff_nonscatter0']
    
    g141_i = (lamb >= min_lam) & (lamb <= max_lam)
    
    arcsec_per_kpc= gsu.illcos.arcsec_per_kpc_proper(redshift)
    kpc_per_arcsec=1.0/arcsec_per_kpc.value
    
    im_kpc=hdu.header['CD1_1']
    print('pix size kpc: ', im_kpc)
    
    wfc3_kpc_per_pix=wfc3_pix_as*kpc_per_arcsec
    total_width_pix=(1.0e3)*(max_lam-min_lam)/g141_nm_per_pix
    total_width_kpc=total_width_pix*wfc3_kpc_per_pix
    
    total_width_impix=int(total_width_kpc/im_kpc)
    
    delta_lam=(max_lam-min_lam)/total_width_impix  #microns/pix
    
    psf_arcsec=0.18
    psf_kpc=psf_arcsec*kpc_per_arcsec
    psf_impix=psf_kpc/im_kpc
    
    
    imw_cross=200
    imw_disp=total_width_impix+imw_cross
    Np=cube.shape[-1]
    mid = np.int64(Np/2)
    delt=np.int64(imw_cross/2)
    output_image=np.zeros_like( np.ndarray(shape=(imw_disp,imw_cross),dtype='float' ))
    #r = r[mid-delt:mid+delt,mid-delt:mid+delt]
    output_image.shape
    small_cube=cube[g141_i,mid-delt:mid+delt,mid-delt:mid+delt]
    
    for i,l in enumerate(lamb[g141_i]):
        di=int( (l-min_lam)/delta_lam )
        this_cube=small_cube[i,:,:]*l**2  #convert to Janskies-like
        if rotate is True:
            this_cube = np.rot90(this_cube)

        #if i==17:
        #    this_cube[30,30] = 1.0e3
        #print(i,l/(1.0+redshift),int(di),np.sum(this_cube),this_cube.shape,output_image.shape,output_image[di:di+imw_cross,:].shape)
        output_image[di:di+imw_cross,:]=output_image[di:di+imw_cross,:]+this_cube
        
        
    output_image=scipy.ndimage.gaussian_filter(output_image,sigma=[4,psf_impix/2.355])
    
    new_thing = np.transpose(np.flipud(output_image))
    if resize is not None:
        new_thing = congrid.congrid(new_thing, resize)
    
    nr = noisemaxfact*np.max(new_thing)*random.randn(new_thing.shape[0],new_thing.shape[1])
    
    #thing=make_color_image.make_interactive(new_thing+nr,new_thing+nr,new_thing+nr,alph=alph,Q=Q)
    #thing=1.0-np.fliplr(np.transpose(thing,axes=[1,0,2]))
    thing=np.fliplr(new_thing+nr)

    f=plt.figure(figsize=(25,6))
    f.subplots_adjust(wspace=0.0,hspace=0.0,top=0.99,right=0.99,left=0,bottom=0)
    axi=f.add_subplot(1,1,1)
    axi.imshow( (thing),aspect='auto',origin='left',interpolation='nearest',cmap='Greys_r')
    f.savefig(outname+'.png',dpi=500)
    plt.close(f)

    #[ybox[0]:ybox[1],xbox[0]:xbox[1]]
    #[50:125,120:820,:]

    new_hdu=pyfits.PrimaryHDU(thing)
    new_list=pyfits.HDUList([new_hdu])
    new_list.writeto(outname+'.fits',clobber=True)


    return thing, new_thing
Example #35
 def evolve_state(self):
     x = self.state
     dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
     self.state = x + dx
     return self.state
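evolve_state above is the update step of an Ornstein-Uhlenbeck exploration-noise process; a hedged sketch of the surrounding class it appears to assume (theta, mu, sigma and state come from the method body, the constructor defaults are mine):

import numpy as np
import numpy.random as nr

class OUNoise:
    def __init__(self, size, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(size)        # long-run mean the state relaxes toward
        self.theta = theta                  # mean-reversion rate
        self.sigma = sigma                  # noise scale
        self.state = self.mu.copy()

    def evolve_state(self):
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
        self.state = x + dx
        return self.state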
Example #36
 def _rand_ambient(self):
     return fr_ambient(
         randn(self.m, self.p),
         randn(self.n, self.p),
         randn(self.p, self.p))
Example #37
def test_representations_api():
    from ..representation import SphericalRepresentation, \
        UnitSphericalRepresentation, PhysicsSphericalRepresentation, \
        CartesianRepresentation
    from ... coordinates import Angle, Longitude, Latitude, Distance

    #<-----------------Classes for representation of coordinate data--------------->
    # These classes inherit from a common base class and internally contain Quantity
    # objects, which are arrays (although they may act as scalars, like numpy's
    # length-0  "arrays")

    # They can be initialized with a variety of ways that make intuitive sense.
    # Distance is optional.
    UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)
    UnitSphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg)
    SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc)

    # In the initial implementation, the lat/lon/distance arguments to the
    # initializer must be in order. A *possible* future change will be to allow
    # smarter guessing of the order.  E.g. `Latitude` and `Longitude` objects can be
    # given in any order.
    UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg))
    SphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc))

    # Arrays of any of the inputs are fine
    UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg)

    # Default is to copy arrays, but optionally, it can be a reference
    UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, copy=False)

    # strings are parsed by `Latitude` and `Longitude` constructors, so no need to
    # implement parsing in the Representation classes
    UnitSphericalRepresentation(lon=Angle('2h6m3.3s'), lat=Angle('0.1rad'))

    # Or, you can give `Quantity`s with keywords, and they will be internally
    # converted to Angle/Distance
    c1 = SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc)

    # Can also give another representation object with the `reprobj` keyword.
    c2 = SphericalRepresentation.from_representation(c1)

    #  distance, lat, and lon typically will just match in shape
    SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, distance=[10, 11]*u.kpc)
    # if the inputs are not the same, if possible they will be broadcast following
    # numpy's standard broadcasting rules.
    c2 = SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, distance=10*u.kpc)
    assert len(c2.distance) == 2
    #when they can't be broadcast, it is a ValueError (same as Numpy)
    with raises(ValueError):
        c2 = UnitSphericalRepresentation(lon=[8, 9, 10]*u.hourangle, lat=[5, 6]*u.deg)

    # It's also possible to pass in scalar quantity lists with mixed units. These
    # are converted to array quantities following the same rule as `Quantity`: all
    # elements are converted to match the first element's units.
    c2 = UnitSphericalRepresentation(lon=Angle([8*u.hourangle, 135*u.deg]),
                                     lat=Angle([5*u.deg, (6*np.pi/180)*u.rad]))
    assert c2.lat.unit == u.deg and c2.lon.unit == u.hourangle
    npt.assert_almost_equal(c2.lon[1].value, 9)

    # The Quantity initializer itself can also be used to force the unit even if the
    # first element doesn't have the right unit
    lon = u.Quantity([120*u.deg, 135*u.deg], u.hourangle)
    lat = u.Quantity([(5*np.pi/180)*u.rad, 0.4*u.hourangle], u.deg)
    c2 = UnitSphericalRepresentation(lon, lat)

    # regardless of how input, the `lat` and `lon` come out as angle/distance
    assert isinstance(c1.lat, Angle)
    assert isinstance(c1.lat, Latitude)  # `Latitude` is an `Angle` subclass
    assert isinstance(c1.distance, Distance)

    # but they are read-only, as representations are immutable once created
    with raises(AttributeError):
        c1.lat = Latitude(5, u.deg)
    # Note that it is still possible to modify the array in-place, but this is not
    # sanctioned by the API, as this would prevent things like caching.
    c2.lat[:] = [0] * u.deg  # possible, but NOT SUPPORTED

    # To address the fact that there are various other conventions for how spherical
    # coordinates are defined, other conventions can be included as new classes.
    # Later there may be other conventions that we implement - for now just the
    # physics convention, as it is one of the most common cases.
    c3 = PhysicsSphericalRepresentation(phi=120*u.deg, theta=85*u.deg, r=3*u.kpc)

    # first dimension must be length-3 if a lone `Quantity` is passed in.
    c1 = CartesianRepresentation(randn(3, 100) * u.kpc)
    assert c1.xyz.shape[0] == 3
    assert c1.xyz.unit == u.kpc
    assert c1.x.shape[0] == 100
    assert c1.y.shape[0] == 100
    assert c1.z.shape[0] == 100
    # can also give each as separate keywords
    CartesianRepresentation(x=randn(100)*u.kpc, y=randn(100)*u.kpc, z=randn(100)*u.kpc)
    # if the units don't match but are all distances, they will automatically be
    # converted to match `x`
    xarr, yarr, zarr = randn(3, 100)
    c1 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.kpc)
    c2 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.pc)
    assert c1.xyz.unit ==  c2.xyz.unit == u.kpc
    npt.assert_allclose((c1.z / 1000) - c2.z, 0, atol=1e-10)

    # representations convert into other representations via  `represent_as`
    srep = SphericalRepresentation(lon=90*u.deg, lat=0*u.deg, distance=1*u.pc)
    crep = srep.represent_as(CartesianRepresentation)
    npt.assert_allclose(crep.x.value, 0, atol=1e-10)
    npt.assert_allclose(crep.y.value, 1, atol=1e-10)
    npt.assert_allclose(crep.z.value, 0, atol=1e-10)
Example #38
 def test_new_empty_index(self):
     df1 = DataFrame(randn(0, 3))
     df2 = DataFrame(randn(0, 3))
     df1.index.name = 'foo'
     self.assertIsNone(df2.index.name)
Example #39
    def simulate(self, deltaT=None):
        #if deltaT is provided then in blocks of deltaT we compute the counterfactual trace... the evolution without spiking.
        v = np.zeros((self.params.n, self.T))
        if deltaT is not None:
            u = np.zeros((self.params.n, self.T))
        else:
            u = None
        h = np.zeros((self.params.n, self.T))
        if not self.keepstate:
            self.vt = np.zeros(self.params.n)
            self.ut = np.zeros(self.params.n)
        self.sh = np.zeros((self.params.n, self.T))
        vt = self.vt
        ut = self.ut
        sh = self.sh
        r = np.zeros(self.params.n)
        #Generate new noise with each sim
        if self.t_total is None:
            xi = self.params.sigma * rand.randn(
                self.params.n1 + 1, self.T) / np.sqrt(self.params.tau)
            xi[0, :] = xi[0, :] * np.sqrt(self.params.c)
            xi[1:, :] = xi[1:, :] * np.sqrt(1 - self.params.c)
            xi_l2 = self.params.sigma * rand.randn(
                self.params.n2, self.T) / np.sqrt(self.params.tau)
        else:
            #Select noise from precomputed noise
            xi = self.xi[:,
                         (self.T * (self.count)):(self.T * (self.count + 1))]
            xi_l2 = self.xi_l2[:, (self.T * (self.count)):(self.T *
                                                           (self.count + 1))]

        self.count += 1
        #Simulate t seconds
        for i in range(self.T):
            #ut is not reset by spiking. ut is set to vt at the start of each block of deltaT
            if deltaT is not None:
                if i % deltaT == 0:
                    ut = vt
            dv = -vt / self.params.tau + np.dot(self.U, sh[:, i])
            dv[0:self.params.n1] += np.multiply(
                self.W1, (self.x + xi[0, i] + xi[1:, i]))
            dv[self.params.n1:] += np.multiply(self.W2, (xi_l2[:, i]))
            vt = vt + self.params.dt * dv
            ut = ut + self.params.dt * dv
            #Find neurons that spike
            s = vt > self.params.mu
            #Update sh based on spiking.....
            for s_idx in np.nonzero(s)[0]:
                convolve_online_v2(sh, s_idx, i, self.params.kernel, 0)
            #Save the voltages and spikes
            h[:, i] = s.astype(int)
            v[:, i] = vt
            if deltaT is not None:
                u[:, i] = ut
            #Make spiking neurons refractory
            r[s] = self.Tr
            #Set the refractory neurons to v_reset
            vt[r > 0] = self.params.reset
            vt[vt < self.params.reset] = self.params.reset
            ut[ut < self.params.reset] = self.params.reset
            #Decrement the refractory counters
            r[r > 0] -= 1

        self.vt = vt
        #self.sh = sh
        return (v, h, u, sh)
Example #40
# Build 3D dataset:

# In[4]:


import numpy.random as rnd

rnd.seed(4)
m = 200
w1, w2 = 0.1, 0.3
noise = 0.1

angles = rnd.rand(m) * 3 * np.pi / 2 - 0.5
data = np.empty((m, 3))
data[:, 0] = np.cos(angles) + np.sin(angles)/2 + noise * rnd.randn(m) / 2
data[:, 1] = np.sin(angles) * 0.7 + noise * rnd.randn(m) / 2
data[:, 2] = data[:, 0] * w1 + data[:, 1] * w2 + noise * rnd.randn(m)


# Normalize the data:

# In[5]:


from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(data[:100])
X_test = scaler.transform(data[100:])

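The cells above only generate and scale the 3D toy data; a likely next step (my assumption, not an original notebook cell) is a 2-component PCA projection of the scaled splits:

from sklearn.decomposition import PCA

pca = PCA(n_components=2)
X2D_train = pca.fit_transform(X_train)
X2D_test = pca.transform(X_test)
print(pca.explained_variance_ratio_)   # variance captured by the two components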
Example #41
    def simulate_allbutonecorrelated(self, deltaT=None):

        #if deltaT is provided then in blocks of deltaT we compute the counterfactual trace... the evolution without spiking.
        v = np.zeros((self.params.n, self.T))

        if deltaT is not None:
            u = np.zeros((self.params.n, self.T))
        else:
            u = None

        h = np.zeros((self.params.n, self.T))

        if not self.keepstate:
            self.vt = np.zeros(self.params.n)
            self.ut = np.zeros(self.params.n)

        vt = self.vt
        ut = self.ut

        r = np.zeros(self.params.n)

        #Generate new noise with each sim
        if self.t_total is None:
            xi = self.params.sigma * rand.randn(
                self.params.n + 1, self.T) / np.sqrt(self.params.tau)
            xi[0, :] = xi[0, :] * np.sqrt(self.params.c)
            xi[1:, :] = xi[1:, :] * np.sqrt(1 - self.params.c)
            phi = self.params.sigma * rand.randn(self.T) / np.sqrt(
                self.params.tau)
        else:
            #Select noise from precomputed noise
            xi = self.xi[:,
                         (self.T * (self.count)):(self.T * (self.count + 1))]
            phi = self.params.sigma * rand.randn(self.T) / np.sqrt(
                self.params.tau)
        #print xi.shape
        #print self.T

        self.count += 1

        #Simulate t seconds
        for i in range(self.T):

            #ut is not reset by spiking. ut is set to vt at the start of each block of deltaT
            if deltaT is not None:
                if i % deltaT == 0:
                    ut = vt

            #print self.W.shape
            #print xi.shape

            noise = xi[0, i] + xi[1:, i]
            noise[0] = phi[i]
            inp = self.x + noise
            dv = -vt / self.params.tau + np.multiply(self.W, inp)
            #print vt.shape
            vt = vt + self.params.dt * dv
            ut = ut + self.params.dt * dv
            #Find neurons that spike
            s = vt > self.params.mu
            #print vt.shape
            #Save the voltages and spikes
            h[:, i] = s.astype(int)
            v[:, i] = vt

            if deltaT is not None:
                u[:, i] = ut

            #Make spiking neurons refractory
            r[s] = self.Tr
            #Set the refractory neurons to v_reset
            vt[r > 0] = self.params.reset
            vt[vt < self.params.reset] = self.params.reset
            ut[ut < self.params.reset] = self.params.reset
            #Decrement the refractory counters
            r[r > 0] -= 1

        #Cost function per time point
        C = (self.V[0] * h[0, :] + self.V[1] * h[1, :] - self.x**2)**2

        #True causal effect for each unit
        beta1 = self.V[0]**2 + 2 * self.V[0] * self.V[1] * np.mean(
            h[1, :]) - 2 * self.V[0] * self.x**2
        beta2 = self.V[1]**2 + 2 * self.V[0] * self.V[1] * np.mean(
            h[0, :]) - 2 * self.V[1] * self.x**2
        betas = [beta1, beta2]

        self.vt = vt

        return (v, h, C, betas, u)
Example #42
    def test_hist_df_legacy(self):
        from matplotlib.patches import Rectangle
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(self.hist_df.hist)

        # make sure layout is handled
        df = DataFrame(randn(100, 3))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.hist, grid=False)
        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
        assert not axes[1, 1].get_visible()

        df = DataFrame(randn(100, 1))
        _check_plot_works(df.hist)

        # make sure layout is handled
        df = DataFrame(randn(100, 6))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.hist, layout=(4, 2))
        self._check_axes_shape(axes, axes_num=6, layout=(4, 2))

        # make sure sharex, sharey is handled
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.hist, sharex=True, sharey=True)

        # handle figsize arg
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.hist, figsize=(8, 10))

        # check bins argument
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.hist, bins=5)

        # make sure xlabelsize and xrot are handled
        ser = df[0]
        xf, yf = 20, 18
        xrot, yrot = 30, 40
        axes = ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
        self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
                                ylabelsize=yf, yrot=yrot)

        xf, yf = 20, 18
        xrot, yrot = 30, 40
        axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
        self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
                                ylabelsize=yf, yrot=yrot)

        tm.close()
        # make sure kwargs to hist are handled
        ax = ser.hist(normed=True, cumulative=True, bins=4)
        # height of last bin (index 5) must be 1.0
        rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
        self.assertAlmostEqual(rects[-1].get_height(), 1.0)

        tm.close()
        ax = ser.hist(log=True)
        # scale of y must be 'log'
        self._check_ax_scales(ax, yaxis='log')

        tm.close()

        # propagate attr exception from matplotlib.Axes.hist
        with pytest.raises(AttributeError):
            ser.hist(foo='bar')
Example #43
def generate_latent_points(latent_dim, n_samples):
	# generate points in the latent space
	x_input = randn(latent_dim * n_samples)
	# reshape into a batch of inputs for the network
	x_input = x_input.reshape(n_samples, latent_dim)
	return x_input
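A small usage sketch for generate_latent_points (assuming numpy.random.randn is in scope as in the snippet above; the latent size and batch size are illustrative, not from the original):

latent_batch = generate_latent_points(latent_dim=100, n_samples=16)
print(latent_batch.shape)   # (16, 100): one 100-dimensional latent vector per sample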
Example #44
    def setup(self, params=None, t=None):
        #print("I am setting up LSM")
        #print(params)
        if t is not None:
            self.t = t
        if params is not None:
            self.params = params

        mu_w = 50
        sigma_w = 30

        mu_u = 5
        sigma_u = 2

        #Fix the random seed
        #rand.seed(42)

        q = self.params.q
        p = self.params.p

        #Initialize voltage and spike train variables
        self.T = np.ceil(self.params.t / self.params.dt).astype(int)
        self.Tr = np.ceil(self.params.tr / self.params.dt).astype(int)
        self.times = np.linspace(0, self.params.t, self.T)

        #Generate sparse, random connectivity matrices
        self.W = np.zeros((q, q))
        self.U = np.zeros((q, p))

        #Sparse connectivity, only 10% neurons are connected to one another
        #q = 10

        m = np.ceil(0.1 * q).astype(int)
        #80% excitatory, 20% inhibitory
        q_e = np.ceil(0.8 * q).astype(int)
        q_i = q - q_e

        #Choose excitatory neurons
        ex = rand.choice(q, q_e, replace=False)
        #W = np.zeros((q,q))

        for idx in range(q):
            conn = rand.choice(q, m, replace=False)
            weights = np.maximum(rand.randn(m) * sigma_w + mu_w, 0)
            if idx not in ex:
                weights *= -1
            self.W[conn, idx] = weights

        #Scale by spectral radius...
        #Compute the spectral radius of the weights and rescale
        radius = np.max(np.abs(np.linalg.eigvals(self.W)))
        self.W = self.W * (self.params.spectral_radius / radius)

        ex = rand.choice(q, np.ceil(0.5 * q).astype(int), replace=False)
        for idx in range(q):
            weights = np.maximum(rand.randn(p) * sigma_u + mu_u, 0)
            if idx not in ex:
                weights *= -1
            self.U[idx, :] = weights

        self.I0 = np.zeros((q, 1))
        self.I0[:, 0] = np.maximum(rand.randn(q) * sigma_u + mu_u, 0)
        self.st = np.zeros((self.params.q, 1))
Example #45
def get_rise_times(M0,
                   slip,
                   fault_array,
                   rise_time_depths,
                   stoc_rake,
                   rise_time_std=0.1):
    '''
    Calculate individual subfault rise times
    '''

    from numpy import diff, ones, where, exp
    from numpy.random import randn
    from numpy import arctan2, rad2deg, zeros

    #Moment to dyne-cm (Because old seismologists...)
    M0 = M0 * 1e7

    #Determine average rise time based on total moment of the event (Graves,Pitarka, 2010, eq. 8)
    #tau_average=1.6*1e-9*M0**(1./3) #This is what Graves and Pitarka use in GP 2010
    tau_average = 1.45 * 1e-9 * M0**(1. / 3)  #This is GP2015
    #tau_average=2.0*1e-9*M0**(1./3)  #This is the original from Sommerville 1999 SRL, page 74

    #Determine slope and intercept of k-scaling line
    slope = 1. / (rise_time_depths[0] - rise_time_depths[1])
    intercept = 1 - slope * rise_time_depths[1]

    #Get rakes
    rake = stoc_rake.copy()
    i = where(rake < 0)[0]
    rake[i] = 360 + rake[i]
    rake = rake.mean()

    #Apply GP 2015 FR value
    if (rake > 0 and rake < 180):
        FR = 1 - (rake - 90) / 90.
    else:
        FR = 0

    #GP 2015 FD value
    dip = fault_array[:, 5]
    dip = dip.mean()
    if dip > 45:
        FD = 1 - (dip - 45) / 45.
    else:
        FD = 1

    #GP 2015 alpha_t
    alpha = 1. / (1 + FD * FR * 0.1)

    #rescale average rise time
    tau_average = tau_average * alpha

    #For each depth determine the value of depth scaling (this is for GP2010 eq.7)
    depth_scale = ones(len(fault_array))
    ishallow = where(fault_array[:, 3] <= rise_time_depths[0])[0]
    depth_scale[ishallow] = 2
    itransition = where((fault_array[:, 3] > rise_time_depths[0])
                        & (fault_array[:, 3] < rise_time_depths[1]))[0]
    depth_scale[itransition] = slope * fault_array[itransition, 3] + intercept

    #Now determine the scaling constant k
    k = (len(slip) * tau_average) / (sum(depth_scale * slip**0.5))

    #Stochastic perturbations
    rand_num = randn(len(slip))
    perturbations = exp(rise_time_std * rand_num)

    #And on to the actual subfault rise times
    rise_times = perturbations * depth_scale * k * (slip**0.5)
    #rise_times=depth_scale*k*(slip**0.5)

    return rise_times
Example #46
    def simulate_perturbed(self, sigma_perturb):

        v = np.zeros((self.params.n, self.T))
        h = np.zeros((self.params.n, self.T))

        if not self.keepstate:
            self.vt = np.zeros(self.params.n)
            self.ut = np.zeros(self.params.n)

        vt = self.vt

        r = np.zeros(self.params.n)

        if self.T_total is not None:
            #print self.T_total
            #print self.count
            if self.count == self.T_total:
                print('HI')
            self.count = self.count % int(self.T_total / self.T)

        #Generate new noise with each sim
        if self.t_total is None:
            xi = self.params.sigma * rand.randn(
                self.params.n + 1, self.T) / np.sqrt(self.params.tau)
            xi[0, :] = xi[0, :] * np.sqrt(self.params.c)
            xi[1:, :] = xi[1:, :] * np.sqrt(1 - self.params.c)
            xi_perturb = sigma_perturb * rand.randn(
                self.params.n, self.T) / np.sqrt(self.params.tau)
        else:
            #Select noise from precomputed noise
            #The same stim noise for each sim
            xi = self.xi[:, 0:self.T]
            #Or update the noise with each sim
            #xi = self.xi[:,(self.T*(self.count)):(self.T*(self.count+1))]
            xi_perturb = sigma_perturb * self.xi_perturb[:, (
                self.T * (self.count)):(self.T * (self.count + 1))]

        #print xi.shape
        #print self.T

        s_input = self.x + np.vstack((xi[0, :], xi[0, :])) + xi[1:, :]

        self.count = self.count + 1
        #print self.count

        #Simulate t seconds
        #print xi.shape
        #print xi_perturb.shape
        for i in range(self.T):
            dv = -vt / self.params.tau + np.multiply(
                self.W, (self.x + xi[0, i] + xi[1:, i])) + xi_perturb[:, i]
            vt = vt + self.params.dt * dv
            #Find neurons that spike
            s = vt > self.params.mu
            #Save the voltages and spikes
            h[:, i] = s.astype(int)
            v[:, i] = vt
            #Make spiking neurons refractory
            r[s] = self.Tr
            #Set the refractory neurons to v_reset
            vt[r > 0] = self.params.reset
            vt[vt < self.params.reset] = self.params.reset
            #Decrement the refractory counters
            r[r > 0] -= 1

        #Cost function per time point
        C = (self.V[0] * h[0, :] + self.V[1] * h[1, :] - self.x**2)**2
        #True causal effect for each unit
        beta1 = self.V[0]**2 + 2 * self.V[0] * self.V[1] * np.mean(
            h[1, :]) - 2 * self.V[0] * self.x**2
        beta2 = self.V[1]**2 + 2 * self.V[0] * self.V[1] * np.mean(
            h[0, :]) - 2 * self.V[1] * self.x**2
        betas = [beta1, beta2]
        self.vt = vt

        #Keep
        eligibility = np.sum(np.multiply(s_input, xi_perturb), 1)

        return (v, h, C, betas, eligibility)
Example #47
    def suggest(self):
        sys.stderr.write('Getting suggestion...\n')
        assert not np.any(self.grid < 0)
        assert not np.any(self.grid > 1)

        if not self.isFit:
            raise Exception("You must call fit() before calling suggest()")

        if self.objective['inputs'].shape[0] < DEFAULT_NUMDESIGN:
            suggestion = self.task_group.from_unit(self.grid[self.design_index])
            sys.stderr.write("\nSuggestion:     ")
            self.task_group.paramify_and_print(suggestion.flatten(), left_indent=16)
            return suggestion

        # print 'inputs: %s' % self.objective['inputs']
        # if self.objective.has_key('pending'):
            # print 'pending: %s' % self.objective['pending']

        # Compute the current best
        current_best, current_best_location = self.best()

        # Add some extra candidates around the best so far (a useful hack)
        spray_points = npr.randn(self.num_spray, self.num_dims)*self.spray_std + current_best_location
        spray_points = np.minimum(np.maximum(spray_points,0.0),1.0)
        
        # Compute EI on the grid
        grid_pred = np.vstack((self.grid, spray_points))
        grid_ei = self.acquisition_function_over_hypers(grid_pred, current_best, compute_grad=False)

        # Find the points on the grid with highest EI
        best_grid_inds = np.argsort(grid_ei)[-self.grid_subset:]
        best_grid_pred = grid_pred[best_grid_inds]

        # The index and value of the top grid point
        best_grid_ind = np.argmax(grid_ei)
        best_grid_ei  = grid_ei[best_grid_ind]
        
        if VERBOSE:
            print('Best EI before optimization: %f' % best_grid_ei)

        if self.check_grad:
            check_grad(lambda x: self.acq_optimize_wrapper(x, current_best, True), 
                best_grid_pred[0], verbose=True)

        # Optimize the top points from the grid to get better points
        cand = []
        b = [(0,1)]*best_grid_pred.shape[1]# optimization bounds

        if self.parallel_opt:
            # Optimize each point in parallel
            pool = multiprocessing.Pool(self.grid_subset)
            results = [pool.apply_async(self.optimize_pt,args=(
                    c,b,current_best,True)) for c in best_grid_pred]

            for res in results:
                cand.append(res.get(1e8))
            pool.close()
        else: 
            # Optimize in series
            for c in best_grid_pred:
                cand.append(self.optimize_pt(c,b,current_best,compute_grad=True))
        # Cand now stores the optimized points

        # Compute one more time (re-computing is unnecessary, oh well... TODO)
        cand = np.vstack(cand)
        opt_ei = self.acquisition_function_over_hypers(cand, current_best, compute_grad=False)

        # The index and value of the top optimized point
        best_opt_ind  = np.argmax(opt_ei)
        best_opt_ei   = opt_ei[best_opt_ind]

        # Optimization should always be better unless the optimization
        # breaks in some way.
        if VERBOSE:
            print('Best EI after  optimization: %f' % best_opt_ei)
            print('Suggested input %s' % cand[best_opt_ind])

        if best_opt_ei >= best_grid_ei:
            suggestion = cand[best_opt_ind]
        else:
            suggestion = grid_pred[best_grid_ind]

        # Make sure BFGS didn't do anything weird with the bounds
        suggestion[suggestion > 1] = 1.0
        suggestion[suggestion < 0] = 0.0

        suggestion = self.task_group.from_unit(suggestion)

        sys.stderr.write("\nSuggestion:     ")
        self.task_group.paramify_and_print(suggestion.flatten(), left_indent=16)
        return suggestion
Example #48
def get_rupture_onset(home,
                      project_name,
                      slip,
                      fault_array,
                      model_name,
                      hypocenter,
                      rise_time_depths,
                      M0,
                      sigma_rise_time=0.2):
    '''
    Using a custom built tvel file ray trace from hypocenter to determine rupture
    onset times
    '''

    from numpy import genfromtxt, zeros, arctan2, sin, r_, where, log10, isnan, argmin, setxor1d, exp
    from numpy.random import rand, randn
    from obspy.geodetics import gps2dist_azimuth

    #Load velocity model
    vel = genfromtxt(home + project_name + '/structure/' + model_name)

    # Convert layer thicknesses to depth to the top of each layer
    depth_to_top = r_[0, vel[:, 0].cumsum()[0:-1]]

    #Get rupture speed shear-wave multipliers
    rupture_multiplier = zeros(len(vel))
    # Shallow
    i = where(depth_to_top <= rise_time_depths[0])[0]
    rupture_multiplier[i] = 0.56
    # Deep
    i = where(depth_to_top >= rise_time_depths[1])[0]
    rupture_multiplier[i] = 0.8
    # Transition
    i = where((depth_to_top < rise_time_depths[1])
              & (depth_to_top > rise_time_depths[0]))[0]
    slope = (0.8 - 0.56) / (rise_time_depths[1] - rise_time_depths[0])
    intercept = 0.8 - slope * rise_time_depths[1]
    rupture_multiplier[i] = slope * depth_to_top[i] + intercept

    #Perturb depths so that subfaults at the same depth as the hypocenter do not get zero onset times
    delta = 0.00001
    i_same_as_hypo = where(fault_array[:, 3] == hypocenter[2])[0]
    dist = ((fault_array[:, 1] - hypocenter[0])**2 +
            (fault_array[:, 2] - hypocenter[1])**2)**0.5
    i_hypo = argmin(dist)
    #Get faults at same depth that are NOT the hypo
    i_same_as_hypo = setxor1d(i_same_as_hypo, i_hypo)
    #perturb
    R = rand(1)
    fault_array[i_hypo, 3] = fault_array[i_hypo, 3] - delta * R
    hypocenter[2] = hypocenter[2] - delta * R
    R = rand(len(i_same_as_hypo))
    fault_array[i_same_as_hypo, 3] = fault_array[i_same_as_hypo, 3] + delta * R

    #Loop over all faults
    t_onset = zeros(len(slip))
    #Perturb all subfault depths a tiny amount by some random number so that they NEVER lie on a layer interface
    z_perturb = (rand(len(fault_array)) - 0.5) * 1e-6
    fault_array[:, 3] = fault_array[:, 3] + z_perturb
    for kfault in range(len(slip)):
        D, az, baz = gps2dist_azimuth(hypocenter[1], hypocenter[0],
                                      fault_array[kfault,
                                                  2], fault_array[kfault, 1])
        D = D / 1000
        #Start and stop depths
        if fault_array[kfault, 3] <= hypocenter[2]:
            zshallow = fault_array[kfault, 3]
            zdeep = hypocenter[2]
        else:
            zdeep = fault_array[kfault, 3]
            zshallow = hypocenter[2]
        #Get angle between depths
        theta = arctan2(zdeep - zshallow, D)
        # get hypotenuse distance on all layers
        delta_ray = vel[:, 0] / sin(theta)
        # Calculate distance in each layer
        depth1 = 0
        depth2 = vel[0, 0]
        length_ray = zeros(len(vel))
        for klayer in range(len(vel)):
            if zshallow > depth1 and zdeep < depth2:  #both points in same layer
                length_ray[klayer] = abs(zshallow - zdeep) / sin(theta)
            elif zshallow > depth1 and zshallow < depth2:  #This is the top
                length_ray[klayer] = abs(depth2 - zshallow) / sin(theta)
            elif zdeep > depth1 and zdeep < depth2:  #This is the bottom
                length_ray[klayer] = abs(depth1 - zdeep) / sin(theta)
            elif depth1 > zshallow and depth2 < zdeep:  #Use full layer thickness for ray path length
                length_ray[klayer] = delta_ray[klayer]
            else:  #Some other layer, do nothing
                pass
            #Update reference depths
            if klayer < len(vel) - 1:  #not yet the last layer
                depth1 = depth2
                depth2 = depth2 + vel[klayer + 1, 0]
            else:
                depth1 = depth2
                depth2 = 1e6

        #Now divide ray path length by rupture velocity to get the travel time in each layer
        ray_times = length_ray / (vel[:, 1] * rupture_multiplier)
        t_onset[kfault] = ray_times.sum()

    #Now perturb onset times according to Graves-Pitarka eq 5 and 6 (assumes 1:1 correlation with slip)
    delta_t0 = ((M0 * 1e7)**(1. / 3)) * 1.8e-9

    #GP 2015 extra perturbation to destroy the 1:1 correlation with slip
    rand_numb = randn()
    delta_t = delta_t0 * exp(sigma_rise_time * rand_numb)

    #Now apply total perturbation
    slip_average = slip.mean()
    i = where(slip > 0.05 * slip_average)[0]
    perturbation = (log10(slip) - log10(slip_average)) / (log10(slip.max()) -
                                                          log10(slip_average))
    t_onset_final = t_onset.copy()
    t_onset_final[i] = t_onset[i] - delta_t * perturbation[i]

    #Check for negative times
    i = where(t_onset_final < 0)[0]
    t_onset_final[i] = t_onset[i]
    #Check for nan times
    i = where(isnan(t_onset_final))[0]
    t_onset_final[i] = 0

    return t_onset_final
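The shear-wave-speed multiplier above is a piecewise-linear ramp in depth: 0.56 above rise_time_depths[0], 0.8 below rise_time_depths[1], and a linear transition in between. A stand-alone sketch with made-up transition depths, just to show the ramp:

# Sketch of the depth-dependent rupture-velocity multiplier used above.
from numpy import array, full_like, where

def rupture_velocity_multiplier(depth_to_top, rise_time_depths=(10.0, 15.0)):
    d1, d2 = rise_time_depths                    # example transition depths (km)
    mult = full_like(depth_to_top, 0.56)         # shallow value
    mult[depth_to_top >= d2] = 0.8               # deep value
    i = where((depth_to_top > d1) & (depth_to_top < d2))[0]
    slope = (0.8 - 0.56) / (d2 - d1)
    mult[i] = slope * depth_to_top[i] + (0.8 - slope * d2)   # same ramp as above
    return mult

print(rupture_velocity_multiplier(array([0.0, 5.0, 12.5, 20.0])))   # -> [0.56, 0.56, 0.68, 0.8]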
Beispiel #49
0
                 if link[j][1] == -1:
                     a1_max = 2 * y_max_now * (a[j][2] *
                                               ((2 * 3.1416)**0.5))
                     a0_min = 0.01 * 1.2 * y_max_now * (a[j][2] * (
                         (2 * 3.1416)**0.5))
                     if a1[j][1] > a1_max:
                         a1_now[j][1] = a1_max
                         a0_now[j][1] = a0_min
 breakt = 0
 if statst[0] != statst[1] or statst[0] != 0:
     n_max = 5
     max_time = 10
     i_max = 0
     while i_ter < n_loops and breakt == 0:
         chi_iter = chi_sq_now
         pdl_grandom = ran.randn(nm)
         pdl_flux_fit = pdl_flux + pdl_grandom * pdl_e_flux * 0.5
         [a_out_now, chi_sq_now,
          pdl_model] = ssp.fit_elines_grad_rnd_new_guided(
              pdl_wave, pdl_flux_fit, pdl_e_flux, n_mod, chi_goal,
              d_chi_goal, typef, a_out_now, ia_now, a0_now, a1_now,
              link, n_mc, pdl_masked, deft, scale_ini, guided_type,
              guided_disp_type)
         if chi_sq_now < chi_iter:
             ############################################################
             # Close to a result, narrow the range
             for i in range(0, n_mod):
                 for j in range(0, 9):
                     if typef[i] == "eline\n":
                         if ia_now[i][j] == 1:
                             if link[i][j] == -1:
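The loop in the fragment above follows a perturb-refit-keep-if-better pattern: the flux is jittered within half its error bars, the lines are refit, and the new parameters are retained only when the chi-square improves. A generic sketch of that pattern (fit_fn is a placeholder for the actual emission-line fitting call, which is not reproduced here):

# Sketch of the randomized-restart pattern in the fragment above.
# fit_fn is a placeholder returning (params, chi_square).
import numpy as np

def refine(flux, e_flux, fit_fn, n_loops=20, seed=0):
    rng = np.random.default_rng(seed)
    best_params, best_chi = fit_fn(flux)
    for _ in range(n_loops):
        trial = flux + 0.5 * e_flux * rng.standard_normal(flux.size)  # jitter within half the errors
        params, chi = fit_fn(trial)
        if chi < best_chi:                                            # keep only improvements
            best_params, best_chi = params, chi
    return best_params, best_chi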
Beispiel #50
0
    def assimilate(self, HMM, xx, yy):
        Dyn, Obs, chrono, X0, stats, N = \
            HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats, self.N
        R, KObs, N1 = HMM.Obs.noise.C, HMM.t.KObs, N - 1
        Rm12 = R.sym_sqrt_inv

        assert Dyn.noise.C == 0, (
            "Q>0 not yet supported."
            " See Sakov et al 2017: 'An iEnKF with mod. error'")

        if self.bundle:
            # Sakov/Boc use T = EPS*eye(N) with EPS = 1e-4, but T = EPS*T is
            # used below instead, yielding a conditional cloud shape.
            EPS = 1e-4
        else:
            EPS = 1.0

        # Initial ensemble
        E = X0.sample(N)

        # Loop over DA windows (DAW).
        for kObs in progbar(np.arange(-1, KObs + self.Lag + 1)):
            kLag = kObs - self.Lag
            DAW = range(max(0, kLag + 1), min(kObs, KObs) + 1)

            # Assimilation (if ∃ "not-fully-assimilated" obs).
            if 0 <= kObs <= KObs:

                # Init iterations.
                X0, x0 = center(E)  # Decompose ensemble.
                w = np.zeros(N)  # Control vector for the mean state.
                T = np.eye(N)  # Anomalies transform matrix.
                Tinv = np.eye(N)
                # Explicit Tinv [instead of tinv(T)] allows for merging MDA code
                # with iEnKS/EnRML code, and flop savings in 'Sqrt' case.

                for iteration in np.arange(self.nIter):
                    # Reconstruct smoothed ensemble.
                    E = x0 + (w + EPS * T) @ X0
                    # Forecast.
                    for kCycle in DAW:
                        for k, t, dt in chrono.cycle(kCycle):
                            E = Dyn(E, t - dt, dt)
                    # Observe.
                    Eo = Obs(E, t)

                    # Undo the bundle scaling of ensemble.
                    if EPS != 1.0:
                        E = inflate_ens(E, 1 / EPS)
                        Eo = inflate_ens(Eo, 1 / EPS)

                    # Assess forecast stats; store {Xf, T_old} for analysis assessment.
                    if iteration == 0:
                        stats.assess(k, kObs, 'f', E=E)
                        Xf, xf = center(E)
                    T_old = T

                    # Prepare analysis.
                    y = yy[kObs]  # Get current obs.
                    Y, xo = center(Eo)  # Get obs {anomalies, mean}.
                    dy = (y - xo) @ Rm12.T  # Transform obs space.
                    Y = Y @ Rm12.T  # Transform obs space.
                    Y0 = Tinv @ Y  # "De-condition" the obs anomalies.
                    V, s, UT = svd0(Y0)  # Decompose Y0.

                    # Set the covariance normalization factor za ("effective ensemble size")
                    # => pre_infl^2 = (N-1)/za.
                    if self.xN is None:
                        za = N1
                    else:
                        za = zeta_a(*hyperprior_coeffs(s, N, self.xN), w)
                    if self.MDA:
                        # inflation (factor: nIter) of the ObsErrCov.
                        za *= self.nIter

                    # Post. cov (approx) of w,
                    # estimated at current iteration, raised to power.
                    def Cowp(expo):
                        return (V * (pad0(s**2, N) + za)**-expo) @ V.T

                    Cow1 = Cowp(1.0)

                    if self.MDA:  # View update as annealing (progressive assimilation).
                        Cow1 = Cow1 @ T  # apply previous update
                        dw = dy @ Y.T @ Cow1
                        if 'PertObs' in self.upd_a:  # == "ES-MDA". By Emerick/Reynolds
                            D = mean0(randn(*Y.shape)) * np.sqrt(self.nIter)
                            T -= (Y + D) @ Y.T @ Cow1
                        elif 'Sqrt' in self.upd_a:  # == "ETKF-ish". By Raanes
                            T = Cowp(0.5) * np.sqrt(za) @ T
                        elif 'Order1' in self.upd_a:  # == "DEnKF-ish". By Emerick
                            T -= 0.5 * Y @ Y.T @ Cow1
                        # Tinv = eye(N) [as initialized] coz MDA does not de-condition.

                    else:  # View update as Gauss-Newton optimization of the log-posterior.
                        grad = Y0 @ dy - w * za  # Cost function gradient
                        dw = grad @ Cow1  # Gauss-Newton step
                        # "ETKF-ish". By Bocquet/Sakov.
                        if 'Sqrt' in self.upd_a:
                            # Sqrt-transforms
                            T = Cowp(0.5) * np.sqrt(N1)
                            Tinv = Cowp(-.5) / np.sqrt(N1)
                            # Tinv saves time [vs tinv(T)] when Nx<N
                        # "EnRML". By Oliver/Chen/Raanes/Evensen/Stordal.
                        elif 'PertObs' in self.upd_a:
                            D = mean0(randn(*Y.shape)) if iteration == 0 else D
                            gradT = -(Y + D) @ Y0.T + N1 * (np.eye(N) - T)
                            T = T + gradT @ Cow1
                            # Tinv= tinv(T, threshold=N1)  # unstable
                            Tinv = sla.inv(T + 1)  # the +1 is for stability.
                        # "DEnKF-ish". By Raanes.
                        elif 'Order1' in self.upd_a:
                            # Included for completeness; does not make much sense.
                            gradT = -0.5 * Y @ Y0.T + N1 * (np.eye(N) - T)
                            T = T + gradT @ Cow1
                            Tinv = tinv(T, threshold=N1)

                    w += dw
                    if dw @ dw < self.wtol * N:
                        break

                # Assess (analysis) stats.
                # The final_increment is a linearization to
                # (i) avoid re-running the model and
                # (ii) reproduce EnKF in case nIter==1.
                final_increment = (dw + T - T_old) @ Xf
                # See docs/snippets/iEnKS_Ea.jpg.
                stats.assess(k, kObs, 'a', E=E + final_increment)
                stats.iters[kObs] = iteration + 1
                if self.xN:
                    stats.infl[kObs] = np.sqrt(N1 / za)

                # Final (smoothed) estimate of E at [kLag].
                E = x0 + (w + T) @ X0
                E = post_process(E, self.infl, self.rot)

            # Slide/shift DAW by propagating smoothed ('s') ensemble from [kLag].
            if -1 <= kLag < KObs:
                if kLag >= 0:
                    stats.assess(chrono.kkObs[kLag], kLag, 's', E=E)
                for k, t, dt in chrono.cycle(kLag + 1):
                    stats.assess(k - 1, None, 'u', E=E)
                    E = Dyn(E, t - dt, dt)

        stats.assess(k, KObs, 'us', E=E)
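The Cowp helper above is the SVD form of a regularized inverse in ensemble space: assuming svd0 returns the full left singular factor (as its use above suggests), Cowp(1.0) equals inv(Y0 @ Y0.T + za*I). A small numerical check of that identity, with pad0 re-implemented inline:

# Numerical check (sketch) of the identity behind Cowp above.
import numpy as np

N, Ny, za = 6, 4, 5.0
rng = np.random.default_rng(1)
Y0 = rng.standard_normal((N, Ny))

V, s, UT = np.linalg.svd(Y0)             # names mirror the code above; V is the full N x N left factor
s2 = np.zeros(N)
s2[:s.size] = s**2                       # pad0(s**2, N)

Cow1_svd = (V * (s2 + za)**-1.0) @ V.T
Cow1_dir = np.linalg.inv(Y0 @ Y0.T + za * np.eye(N))
print(np.allclose(Cow1_svd, Cow1_dir))   # True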
Beispiel #51
0
def main():
    # RNG params
    # seed(0)

    # Simulation setup
    t0 = 0
    tf = 10000
    dt = 2

    td = 200

    sigma_omega = 1e-6

    t_meas_stop = 40000
    td_idx = int(td / dt)
    tdc_idx = int((td + cam_dt) / dt)

    observer = Observer(0.01)

    # Containers
    t_hist = []
    td_hist = []
    td_hat = []
    omega_hist = []
    R_hist = []

    # Step through time
    t = t0
    R = eye(3)
    while (t <= tf):
        # Print and store things
        print("%d %d %d" % (t, td, int(observer.td * 1000)))
        t_hist += [t / 1000.0]
        td_hist += [td]
        td_hat += [int(observer.td * 1000)]

        # True kinematics
        if t < t_meas_stop:
            omega = Omega(t / 1000.0) + sigma_omega * randn(3)
        else:
            omega = sigma_omega * randn(3)
        omega_hist += [omega]
        R_hist += [R]

        # Update observer
        if t > 1000 and mod(t, 30) == 0:
            # Start observer at delayed measurement propagated to current time by delay estimate
            R2 = R_hist[-td_idx]
            R1 = R_hist[-tdc_idx]
            R_meas = R1.dot(R2.T)

            # Run observers
            observer.update(t_hist, omega_hist, R_meas)

        # Increment time
        t += dt

        # Update rotation
        R = expR(omega * dt / 1000.0).dot(R)
        R, r = qr(R)

    # Plot results
    fig, axs = plt.subplots(figsize=(18, 10), nrows=3, ncols=1)
    fig.set_facecolor('white')
    vectorize(lambda ax: ax.grid(True))(axs)  # add grid to all subplots

    axs[0].set_ylabel('$\\alpha$')
    axs[1].set_ylabel('$\\beta$')
    axs[2].set_ylabel('$t_d$')
    axs[2].set_xlabel('Time (s)')

    # axs[0].plot(t_hist, az_hist, 'b-')
    # axs[1].plot(t_hist, el_hist, 'b-')
    axs[2].plot(t_hist, td_hist, 'b-')

    # axs[0].plot(t_hist, az_hat, 'r--')
    # axs[1].plot(t_hist, el_hat, 'r--')
    axs[2].plot(t_hist, td_hat, 'r--')

    omega_plot = vstack(omega_hist)
    axs[0].plot(t_hist, omega_plot[:, 0] * 180 / pi, 'g-.')
    axs[1].plot(t_hist, omega_plot[:, 1] * 180 / pi, 'g-.')

    plt.show()
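The attitude update in the loop above (R = expR(omega * dt / 1000.0).dot(R) followed by a QR factorization) presumably composes the matrix exponential of the skew-symmetric (hat) matrix of omega with the current rotation and then re-orthonormalizes it. A self-contained sketch of one such step, assuming expR is exactly that exponential:

# Sketch of one attitude-propagation step, assuming expR(w) = expm(hat(w)).
import numpy as np
from scipy.linalg import expm

def hat(w):
    # Skew-symmetric (cross-product) matrix of a 3-vector.
    return np.array([[0.0, -w[2], w[1]],
                     [w[2], 0.0, -w[0]],
                     [-w[1], w[0], 0.0]])

R = np.eye(3)
omega = np.array([0.0, 0.0, 0.1])        # rad/s
dt_ms = 2.0                              # the snippet keeps time in milliseconds
R = expm(hat(omega * dt_ms / 1000.0)) @ R
R, _ = np.linalg.qr(R)                   # re-orthonormalize against numerical drift
print(np.allclose(R @ R.T, np.eye(3)))   # True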
Beispiel #52
0
        pylab.axis([0, 2, 0, 1.4e4])
        pylab.show()


if __name__ == "__main__":
    x = N.arange(-3, 4, .005)
    #y=fp_gaussian(x,1,0,.5)
    p = [500, 0.7, .2]
    y = matlab_gaussian(x, p)
    p = [1000, 1, .4]
    y = y + matlab_gaussian(x, p)
    p = [1000, 1.2, .1]
    y = y + matlab_gaussian(x, p)
    yerr = N.sqrt(y) + 1
    y += randn(len(y)) * yerr
    #yerr=N.sqrt(y)+1
    #fig=Figure()
    #fig=pylab.Figure()
    #canvas = FigureCanvas(fig)
    #axes = fig.add_subplot(111)
    driver(x, y)
    sys.exit()

    if 0:
        kern, DW = optimize_DW(y)  #choose the right window size
        pylab.plot(kern, DW, 's')
        pylab.show()

    npeaks = 2
    nlist = []
Beispiel #53
0
y_real.shape

# In[58]:

#print(x_real)
for epoch in range(1):
    idx = random.randint(0, (len(x_real1) - 1))
    #print("idx=")
    print(idx)
    x_real = x_real1[idx]
    x = x_real.reshape(1, 70, 1)
    print(x.shape)
    print(len(x_real))

    seed = random.randn(70, 1)
    print(seed.shape)
    x_fake = generator.predict(seed)  #shape(70,70,1)
    #print(x_fake)
    print(x_fake.shape)
    # print(len(x_fake))

    discriminator_metric_real = discriminator.train_on_batch(x_real, y_real)
    #discriminator_metric_genereted = discriminator.train_on_batch(x_fake,y_fake)

# In[54]:

print(seed)
print(len(seed))

# In[ ]:
Beispiel #54
0
import numpy as np 
import matplotlib.pyplot as plt 
from numpy.random import randn
X = np.linspace(-3, 2, 200) 
Y = X ** 2 - 2 * X + 1. 
plt.plot(X, Y) 
plt.show() 
fig = plt.figure(); ax = fig.add_subplot(1, 1, 1)

ax.plot(np.random.randn(1000).cumsum())
ax.plot(randn(1000).cumsum(), 'k', label='one')
ax.plot(randn(1000).cumsum(), 'k--', label='two')
ax.plot(randn(1000).cumsum(), 'k.', label='three')

ax.legend(loc='best')

Beispiel #55
0
 def __init_W(self):
     # Initialize the private weight vector with standard-normal draws.
     self.__W = randn(self.__dim)
Beispiel #56
0
        if len(x) > 1:
            return tuple(x)
        else:
            return x[0]
    # discretize(xs) takes a list of vectors and makes it a list of tuples or scalars
    return [discretize_one(x) for x in xs]

#if __name__ == "__main__":
#    print("NPEET: Non-parametric entropy estimation toolbox. See readme.pdf for details on usage.")

from time import perf_counter as clock  # time.clock was removed in Python 3.8
from numpy import random as rand
def test(t0):
    return knn_search_parallel(data, K,t0=t0)

if __name__ == '__main__':
    dY, dX = (3072,3072)
    ndata = 6000
    A = np.block([[10*nr.randn(dX,dX), nr.randn(dX,dY)], [nr.randn(dY,dX), 10*nr.randn(dY,dY)]])
    A = A.T@A
    AX = A[0:dX,0:dX]
    AY = A[dX:,dX:]
    data = 10*nr.multivariate_normal(np.zeros(dX+dY), A, ndata)
    eps = 3*nr.rand(ndata)
    ss.cKDTree(data.copy())
    #print('avgdigamma with random eps: %f' % test3)
    print('Testing mutual information calculation:')
    mi(data[:,0:dX],data[:,dX:])
    print('Theoretical Value')
    print((la.slogdet(AX)[1]+la.slogdet(AY)[1]-la.slogdet(A)[1])/2)
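A side note on the "Theoretical Value" printed just above: for a jointly Gaussian pair it is the closed-form mutual information I(X;Y) = 0.5*(log det Sigma_X + log det Sigma_Y - log det Sigma), in nats. The following tiny check confirms it against the familiar bivariate form -0.5*log(1 - rho**2):

# Sketch: closed-form Gaussian mutual information, checked in 2-D.
import numpy as np

rho = 0.6
Sigma = np.array([[1.0, rho],
                  [rho, 1.0]])
mi_logdet = 0.5 * (np.linalg.slogdet(Sigma[:1, :1])[1]
                   + np.linalg.slogdet(Sigma[1:, 1:])[1]
                   - np.linalg.slogdet(Sigma)[1])
print(np.isclose(mi_logdet, -0.5 * np.log(1 - rho**2)))   # True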
    def test_rolling_cov(self):
        A = self.series
        B = A + randn(len(A))

        result = mom.rolling_cov(A, B, 50, min_periods=25)
        assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
Beispiel #58
0
import numpy as np
import pandas as pd
from numpy.random import randn

np.random.seed(101)

# Part 1
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
print(df)

print(df['W'])
print(type(df['W']))

print(df[['Y', 'Z']])

df['Q'] = df['W'] + df['Y']
print(df)
df.drop('Q', axis=1, inplace=True)
# df.drop('E', axis=0, inplace=True)
print(df.shape)
print(df)

print(df.loc['A'])
print(df.iloc[3])
print(df.loc['D', 'W'])
print(df.loc[['A', 'B'], ['W', 'Y']])

# Part 2
print(df[df > 0])
print(df['W'] > 0)
print(df[df['W'] > 0])
    def _check_ndarray(self, func, static_comp, window=50,
                       has_min_periods=True,
                       preserve_nan=True,
                       has_center=True,
                       fill_value=None):

        result = func(self.arr, window)
        assert_almost_equal(result[-1],
                            static_comp(self.arr[-50:]))

        if preserve_nan:
            assert(np.isnan(result[self._nan_locs]).all())

        # excluding NaNs correctly
        arr = randn(50)
        arr[:10] = np.NaN
        arr[-10:] = np.NaN

        if has_min_periods:
            result = func(arr, 50, min_periods=30)
            assert_almost_equal(result[-1], static_comp(arr[10:-10]))

            # min_periods is working correctly
            result = func(arr, 20, min_periods=15)
            self.assert_(np.isnan(result[23]))
            self.assert_(not np.isnan(result[24]))

            self.assert_(not np.isnan(result[-6]))
            self.assert_(np.isnan(result[-5]))

            arr2 = randn(20)
            result = func(arr2, 10, min_periods=5)
            self.assert_(isnull(result[3]))
            self.assert_(notnull(result[4]))

            # min_periods=0
            result0 = func(arr, 20, min_periods=0)
            result1 = func(arr, 20, min_periods=1)
            assert_almost_equal(result0, result1)
        else:
            result = func(arr, 50)
            assert_almost_equal(result[-1], static_comp(arr[10:-10]))

        if has_center:
            if has_min_periods:
                result = func(arr, 20, min_periods=15, center=True)
                expected = func(arr, 20, min_periods=15)
            else:
                result = func(arr, 20, center=True)
                expected = func(arr, 20)

            assert_almost_equal(result[1], expected[10])
            if fill_value is None:
                self.assert_(np.isnan(result[-9:]).all())
            else:
                self.assert_((result[-9:] == 0).all())
            if has_min_periods:
                self.assert_(np.isnan(expected[23]))
                self.assert_(np.isnan(result[14]))
                self.assert_(np.isnan(expected[-5]))
                self.assert_(np.isnan(result[-14]))
def simulate_nascar():
    assert K_true == 4
    As = [
        random_rotation(D_latent, np.pi / 24.),
        random_rotation(D_latent, np.pi / 48.)
    ]

    # Set the center points for each system
    centers = [np.array([+2.0, 0.]), np.array([-2.0, 0.])]
    bs = [
        -(A - np.eye(D_latent)).dot(center) for A, center in zip(As, centers)
    ]

    # Add a "right" state
    As.append(np.eye(D_latent))
    bs.append(np.array([+0.1, 0.]))

    # Add a "left" state
    As.append(np.eye(D_latent))
    bs.append(np.array([-0.25, 0.]))

    # Construct multinomial regression to divvy up the space
    w1, b1 = np.array([+1.0, 0.0]), np.array([-2.0])  # x + b > 0 -> x > -b
    w2, b2 = np.array([-1.0, 0.0]), np.array([-2.0])  # -x + b > 0 -> x < b
    w3, b3 = np.array([0.0, +1.0]), np.array([0.0])  # y > 0
    w4, b4 = np.array([0.0, -1.0]), np.array([0.0])  # y < 0

    reg_W = np.column_stack((100 * w1, 100 * w2, 10 * w3, 10 * w4))
    reg_b = np.concatenate((100 * b1, 100 * b2, 10 * b3, 10 * b4))

    # Make a recurrent SLDS with these params #
    dynamics_distns = [
        Regression(
            A=np.column_stack((A, b)),
            sigma=1e-4 * np.eye(D_latent),
            nu_0=D_latent + 2,
            S_0=1e-4 * np.eye(D_latent),
            M_0=np.zeros((D_latent, D_latent + 1)),
            K_0=np.eye(D_latent + 1),
        ) for A, b in zip(As, bs)
    ]

    init_dynamics_distns = [
        Gaussian(mu=np.array([0.0, 1.0]), sigma=1e-3 * np.eye(D_latent))
        for _ in range(K)
    ]

    C = np.hstack((npr.randn(D_obs, D_latent), np.zeros((D_obs, 1))))
    emission_distns = \
        DiagonalRegression(D_obs, D_latent+1,
                           A=C, sigmasq=1e-5 *np.ones(D_obs),
                           alpha_0=2.0, beta_0=2.0)

    model = SoftmaxRecurrentOnlySLDS(trans_params=dict(W=reg_W, b=reg_b),
                                     init_state_distn='uniform',
                                     init_dynamics_distns=init_dynamics_distns,
                                     dynamics_distns=dynamics_distns,
                                     emission_distns=emission_distns,
                                     alpha=3.)

    #########################
    # Sample from the model #
    #########################
    inputs = np.ones((T, 1))
    y, x, z = model.generate(T=T, inputs=inputs)

    # Mask off some data
    if mask_start == mask_stop:
        mask = None
    else:
        mask = np.ones((T, D_obs), dtype=bool)
        mask[mask_start:mask_stop] = False

    # Print the true parameters
    np.set_printoptions(precision=2)
    print("True W:\n{}".format(model.trans_distn.W))
    print("True logpi:\n{}".format(model.trans_distn.logpi))

    return model, inputs, z, x, y, mask
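The reg_W / reg_b construction above carves the 2-D latent plane into four regions via an (effectively hard) softmax: argmax_k of w_k . x + b_k gives state 0 for x > 2, state 1 for x < -2, and states 2 / 3 for y > 0 / y < 0 in the middle band. A quick stand-alone check of that reading of the comments:

# Sketch: how the multinomial-regression weights above partition latent space.
import numpy as np

w1, b1 = np.array([+1.0, 0.0]), np.array([-2.0])
w2, b2 = np.array([-1.0, 0.0]), np.array([-2.0])
w3, b3 = np.array([0.0, +1.0]), np.array([0.0])
w4, b4 = np.array([0.0, -1.0]), np.array([0.0])
reg_W = np.column_stack((100 * w1, 100 * w2, 10 * w3, 10 * w4))   # shape (2, 4)
reg_b = np.concatenate((100 * b1, 100 * b2, 10 * b3, 10 * b4))    # shape (4,)

pts = np.array([[+3.0, 0.0],    # right of x = +2    -> state 0
                [-3.0, 0.0],    # left of x = -2     -> state 1
                [0.0, +1.0],    # middle band, y > 0 -> state 2
                [0.0, -1.0]])   # middle band, y < 0 -> state 3
print(np.argmax(pts @ reg_W + reg_b, axis=1))   # [0 1 2 3]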