Example #1
from numpy.random import rand, randn


def create_C_W_X_d(bias=True):
    # data_factory is project-specific; options: 'swiss', 'PeaksData', 'GMMData'
    C_train, C_val, X_train, X_val = data_factory('GMMData')
    W = randn(X_val.shape[0], C_val.shape[0])  # weight matrix sized for the validation split
    d_w = rand(*W.shape)                       # random perturbation with the same shape as W
    d_x = randn(*X_val.shape)                  # random perturbation with the same shape as X_val
    return C_val, W, X_val, d_w, d_x
Example #2
    def _monte_carlo_error_propagation(self, vr, m):
        # nominal_value and std_dev are presumably from the uncertainties package
        lambda_total = self._lambda_t
        el = self._lambda_ec
        f = self._f

        # draw self._n normally distributed samples for each uncertain constant
        vel = nominal_value(el) + std_dev(el) * randn(self._n)
        vt = nominal_value(lambda_total) + std_dev(lambda_total) * randn(self._n)
        vf = nominal_value(f) + std_dev(f) * randn(self._n)

        vt_mc = ones(1, m) * vt
        vf_mc = ones(1, m) * vf
        vel_mc = ones(1, m) * vel

        # age equation: t = ln((lambda_t / lambda_ec) * f * R + 1) / lambda_t
        t_mc = log(vt_mc / vel_mc * vf_mc * vr + 1) / vt_mc
        return mean(t_mc), std(t_mc)
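The method above draws self._n normal samples for each uncertain decay constant and for the measured ratio, pushes them through the age equation t = ln((lambda_t / lambda_ec) * f * R + 1) / lambda_t, and reports the mean and spread. A self-contained sketch of the same idea, using hypothetical placeholder values in place of the class attributes:

import numpy as np

# All numbers below are placeholders for illustration only; the real values
# come from the class attributes (self._lambda_t, self._lambda_ec, self._f).
n = 100_000                          # number of Monte Carlo draws
lambda_t = (5.543e-10, 0.010e-10)    # total decay constant: (nominal, 1-sigma), 1/yr
lambda_ec = (0.581e-10, 0.017e-10)   # electron-capture decay constant
f = (1.0, 0.0)                       # conversion factor (no uncertainty here)
r, sr = 0.5, 0.005                   # measured ratio and its 1-sigma error

rng = np.random.default_rng(0)
vt = lambda_t[0] + lambda_t[1] * rng.standard_normal(n)
vel = lambda_ec[0] + lambda_ec[1] * rng.standard_normal(n)
vf = f[0] + f[1] * rng.standard_normal(n)
vr = r + sr * rng.standard_normal(n)

# age equation: t = ln((lambda_t / lambda_ec) * f * R + 1) / lambda_t
t_mc = np.log(vt / vel * vf * vr + 1) / vt
print(t_mc.mean(), t_mc.std())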
Example #3
    def default_weight_initializer(self):
        """
        Initialize each weight using a Gaussian distribution with mean 0
        and standard deviation 1 over the square root of the number of
        weights connecting to the same neuron. Initialize the biases
        using a Gaussian distribution with mean 0 and standard
        deviation 1.
        Note that the first layer is assumed to be an input layer, and
        by convention we won't set any biases for those neurons, since
        biases are only ever used in computing the outputs from later
        layers.
        """
        self.biases = [randn(y, 1) for y in self.sizes[1:]]
        self.weights = [randn(y, x) / np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]
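The docstring above describes scaled Gaussian initialization: weights drawn from N(0, 1/sqrt(fan-in)), biases from N(0, 1). A minimal standalone sketch of the same scheme, assuming only a list of layer sizes instead of the surrounding network class:

import numpy as np

def init_params(sizes):
    # weights ~ N(0, 1/sqrt(fan_in)), biases ~ N(0, 1); no biases for the input layer
    biases = [np.random.randn(y, 1) for y in sizes[1:]]
    weights = [np.random.randn(y, x) / np.sqrt(x)
               for x, y in zip(sizes[:-1], sizes[1:])]
    return weights, biases

weights, biases = init_params([784, 30, 10])
print([w.shape for w in weights])    # [(30, 784), (10, 30)]
print(round(weights[0].std(), 3))    # close to 1 / sqrt(784), i.e. about 0.036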
Example #5
    def convert(self, age, error):
        if hasattr(age, '__iter__'):
            m = len(age)
        else:
            m = 1

        torig = self._original_total_decay_constant
        t = self._lambda_t

        r, ex, ex_orig = self._calculate_r(age)

        sr = torig * exp(torig * age * 1e6) * error * 1e6 / ex_orig

        r_mc = ones(self._n) * r
        sr_mc = ones(self._n) * sr
        # return age, error

        vr = r_mc + sr_mc * randn(self._n, m)

        age = umath.log(((t / self._lambda_ec) * self._f * r) + 1) / (t * 1e6)

        # linear error propagation

        e = self._linear_error_propagation(age * 1e6, r, sr)
        e *= 1e-6

        # tm, tme = self._monte_carlo_error_propagation()
        # tm *= 1e-6
        # tme *= 1e-6
        tm, tme = 0, 0
        return age, e, tm, tme
Example #7
def query():
    df = pd.DataFrame(randn(10, 2), columns=list('ab'))
    print('\ndf-------------------------------------')
    print(df)
    print('\n-------------------------------------')
    print(df.query('a > b'))
    print('\n-------------------------------------')
    print(df[df.a > df.b])
Example #8
def eval():
    df = pd.DataFrame(randn(10, 2), columns=list('ab'))
    print('\ndf-------------------------------------')
    print(df)
    print('\n-------------------------------------')
    print(df.eval('a + b'))
    print('\n-------------------------------------')
    print(df.eval('c = a + b'))
Example #9
    def large_weight_initializer(self):
        """
        Initialize the weights using a Gaussian distribution with mean 0
        and standard deviation 1. Initialize the biases using a
        Gaussian distribution with mean 0 and standard deviation 1.
        Note that the first layer is assumed to be an input layer, and
        by convention we won't set any biases for those neurons, since
        biases are only ever used in computing the outputs from later
        layers.
        This weight and bias initializer uses the same approach as in
        Chapter 1, and is included for purposes of comparison. It
        will usually be better to use the default weight initializer
        instead.
        """
        self.biases = [randn(y, 1) for y in self.sizes[1:]]
        self.weights = [randn(y, x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]
Example #10
    def rff_generate(self, mbags=20, mdata=20, dim=1):
        '''
        mbags:: number of random features for bag kernel
        mdata:: number of random features for data kernel
        dim:: data dimensionality
        '''
        self.data_kernel.rff_generate(mdata, dim=dim)
        self.rff_num = mbags
        self.unit_rff_freq = randn(mbags // 2, mdata)  # integer division so the shape argument is an int
        self.rff_freq = self.unit_rff_freq / self.width
Example #12
import itertools
import random

import numpy as np
from numpy.random import rand, randn


def generate_global_variable_consensus_lp(params):
    nb_agents = params['nb_agents']
    nb_vars = params['nb_vars']
    nb_constraints_per_agent = params['nb_constraints_per_agent']
    seed = params['seed']

    if seed is not None:
        np.random.seed(seed)
        random.seed(seed)

    cs = []
    As = []
    bs = []
    c_cent = np.zeros((nb_vars * nb_agents, 1))
    A_cent = np.zeros((nb_agents * nb_constraints_per_agent, nb_vars * nb_agents))
    b_cent = np.ones((nb_agents * nb_constraints_per_agent, 1))
    n = nb_vars
    m = nb_constraints_per_agent
    for i in range(nb_agents):
        c = rand(n, 1) + 0.5
        x0 = abs(randn(n, 1))
        A = abs(randn(m, n))
        b = A @ x0
        cs.append(c)
        As.append(A)
        bs.append(b)
        A_cent[i * nb_constraints_per_agent:(i + 1) * nb_constraints_per_agent, i * nb_vars:(i + 1) * nb_vars] = A
        c_cent[i * nb_vars:(i + 1) * nb_vars, :] = c
        b_cent[i * nb_constraints_per_agent:(i + 1) * nb_constraints_per_agent, :] = b  # rows of b_cent are indexed by constraints, not variables

    # consensus constraints
    for j in range(nb_vars):
        for (i, k) in itertools.combinations(range(nb_agents), r=2):
            if i != k:
                line = np.zeros((1, nb_vars * nb_agents))
                line[0, i * nb_vars + j] = 1
                line[0, k * nb_vars + j] = -1
                A_cent = np.concatenate([A_cent, line])
                b_cent = np.concatenate([b_cent, np.zeros((1, 1))])
    return cs, As, bs, c_cent, A_cent, b_cent
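A short usage sketch for the generator above, assuming the function is defined or imported in the current module; the parameter values are arbitrary and only meant to show the shapes of the centralized problem:

params = {'nb_agents': 3, 'nb_vars': 2, 'nb_constraints_per_agent': 2, 'seed': 0}
cs, As, bs, c_cent, A_cent, b_cent = generate_global_variable_consensus_lp(params)

print(len(cs), As[0].shape, bs[0].shape)         # 3 (2, 2) (2, 1)
# 6 local constraint rows plus 2 variables * C(3, 2) agent pairs = 6 consensus rows
print(A_cent.shape, b_cent.shape, c_cent.shape)  # (12, 6) (12, 1) (6, 1)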
Example #13
from numpy.random.mtrand import randn
import numpy as np

if __name__ == "__main__":
    arr = randn(8)
    print('arr : ', arr)

    arr.sort()
    print('arr : ', arr)

    brr = randn(2, 3, 4)
    # brr, y = np.meshgrid(np.arange(4) + 1, [1, 1, 1, 1])
    print('brr: \n', brr)

    brr.sort(0)  # sort in place along axis 0
    print('brr: \n', brr)
Example #14
#!/usr/bin/env python
"""
Uses surprise on a toy example.
"""

from surprise import Surprise
from numpy.random.mtrand import randn

sc = Surprise()

# Create two samples from a standard normal distribution
# Reshape into (# of samples, # of dimensions) array
n = 100
sample1 = randn(n).reshape(-1,1)
sample2 = randn(n).reshape(-1,1)

# Mode is 'replace', i.e. we are assuming that the two distributions
# are separately analysed posteriors.
mode = 'replace'

# Calculate entropy numbers with surprise
rel_ent, exp_rel_ent, S, sD, p = sc(sample1, sample2, mode = mode)

print('Entropy estimates for two standard normal distributions.')
print('Relative entropy D: %f'%rel_ent)
print('Expected relative entropy <D>: %f'%exp_rel_ent)
print('Surprise S: %f'%S)
print('Expected fluctuations of relative entropy sigma(D): %f'%sD)
print('p-value of Surprise: %f'%p)
Example #16
from datetime import datetime

from numpy import cumsum, log
from numpy.random.mtrand import randn

from Lecture3.homework3.tools import hurst
import pandas_datareader.data as web

# create a Geometric Brownian Motion, a Mean-Reverting and a Trending series

gbm = log(cumsum(randn(100000)) + 1000)
mr = log(randn(100000) + 1000)
tr = log(cumsum(randn(100000) + 1) + 1000)

# output the Hurst exponent for each of the above series
# and for the Amazon price (the Adjusted Close column),
# which is also used for the ADF test in the other exercise
print('Hurst(GBM):  %s' % hurst(gbm))
print('Hurst(MR):  %s' % hurst(mr))
print('Hurst(TR):  %s' % hurst(tr))

amzn = web.DataReader("AMZN", "yahoo", datetime(2000, 1, 1),
                      datetime(2015, 1, 1))
print('Hurst(AMAZON):  %s' % hurst(amzn['Adj Close']))
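The hurst function here comes from a course module that is not shown. A common lag-variance estimator, which is presumably close to (but not necessarily identical with) that helper, fits the scaling exponent of lagged differences on a log-log plot:

import numpy as np

def hurst_estimate(ts, max_lag=100):
    # the standard deviation of lag-k differences of a series with Hurst exponent H grows like k**H
    ts = np.asarray(ts, dtype=float)
    lags = range(2, max_lag)
    tau = [np.std(ts[lag:] - ts[:-lag]) for lag in lags]
    slope = np.polyfit(np.log(list(lags)), np.log(tau), 1)[0]
    return slope

print(hurst_estimate(np.cumsum(np.random.randn(100000))))  # random walk: expect a value near 0.5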
Example #18
import numpy as np
import pandas as pd
import random

from numpy.random.mtrand import randn

outside = ['g1', 'g1', 'g1', 'g2', 'g2', 'g2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
print(hier_index)
hier_index = pd.MultiIndex.from_tuples(hier_index)
# build a DataFrame with a MultiIndex (also known as hierarchical indexing)
df = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])

print(hier_index)
print(df)

print(df.loc['g1'].loc[1])  # drill down through the outer level ('g1') to the inner level (1)

##############GROUPING
data = {
    'Company': ['Google', 'Google', 'MSFT', 'MSFT', 'FB', 'FB'],
    'Person': ['Sam', 'Charles', 'Amy', 'Viennese', 'Carl', 'Sarah'],
    'Sales': [200, 120, 340, 124, 243, 350]
}
da = pd.DataFrame(data)  # This will create a dataframe
print(da)
grouping = da.groupby('Company')  # This will group the table by Company
print(grouping.sum())  # assign da.groupby(...) to a variable first:
# the GroupBy object itself prints nothing useful; it only holds the grouping until you aggregate it
Example #19
from numpy import zeros, dot, pi, cos, sin
from numpy.linalg import solve, LinAlgError
from numpy.random import rand, randn


def random_ABCD(n,
                p,
                q,
                pRepeat=0.01,
                pReal=0.5,
                pBCmask=0.90,
                pDmask=0.8,
                pDzero=0.5):
    """
	Generate ONE n-th order random  stable state-spaces, with q inputs and p outputs
	copy/adapted from control-python library (Richard Murray): https://sourceforge.net/projects/python-control/
	(thanks guys!)
	possibly already adpated/copied from Mathworks or Octave

	Parameters:
	- n: number of states (default:  random between 5 and 10)
	- p: number of outputs (default: 1)
	- q: number of inputs (default: 1)

	- pRepeat: Probability of repeating a previous root (default: 0.01)
	- pReal: Probability of choosing a real root (default: 0.5). Note that when choosing a complex root,
		the conjugate gets chosen as well. So the expected proportion of real roots is pReal / (pReal + 2 * (1 - pReal))
	- pBCmask: Probability that an element in B or C will not be masked out (default: 0.90)
	- pDmask: Probability that an element in D will not be masked out (default: 0.8)
	- pDzero: Probability that D = 0 (default: 0.5)

	Returns a four numpy matrices A,B,C,D
	"""

    # Make some poles for A.  Preallocate a complex array.
    poles = zeros(n) + zeros(n) * 0.j
    i = 0

    while i < n:

        if rand() < pRepeat and i != 0 and i != n - 1:
            # Small chance of copying poles, if we're not at the first or last  element.
            if poles[i - 1].imag == 0:
                poles[i] = poles[i - 1]  # Copy previous real pole.
                i += 1

            else:
                poles[i:i + 2] = poles[
                    i - 2:i]  # Copy previous complex conjugate pair of poles.
                i += 2

        elif rand() < pReal or i == n - 1:
            poles[i] = 2. * rand() - 1.  # No-oscillation pole.
            i += 1

        else:
            mag = rand()  # Complex conjugate pair of oscillating poles.
            phase = 2. * pi * rand()
            poles[i] = complex(mag * cos(phase), mag * sin(phase))
            poles[i + 1] = complex(poles[i].real, -poles[i].imag)
            i += 2

    # Now put the poles in A as real blocks on the diagonal.

    A = zeros((n, n))
    i = 0

    while i < n:

        if poles[i].imag == 0:
            A[i, i] = poles[i].real
            i += 1

        else:
            A[i, i] = A[i + 1, i + 1] = poles[i].real
            A[i, i + 1] = poles[i].imag
            A[i + 1, i] = -poles[i].imag
            i += 2

    while True:  # Finally, apply a transformation so that A is not block-diagonal.
        T = randn(n, n)

        try:
            A = dot(solve(T, A), T)  # A = T \ A * T
            break

        except LinAlgError:
            # In the unlikely event that T is rank-deficient, iterate again.
            pass

    # Make the remaining matrices.
    B = randn(n, q)
    C = randn(p, n)
    D = randn(p, q)

    # Make masks to zero out some of the elements.
    while True:
        Bmask = rand(n, q) < pBCmask
        if not Bmask.all():  # Retry until at least one element will be masked out.
            break

    while True:
        Cmask = rand(p, n) < pBCmask
        if not Cmask.all():  # Retry until at least one element will be masked out.
            break

    if rand() < pDzero:
        Dmask = zeros((p, q))
    else:
        while True:
            Dmask = rand(p, q) < pDmask
            if not Dmask.all():  # Retry until at least one element will be masked out.
                break

    # Apply masks.
    B *= Bmask
    C *= Cmask
    # D *= Dmask

    return A, B, C, D
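A quick usage sketch for random_ABCD, assuming the numpy helpers it relies on (zeros, rand, randn, dot, solve, LinAlgError, pi, cos, sin) are imported as in the snippet above. Since every pole is drawn with magnitude below one, the eigenvalues of the returned A should all lie inside the unit circle:

import numpy as np

A, B, C, D = random_ABCD(n=6, p=2, q=3)
print(A.shape, B.shape, C.shape, D.shape)      # (6, 6) (6, 3) (2, 6) (2, 3)
print(np.abs(np.linalg.eigvals(A)).max() < 1)  # True: all poles inside the unit circle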
Example #20
#coding:utf-8
import pandas, matplotlib
import matplotlib.pyplot as plt
# import Scipy
import pandas as pd
import numpy as np
from numpy.random.mtrand import randn
import scipy

data = {i: randn() for i in range(10)}
data1 = [i ** 2 for i in range(10)]

arry1 = np.array(data1)
data2 = [[[
    1,
    2,
    3,
    3,
], [34, 234, 43, 34]], [[23, 44, 3, 2], [23, 44, 3, 2]],
         [[23, 44, 3, 2], [2, 4, 6, 4]]]
arry2 = np.array(data2)
arry4 = randn(20, 3)
# plot(arry4)
# img=pd.DataFrame(arry4)
# # img=img.cumsum()
# img.plot()
# plt.show()

print(arry2.ndim)
print(arry2.shape)
arry1.sort()    # ndarray.sort() sorts in place and returns None
print(arry1)
Example #21
import torch
from numpy.random import randn


def gaussian_noise(imageBatch):
    # add zero-mean Gaussian noise with standard deviation 0.05 to a batch of images
    b, c, x, y = imageBatch.shape
    gaussian = randn(b, c, x, y) * 0.05
    return imageBatch + torch.Tensor(gaussian)
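A quick check of the helper above on a dummy batch (this assumes PyTorch is installed and the function is defined as shown):

import torch

batch = torch.zeros(2, 3, 8, 8)   # dummy image batch: (batch, channels, height, width)
noisy = gaussian_noise(batch)
print(noisy.shape)                # torch.Size([2, 3, 8, 8])
print(float(noisy.std()))         # roughly 0.05, the chosen noise level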
Example #22
import matplotlib.pyplot as plt
from numpy import arange, sin, pi, polyfit, polyval
from numpy.random import randn, random_sample

N = 10  # number of noisy training points (assumed; N is not defined in the original snippet)

# Degree of polynomial
M = 4

xlab = arange(0, 1, 0.01)
ylab = sin(2 * pi * xlab)

x = arange(0, 1, 1. / N)
tlab = sin(2 * pi * x)

fig, ax = plt.subplots(nrows=2, ncols=2)
plt.ylim(-1.5, 1.5)

mDegrees = [1, 2, 3, 9]

std_deviation = 0.3
noise = std_deviation * randn(N)
t = tlab + noise


def polynomial(w, xs):
    # assumed implementation of the original helper: evaluate the fitted polynomial
    return polyval(w, xs)


for idx, val in enumerate(mDegrees):
    w = polyfit(x, t, val)
    row = idx // 2  # integer division so the subplot index is an int
    col = idx % 2
    panel = ax[row, col]
    panel.set_title('M=%d' % val)
    panel.plot(xlab, ylab)
    panel.plot(x, t, 'ro')
    panel.plot(xlab, polynomial(w, xlab), 'g')

NTest = 8
xTest = random_sample(NTest)
yTest = sin(2 * pi * xTest) + randn(NTest) * std_deviation
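The last three lines build a small held-out test set. A natural follow-up, assuming the fitted polynomial is evaluated with numpy.polyval, is to compare root-mean-square error on the training and test points for each degree, which typically exposes the over-fitting of the degree-9 model:

import numpy as np

def rms_error(w, xs, ts):
    # root-mean-square error of the fitted polynomial (coefficients w, highest degree first)
    return np.sqrt(np.mean((np.polyval(w, xs) - ts) ** 2))

for degree in mDegrees:
    w = np.polyfit(x, t, degree)
    print(degree, rms_error(w, x, t), rms_error(w, xTest, yTest))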