Example #1
def cv2tflite(model, input_shape, tflite_path, edgetpu=False):
    """
    Convert a torch model to a TFLite model via ONNX.
    """
    onnx_file = "tmp.onnx"
    tmp_pb_file = "tmp.pb"
    cv2onnx(model, input_shape, onnx_file)
    onnx_model = onnx.load(onnx_file)
    onnx_input_names = [input.name for input in onnx_model.graph.input]
    onnx_output_names = [output.name for output in onnx_model.graph.output]
    tf_rep = prepare(onnx_model)
    tf_rep.export_graph(tmp_pb_file)

    converter = tf.lite.TFLiteConverter.from_saved_model(tmp_pb_file)

    if edgetpu:
        # Build a random calibration input matching the model input shape(s);
        # np.random.randn takes individual dims, hence the * unpacking.
        if isinstance(input_shape[0], tuple):
            dummy_input = tuple(
                np.random.randn(*ishape).astype(np.float32)
                for ishape in input_shape)
        elif isinstance(input_shape, tuple):
            dummy_input = np.random.randn(*input_shape).astype(np.float32)
        else:
            raise TypeError("input_shape must be a tuple")
        train = tf.convert_to_tensor(dummy_input)
        my_ds = tf.data.Dataset.from_tensor_slices((train)).batch(10)

        def representative_dataset_gen():
            for input_value in my_ds.take(10):
                yield [input_value]

        converter.representative_dataset = representative_dataset_gen
        converter.allow_custom_ops = True
        converter.experimental_new_converter = True
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8

    # convert tensorflow to tflite model
    tflite_model = converter.convert()

    with open(tflite_path, "wb") as f:
        f.write(tflite_model)
    os.remove(onnx_file)
    shutil.rmtree(tmp_pb_file)

    if edgetpu:
        subprocess.check_call(f"edgetpu_compiler {tflite_path}", shell=True)
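A minimal usage sketch (assumes a PyTorch model and that the project-local cv2onnx helper is importable; the model and shapes below are illustrative):

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU())  # any torch module
cv2tflite(model, (1, 3, 224, 224), "model.tflite", edgetpu=False)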
Example #2
File: tests.py Project: eteq/nbodpy
def normal_ics(nparticles,pscale=1,vscale=1,masses=None):
    """
    Generates `nparticles` particles with normally distributed locations and
    speeds.
    """
    from core import Particles
    
    pos = pscale*np.random.randn(3,nparticles)
    vel = vscale*np.random.randn(3,nparticles)
    
    if masses is None:
        return Particles(pos,vel)
    else:
        return Particles(pos,vel,masses)
Example #3
File: tests.py Project: eteq/nbodpy
def normal_ics(nparticles, pscale=1, vscale=1, masses=None):
    """
    Generates `nparticles` particles with normally distributed locations and
    speeds.
    """
    from core import Particles

    pos = pscale * np.random.randn(3, nparticles)
    vel = vscale * np.random.randn(3, nparticles)

    if masses is None:
        return Particles(pos, vel)
    else:
        return Particles(pos, vel, masses)
Example #4
def SimulateOrnsteinUhlenbeck(S0, mu, sigma, _lambda, deltat, t):
    periods = int(np.floor(t / deltat))
    S = np.zeros([periods, 1])
    S[0] = S0
    exp_minus_lambda_deltat = np.exp(-_lambda * deltat)

    # Calculate the random term.
    if _lambda == 0:
        # Handle the case of lambda = 0, i.e. no mean reversion.
        dWt = np.sqrt(deltat) * np.random.randn(periods, 1)
    else:
        dWt = np.sqrt((1 - np.exp(-2 * _lambda * deltat)) / (2 * _lambda)) * np.random.randn(periods, 1)

    # Iterate through time, calculating each price.
    for i in range(1, periods):
        S[i] = S[i - 1] * exp_minus_lambda_deltat + mu * (1 - exp_minus_lambda_deltat) + sigma * dWt[i]
        # OPTIM Note: precalculating all dWt's rather than one per loop makes
        # this function approx. 50% faster. Useful for Monte Carlo simulations.
        # OPTIM Note: precalculating exp(-lambda*deltat) makes it roughly 50%
        # faster again.
        # OPTIM Note: this is only about 25% slower than the rough calculation
        # without the exp correction.
    return S
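A quick usage sketch (parameter values are illustrative only; assumes numpy is imported as np):

S = SimulateOrnsteinUhlenbeck(S0=100.0, mu=100.0, sigma=0.5, _lambda=2.0, deltat=1.0 / 252, t=1.0)
print(S[:5])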
Example #5
def rednoise(N, g, a=1.):
    """
    Red noise generator using filter.

    Parameters
    ----------
    N : int
        Length of the desired time series.
    g : float
        Lag-1 autocorrelation coefficient.
    a : float, optional
        Noise innovation variance parameter.

    Returns
    -------
    y : numpy.ndarray
        Red noise time series.

    """
    if g == 0:
        yr = np.random.randn(N, 1) * a
    else:
        # Twice the decorrelation time.
        tau = int(np.ceil(-2 / np.log(np.abs(g))))
        yr = lfilter([1, 0], [1, -g], np.random.randn(N + tau, 1) * a)
        yr = yr[tau:]

    return yr.flatten()
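A usage sketch (assumes np and scipy.signal.lfilter are imported, as the snippet requires); the lag-1 autocorrelation of the output should sit close to g:

y = rednoise(10000, 0.8)
print(np.corrcoef(y[:-1], y[1:])[0, 1])  # roughly 0.8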
Example #6
def rednoise(N, g, a=1.):
    """
    Red noise generator using filter.

    Parameters
    ----------
    N : int
        Length of the desired time series.
    g : float
        Lag-1 autocorrelation coefficient.
    a : float, optional
        Noise innovation variance parameter.

    Returns
    -------
    y : numpy.ndarray
        Red noise time series.

    """
    if g == 0:
        yr = np.random.randn(N, 1) * a
    else:
        # Twice the decorrelation time.
        tau = int(np.ceil(-2 / np.log(np.abs(g))))
        yr = lfilter([1, 0], [1, -g], np.random.randn(N + tau, 1) * a)
        yr = yr[tau:]

    return yr.flatten()
Example #7
def __test_radial():
    cortex = create_spherical_cortex(200)
    white, pial = cortex.surfaces
    target_affine = np.eye(4)
    target_affine[range(3), range(3)] = 0.2
    target_affine[:-1, -1] = -2
    target_shape = (21, 21, 21)
    wo = orientation.WeightedOrientation(white, pial,
                                         np.random.randn(white.nvertices), 0.12,
                                         target_affine)
    orient = wo.closest_vertex_grid(target_shape)
    coords = np.stack(np.meshgrid(*((np.arange(21) * 0.2 - 2, ) * 3)), -1)
    radius = np.sqrt(np.sum(coords**2, -1))
    coords /= radius[..., None]
    coords[radius == 0] = 0
    print(np.sum(orient[radius < 2, :, 0] * coords[radius < 2], -1).mean())
    print(np.sum(orient[radius < 1, :, 0] * coords[radius < 1], -1).mean())
    assert np.sum(orient[radius < 1, :, 0] * coords[radius < 1],
                  -1).mean() > 0.3
    assert np.sum(orient[radius < 2, :, 0] * coords[radius < 2],
                  -1).mean() > 0.3
    assert abs(np.sum(orient[radius < 1, :, 1] * coords[radius < 1],
                      -1)).mean() < 0.3
    assert abs(np.sum(orient[radius < 2, :, 1] * coords[radius < 2],
                      -1)).mean() < 0.3
    assert abs(np.sum(orient[..., 1] * coords, -1)).max() > 0.1
Example #8
def bounce_fun(p, v, arena_shape, L, dt, theta_sigma=0):
    """
    Bounce against walls.  Usage:
    p_new, theta = bounce_fun(p, v, arena_shape, L, dt)
    """
    if arena_shape == 'square':
        if p[0] < -L / 2. or p[0] >= L / 2.:
            v_new = np.array([-v[0], v[1]])
        elif p[1] < -L / 2. or p[1] >= L / 2.:
            v_new = np.array([v[0], -v[1]])
        else:
            v_new = v
    elif arena_shape == 'circle':
        n = p / norm(p)
        v_new = v - 2 * n * np.dot(n, v)
    else:
        v_new = v

    theta = np.arctan2(v_new[1], v_new[0])
    if theta_sigma > 0:
        theta = theta_sigma * np.random.randn() + theta

    p = p + v_new * dt
    return p, theta
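A usage sketch (values are illustrative; assumes np is imported, plus norm from numpy.linalg for the 'circle' branch):

p, theta = bounce_fun(np.array([0.9, 0.0]), np.array([1.0, 0.0]), 'square', L=1.6, dt=0.01)
print(p, theta)  # velocity reflected off the right wall, theta = pi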
Example #9
def smooth_demo():
    t = _np.linspace(-4, 4, 100)
    x = _np.sin(t)
    xn = x + _np.random.randn(len(t)) * 0.1
    ws = 31

    _plt.subplot(211)
    _plt.plot(_np.ones(ws))

    windows = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']

    for w in windows[1:]:
        # numpy provides these windows by name: np.hanning, np.hamming, ...
        _plt.plot(getattr(_np, w)(ws))

    _plt.axis([0, 30, 0, 1.1])

    _plt.legend(windows)
    _plt.title("The smoothing windows")
    _plt.subplot(212)
    _plt.plot(x)
    _plt.plot(xn)
    for w in windows:
        _plt.plot(smooth(xn, 10, w))
    l = ['original signal', 'signal with noise']
    l.extend(windows)

    _plt.legend(l)
    _plt.title("Smoothing a noisy signal")
    _plt.show()
Example #10
def pink_noise(n, scale=1., alpha=1.):
    # not exactly pink
    if n <= 1:
        return np.random.randn(n)
    spec = np.random.randn(n)
    # power \prop 1/f^alpha ==> sqrt(power) \prop 1/sqrt(f^alpha)
    spec = 1. / (scale * np.sqrt(np.arange(1, n + 1)**alpha)) * spec
    return np.fft.irfft(spec)[:n]
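A quick spectral sanity check (a sketch assuming scipy is available; as the comment above notes, the noise is only approximately pink, so expect a log-log slope merely near -alpha):

from scipy.signal import welch

f, Pxx = welch(pink_noise(2**14), nperseg=4096)
slope = np.polyfit(np.log(f[1:]), np.log(Pxx[1:]), 1)[0]  # skip the f=0 bin
print(slope)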
Example #11
File: linreg.py Project: stnma7e/ml
 def fit(self, X: np.array, y: np.array):
     X_b = self.poly_features.fit_transform(X)
     try:
         # closed-form normal equation
         self.theta = la.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
     except np.linalg.LinAlgError:
         # singular matrix: fall back to a random initialisation
         self.theta = np.random.randn(self.N + 1, 1)
Example #12
    def __init__(self, input_size, output_size, hidden_size=64):

        # init weights (small random values)
        self.W_f = np.random.randn(input_size + hidden_size,
                                    hidden_size) / 1000  # forget gate weights
        self.W_i = np.random.randn(input_size + hidden_size,
                                    hidden_size) / 1000  # input gate weights
        self.W_c = np.random.randn(input_size + hidden_size,
                                    hidden_size) / 1000  # candidate gate weights
        self.W_o = np.random.randn(input_size + hidden_size,
                                    hidden_size) / 1000  # output gate weights

        self.W_y = np.random.randn(hidden_size, output_size) / 1000  # hidden-to-output weights (shape assumed)

        self.b_f = np.zeros((hidden_size, 1))  # forget gate bias
        self.b_i = np.zeros((hidden_size, 1))  # input gate bias
        self.b_c = np.zeros((hidden_size, 1))  # candidate gate bias
        self.b_o = np.zeros((hidden_size, 1))  # output gate
        self.b_y = np.zeros((output_size, 1))  # y (output) bias
Example #13
def generate_latent_points(latent_dim, n_samples, n_attributes):
    # generate points in the latent space
    x_input = np.random.randn(latent_dim * n_samples)
    # reshape into a batch of inputs for the network
    z_input = x_input.reshape(n_samples, latent_dim)
    # generate labels
    labels = []
    for _ in range(n_samples):
        labels.append(np.random.randint(0, 2, n_attributes))
    return [z_input, labels]
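A usage sketch (sizes are illustrative):

z_input, labels = generate_latent_points(latent_dim=100, n_samples=16, n_attributes=5)
print(z_input.shape, len(labels))  # (16, 100) 16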
Example #14
    def _gen_XPR(self):
        """
        Step 9
        """
        for m in range(M):
            for n in range(N):
                X = mu + sigma * np.random.randn()
                kappa[m][n] = 10**(X / 10)

        return kappa
Example #15
def createDateSet(nData):
    # generate 100 grid points between -1 and 1
    xGrid = np.linspace(-1, 1, 100)
    # randomly generate nData x coordinates in [-1, 1]
    x = 2 * (np.random.rand(nData) - 0.5)
    # MATLAB's inline function corresponds to Python's lambda.
    # DEFINE THE TARGET FUNCTION f(x); left unspecified in the original,
    # so a simple placeholder is used here.
    f = lambda x: x**2
    noiseSTD = 0.1  # placeholder; undefined in the original snippet
    y = f(x) + noiseSTD * np.random.randn(len(x))
    return x, y
Example #16
 def monte_carlo_paths(self, dt, n, callback=lambda *x: None):
     #TODO: make this work with callback, etc
     raise NotImplementedError
     dW = np.random.randn(int(t / dt) + 1, npaths)
     dW[0, :] = 0
     W = np.cumsum(dW, axis=0)
     del dW
     rate_process = np.arange(t / dt + 1) * (r - 0.5 * sig**2) * dt
     paths = spot * np.exp(rate_process[:, None] + sig * W * np.sqrt(dt))
     del rate_process, W
     barrier_paths = callback(paths, state)
     sT = barrier_paths[-1, :]
Example #17
 def monte_carlo_paths(self, dt, n, callback=lambda *x: None):
     #TODO: make this work with callback, etc
     raise NotImplementedError
     dW = np.random.randn(int(t/dt) + 1, npaths)
     dW[0,:] = 0
     W = np.cumsum(dW, axis=0)
     del dW
     rate_process = np.arange(t/dt + 1) * (r - 0.5*sig**2)*dt
     paths = spot * np.exp(rate_process[:,None] + sig*W*np.sqrt(dt))
     del rate_process, W
     barrier_paths = callback(paths, state)
     sT = barrier_paths[-1,:]
Example #18
    def black_scholes(self,data):
        data_len = len(data)
        data = np.log(data)
        data_diff = [data[i + 1] - data[i] for i in range(data_len - 1)]
        data_diff.sort()
        
        sigma = np.std(data_diff[10:-10])
        mu = np.mean(data_diff[10:-10])

        pred_price = data[-1] + (mu + sigma * np.random.randn())
        pred_price = np.exp(pred_price)

        return pred_price
Example #19
File: tests.py Project: eteq/nbodpy
def uniform_normal_ics(nparticles, pscale=1, vscale=1, masses=None):
    """
    Generates `nparticles` particles with uniformly distributed locations
    (centered at the origin with box size `pscale`) and gaussian velocities.
    """
    from core import Particles

    pos = pscale * (np.random.rand(3, nparticles) - .5)
    vel = vscale * np.random.randn(3, nparticles)

    if masses is None:
        return Particles(pos, vel)
    else:
        return Particles(pos, vel, masses)
Example #20
File: tests.py Project: eteq/nbodpy
def uniform_normal_ics(nparticles,pscale=1,vscale=1,masses=None):
    """
    Generates `nparticles` particles with uniformly distributed locations
    (centered at the origin with box size `pscale`) and gaussian velocities.
    """
    from core import Particles
    
    pos = pscale*(np.random.rand(3,nparticles)-.5)
    vel = vscale*np.random.randn(3,nparticles)
    
    if masses is None:
        return Particles(pos,vel)
    else:
        return Particles(pos,vel,masses)
Example #21
def noise(shape: Tuple[int, ...], norm: numpy.ndarray) -> numpy.ndarray:
    """
    Creates Gaussian noise of the given shape with the given norm.

    Modified from function 'diffeo_imgs' at
    https://github.com/leonardopetrini/diffeo-sota/blob/15941397685cdb1aa3ffb3ee718f5a6dde14bab3/results/utils.py#L179.

    :param shape:   The shape of the noise array to create.
    :param norm:    The norm(s) that the created array should have.
    :return:        The noise array.
    """
    # Create noise with arbitrary norm
    unnormalised_noise = numpy.random.randn(*shape)

    # Normalise it
    return unnormalised_noise / offset_norm(unnormalised_noise) * norm
Example #22
    def moving_average_model(self,data):

        data_len = len(data)

        indicator = Indicator()
        ema = indicator.exponential_moving_average(data)
        
        data_ema_diff = [data[i] - ema[i - 12] for i in range(12,data_len)]

        data_ema_diff.sort()

        mu = np.mean(data_ema_diff[10:-10])
        sigma = np.std(data_ema_diff[10:-10])

        pred_price = ema[-1] + (mu + sigma * np.random.randn())

        return pred_price
Example #23
 def __init__(self, height, width, BATCH_SIZE, learning_rate, dict, name):
     self.BATCH_SIZE = BATCH_SIZE
     self.learning_rate = learning_rate
     self.name = name
     self.dict = dict
     self.dict[name + "_w"] = torch.randn(
         (height, width), device=device).double() / (width)
     self.dict[name + "_b"] = torch.zeros(height, device=device).double()
     self.weights = dict[name + "_w"]
     self.bias = dict[name + "_b"]
     self.weights_grad = torch.zeros(self.weights.shape,
                                     device=device).unsqueeze(0).repeat(
                                         self.BATCH_SIZE, 1, 1)
     self.bias_grad = torch.zeros(self.bias.shape,
                                  device=device).unsqueeze(0).repeat(
                                      self.BATCH_SIZE, 1)
     self.adam_w = Adam(self.weights.shape, self.learning_rate)
     self.adam_b = Adam(self.bias.shape, self.learning_rate)
Example #24
def alg2(x, c):

    n, m = x.shape

    var23 = np.random.randint(low=0, high=x.shape[0], size=(c,))
    v = x[var23, :] + 1e-10
    var25 = x[var23 + 1, :] - 1e-10
    J = []
    itr = 0
    f0 = np.zeros((x.shape[0], c))

    while np.prod(np.max(abs(v - var25), 0)):
        itr += 1
        var25 = v.copy()  # keep a snapshot; v is updated in place below
        dist = np.zeros((x.shape[0], c))

        for i in range(c):
            dist[:, i] = ((x - v[i, :])**2).sum(1)

        label = np.argmin(dist, axis=1)
        distout = dist**0.5

        for i in range(c):
            var23 = np.where(label == i)[0]
            if len(var23) > 0:
                v[i, :] = x[var23, :].mean(0)
            else:
                # empty cluster: re-seed its centre with a randomly chosen sample
                ind = np.random.randint(x.shape[0])
                v[i, :] = x[ind, :]
            f0[var23, i] = 1

        J.append(np.sum(f0 * dist))

    f0 = np.zeros((x.shape[0], c))

    for i in range(c):
        var23 = np.where(label == i)[0]
        f0[var23, i] = 1

    result = Results(v=v, distout=distout, f0=f0, itr=itr, cost=J)
    return result
Example #25
    def computeBestThresh(self,values,labels):
        """Find the optimal threshold for the current node, splitting the
            node by max info gain
        
        Arguments:
            values {[type]} -- [description]
            labels {list of int} -- follows the order in the values
            node {[type]} -- [description]
        """
        labels = np.asarray(labels)  # allow fancy indexing below
        candidates = np.random.randn(self.numThreshold) * np.std(values) + np.mean(values)
        info_gain = np.ones(self.numThreshold) * float('-inf')

        # compute the info gain for each threshold;
        # for the left and right branches, calculate the Shannon entropy of each class separately
        for i in range(self.numThreshold):
            lid = np.where(values < candidates[i])[0]   # ids in the left branch
            rid = np.where(values >= candidates[i])[0]  # ids in the right branch
            ltmp = np.zeros(self.num_class)
            rtmp = np.zeros(self.num_class)

            N = len(values)
            NL = len(lid) + 1e-4
            NR = len(rid) + 1e-4

            for j in range(len(ltmp)):
                ltmp[j] = np.sum((labels[lid] == j) * 1)
                rtmp[j] = np.sum((labels[rid] == j) * 1)

            ltmp = ltmp / NL
            rtmp = rtmp / NR

            # the small epsilon keeps log2 finite for empty classes
            EL = -np.sum(ltmp * np.log2(ltmp + 1e-12))
            ER = -np.sum(rtmp * np.log2(rtmp + 1e-12))

            info_gain[i] = -1 / N * (NL * EL + NR * ER)

        best_infoGain = np.max(info_gain)        

        return candidates[np.where(info_gain == best_infoGain)[0]], best_infoGain
Example #26
import numpy as np
import scipy.stats as ss
from matplotlib import pyplot as plt
from math import e


eps = np.linspace(0, 0.02, 100)
numbers = np.random.randn(100)

plt.plot(eps, numbers)  # assumption: b and a were undefined; eps/numbers are plotted instead
plt.show()
Example #27
File: deadtime.py Project: FHe/tdl
    return ocr

def deadtime_residual(params,Io,ocr,offset):
    """ compute residual """
    ocr_calc = calc_ocr(params,Io,offset)
    return ocr - ocr_calc

##############################################################################
if __name__ == '__main__':
    # test fit
    Io  = 10000. * num.arange(500.0)
    a   = 0.1
    tau = 0.00001
    print('a= ', a, ' tau= ', tau)
    ocr = a*Io*num.exp(-a*Io*tau)
    ocr_meas = ocr + 2*num.random.randn(len(ocr))

    (params,msg) = fit(Io,ocr_meas)
    tau = params[0]
    a   = params[1]
    #print msg
    print('a_fit= ', a, ' tau_fit=', tau)

    ocr = 0.3 * 1/tau
    icr = calc_icr(ocr,tau)
    print('max icr = ', 1/tau)
    print('max ocr = ', num.exp(-1)/tau)
    print('ocr= ', ocr, ' icr_calc= ', icr)

    rt = 1.
    lt = 1.
Example #28
         marker='o',
         label='Autoencoder(7,4)')
'''
BPSK ERROR RATE
'''
N = 5000000
EbNodB_range = range(0, 11)
itr = len(EbNodB_range)
ber = [None] * itr

for n in range(0, itr):
    EbNodB = EbNodB_range[n]
    EbNo = 10.0**(EbNodB / 10.0)
    x = 2 * (np.random.rand(N) >= 0.5) - 1
    noise_std = 1 / np.sqrt(2 * EbNo)
    y = x + noise_std * np.random.randn(N)
    y_d = 2 * (y >= 0) - 1
    errors = (x != y_d).sum()
    ber[n] = 1.0 * errors / N

    print("EbNodB:", EbNodB)
    print("Error bits:", errors)
    print("Error probability:", ber[n])

plt.plot(EbNodB_range, ber, 'bo', EbNodB_range, ber, 'k')
plt.title('BPSK Modulation')

# plt.plot(EbNodB_range, ber, linestyle='', marker='o', color='r')
# plt.plot(EbNodB_range, ber, linestyle='-', color = 'b')

# plt.plot(list(EbNodB_range), ber_theory, 'ro-',label='BPSK BER')
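For reference, the theoretical BPSK bit error rate is 0.5*erfc(sqrt(Eb/N0)); a sketch of the ber_theory curve referenced in the commented-out plot above (assumes scipy is available):

from scipy.special import erfc

ber_theory = [0.5 * erfc(np.sqrt(10.0**(EbNodB / 10.0))) for EbNodB in EbNodB_range]
plt.plot(list(EbNodB_range), ber_theory, 'ro-', label='BPSK BER')
plt.legend()
plt.show()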
Example #29
import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
import numpy as np

map_data = pd.DataFrame(
    np.random.randn(150, 2) / [100, 100] + [24.986867, 121.576216])
Example #30
import numpy as np
import matplotlib.pyplot as plt  # assumption: the bare plot() refers to pyplot

print('Hello World')

x = np.random.randn(100, 2)

plt.plot(x)
plt.show()
Example #31
 def generate_latent_points(self):
     x_input = randn(self.latent_dim * self.n_points)  # randn from numpy.random
     x_input = x_input.reshape(self.n_points, self.latent_dim)
     return x_input
Example #32
import numpy as np
import matplotlib.pyplot as plt  # assumption: the bare plot() refers to pyplot

print('Hello World')

x = np.random.randn(100, 2)

plt.plot(x)
plt.show()
Example #33
dates = pd.date_range('2012-07-16', '2012-07-21')
atemps = Series([101.4, 99, 90, 232, 233, 123], index=dates)
atemps.index[2]

sdtemps = Series([73, 78, 77, 78, 78, 77], index=dates)
temps = DataFrame({'Austin': atemps, 'San Diego': sdtemps})
temps['diff'] = temps['San Diego'] - temps['Austin']

del temps['diff']
temps['Austin']
idx = temps.index[2]
temps['Austin'].iloc[[1, 2, 3]]  # .ix is deprecated; use positional .iloc

temps.mean()
# mean along axis 0 (one value per column)
np.random.randn(5, 5).mean(0)
# mean along axis 1 (one value per row)
np.random.randn(5, 5).mean(1)
Example #34
 def gradcheck_data():
     return (lambda x: (np.sum(x ** 2), x * 2),
         [np.array(123.456), np.random.randn(3, ), np.random.randn(4, 5)]
     )
Example #35
np.arange(5)[:2]

# index in Series
index = ['a', 'b', 'c', 'd', 'e']
s = Series(np.arange(5), index=index)
s[:3]
s['d']
s['b':]
s[[4]]
s[['a', 'c']]

# create date_range by day
dates = pd.date_range('2012-07-16', '2012-07-21')
atemps = Series([101.4, 99, 90, 232, 233, 123], index=dates)
atemps.index[2]

sdtemps = Series([73, 78, 77, 78, 78, 77], index=dates)
temps = DataFrame({'Austin': atemps, 'San Diego': sdtemps})
temps['diff'] = temps['San Diego'] - temps['Austin']

del temps['diff']
temps['Austin']
idx = temps.index[2]
temps['Austin'].iloc[[1, 2, 3]]  # .ix is deprecated; use positional .iloc

temps.mean()
# mean along axis 0 (one value per column)
np.random.randn(5, 5).mean(0)
# mean along axis 1 (one value per row)
np.random.randn(5, 5).mean(1)
Example #36
        start0 = end0
        if UUA is not None:
            end1 = UUA.shape[0] * (piece_index+1) // piece_count
            AKA += (UUA[start1:end1,:] * UUA[start1:end1,:]).sum(0) / denom
            start1 = end1
    return AKA

#def _elementwise_mult_and_sum(a,b,start,end):
#    s =  (a[start:end,:] * b[start:end,:]).sum(0)
#    return s


if 0:
    N = 7
    D = 2
    X = np.random.randn(N,D)

    X_K = np.random.randn(N,N)
    K = np.dot(X_K,X_K.T) + np.eye(N)

    Kinv = la.inv(K)

    linreg = linreg(X=X)
    Kinv_ = linreg.regress(Kinv)
    Kinv_ = linreg.regress(Kinv_.T)
    P_ = Kinv_#this one does not match with P

    X_K_ = linreg.regress(X_K)
    S_x = linreg.regress(sp.eye(N))
    S_x = linreg.regress(S_x.T)
    K_ = X_K_.dot(X_K_.T) + S_x
Example #37
import numpy as np

print(np.random.randn(0))
Example #38
    def synthesize(self, tau=50, mode=None):
        """Synthesize observations.
        
        Parameters
        ----------
        tau : int (default = 50)
            Synthesize tau frames. 
            
        mode : Combination of ['s','q','r']
            's' - Use the original states
            'q' - Do NOT add state noise
            'r' - Add observations noise

            In case 's' is specified, 'tau' is ignored and the number of 
            frames equals the number of state time points.
            
        Returns
        -------
        I : numpy array, shape = (D, tau)
            Matrix with N D-dimensional column vectors as observations.
            
        X : numpy array, shape = (N, tau) 
            Matrix with N tau-dimensional state vectors.        
        """
        
        if not self._ready:
            raise ErrorDS("LDS not ready for synthesis!")
        
        Bhat = None
        Xhat = self._Xhat
        Qhat = self._Qhat
        Ahat = self._Ahat
        Chat = self._Chat
        Rhat = self._Rhat
        Yavg = self._Yavg
        initM0 = self._initM0
        initS0 = self._initS0
        nStates = self._nStates
        
        if mode is None:
            raise ErrorDS("No synthesis mode specified!")
        
        # use original states -> tau is restricted
        if mode.find('s') >= 0:
            tau = Xhat.shape[1]
        
        # data to be filled and returned     
        I = np.zeros((len(Yavg), tau))
        X = np.zeros((nStates, tau))
        
        if mode.find('r') >= 0:
            stdR = np.sqrt(Rhat)
        
        # add state noise, unless user explicitly decides against
        if not mode.find('q') >= 0:
            stdS = np.sqrt(initS0)
            (U, S, V) = np.linalg.svd(Qhat, full_matrices=False)
            Bhat = U*np.diag(np.sqrt(S)) 
    
        t = 0 
        Xt = np.zeros((nStates, 1))
        while (tau<0) or (t<tau):  
            # uses the original states
            if mode.find('s') >= 0:
                Xt1 = Xhat[:,t]
            # first state
            elif t == 0:
                Xt1 = initM0
                if mode.find('q') < 0:
                    Xt1 = Xt1 + stdS * np.random.randn(nStates)
            # any further states (if mode != 's')
            else:
                Xt1 = Ahat * Xt
                if not mode.find('q') >= 0:
                    Xt1 = Xt1 + Bhat * np.random.randn(nStates)
            
            # synthesizes image
            It = Chat*Xt1 + np.reshape(Yavg,(len(Yavg),1))
         
            # adds observation noise
            if mode.find('r') >= 0:
                It = It + stdR * np.random.randn(len(Yavg), 1)
            
            # save ...
            Xt = Xt1
            I[:,t] = It.reshape(-1)
            X[:,t] = Xt.reshape(-1)
            t += 1
            
        return (I, X)
Example #39
    def average_line_vox(self, mm_index, norient=1000, power_dist=-1.):
        """Computes the radial/tangential hemisphere at the given point

        This uses the main FOTACS algorithm:

        1. Draw straight lines through the point of interest connecting the cortical surfaces at both sides
        2. Linearly interpolate the normal/sulcal depth gradient along this line

        Repeat these steps for `norient` random orientations.
        Average these orientations with the weighting set by the line length ** `power_dist`.

        :param mm_index: (3, ) vector of position in mm
        :param norient: number of random orientations to try
        :param power_dist: power-law used to downweight longer faces (`weight = dist ** power_dist`)
        :return: Tuple with 4 elements:

            1. interpolated normal
            2. interpolated sulcal depth gradient
            3. length of shortest line hitting surface on both sides
            4. number between 0 and 0.5 indicating location along shortest line (0 if at edge, 0.5 if in middle of gyrus)
        """
        if self.smooth_orient.shape[0] != norient:
            rand_orient = np.random.randn(3, norient)
            rand_orient /= np.sqrt(np.sum(rand_orient ** 2, 0))

            self.smooth_orient = gps(LOAD, ndir=3000)

        orientations = np.concatenate((self.smooth_orient, -self.smooth_orient), 0)
        w_ix, w_pos = self.white_hit.ray_intersect(mm_index, orientations)
        normal_inpr = np.sum(self.white.normal()[:, w_ix] * orientations.T, 0)
        segment = 1 if normal_inpr[w_ix != -1].sum() < 0 else 2
        if segment == 1:
            o_ix, o_pos = self.pial_hit.ray_intersect(mm_index, -orientations, pos_inpr=1)
            use = (w_ix != -1) & (o_ix != -1) & (normal_inpr <= 0)
        else:
            o_ix, o_pos = w_ix[norient:], w_pos[norient:]
            w_ix, w_pos = w_ix[:norient], w_pos[:norient]
            use = (w_ix != -1) & (o_ix != -1) & (normal_inpr[:norient] >= 0) & (normal_inpr[norient:] >= 0)

        other_grad = self.white_grad if segment == 2 else self.pial_grad
        other_normal = -self.white_normal if segment == 2 else self.pial_normal

        # linear interpolation of the orientations
        dist_white = np.sqrt(np.sum((w_pos[use, :] - mm_index) ** 2, -1))[:, None]
        dist_other = np.sqrt(np.sum((o_pos[use, :] - mm_index) ** 2, -1))[:, None]
        dist = dist_white + dist_other
        weight = dist ** power_dist

        res = []
        for other_inp, white_inp in [(other_normal, self.white_normal),
                                     (other_grad, self.white_grad)]:
            linear_interp = (dist_white * other_inp.T[o_ix[use], :] + dist_other * white_inp.T[w_ix[use], :]) / dist
            linear_interp *= weight / np.sqrt(np.sum(linear_interp ** 2, -1))[:, None]
            linear_interp[~np.isfinite(linear_interp)] = 0.
            cov = np.dot(linear_interp.T, linear_interp)
            val, vec = linalg.eigh(cov)
            res.append(vec[:, np.argmax(val)])
        res.append(np.inf if dist.size == 0 else dist.min())
        ratio_length = np.nan
        if dist.size != 0:
            idx = np.argmin(dist)
            ratio_length = min((dist_white[idx], dist_other[idx])) / dist[idx]
        res.append(ratio_length)
        return tuple(res)
Example #40
import numpy as np
import scipy.special

q_1 = np.zeros(8)
q_2 = np.ones(7)
q_3 = 5*np.ones(6)

r_1 = np.arange(6)
r_2 = np.arange(0,6,0.5)
r_3 = np.arange(5,-1,-1)

s_1 = []

t_1 = np.linspace(0,5,90)
t_2 = np.linspace(5,0,80)

u_1 = np.logspace(-2,2,9)
u_2 = np.log10(u_1)

v_1 = np.exp(np.arange(-2,4))
v_2 = np.log(v_1)

w_1 = 2**np.arange(0,11)
w_2 = 1 / 2**np.arange(0,6)

x_1 = scipy.special.factorial(np.arange(0, 7))  # factorial moved from scipy.misc to scipy.special

y_1 = np.random.rand(10)
y_2 = np.random.randn(10)
y_3 = np.random.randint(5, 15, size=10)
Example #41
    def simulate(self, mesh, scheme, res, **kwargs):
        """Simulate an ERT measurement.

        Perform the forward task for a given mesh, a resistivity distribution
        (per cell) and a measurement scheme, and return data (apparent
        resistivity) or potential fields.

        This function can also operate on complex resistivity models, thereby
        computing complex apparent resistivities.

        The forward operator itself only calculates potential values
        for the given scheme file.
        To calculate apparent resistivities, geometric factors (k) are needed.
        If there are no k values in the DataContainerERT scheme, we will
        try to calculate them, either analytically or by using a p2-refined
        version of the given mesh.

        TODO
        ----
        * 2D + Complex + SR

        Args
        ----
        mesh : :gimliapi:`GIMLI::Mesh`
            2D or 3D Mesh to calculate for.

        res : float, array(mesh.cellCount()) | array(N, mesh.cellCount()) | list
            Resistivity distribution for the given mesh cells can be:
            . float for homogeneous resistivity
            . single array of length mesh.cellCount()
            . matrix of N resistivity distributions of length mesh.cellCount()
            . resistivity map as [[regionMarker0, res0],
                                  [regionMarker1, res1], ...]

        scheme : :gimliapi:`GIMLI::DataContainerERT`
            Data measurement scheme.

        Keyword Args
        ------------
        verbose: bool[False]
            Be verbose. Will override class settings.
        calcOnly: bool [False]
            Use fop.calculate instead of fop.response. Useful if you want
            to force the calculation of impedances for homogeneous models.
            No noise handling. Solution is put as token 'u' in the returned
            DataContainerERT.
        noiseLevel: float [0.0]
            Add normally distributed noise based on scheme('err'), or on
            noiseLevel if the scheme does not contain 'err'.
        noiseAbs: float [0.0]
            Absolute voltage error in V
        returnArray: bool [False]
            Returns an array of apparent resistivities instead of
            a DataContainerERT
        returnFields: bool [False]
            Returns a matrix of all potential values (per mesh nodes)
            for each injection electrodes.

        Returns
        -------
        DataContainerERT | array(N, data.size()):
            Data container with the resulting apparent resistivity data and
            errors (if noiseLevel or noiseAbs is set).
            Optionally returns a matrix of rhoa values
            (returnArray==True forces noiseLevel=0).
            In case of a complex-valued resistivity model, phase values are
            returned in the DataContainerERT (see example below), or as an
            additional returned array.

        Examples
        --------
        # TODO: Remove pybert dependencies
        # >>> import pybert as pb
        # >>> import pygimli as pg
        # >>> import pygimli.meshtools as mt
        # >>> world = mt.createWorld(start=[-50, 0], end=[50, -50],
        # ...                        layers=[-1, -5], worldMarker=True)
        # >>> scheme = pb.createData(
        # ...                     elecs=pg.utils.grange(start=-10, end=10, n=21),
        # ...                     schemeName='dd')
        # >>> for pos in scheme.sensorPositions():
        # ...     _= world.createNode(pos)
        # ...     _= world.createNode(pos + [0.0, -0.1])
        # >>> mesh = mt.createMesh(world, quality=34)
        # >>> rhomap = [
        # ...    [1, 100. + 0j],
        # ...    [2, 50. + 0j],
        # ...    [3, 10.+ 0j],
        # ... ]
        # >>> ert = pb.ERTManager()
        # >>> data = ert.simulate(mesh, res=rhomap, scheme=scheme, verbose=True)
        # >>> rhoa = data.get('rhoa').array()
        # >>> phia = data.get('phia').array()
        """
        verbose = kwargs.pop('verbose', self.verbose)
        calcOnly = kwargs.pop('calcOnly', False)
        returnFields = kwargs.pop("returnFields", False)
        returnArray = kwargs.pop('returnArray', False)
        noiseLevel = kwargs.pop('noiseLevel', 0.0)
        noiseAbs = kwargs.pop('noiseAbs', 1e-4)
        seed = kwargs.pop('seed', None)

        #segfaults with self.fop (test & fix)
        fop = self.createForwardOperator(useBert=self.useBert, sr=self.sr)
        fop.data = scheme
        fop.setMesh(mesh, ignoreRegionManager=True)
        fop.verbose = verbose

        rhoa = None
        phia = None

        isArrayData = False
        # parse the given res into mesh-cell-sized array
        if isinstance(res, int) or isinstance(res, float):
            res = np.ones(mesh.cellCount()) * float(res)
        elif isinstance(res, complex):
            res = np.ones(mesh.cellCount()) * res
        elif hasattr(res[0], '__iter__'):  # ndim == 2
            if len(res[0]) == 2:  # res seems to be a res map
                # check if there are markers in the mesh that are not defined in
                # the rhomap. better signal here before it results in some error
                meshMarkers = list(set(mesh.cellMarkers()))
                mapMarkers = [m[0] for m in res]
                if any([mark not in mapMarkers for mark in meshMarkers]):
                    left = [m for m in meshMarkers if m not in mapMarkers]
                    pg.critical(
                        "Mesh contains markers without assigned resistivities {}. Please fix given rhomap."
                        .format(left))
                res = pg.solver.parseArgToArray(res, mesh.cellCount(), mesh)
            else:  # probably nData x nCells array
                # better check for array data here
                isArrayData = True

        if isinstance(res[0], complex) or isinstance(res, pg.CVector):
            pg.info("Complex resistivity values found.")
            fop.setComplex(True)
        else:
            fop.setComplex(False)

        if not scheme.allNonZero('k') and not calcOnly:
            if verbose:
                pg.info('Calculate geometric factors.')
            scheme.set('k', fop.calcGeometricFactor(scheme))

        ret = pg.DataContainerERT(scheme)
        ## just be sure that we don't work with artifacts
        ret['u'] *= 0.0
        ret['i'] *= 0.0
        ret['r'] *= 0.0

        if isArrayData:
            rhoa = np.zeros((len(res), scheme.size()))
            for i, r in enumerate(res):
                rhoa[i] = fop.response(r)
                if verbose:
                    print(i, "/", len(res), " : ", pg.dur(), "s", "min r:",
                          min(r), "max r:", max(r), "min r_a:", min(rhoa[i]),
                          "max r_a:", max(rhoa[i]))
        else:  # res is single resistivity array
            if len(res) == mesh.cellCount():

                if calcOnly:
                    fop.mapERTModel(res, 0)

                    dMap = pg.core.DataMap()
                    fop.calculate(dMap)
                    if fop.complex():
                        pg.critical('Implement me')
                    else:
                        ret["u"] = dMap.data(scheme)
                        ret["i"] = np.ones(ret.size())

                    if returnFields:
                        return pg.Matrix(fop.solution())
                    return ret
                else:
                    if fop.complex():
                        res = pg.utils.squeezeComplex(res)

                    resp = fop.response(res)

                    if fop.complex():
                        rhoa, phia = pg.utils.toPolar(resp)
                    else:
                        rhoa = resp
            else:
                print(mesh)
                print("res: ", res)
                raise BaseException(
                    "Simulate called with wrong resistivity array.")

        if not isArrayData:
            ret['rhoa'] = rhoa

            if phia is not None:
                ret.set('phia', phia)
        else:
            ret.set('rhoa', rhoa[0])
            if phia is not None:
                ret.set('phia', phia[0])

        if returnFields:
            return pg.Matrix(fop.solution())

        if noiseLevel > 0:  # if the data already contain errors, noiseLevel=1 just triggers them
            if not ret.allNonZero('err'):
                # 1 A and 100 µV
                ret.set(
                    'err',
                    self.estimateError(ret,
                                       relativeError=noiseLevel,
                                       absoluteUError=noiseAbs,
                                       absoluteCurrent=1))
                print("Data error estimate (min:max) ", min(ret('err')), ":",
                      max(ret('err')))

            rhoa *= 1. + pg.randn(ret.size(), seed=seed) * ret('err')
            ret.set('rhoa', rhoa)

            ipError = None
            if phia is not None:
                if scheme.allNonZero('iperr'):
                    ipError = scheme('iperr')
                else:
                    # np.abs(self.data("phia") + TOLERANCE) * 1e-4 absolute error
                    if noiseLevel > 0.5:
                        noiseLevel /= 100.

                    if 'phiErr' in kwargs:
                        ipError = np.ones(
                            ret.size()) * kwargs.pop('phiErr') / 1000
                    else:
                        ipError = abs(ret["phia"]) * noiseLevel

                    if verbose:
                        print("Data IP abs error estimate (min:max) ",
                              min(ipError), ":", max(ipError))

                phia += pg.randn(ret.size(), seed=seed) * ipError
                ret['iperr'] = ipError
                ret['phia'] = phia

        # check what needs to be setup and returned

        if returnArray:
            if phia is not None:
                return rhoa, phia
            else:
                return rhoa

        return ret
Example #42
def drawPath(xys_raw, color):
    e = 0.1 * np.random.randn(xys_raw.shape[0], 2)  #add random deviations to prettify
    xys = .5 + xys_raw + e  #.5 to center in boxes
    plt.plot(xys[:, 0], xys[:, 1], color)
Example #43
def main():
    print(np.random.randn(3, 4))
Example #44
def drawPath(xys_raw, color):
    e = 0.1*np.random.randn(xys_raw.shape[0], 2)  #add random deviations to prettify
    xys = .5 + xys_raw+e   #.5 to center in boxes
    plt.plot(xys[:,0], xys[:,1], color)
Example #45
import numpy as np

x = np.random.randn(10)
print(np.mean(x))