Example #1
    def init_particles(self,
                       particle_no,
                       mass,
                       r_sigma,
                       v_sigma,
                       r_centre=[0, 0, 0],
                       v_centre=[0, 0, 0],
                       seed=None):
        """
        Create particles for simulation.

        Args:
        particle_no: int, number of particles to create
        mass: float, mass of each particle
        r_sigma: standard deviation of the particle position distribution (scalar or length-3)
        v_sigma: standard deviation of the particle velocity distribution (scalar or length-3)
        r_centre: where to centre positions (default: origin)
        v_centre: where to centre velocities (default: origin)
        seed: if not None, seeds numpy's random number generator

        Output:
        None; populates self._particles
        """

        self._mass = mass  # Save for use in temperature calculations (see TODO)

        if seed is not None:
            np.random.seed(seed)

        # Clean sigma input into np arrays of length 3
        if isinstance(r_sigma, (float, int, np.floating)):
            r_sigma = [r_sigma] * 3
        if isinstance(v_sigma, (float, int, np.floating)):
            v_sigma = [v_sigma] * 3
        r_sigma = utils.clean_vector(r_sigma)
        v_sigma = utils.clean_vector(v_sigma)

        # Clean centre inputs into np arrays of length 3
        r_centre = utils.clean_vector(r_centre)
        v_centre = utils.clean_vector(v_centre)

        # Generate particles
        self._particles = [
            particle.Particle([
                np.random.normal(r_centre[0], r_sigma[0]),
                np.random.normal(r_centre[1], r_sigma[1]),
                np.random.normal(r_centre[2], r_sigma[2])
            ], [
                np.random.normal(v_centre[0], v_sigma[0]),
                np.random.normal(v_centre[1], v_sigma[1]),
                np.random.normal(v_centre[2], v_sigma[2])
            ], mass, self._t_0, self._t_end, self._dt, self._sample_points)
            for i in range(int(particle_no))
        ]

        # Save no. of particles for reference
        self._N_particles = particle_no
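
A minimal usage sketch (the enclosing simulation class, its constructor arguments, and the utils/particle helpers are assumed from context, not shown above):

    sim = Simulation(t_0=0.0, t_end=10.0, dt=0.01, sample_points=100)  # hypothetical constructor
    sim.init_particles(particle_no=1000,
                       mass=1.0,
                       r_sigma=1e-3,             # a scalar sigma is broadcast to all three axes
                       v_sigma=[1.0, 1.0, 0.5],
                       seed=42)                  # fixes the numpy seed for reproducibility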
Example #2
    def __init__(self, xl, yl, batchNorm=True, dropout=True):
        self.xl = xl
        self.yl = yl
        self.batchNorm = batchNorm
        self.dropout = dropout

        np.random.seed(42)
        self.weights = np.random.randn(xl, yl) / np.sqrt(2 / xl)
        self.bias = np.random.randn(yl)  # bias vector; the (yl,) shape is an assumption
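
The weight scaling above divides by np.sqrt(2 / xl), which grows the variance with layer width; a hedged sketch of conventional He initialisation, in case that was the intent:

np.random.seed(42)
weights = np.random.randn(xl, yl) * np.sqrt(2 / xl)  # He init: std = sqrt(2 / fan_in)
bias = np.zeros(yl)                                  # biases are commonly zero-initialised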
Example #3
import time
import warnings

import numpy as np
from numpy import linalg


def generate_internal_weights(N,
                              spectral_radius=None,
                              proba=0.1,
                              Wstd=1.0,
                              seed=None,
                              randomize_seed_afterwards=False,
                              verbose=False,
                              typefloat=np.float64):
    """
    Method that generate the weight matrix that will be used for the internal connections of the Reservoir.

    Inputs :
        - N: number of neurons
        - spectral_radius: SR
        - proba: probability of non-zero connections (sparsity), usually between 0.05 to 0.30
        - verbose: print( in the console detailed information.
        - seed: if not None, set the seed of the numpy.random generator to the given value.
        - randomize_seed_afterwards: as the module mdp.numx.random may not be used only by this method,
            the user may want to run several experiments with the same seed only for this method
            (generating the internal weights of the Reservoir), but have random seed for all other
            methods that will use mdp.numx.random.
    """
    if seed is not None:
        # mdp.numx.random.seed(seed)
        np.random.seed(seed)
    # mask = 1*(mdp.numx_rand.random((N,N))<proba)
    # mat = mdp.numx.random.normal(0, 1, (N,N)) #equivalent to mdp.numx.random.randn(n, m) * sd + mu
    # w = mdp.numx.multiply(mat, mask)
    mask = 1 * (np.random.rand(N, N) < proba)
    mat = np.random.normal(
        0, Wstd, (N, N))  #equivalent to mdp.numx.random.randn(n, m) * sd + mu
    w = np.multiply(mat, mask, dtype=typefloat)
    # Computing the spectral radius of W matrix
    rhoW = max(abs(linalg.eig(w)[0]))
    if verbose:
        # print( "Spectra radius of generated matrix before applying another spectral radius: "+str(Oger.utils.get_spectral_radius(w)))
        print(
            "Spectral radius of generated matrix before applying another spectral radius: "
            + str(rhoW))
    if spectral_radius is not None:
        w *= spectral_radius / rhoW
        rhoW_after = max(abs(linalg.eig(w)[0]))
        if verbose:
            print(
                "Spectral radius of matrix after applying another spectral radius: "
                + str(rhoW_after))
    if randomize_seed_afterwards:
        """ redifine randomly the seed in order to not fix the seed also for other methods that are using numpy.random methods.
        """
        warnings.warn(
            "Have to check if you really want to randomize the seed, \
            because this makes the whole experiment not reproducible.",
            UserWarning)
        # mdp.numx.random.seed(int(time.time()*10**6))
        np.seed(int(time.time() * 10**6))
    return w
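
A minimal usage sketch (the 100-neuron size and 0.9 spectral radius are illustrative values, not from the original):

W = generate_internal_weights(N=100, spectral_radius=0.9, proba=0.1, seed=42)
print(W.shape)  # (100, 100); ~10% of entries non-zero, rescaled so that rho(W) = 0.9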
Example #4
def test_run_cv_evaluation():
    import numpy as np
    td = load_data('data/examples/rasa/demo-rasa.json')
    n_folds = 3
    nlu_config = RasaNLUConfig("sample_configs/config_defaults.json")

    np.random.seed(2018)
    results = run_cv_evaluation(td, n_folds, nlu_config)

    rel_tol = 1e-09
    abs_tol = 0.01

    acc = np.mean(results["accuracy"])
    exp_acc = 0.65  # expected result
    np.testing.assert_approx_equal(acc, exp_acc, significant=5)
Example #5
def test_run_cv_evaluation():
    import numpy as np
    td = training_data.load_data('data/examples/rasa/demo-rasa.json')
    n_folds = 3
    nlu_config = RasaNLUConfig("sample_configs/config_defaults.json")

    np.random.seed(2018)
    results = run_cv_evaluation(td, n_folds, nlu_config)

    rel_tol = 1e-09
    abs_tol = 0.01

    acc = np.mean(results["accuracy"])
    exp_acc = 0.65 # expected result
    np.testing.assert_approx_equal(acc, exp_acc, significant=5)
Example #6
def publisher(time, translation, rotation, kp2):

    global cb_matrix, odom_pub, odom, base_link_odom
    # Create a homogeneous transformation matrix
    trans_mat = np.hstack((rotation, translation.reshape(3, 1)))
    trans_mat = np.vstack((trans_mat, np.array([0, 0, 0, 1]).reshape(1, 4)))
    # Transform the information obtained into the base_link frame of reference
    trans_mat = np.matmul(inverse_matrix(cb_matrix), trans_mat)
    print("Time:")
    print(time)
    print("Translation")
    print(trans_mat[0:3, 3])
    print("Rotation")
    print(trans_mat[0:3, 0:3])

    quat = quaternion_from_matrix(trans_mat)
    position = list(trans_mat[0:3, 3].reshape(3, ))
    np.random.seed(1)
    # Random covariance generated on the basis of the number of keypoints tracked
    if kp2.shape[0] <= 1000:
        p_cov = np.random.random((6, 6)) + 5
    elif kp2.shape[0] <= 1500:
        p_cov = np.random.random((6, 6)) + 4
    elif kp2.shape[0] <= 2000:
        p_cov = np.random.random((6, 6)) + 3
    else:
        p_cov = np.random.random((6, 6)) + 2

    odom.header.stamp.secs = time
    odom.child_frame_id = "base_link"
    odom.header.frame_id = "map"
    odom.pose.pose.position.x = position[0]
    odom.pose.pose.position.y = position[1]
    odom.pose.pose.position.z = position[2]
    odom.pose.pose.orientation.x = quat[0]
    odom.pose.pose.orientation.y = quat[1]
    odom.pose.pose.orientation.z = quat[2]
    odom.pose.pose.orientation.w = quat[3]
    odom.pose.covariance = list(p_cov.reshape(36, ))
    odom_pub.publish(odom)
Example #7
    def simulate(self, n, seed=1):
        """Simulate from hindcast distribution

    **Parameters**:
    
    `n` (`n`): number of simulations

    `seed` (`int`): random seed

    """

        np.random.seed(seed)
        gen_simulation = self.gen.simulate(n).reshape((n, 1))
        nd_simulation = self._simulate_nd(n).reshape((n, 1))

        margin_simulation = gen_simulation - nd_simulation

        return {
            "margin": margin_simulation,
            "generation": gen_simulation,
            "net_demand": nd_simulation
        }
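
A hedged usage sketch (the enclosing model object, its gen attribute and _simulate_nd helper are assumed from context):

    sim = model.simulate(n=10000, seed=1)  # hypothetical instance `model`
    print(sim["margin"].mean())            # margin = generation - net_demand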
Example #8
 def __init__(self, loader: DataLoader) -> None:
     self._dataset = loader.dataset
     self.IterableDataset_len_called = loader.IterableDataset_len_called
     self._dataset_kind = loader.dataset_kind
     self._auto_collation = loader.auto_collation
     self._drop_last = loader.drop_last
     self._index_sampler = loader.index_sampler
     self._num_workers = loader.num_workers
     self._prefetch_factor = loader.prefetch_factor
     self._sampler_iter = iter(self._index_sampler)
     np.random.seed(124)
     self._base_seed = 124  # np.random.seed() returns None, so keep the seed value itself
     self._num_yielded = 0
     self._pin_memory = False
     self._timeout = loader.timeout
     self._persistent_workers = loader.persistent_workers
Example #9
# Example: evaluate (x ^ 2 + y ^ 2) ^ (1 / 2) across a regular grid of values
import numpy as np

points = np.arange(-5, 5, 0.01)
xs, ys = np.meshgrid(points, points)
z = np.sqrt(np.square(xs) + np.square(ys))

import matplotlib.pyplot as plt
plt.imshow(z, cmap=plt.cm.gray)
plt.colorbar()
plt.title(r"Image plot of $\sqrt{x^2 + y^2}$ for a grid of values")
plt.show()

# Random number operations
# Example: random walk. Starting from 0, add 1 with probability 0.5 and -1 otherwise
import random
random.seed(1999)  # the loop below draws from the stdlib random module, so seed that generator
position = 0.0
n_steps = int(1e3)
walk = list()
for i in range(n_steps):
    cond = random.randint(0, 1)
    print("Condition:", cond)
    step = 1 if cond else -1
    print("Step:", step)
    position += step
    walk.append(position)
plt.plot(walk)
plt.show()
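
The loop above takes one Python-level step at a time; a hedged vectorized sketch of the same walk with numpy (equivalent logic, different random generator):

rng = np.random.default_rng(1999)
steps = rng.choice([1, -1], size=n_steps)  # +1 or -1 with equal probability
walk = steps.cumsum()                      # running position after each step
plt.plot(walk)
plt.show()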
Example #10
 def seed(self, value):
     np.random.seed(value)
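
Seeding the global numpy state from a method affects every consumer of np.random in the process; a hedged alternative keeps a per-instance generator (np.random.default_rng is current numpy API):

 def seed(self, value):
     self._rng = np.random.default_rng(value)  # per-instance generator, no global state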
Example #11
# Particle filter implementation
from filterpy.monte_carlo.resampling import *
import scipy.stats
import numpy as np
np.random.seed(7)


def create_particles(u, v, s, r, du, dv, ds, dr):
    '''(center_x,center_y,scale,aspect_ratio,...) first-order movement model'''
    N = 8  # state dimension: (u, v, s, r, du, dv, ds, dr)
    particles = np.empty((N, 1))
    particles[:, 0] = [u, v, s, r, du, dv, ds, dr]  # assumption: pack the state into one column vector
    return particles


#bbox: (x1,y1,x2,y2)
def bbox_to_states(bbox):
    '''(x1,y1,x2,y2)->(cx,cy,w,h)'''


def state_to_bbox(state):
    '''(cx,cy,w,h)->(x1,y1,x2,y2)'''


class PFfilter:
    def __init__(self, state):
        '''state:u,v,s,r,du,dv,ds,dr'''
        self.state0 = state
        self.num_particles = 50
        self.weights = np.ones((self.num_particles, 1)) / self.num_particles
        self.dt = 0.1
        self.gaussian_cov = np.array([[10, 0, 0, 0, 0, 0, 0, 0],
                                      [0, 10, 0, 0, 0, 0, 0, 0],
Example #12
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 22:31:01 2018

@author: yoon
"""

import pandas as pd  # needed for pd.DataFrame below
import pandas.util.testing as tm
tm.N = 3
import numpy as np

np.random.seed(111)


# Function to generate a practice dataset
def unpivot(frame):
    N, K = frame.shape
    data = {
        'value': frame.values.ravel('F'),
        'variable': np.asarray(frame.columns).repeat(N),
        'date': np.tile(np.asarray(frame.index), K)
    }
    return pd.DataFrame(data, columns=['date', 'variable', 'value'])


df = unpivot(tm.makeTimeDataFrame())

# Inspect a single variable
df[df['variable'] == 'A']
Example #13
File: v2.py  Project: hsfzxjy/WRSFKM
import argparse

import numpy as np
import mnist  # python-mnist package, assumed from the mnist.MNIST(...) call below

parser = argparse.ArgumentParser()
parser.add_argument('--orig-gamma', type=float)
parser.add_argument('--epsilon', type=float, default=0.03)
parser.add_argument('--orig-epsilon', type=float)
parser.add_argument('--seed', type=int)
args = parser.parse_args()

# np.random.seed(int(os.environ.get('seed', '42')))
# print('Using seed:', os.environ.get('seed', '42'))

epsilon = args.epsilon
gamma = args.orig_gamma / args.orig_epsilon / epsilon
np.random.seed(args.seed)

# epsilon = 0.03
# gamma = .2 / 30 / epsilon
# np.random.seed(42)

# Download t10k_* from http://yann.lecun.com/exdb/mnist/
# Change to directory containing unzipped MNIST data
mndata = mnist.MNIST('data/MNIST-10K/')


def solve_huang_eq_24(u):

    n = len(u)

    def f(x):
Example #14
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor

dataframe = pd.read_csv("housing.csv", delim_whitespace=True, header=None)
dataset = dataframe.values
# split into input (X) and output (Y) variables
X = dataset[:,0:13]
y = dataset[:,13]


# define base model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(13, input_dim=13, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model

# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
# evaluate model with standardized dataset
estimator = KerasRegressor(build_fn=baseline_model, epochs=100, batch_size=5, verbose=0)
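
# The comment above promises an evaluation step that the listing stops short of;
# a hedged sketch of that step, assuming scikit-learn's KFold and cross_val_score:
from sklearn.model_selection import KFold, cross_val_score

kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, y, cv=kfold, scoring='neg_mean_squared_error')
print("Baseline MSE: %.2f (%.2f)" % (-results.mean(), results.std()))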
Example #15
 def _set_and_update_seed(self):
     if self.seed is not None:
         np.random.seed(self.seed)
         self.seed += 1  # advance so the next call seeds differently
Example #16
import numpy as np
import pandas as pd


def MCImpS(f, Ginv, N=10, lambdai=.05, tests=100, seed=False):

    # Uniform samples on [0, 1]; a numeric seed fixes the generator first
    if seed not in (False, True):
        np.random.seed(seed)
    r = np.random.uniform(0, 1, N)

    variancia = []
    lambdas = [i * lambdai for i in range(1, tests + 1)]

    for lamb in lambdas:

        # compute f(Ginv) / g(Ginv)
        GinI = Ginv(r, lamb)[1]
        F = f(GinI)
        g = Ginv(r, lamb)[0]

        f_over_g = [F[i] / g[i] for i in range(len(r))]
        f_over_g2 = [(F[i] / g[i]) ** 2 for i in range(len(r))]

        # mean, mean square and variance of the weighted samples
        mean = 1 / N * sum(f_over_g)
        meanquad = 1 / N * sum(f_over_g2)
        var = meanquad - mean**2
        variancia.append(var)

    U = [variancia, lambdas]

    # Choose the lambda with the smallest variance
    df = pd.DataFrame(np.transpose(U), columns='variância lambdas'.split())
    df = df[df['variância'] == df['variância'].min()]

    minlambda = df['lambdas'].values[0]

    # Apply the method with the best lambda
    F = f(Ginv(r, minlambda)[1])
    G = Ginv(r, minlambda)[0]

    f_over_gfinal = [F[i] / G[i] for i in range(len(G))]

    I = 1 / N * sum(f_over_gfinal)

    return I
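
A hedged usage sketch, assuming an exponential importance density g(x) = lamb * exp(-lamb * x), with Ginv returning (g evaluated at the samples, the samples) as the body above expects:

def Ginv(r, lamb):
    x = -np.log(1 - r) / lamb           # inverse-CDF sampling from g
    return lamb * np.exp(-lamb * x), x  # (g(x), x)

I = MCImpS(lambda x: np.exp(-x), Ginv, N=10000, seed=7)  # estimates the integral of exp(-x) over [0, inf), i.e. ~1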
Example #17
    ctk.showmapping(sMap,
                    donnee,
                    bmus=[],
                    seecellid=1,
                    subp=True,
                    override=False)
    plt.suptitle('Final state')
    #tmp = ctk.classifperf(sMap, Xapp, Xapplabels)
    return sMap, donnee, classname


dataApp = data[108:300, 1:5]
dateApp = data[108:300, 0]
dataTest = data[300:436, 1:5]
dateTest = data[300:436, 0]
np.random.seed(0)
classname = ["SST1", "SST2", "SST3", "SST4"]
#data_70_83 =
sMap, donnee, classename = app_donnees(dataApp, classname)

ctk.showmap(sMap)

#annee = data[108:436,0]
#data1 = data[108:436,1:5]
data_72_83 = np.concatenate((dateApp[24:36, ], dateApp[156:168, ]), axis=0)
donnees_72_83 = np.concatenate((dataApp[24:36, ], dataApp[156:168, ]), axis=0)

#data_73_82 =np.concatenate((annee[36:48,],annee[144:156,]), axis=0)
#donnees_73_82 = np.concatenate((data1[36:48,],data1[144:156,]), axis=0)

bmus1 = ctk.mbmus(sMap, Data=donnees_72_83)
Example #18
def rand_weights(n):
    ''' Produces n random weights that sum to 1 '''
    np.random.seed(1000)  # note: seeding here makes every call return the same weights
    k = np.random.rand(n)
    return k / sum(k)
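
If different weights per call are wanted, a hedged variant takes an optional numpy Generator instead of reseeding global state on every call:

def rand_weights_rng(n, rng=None):
    ''' Produces n random weights that sum to 1, without touching global state '''
    if rng is None:
        rng = np.random.default_rng()
    k = rng.random(n)
    return k / k.sum()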
Example #19
 def seed(self, seed=None):
     if seed is not None:
         np.random.seed(seed)
         random.seed(seed)
Example #20
import os
os.environ['KERAS_BACKEND'] = "tensorflow"
from keras import backend as K
K.set_image_data_format('channels_first')
from keras.layers import ZeroPadding2D, Lambda, GlobalMaxPooling2D, Concatenate
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                              inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
np.random.seed(2)


def tf_roll(x, shift, axis, axis_shape):
    n = axis_shape
    shift[0] %= n[0]
    shift[1] %= n[1]
    indexes = [None] * 2
    indexes[0] = np.concatenate(
        [np.arange(n[0] - shift[0], n[0]),
         np.arange(n[0] - shift[0])])
    indexes[1] = np.concatenate(
        [np.arange(n[1] - shift[1], n[1]),
         np.arange(n[1] - shift[1])])
    res = tf.gather(x, indexes[0], axis=axis[0])
    res = tf.gather(res, indexes[1], axis=axis[1])
    return res
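
# A hedged usage sketch of tf_roll (shapes and shift values are illustrative);
# newer TensorFlow offers tf.roll, which covers the same operation natively:
x_in = tf.placeholder(tf.float32, shape=(None, 32, 32))  # TF 1.x, matching the session setup above
rolled = tf_roll(x_in, shift=[2, 3], axis=[1, 2], axis_shape=[32, 32])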


def fft_shift(x, filter_shape, axis):
    # TODO
    # provide bitdata in x_recon (0, 3, ...)
    x_recon = 

    # count bit errors
    diff = x_recon ^ x_raw  # bitwise comparison
    bit_errors = np.sum(error_values[diff])
    ber = bit_errors/(NUM_SAMPLES*BITS_PER_SYMBOL)
    return ber

    
##################################################
# Run analysis 

# generate data for all experiments
np.random.seed(SEED)
x_train_raw, x_train = #TODO
x_test_raw, x_test = #TODO 
ebn0_values = np.linspace(*EBN0_RANGE)  # assumes EBN0_RANGE is a (start, stop, num) tuple

# generate sequence of Eb/N0 values
bers = []
print("Train points: %d | Test points: %d" % (TRAIN_LENGTH, NUM_SAMPLES))
print("Eb/N0 [dB], BER")

for EbN0 in ebn0_values: # do experiment for all Eb/N0 values

    # calculate N0 value to meet Eb/N0 requirement and add noise to sample
    mean_Es = #TODO: mean of squared amplitudes 
    EbN0_lin = 10**(0.1*EbN0)
    N0 = mean_Es/(EbN0_lin*BITS_PER_SYMBOL)
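
# A hedged sketch of the noise-addition step the loop comment describes
# (complex AWGN with total variance N0; x_test is the exercise's placeholder name):
noise = np.sqrt(N0 / 2) * (np.random.randn(*x_test.shape) + 1j * np.random.randn(*x_test.shape))
x_noisy = x_test + noise  # received samples at the requested Eb/N0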