def learn_cone(self, vectors, class_values):
        # Factorise vectors = V into V ~= WH, minimising ||V - WH||
        orig_dims = len(vectors[0])
        num_docs = len(vectors)
        V = vectors.T

        W = random.random_sample(orig_dims * self.dimensions) * 2 - 1
        W = W.reshape((orig_dims, self.dimensions))

        H = random.random_sample(self.dimensions * num_docs) * 2 - 1
        H = H.reshape((self.dimensions, num_docs))

        mu_w = 1
        mu_h = 1

        objectives = []
        for i in range(500):
            logging.debug("Iteration %d", i)
            new_obj = obj = np.linalg.norm(V - np.dot(W, H))
            logging.debug("Objective: %f", new_obj)

            # If the objective hasn't decreased much recently, stop
            if i > 3 and (sum(objectives) / len(objectives) - new_obj) / new_obj < 1e-5:
                logging.info("Objective stopped decreasing at: %f", new_obj)
                return
            objectives.append(new_obj)
            objectives = objectives[-5:]
            while True:
                logging.debug("Objective: %f, mu_w: %f", new_obj, mu_w)
                W_new = W - mu_w * np.dot(np.dot(W, H) - V, H.T)
                new_obj = np.linalg.norm(V - np.dot(W_new, H))
                self.W = W_new
                if new_obj < obj:
                    break
                mu_w *= 0.5
                if mu_w <= 1e-200:
                    logging.info("Convergence at objective: %f", new_obj)
                    return
            W = W_new
            mu_w *= 1.2
            logging.debug("Better W found at obj: %f", new_obj)

            obj = new_obj
            while True:
                logging.debug("Objective: %f, mu_h: %f", new_obj, mu_h)
                H_new = H - mu_h * np.dot(W.T, np.dot(W, H) - V)
                # Make compatible with classifications
                # logging.debug("Before project: %s", str(H_new))
                H_new = self.project(H_new.T, class_values).T
                # logging.debug("After project: %s", str(H_new))
                new_obj = np.linalg.norm(V - np.dot(W, H_new))
                if new_obj < obj:
                    break
                mu_h *= 0.5
                if mu_h <= 1e-200:
                    logging.info("Convergence at objective: %f", new_obj)
                    return
            H = H_new
            mu_h *= 1.2
            logging.debug("Better H found at obj: %f", new_obj)
Example #2
File: csta.py  Project: Nikmort/CSTA
	def Sobol_function_with_noise(x):
		# 6D Sobol function
		answer = 1.0
		a = [0.0, 0.5, 3.0, 9.0, 99.0, 99.0]
		for i in range(6):
			answer *= (abs(4*x[i]-2)+a[i])/(1+a[i])
		return answer * (1.0 + 0.5 * (nrand.random_sample()-0.5)) + 2 * (nrand.random_sample()-0.5)
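A minimal usage sketch (not part of the scraped example), assuming numpy.random is imported as nrand as the snippet requires:

import numpy.random as nrand

x = nrand.random_sample(6)            # a random point in the 6D unit hypercube
print(Sobol_function_with_noise(x))   # one noisy evaluation of the Sobol g-function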
def setup_and_init_weights(nn_structure):
    W = {}
    b = {}
    for l in range(1, len(nn_structure)):
        W[l] = r.random_sample((nn_structure[l], nn_structure[l-1]))
        b[l] = r.random_sample((nn_structure[l],))
    return W, b
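A quick usage sketch, assuming the snippet's r alias is numpy.random:

import numpy.random as r

nn_structure = [4, 5, 2]     # 4 inputs, 5 hidden units, 2 outputs
W, b = setup_and_init_weights(nn_structure)
print(W[1].shape)            # (5, 4): weights from the input layer into the hidden layer
print(b[2].shape)            # (2,): biases for the output layer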
Example #4
def add_false_data_water(cost_dict):
    data = data_init()
    water_val = random.random_sample()*20 + 50
    for elem in data["crop_types"]:
        cost_dict[elem[0]] = (-1*elem[1] + water_val)/water_val
    cost_dict["CORN"] = (-1*random.random_sample()*20 + 50 - water_val)/water_val
    cost_dict["SOY"] = (-1*random.random_sample()*20 + 50 - water_val)/water_val
Example #5
def generateRandomPlane(name,altitude):
    phi = -math.pi+random.random_sample()*2.0*math.pi
    dir_flight = phi+3.0*math.pi/4.0+random.random_sample()*math.pi/2.0

    position = vector.cylvec(70000.0,phi,altitude)
    velocity = vector.sphvec(airplane.ControllableAirplane.vcruise,math.pi/2.0,dir_flight)

    return airplane.ControllableAirplane(name,position,velocity)
    def train(self, epochs = 100, n_input = 2, n_hidden = 2, n_output = 1, 
            learning_rate = 0.1, momentum_learning_rate = 0.9,          
            test_size=0.2):
        """Initialize the network and start training."""

        # Initialize variables
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.n_output = n_output
        self.learning_rate = learning_rate
        self.momentum_learning_rate = momentum_learning_rate
        
        
        self.V_hidden = zeros((self.n_input + 1, self.n_hidden))
        self.W_hidden = random_sample(self.V_hidden.shape)
        self.V_output = zeros((self.n_hidden + 1, self.n_output))
        self.W_output = random_sample(self.V_output.shape)
        
        # Start the training
        rmse=zeros((epochs,2))
        for t in arange(epochs):
            # Test then Train, since we'll use the training errors
            outputs, hidden = self.feed_forward(self.inputs)
            errors = self.targets - outputs
            RMSE = sqrt((errors**2).mean())
            rmse[t, 0] = RMSE
            yield rmse, t, epochs
            
            if (RMSE<1e-4):
                break
            
            # Update weights using backpropagation
            self.back_propagate(self.inputs, hidden, outputs, errors)
Example #7
def vonmises(mu, kappa):
    # Delegate to numpy's sampler except for a narrow band of very small kappa,
    # where a simple rejection sampler under the unnormalised density is used.
    if kappa > 1e-7 or kappa < 1e-8:
        return np.random.vonmises(mu, kappa)
    else:
        prop = 2*np.pi*(random_sample(1)-0.5)
        line = np.exp(kappa)*random_sample(1)
        while line > np.exp(kappa*np.cos(prop-mu)):
            prop = 2*np.pi*(random_sample(1)-0.5)
            line = np.exp(kappa)*random_sample(1)
        return prop
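A small sanity check for the snippet above, assuming numpy is imported as np and random_sample comes from numpy.random:

import numpy as np

sample = vonmises(mu=0.0, kappa=2.0)  # kappa=2.0 takes the np.random.vonmises branch
print(-np.pi <= sample <= np.pi)      # von Mises angles lie in [-pi, pi]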
Example #8
def gen_polar_direc(N=1):
    """ generates N random polar coordinate direction
    (azimuthal,polar) angles. 0<=azimuth<=2*pi, 0<=polar<=pi.
    """
    import numpy as np
    from numpy.random import random_sample
    from numpy import pi
    out = np.zeros((N,2),dtype=float)
    for i in range(N):
        out[i,0] = (2*pi)*random_sample()
        out[i,1] = (pi)*random_sample()
    return out
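A short usage sketch for gen_polar_direc, relying on the imports fixed above:

import numpy as np

directions = gen_polar_direc(5)
print(directions.shape)                       # (5, 2)
print(np.all(directions[:, 0] < 2 * np.pi))   # azimuths in [0, 2*pi)
print(np.all(directions[:, 1] < np.pi))       # polar angles in [0, pi)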
Example #9
def logreg(A, N):
    """
    Logistic regression estimator of alpha,beta, gama.
    Returns the array of estimated parameters
    """
    print "...running bfgs ..."
    initParams = [r.random_sample(), r.random_sample(), r.random_sample()]
    print initParams
    results = minimize(loglikeliPxy, initParams, args=(A, N),
      method = 'BFGS', jac = True, options={'maxIter': 5000, 'disp': True})
    print results.x
    return results.x
Example #10
File: t_ex6_3.py  Project: hoburg/gpfit
    def test_rms_error(self):
        np.random.seed(SEED)
        Vdd = random_sample(1000,) + 1
        Vth = 0.2*random_sample(1000,) + 0.2
        P = Vdd**2 + 30*Vdd*exp(-(Vth-0.06*Vdd)/0.039)
        u = vstack((Vdd, Vth))
        x = log(u)
        y = log(P)
        K = 4

        _, rms_error = fit(x, y, K, "MA")

        self.assertTrue(rms_error < 1e-2)
Example #11
File: conftest.py  Project: cmshobe/landlab
def pytest_generate_tests(metafunc):
    if "diagonal_property" in metafunc.fixturenames:
        metafunc.parametrize("diagonal_property", DIAGONAL_PROPERTIES)
    elif "random_xy" in metafunc.fixturenames:
        from numpy.random import random_sample

        metafunc.parametrize(
            "random_xy",
            (
                tuple(-1e3 * random_sample(2)),
                tuple(1e3 * random_sample(2)),
                tuple(1e3 * (random_sample(2) - 0.5)),
            ),
        )
Example #12
    def spectrum_to_phase_noise(self, freq, spectrum, transform=None):
        
        nf = len(spectrum)
        fmax = freq[nf-1]
        
        # Resolution in time domain
        if transform is None or transform == 'r':
            nt = 2*(nf - 1) 
            dt = 1/(2*fmax) # in [s]
        elif transform=='c':  
            nt = nf 
            dt = 1./fmax # in [s]
        else:
            #NoiseError
            raise RuntimeError('ERROR: The choice of Fourier transform for the '
                               'RF noise generation could not be recognized. '
                               'Use "r" or "c".')
            
        # Generate white noise in time domain
        rnd.seed(self.seed1)
        r1 = rnd.random_sample(nt)
        rnd.seed(self.seed2)
        r2 = rnd.random_sample(nt)
        if transform is None or transform == 'r':
            Gt = np.cos(2*np.pi*r1) * np.sqrt(-2*np.log(r2))     
        elif transform=='c':  
            Gt = np.exp(2*np.pi*1j*r1)*np.sqrt(-2*np.log(r2)) 
 
        # FFT to frequency domain
        if transform is None or transform == 'r':
            Gf = np.fft.rfft(Gt)  
        elif transform=='c':
            Gf = np.fft.fft(Gt)   
             
        # Multiply by desired noise probability density
        if transform is None or transform == 'r':
            s = np.sqrt(2*fmax*spectrum) # in [rad]
        elif transform=='c':
            s = np.sqrt(fmax*spectrum) # in [rad]
        dPf = s*Gf.real + 1j*s*Gf.imag  # in [rad]
                
        # FFT back to time domain to get final phase shift
        if transform is None or transform == 'r':
            dPt = np.fft.irfft(dPf) # in [rad]
        elif transform=='c':
            dPt = np.fft.ifft(dPf) # in [rad]
                    
        # Use only real part for the phase shift and normalize
        self.t = np.linspace(0, nt*dt, nt) 
        self.dphi_output = dPt.real
Example #13
File: datasets.py  Project: Peichao/thunder
    def generate(self, dims=(100, 200), centers=5, t=100, margin=35, sd=3, noise=0.1, npartitions=1, seed=None):

        from scipy.ndimage.filters import gaussian_filter, gaussian_filter1d
        from skimage.draw import circle
        from thunder.rdds.fileio.imagesloader import ImagesLoader
        from thunder.extraction.source import SourceModel

        random.seed(seed)

        if len(dims) != 2:
            raise Exception("Can only generate for two-dimensional sources.")

        if size(centers) == 1:
            n = centers
            xcenters = (dims[0] - margin) * random.random_sample(n) + margin/2
            ycenters = (dims[1] - margin) * random.random_sample(n) + margin/2
            centers = list(zip(xcenters, ycenters))
        else:
            centers = asarray(centers)
            n = len(centers)

        ts = [random.randn(t) for i in range(0, n)]
        ts = clip(asarray([gaussian_filter1d(vec, 5) for vec in ts]), 0, 1)
        for ii, tt in enumerate(ts):
            ts[ii] = (tt / tt.max()) * 2
        allframes = []
        for tt in range(0, t):
            frame = zeros(dims)
            for nn in range(0, n):
                base = zeros(dims)
                base[int(round(centers[nn][0])), int(round(centers[nn][1]))] = 1
                img = gaussian_filter(base, sd)
                img = img/max(img)
                frame += img * ts[nn][tt]
            frame += clip(random.randn(dims[0], dims[1]) * noise, 0, inf)
            allframes.append(frame)

        def pointToCircle(center, radius):
            rr, cc = circle(center[0], center[1], radius)
            return array(zip(rr, cc))

        r = round(sd * 1.5)
        sources = SourceModel([pointToCircle(c, r) for c in centers])

        data = ImagesLoader(self.sc).fromArrays(allframes, npartitions).astype('float')
        if self.returnParams is True:
            return data, ts, sources
        else:
            return data
Example #14
def table(request):
    if request.param == "empty":
        return DataFrame({
            "numeric": random_sample(10),
            "integer": range(10),
            "comments": None
        })
    elif request.param == "half":
        df = DataFrame({
            "numeric": random_sample(10),
            "integer": range(10),
            "comments": None
        })
        df.loc[range(0, 10, 2), "comments"] = "Wise comment."
        return df
Example #15
File: pf.py  Project: mirwox/particlefilter
def draw_random_sample(choices, probabilities, n):
    """ 
        Devolve uma amostra aleatória de n elementos retirada do conjunto choices em que cada 
        elemento tem uma probabilidade diferente de ser escolhido. As probabilidades
        estão na lista probabilities
        
        choices: lista de valores a amostrar
        probabilities: lista das probabilidades de cada valor
        n: número de amostras desejadas na lista resultado
    """
    if np.any(np.isnan(probabilities)):
        message = """\n\nIMPOSSÍVEL calcular com valor de probabilidade NaN - Not a number presentes na lista \n
        DICA: se estiver obtendo probabilidades muito pequenas \n 
        Cheque se suas contas estão certas, lembre-se de que a produtória \n
        da fórmula se aplica apenas aos lasers de cada partícula \n
        Caso precise mesmo representar valores menores que  {0} \n
        Use mpmath.mpf para armazenar os valores temporários do cálculo de probabilidade e de alpha \n
        e volte a armazenar no atributo w da particula após a multiplicação por alpha \n
        referênia: https://docs.sympy.org/0.6.7/modules/mpmath/basics.html"""
        message = message.format(sys.float_info.min)
        print(message)
        print("Suas probabilidades:")
        print(probabilities)
    values = np.array(range(len(choices)))
    probs = np.array(probabilities)
    bins = np.add.accumulate(probs)
    inds = values[np.digitize(random_sample(n), bins)]
    samples = []
    for i in inds:
        samples.append(deepcopy(choices[int(i)]))
    return samples
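A minimal usage sketch, assuming numpy, deepcopy, and random_sample are imported as the snippet requires:

import numpy as np
from copy import deepcopy
from numpy.random import random_sample

choices = ['a', 'b', 'c']
probabilities = [0.1, 0.3, 0.6]
samples = draw_random_sample(choices, probabilities, 1000)
print(samples.count('c') / 1000.0)   # roughly 0.6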
Example #16
    def test_members(self):

        o = mfcc(buf_size, n_filters, n_coeffs, samplerate)
        #assert_equal ([o.buf_size, o.method], [buf_size, method])

        spec = cvec(buf_size)
        #spec.norm[0] = 1
        #spec.norm[1] = 1./2.
        #print "%20s" % method, str(o(spec))
        coeffs = o(spec)
        self.assertEqual(coeffs.size, n_coeffs)
        #print coeffs
        spec.norm = random.random_sample((len(spec.norm),)).astype(float_type)
        spec.phas = random.random_sample((len(spec.phas),)).astype(float_type)
        #print "%20s" % method, str(o(spec))
        self.assertEqual(count_nonzero(o(spec) != 0.), n_coeffs)
Example #17
    def train(self, rdd_data, learn_rate=0.5, iteration=100, error=1e-8, method=None, seed=None):
        """
        Use for training linear regression model
        :param rdd_data: list data data type is [[feature, target], .....], features is vector liked list, current
        target only can have one output
        :param learn_rate: the learning rate of this training process
        :param iteration: the maximum iteration of this method
        :param error: target error
        :param method: method use to
        :param seed: used when self.weights is None
        :return: None
        """
        if self.weights is None:
            self.logger.debug("Init weights")
            if seed is not None:
                random.seed(seed=seed)
            self.weights = 2 * random.random_sample(len(rdd_data[0][0])) - 1

        if method is None:
            method = self.GD

        for i in range(iteration):
            self.logger.debug("Start {} iteration".format(i))
            if method == self.GD:
                if self.update_weight_using_gd(learn_rate, rdd_data, error):
                    break
            self.logger.debug("Iteration {} finished".format(i))
Example #18
    def __init__(self, numInputs, shape, alphaInit=.5):
        '''
        Initialize the SOM with a given shape
            numInputs - int
                The dimensionality of the input space.
            shape - a tuple of ints
                The shape of the network.
            alphaInit - float
                The initial alpha value for training.
                Alpha controls the height of the Gaussian used to update the 
                neurons about the best matching unit. The value should be in 
                [0,1]. The default value is .5, and it slowly decays over time.
        '''
        if any(s <= 0 for s in shape):
            raise ValueError('shape values must be greater than 0')

        self.dim = len(shape)
        self.som = random_sample(shape + (numInputs,))
        self.nIn = numInputs
        self.alphaInit = alphaInit
        self.setLearnParams(1)

        # These grids simplify calculating distances later on.
        # Each element of the list is a range reshaped to one of the
        # dimensions of our shape. We'll subtract the location of our
        # winning neuron, square the result, and add them all together
        # to get the euclidean distance of each neuron from the winner
        self.grids = [arange(shape[i]).reshape(
            tuple([1]*i + [shape[i]] + [1]*(self.dim-i-1)))
            for i in range(self.dim)]
Example #19
def test_sample_box():
    """Test that `sample_box` generates the proper number of samples within the
    correct bounds.

    """
    # Generate some random bounds
    bounds = []
    bounds_count = 0
    num_dimensions = random_integers(2, 10)
    while bounds_count < num_dimensions:
        candidate_bound = random_sample(2)
        low, high = candidate_bound
        if low > high:
            candidate_bound = [high, low]
        bounds.append(candidate_bound)
        bounds_count += 1

    num_samples = random_integers(100)
    points = sample_box(bounds, num_samples)

    assert len(points) == num_samples

    results = np.empty([num_samples, num_dimensions])
    for counter, point in enumerate(points):
        for axis, value in enumerate(point):
            low = bounds[axis][0]
            high = bounds[axis][1]
            results[counter, axis] = (low <= value <= high)
    assert results.all()
 def get_decision_H(self, data):
     num_docs = len(data)
     V = data.T
     H = random.random_sample(self.dimensions * num_docs) * 2 - 1
     H = H.reshape((self.dimensions, num_docs))
     mu_h = 1
     best_obj = float("inf")
     for i in range(500):
         logging.debug("Prediction iteration: %d", i)
         while True:
             H_new = H - mu_h * np.dot(self.W.T, np.dot(self.W, H) - V)
             new_obj = np.linalg.norm(V - np.dot(self.W, H_new))
             logging.debug("Objective: %f, mu_h: %f", new_obj, mu_h)
             if new_obj < best_obj:
                 if (best_obj - new_obj) / new_obj < 1e-8:
                     logging.info("Objective stopped decreasing at: %f", new_obj)
                     return H_new
                 H = H_new
                 best_obj = new_obj
                 break
             mu_h *= 0.5
             if mu_h <= 1e-200:
                 logging.info("Convergence at objective: %f", new_obj)
                 return H
         mu_h *= 1.2
Example #21
def random_list(length, sparsity=0.75, amplitude=1000, integers=True):
    """Get a random sparse list for tests.
    """
    rand_fn = random.randint if integers else random.uniform
    dense = rand_fn(-amplitude, amplitude, length)
    dense[random.random_sample(length) < sparsity] = 0
    return list(dense)
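A quick usage sketch, assuming the snippet's random name is numpy.random:

from numpy import random

values = random_list(20, sparsity=0.5, amplitude=10)
print(len(values))                          # 20
print(sum(1 for v in values if v == 0))     # roughly half the entries are zeroed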
Example #22
    def test_members(self):
        o = specdesc()

        for method in methods:
            o = specdesc(method, buf_size)
            assert_equal ([o.buf_size, o.method], [buf_size, method])

            spec = cvec(buf_size)
            spec.norm[0] = 1
            spec.norm[1] = 1./2.
            #print "%20s" % method, str(o(spec))
            o(spec)
            spec.norm = random.random_sample((len(spec.norm),)).astype(float_type)
            spec.phas = random.random_sample((len(spec.phas),)).astype(float_type)
            #print "%20s" % method, str(o(spec))
            assert (o(spec) != 0.)
Example #23
def w_choice(item,weight):
    n = rnd.random_sample()
    for i in range(0,len(weight)):
        if n < weight[i]:
            break
        n = n - weight[i]
    return i
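A short usage sketch; note that w_choice returns an index into weight, and the item argument is unused in the body. rnd is assumed to be numpy.random:

import numpy.random as rnd

items = ['red', 'green', 'blue']
weights = [0.2, 0.5, 0.3]
idx = w_choice(items, weights)
print(items[idx])   # 'green' about half the time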
Example #24
File: optimize.py  Project: pmangg/AdaptSAW
def debugRectangles(rects):
    """
    Sanity test on rectangle sets go here.
    """
    # first, are any out of bounds?
    for r in rects:
        if r.lb[0] < 0. or r.lb[1] < 0.:
            print "ERROR: rectangle has lb", r.lb
        if r.ub[0] > 1. or r.ub[1] > 1.:
            print "ERROR: rectangle has ub", r.ub
        if r.lb[0] > r.ub[0] or r.lb[1] > r.ub[1]:
            print 'ERROR: lower bound greater than upper'
        assert r.center == [l+(u-l)/2. for u, l in zip(r.lb, r.ub)]
        assert r.d == sum([(l-c)**2. for l, c in zip(r.lb, r.center)])**0.5
        
    
    # okay, now sample
    for _ in range(10000):
        hits = 0
        samp = random_sample(2)
        for r in rects:
            if all(samp > r.lb) and all(samp < r.ub):
                hits += 1
        if hits != 1:
            print "ERROR: location has %d hits: %s" % (hits, samp)
Example #25
	def fit(self, X):

		#converts X to np array
		if (type(X) == list):
			X = np.array(X)

		#randomly initialize weights
		n_samples, n_features = X.shape
		self.w_ = random_sample((1 + n_features, ))
		self.e_ = []
		y = np.ones(n_samples)

		#normalize each feature to the range 0 to 1
		for feat in range(n_features):
			min_val = min(X[:, feat])
			max_min_range = max(X[:, feat]) - min_val
			for sample in range(n_samples):
				X[sample, feat] = ((X[sample, feat] - min_val) / max_min_range)

		#iterate until it converges
		improvement, total_error = 1, None
		while improvement >= self.error_tolerance:
			errors = []
			for xi, target in zip(X, y):				
				error = self.predict(xi) - target
				update = self.learning_rate * error
				self.w_[1:] -= update * xi
				self.w_[0] -= update
				errors.append(abs(error))
			self.e_.append(errors)

			#calculates improvement of prediction
			if total_error: 
				improvement = (total_error - sum(errors))/total_error
			total_error = sum(errors)
    def stochastic_pooling(self, X, n_layer):

        inh = X.shape[0] - self.shape_pool[0] + 1
        inw = X.shape[1] - self.shape_pool[1] + 1
        n_filter = self.n_filters[n_layer]
        filtersize = self.shape_pool[0] * self.shape_pool[1]

        randomsamples = random_sample((inh) * (inw) * n_filter).reshape(
            (inh), (inw), n_filter
        )  # generate random values
        randomsamples = np.repeat(randomsamples, repeats=filtersize, axis=2).reshape((inh), (inw), n_filter, filtersize)

        X_rfi = view_as_windows(X, self.shape_pool + (1,))
        sumpool = np.repeat(np.sum(X_rfi, axis=(3, 4, 5)), repeats=filtersize).reshape(
            (inh, inw, n_filter, self.shape_pool[0], self.shape_pool[1], 1)
        )
        probabilities = X_rfi / sumpool
        probabilities[np.isnan(probabilities)] = 1 / float(
            filtersize
        )  # get where the sum is zero and replace by one, so the division by zero error do not occur
        probabilities = probabilities.reshape((inh, inw, n_filter, filtersize))

        if self.training:

            bins = np.add.accumulate(probabilities, axis=3)
            binsbefore = np.concatenate((np.zeros((inh, inw, n_filter, 1)), bins[:, :, :, :-1]), axis=3)
            ret = X_rfi[np.where((((binsbefore <= randomsamples) * (bins > randomsamples))) == True)]
            ret = ret.reshape(((inh), (inw), n_filter))[:: self.stride_pool, :: self.stride_pool]

        else:  # for testing
            ret = probabilities * X_rfi
            sumpool[sumpool == 0.0] = 1.0
            ret = np.sum(ret, axis=(3)) / sumpool[:, :, :, 0, 0, 0]
            ret = ret[:: self.stride_pool, :: self.stride_pool]

        return ret  # assumed final return; the scraped snippet ends without one
Example #27
 def predict_admit(self):
     """ Use first stage estimates to predict binary admission outcome """
     threshold = random_sample(self.data.shape[0])
     self.data['admit_hat'] = self.lsn_models['admit_binary'].predict_proba(
         self.data[self.lsn_rhs]
     )[:, 1]
     self.data['admit_hat'] = 1 * (self.data['admit_hat'] > threshold)
Example #28
File: misc.py  Project: wcarthur/hazimp
def weighted_values(values, probabilities, size, forced_random=None):
    """
    Return values weighted by the probabilities.

    precondition: the probabilities should sum to 1
    Code from: goo.gl/oBo2zz

    :param values:  The values to go into the final array
    :param probabilities:  The probabilities of the values
    :param size: The array size/shape. Must be 1D.
    :return: The array of values, made using the probabilities
    """

    msg = "Due to numpy.digitize the array must be 1D. "
    assert len(size) == 1, msg

    assert isinstance(probabilities, numpy.ndarray)
    assert isinstance(values, numpy.ndarray)

    assert values.shape == probabilities.shape

    if not numpy.allclose(probabilities.sum(), 1.0, atol=0.01):
        msg = 'Weights should sum to 1.0, got %s' % probabilities
        raise RuntimeError(msg)

    # Re-normalise weights so they sum to 1 exactly
    probabilities = probabilities / abs(probabilities.sum())  # normalize

    if forced_random is None:
        rand_array = random_sample(size)
    else:
        assert forced_random.shape == size
        rand_array = forced_random
    bins = numpy.add.accumulate(probabilities)
    return values[numpy.digitize(rand_array, bins)]
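A minimal usage sketch, assuming numpy and random_sample are imported as the snippet requires:

import numpy
from numpy.random import random_sample

values = numpy.array([10, 20, 30])
probabilities = numpy.array([0.2, 0.5, 0.3])
draws = weighted_values(values, probabilities, (1000,))
print((draws == 20).mean())   # roughly 0.5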
Example #29
    def test_rectangles(self):
		from numpy import fliplr, flipud
		from numpy.random import random_sample, seed
		from integral_image import *
		seed(42)
		I = random_sample((11,11))
		# symmetrize by copying the first quadrant
		I[:,6:] = fliplr(I[:,0:5])
		I[6:,:] = flipud(I[0:5,:])
		intimg = integral_image(I)
		symmimg = integral_image(I[:6,:6])
		
		for (x0,x1,y0,y1) in [(-1,0,-1,0), # first element
							  (1,4,1,4), # first quadrant (simple)
							  (-1,5,-1,5), # first quadrant (edges)
							  (6,9,1,4), # quadrant 2 (simple)
							  (5,10,1,4), # quadrant 2 (edges)
							  (4,5,-1,5), # quadrant 1/2 boundary
							  (4,6,-1,5), # quadrant 1/2 boundary + 1 over
							  (-1,10,-1,5), # quadrants 1,2 (up to edges)
							  (7,9,7,9), # quadrant 3
							  (1,4,7,9), # quadrant 4
							  (3,7,4,8), #
							  (-1,10,-1,10), #
							  (5,6,-1,0)]:
			R0 = intimg_rect(intimg, x0, x1, y0, y1)
			R1 = symm_intimg_rect(symmimg, x0, x1, y0, y1, 5)
			print(round(R0,5), round(R1,5), round(R0,5) == round(R1,5))
			self.assertAlmostEqual(R1, R0, 8)
Example #30
    def _batch_query(self, mim_ids, _, sleep_time):
        html_dict = {}

        # OMIM seems to be very strict against web crawlers and bans IPs.
        # That's why we override the _batch_query method and avoid
        # parallelization here.
        # Never omit the sleeping step between queries, and never let it
        # be less than a couple of seconds, just in case:
        if sleep_time < self.MIN_SLEEP_TIME:
            print('Force sleep time to ', self.MIN_SLEEP_TIME)
            sleep_time = self.MIN_SLEEP_TIME

        sys.stdout.flush()  # Necessary for tqdm to write to stdout correctly
        for i, mim_id in enumerate(tqdm(mim_ids, desc=self.TQDM_PREFIX)):
            if i > 0:
                # To further simulate real human behavior when visiting the page,
                # randomize the sleeping time:
                # random_sleep_time = randomize_sleep_time(sleep_time)
                random_sleep_time = sleep_time + random_sample() * sleep_time
                time.sleep(random_sleep_time)

            html = self._query(mim_id)
            self._cache_set({mim_id: html})
            html_dict[mim_id] = html

        return html_dict
Example #31
    def update_particles_with_odom(self, noise):
        """ Update the particles using the newly given odometry pose.
            The function computes the value delta which is a tuple (x,y,theta)
            that indicates the change in position and angle between the odometry
            when the particles were last updated and the current odometry.

            noise: a number between 0 and 1 which describes how much noise we will
            add to the movements
        """
        new_odom_xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)
        # compute the change in x,y,theta since our last update
        if self.current_odom_xy_theta:
            old_odom_xy_theta = self.current_odom_xy_theta
            delta = (new_odom_xy_theta[0] - self.current_odom_xy_theta[0],
                     new_odom_xy_theta[1] - self.current_odom_xy_theta[1],
                     new_odom_xy_theta[2] - self.current_odom_xy_theta[2])

            self.current_odom_xy_theta = new_odom_xy_theta
        else:
            self.current_odom_xy_theta = new_odom_xy_theta
            return

        d_x, d_y, d_theta = delta

        # move each particle the same distance in their coordinate frame
        new_particles = []
        for p in self.particle_cloud:
            angle_between_frames = old_odom_xy_theta[2] - p.theta
            res = rotate(angle_between_frames, d_x, d_y)
            new_d_x = res[0]
            new_d_y = res[1]
            rands = random_sample(3)
            pos = np.array([p.x + new_d_x, p.y + new_d_y, p.theta + d_theta])
            pos_noisy = pos * (1 + noise * (rands - 0.5))
            new_particle = Particle.from_numpy(np.append(pos_noisy, p.w))
            new_particles.append(new_particle)
        self.particle_cloud = new_particles
Example #32
File: testsim.py  Project: h-bryant/funcsim
def simulator(stat, samples, precision, rounds):
    """
    Simulates a distribution-free statistical test to estimate its p-values.

    The first argument should be a function that computes the simulated
    statistic for a vector of ordered, uniform(0, 1) samples.

    The second argument, samples, tells how many samples to generate and test
    with. To generate a typical p-values table you would run the function for
    a number of consecutive sample counts.

    The third argument, precision, decides how many critical values to return.
    For example with precision = 100 you will get 99 values; approximately, a
    statistic above value indexed 94 will have a probability lower than .05.

    The fourth argument, rounds, tells us how many times to repeat the data
    generation and statistic calculation. The more rounds the higher the
    quality of the results. Must be a (large) multiple of precision.

    Example::

        from skgof.testsim import simulator
        from skgof.ecdfgof import ks_stat

        # Repeat the simulation one million times for vectors of 10 samples.
        ks10 = simulator(ks_stat, 10, 100, 1e6)

        # Get the approximate 95% critical value (to about 2 decimal digits).
        ks10[94]  # 0.409...
    """
    rounds = int(rounds)
    data = random_sample(size=(rounds, samples))
    data.sort(axis=1)
    stats = fromiter((stat(d) for d in data), float, rounds)
    stats.sort()
    step = int(rounds / precision)
    return stats[step:rounds:step]
Example #33
def draw_random_sample(n, list, prob):
    """ Draws a random sample of n elements from a given list of choices 
    and their specified probabilities. We recommend that you fill in this 
    function using random_sample. """

    # get an array of numbers that correspond to indices in the list with 
    #   a specific prob value
    prob_idxs = []
    for i in range(len(list)):
        if list[i] == prob:
            prob_idxs.append(int(i))

    # randomly sample n elements as indices to choose from prob_idxs
    random_nums = len(prob_idxs) * random_sample((n, ))
    random_nums = random_nums.astype(int)

    # return randomly chosen elements in prob_idxs as a new array, with
    #   each new element being the index in list that corresponds to coordinates
    #   with a specific color, such as light gray (inside the house)
    list_idxs = []
    for i in random_nums:
        list_idxs.append(prob_idxs[i])
    
    return list_idxs
def draw_random_sample(cloud, probs, n):
    """ Return a random sample of n elements from the set choices with the specified probabilities
        choices: the values to sample from represented as a list
        probabilities: the probability of selecting each element in choices represented as a list
        n: the number of samples
    """
    # Create a new empty cloud
    new_cloud = []

    # Create a cloud that stores the integers 1-n
    fake_cloud = np.array(range(n))

    # Create bins from the cumulative sums of our probabilities
    bins = np.cumsum(probs)

    # Create a new array using the np.searchsorted function
    new_array = fake_cloud[np.searchsorted(bins, random_sample(n),
                                           side='left')]

    # For each index in our new array, add that particle from our original particle cloud to our new array
    for part in new_array:
        new_cloud.append(deepcopy(cloud[part]))

    return new_cloud
Example #35
def main():
    parser = get_parser()
    args = parser.parse_args()
    dataset = '/tmp/data'
    if not os.path.exists(dataset):
        os.makedirs(dataset)
    train = pd.DataFrame(random.randint(low=0, high=100, size=(1000, 2)),
                         columns=['x', 'y'])
    train['label'] = train.apply(lambda v: 0 if v['x'] > v['y'] +
                                 (5 - random.random_sample() * 10) else 1,
                                 axis=1)
    test = pd.DataFrame(random.randint(low=0, high=100, size=(100, 2)),
                        columns=['x', 'y'])
    test['label'] = test.apply(lambda v: 0 if v['x'] > v['y'] else 1, axis=1)
    train.to_csv(dataset + '/train.csv')
    test.to_csv(dataset + '/test.csv')
    kl = client.Client()
    kl.datasets.push(os.environ.get('WORKSPACE_NAME'),
                     args.dataset,
                     args.version,
                     dataset,
                     create=True)
    client.update_task_info(
        {'dataset': '%s:%s' % (args.dataset, args.version)})
Example #36
def debugRectangles(rects):
    """
    Sanity test on rectangle sets go here.
    """
    # first, are any out of bounds?
    for r in rects:
        if r.lb[0] < 0. or r.lb[1] < 0.:
            print("ERROR: rectangle has lb", r.lb)
        if r.ub[0] > 1. or r.ub[1] > 1.:
            print("ERROR: rectangle has ub", r.ub)
        if r.lb[0] > r.ub[0] or r.lb[1] > r.ub[1]:
            print('ERROR: lower bound greater than upper')
        assert r.center == [l + (u - l) / 2. for u, l in zip(r.lb, r.ub)]
        assert r.d == sum([(l - c)**2. for l, c in zip(r.lb, r.center)])**0.5

    # okay, now sample
    for _ in range(10000):
        hits = 0
        samp = random_sample(2)
        for r in rects:
            if all(samp > r.lb) and all(samp < r.ub):
                hits += 1
        if hits != 1:
            print("ERROR: location has %d hits: %s" % (hits, samp))
Example #37
    def partida_aplastador(self, camion):

        self.total_material_transportado += camion.capacidad

        numero_aleatorio = random.random_sample()

        if numero_aleatorio < Simulacion.PROB_DESCOMPOSTURA:
            camion.lugar_descompostura = 'partida_aplastador'
            self.descompostura(camion)
        else:
            # Generate the departing truck's arrival at the shovel (return time)
            tiempo = camion.tiempo_de_regreso() + self.reloj_simulacion
            self.lista_de_eventos['arribo_pala'].agregar(tiempo, camion)

        if self.cola_aplastador.cola:
            camion_cola = self.cola_aplastador.cola.pop(0)

            # Generate the truck's departure from the crusher
            tiempo = camion_cola.tiempo_de_descarga() + self.reloj_simulacion
            self.lista_de_eventos['partida_aplastador'].agregar(
                tiempo, camion_cola)

        else:
            self.estado_aplastador = Simulacion.DESOCUPADO
Example #38
def fourth_animation(scene_name, type, index_name):
    objC = oc.ObjectCreator()
    rot = list(rng.randint(0, 360, 3))
    color = getRandColor()
    size = rng.random_sample(1)[0] * 1.5 + 1
    objC.addCone([0.0, 0.0, 0.0], [1, 1, 1], color, rot)
    xyz = getxyz(10)
    objC.addPointLight(xyz, [1, 1, 1])
    dict = {
        "objects": [{
            "light": {
                "position": xyz
            }
        }, {
            "rect": {
                "color": color,
                "color_class": get_color_class(color),
                "size": size
            }
        }],
        "name":
        scene_name
    }
    sceneFromObjCreator(scene_name, objC, dict, type, index_name)
Example #39
def gradient_test(train_x, train_y):
    estimators = random_integers(25, 100, size=5)
    print(estimators)
    max_features = random_integers(1, 7, size=6)
    min_samples_split = random_integers(2, 11, size=5)
    subsample = random_sample((5, ))
    params = {
        "n_estimators": sp.stats.randint(25, 100),
        "loss": ["deviance", "exponential"],
        "max_features": sp.stats.randint(1, 7),
        "min_samples_split": sp.stats.randint(2, 11),
        "criterion": ["friedman_mse", "mse", "mae"],
        "max_depth": [3, None]
    }

    random_search = RandomizedSearchCV(GradientBoostingClassifier(),
                                       param_distributions=params,
                                       cv=10,
                                       n_iter=5)
    start = time()
    random_search.fit(train_x, train_y)
    print("GridSearchCV took %.2f seconds for %d candidates"
          " parameter settings." % ((time() - start), 20))
    report(random_search.cv_results_)
Example #40
def linpack(N):
    eps = 2.22e-16

    ops = (2.0 * N) * N * N / 3.0 + (2.0 * N) * N

    # Create AxA array of random numbers -0.5 to 0.5
    A = random.random_sample((N, N)) - 0.5
    B = A.sum(axis=1)

    # Convert to matrices
    A = matrix(A)

    B = matrix(B.reshape((N, 1)))
    na = amax(abs(A.A))

    start = time()
    X = linalg.solve(A, B)
    latency = time() - start

    mflops = (ops * 1e-6 / latency)

    result = {'mflops': mflops, 'latency': latency}

    return result
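A quick usage sketch, assuming the snippet's bare names come from `from numpy import *` and `from time import time`:

result = linpack(500)
print("%.1f MFLOPS, solved in %.4f s" % (result['mflops'], result['latency']))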
Example #41
    def sphero_populate(self):
        # generate velocity commands
        twist_msg, twist_msg_2 = Twist(), Twist()
        twist_msg_3, twist_msg_4 = Twist(), Twist()

        twist_msg.linear.x = 100 * npr.random_sample()
        twist_msg.linear.y = 50 * npr.random_sample()
        twist_msg.linear.z = 32 * npr.random_sample()
        twist_msg.angular.x = 100 * npr.random_sample()
        twist_msg.angular.y = 200 * npr.random_sample()
        twist_msg.angular.z = 300 * npr.random_sample()

        twist_msg_2.linear.x = 2.2 * twist_msg.linear.x
        twist_msg_2.linear.y = 2.2 * twist_msg.linear.y
        twist_msg_2.linear.z = 2.2 * twist_msg.linear.z
        twist_msg_2.angular.x = 2.2 * twist_msg.angular.x
        twist_msg_2.angular.y = 2.2 * twist_msg.angular.y
        twist_msg_2.angular.z = 2.2 * twist_msg.angular.z

        twist_msg_3.linear.x = 0.5 * twist_msg.linear.x
        twist_msg_3.linear.y = 0.5 * twist_msg.linear.y
        twist_msg_3.linear.z = 0.5 * twist_msg.linear.z
        twist_msg_3.angular.x = 0.5 * twist_msg.angular.x
        twist_msg_3.angular.y = 0.5 * twist_msg.angular.y
        twist_msg_3.angular.z = 0.5 * twist_msg.angular.z

        twist_msg_4.linear.x = 0.25 * twist_msg_2.linear.x
        twist_msg_4.linear.y = 0.25 * twist_msg_2.linear.y
        twist_msg_4.linear.z = 0.25 * twist_msg_2.linear.z
        twist_msg_4.angular.x = 0.25 * twist_msg_2.angular.x
        twist_msg_4.angular.y = 0.25 * twist_msg_2.angular.y
        twist_msg_4.angular.z = 0.25 * twist_msg_2.angular.z

        #send robots around on twist topic
        self.twist_rgo_pub.publish(twist_msg)
        self.twist_ypp_pub.publish(twist_msg_2)
        self.twist_pob_pub.publish(twist_msg_3)
        self.twist_www_pub.publish(twist_msg_4)

        self.twist_rpg_pub.publish(twist_msg)
        self.twist_rwp_pub.publish(twist_msg_2)
        self.twist_wwb_pub.publish(twist_msg_3)
        self.twist_gbr_pub.publish(twist_msg_4)
Example #42
 def random_start(self):
     """ Random point in the interval."""
     a, b = self.interval
     return a + (b - a) * rn.random_sample()
Example #43
 def answer(self, item):
     answer = random.random_sample()
     if item == 1:
         return answer <= (1 - self.error_rate_yes)
     if item == 0:
         return answer <= self.error_rate_no
Example #44
def information_coefficient(x, y, n_grids=25,
                            jitter=1E-10, random_seed=RANDOM_SEED):
    """
    Compute the information coefficient between x and y, which are
        continuous, categorical, or binary vectors. This function uses only python libraries -- No R is needed.
    :param x: numpy array;
    :param y: numpy array;
    :param n_grids: int; number of grids for computing bandwidths
    :param jitter: number;
    :param random_seed: int or array-like;
    :return: float; Information coefficient
    """

    # Can't work with missing any value
    # not_nan_filter = ~isnan(x)
    # not_nan_filter &= ~isnan(y)
    # x = x[not_nan_filter]
    # y = y[not_nan_filter]

    x, y = drop_nan_columns([x, y])

    # Need at least 3 values to compute bandwidth
    if len(x) < 3 or len(y) < 3:
        return 0

    x = asarray(x, dtype=float)
    y = asarray(y, dtype=float)

    # Add jitter
    seed(random_seed)
    x += random_sample(x.size) * jitter
    y += random_sample(y.size) * jitter

    # Compute bandwidths
    cor, p = pearsonr(x, y)

    # bandwidth_x = asarray(bcv(x)[0]) * (1 + (-0.75) * abs(cor))
    # bandwidth_y = asarray(bcv(y)[0]) * (1 + (-0.75) * abs(cor))

    # Compute P(x, y), P(x), P(y)
    # fxy = asarray(
    #     kde2d(x, y, asarray([bandwidth_x, bandwidth_y]), n=asarray([n_grids]))[
    #         2]) + EPS

    # Estimate fxy using scipy.stats.gaussian_kde
    xmin = x.min()
    xmax = x.max()
    ymin = y.min()
    ymax = y.max()
    X, Y = np.mgrid[xmin:xmax:complex(0, n_grids), ymin:ymax:complex(0, n_grids)]
    positions = np.vstack([X.ravel(), Y.ravel()])
    values = np.vstack([x, y])
    kernel = gaussian_kde(values)
    fxy = np.reshape(kernel(positions).T, X.shape) + EPS

    dx = (x.max() - x.min()) / (n_grids - 1)
    dy = (y.max() - y.min()) / (n_grids - 1)
    pxy = fxy / (fxy.sum() * dx * dy)
    px = pxy.sum(axis=1) * dy
    py = pxy.sum(axis=0) * dx

    # Compute mutual information;
    mi = (pxy * log(pxy / (asarray([px] * n_grids).T *
                           asarray([py] * n_grids)))).sum() * dx * dy

    # # Get H(x, y), H(x), and H(y)
    # hxy = - (pxy * log(pxy)).sum() * dx * dy
    # hx = -(px * log(px)).sum() * dx
    # hy = -(py * log(py)).sum() * dy
    # mi = hx + hy - hxy

    # Compute information coefficient
    ic = sign(cor) * sqrt(1 - exp(-2 * mi))

    # TODO: debug when MI < 0 and |MI|  ~ 0 resulting in IC = nan
    if isnan(ic):
        ic = 0

    return ic
Example #45
 def weighted_probs(outcomes, probabilities, size):
     temp = np.add.accumulate(probabilities)
     return outcomes[np.digitize(random_sample(size), temp)]
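A short usage sketch, assuming numpy and random_sample are in scope as the snippet requires:

import numpy as np
from numpy.random import random_sample

outcomes = np.array([0, 1])
draws = weighted_probs(outcomes, np.array([0.25, 0.75]), 10000)
print(draws.mean())   # roughly 0.75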
Example #46
import pygmt
import numpy as np
from scipy.special import sph_harm
from numpy.random import random_sample

#come up with n random points on a sphere
n=10000
x = random_sample(n)-0.5
y = random_sample(n)-0.5
z = random_sample(n)-0.5
lats = np.arccos( z/np.sqrt(x*x+y*y+z*z))
lons = np.arctan2( y, x )+np.pi

#evaluate a spherical harmonic on that sphere
vals = sph_harm(4,9, lons, lats).real
vals = vals/np.amax(vals)

lons = lons*180.0/np.pi
lats = 90.0-lats*180.0/np.pi

#recover the spherical harmonic with contouring
fig = pygmt.GMT_Figure("output.ps", figure_range='g', projection='G-75/41/7i', verbosity=2)
dataset = fig.blockmean('-I5/5 -Rg', [lons,lats,vals])
grid = fig.surface('-I5/5 -Rg', dataset)
c = fig.grd2cpt('-Chot', grid, output=None)
fig.grdimage('-E100i', grid, cpt=c)
fig.grdcontour('-Wthick,black -C0.2', grid)
fig.psxy('-Sp.1c', [lons,lats,vals])

fig.close()
Example #47
#set row-count
size = 1000 * 1000 * 10
## ... or read as argument
if len(sys.argv) > 1:
    size = int(sys.argv[1])

#set size of randomly generated Chunks
CHUNK_SIZE = 10000
nat1 = list()
nat2 = list()
int1 = list()
float1 = list()

for i in range(0, size):
    ## for each sample row, produce random numbers:
    if i % CHUNK_SIZE == 0:
        nat1 = rdm.random_integers(0, 1000, CHUNK_SIZE)
        nat2 = rdm.random_integers(0, 10 * 1000 * 1000, CHUNK_SIZE)
        int1 = rdm.random_integers(-1000, 1000, CHUNK_SIZE)
        float1 = -10 + 40 * rdm.random_sample(CHUNK_SIZE)
    print(i,
          nat1[i % CHUNK_SIZE],
          nat2[i % CHUNK_SIZE],
          float1[i % CHUNK_SIZE],
          int1[i % CHUNK_SIZE],
          sep=";")
#    line=';'.join([str(i), str(nat1[i%CHUNK_SIZE]), str(nat2[i%CHUNK_SIZE]), str(float1[i%CHUNK_SIZE]), str(int1[i%CHUNK_SIZE])])

#   print(line)
Example #48
#Name: Anup Kumar
from numpy import matrix, array, random, min, max 
import pylab as plb 

A = random.random_sample(600)

B = plb.linspace(-plb.pi*3, plb.pi*2, 500, endpoint=True)

def replace_elem(arr):
    #print("Array min/max :", arr.min()," - ", arr.max())
    #for i, k in enumerate(arr, start = 0) :
    for i in range(arr.size):
        if arr[i] < 2 or arr[i] > 9:
                arr[i] = (arr.min() + arr.max()) /2
#                print(arr[i])
                arr[i] = (arr[i] / ( arr.max() / 0.1 ))
#                print(arr[i])
    return arr
                
C = replace_elem(A)
#print(C)

C = array(C)
#print(C)

D = C[0:len(B)] + B

Example #49
 def __init__(self, number_of_weights):
     self.weights = random_sample(number_of_weights)
Example #50
import matchingmarkets as mm
import numpy.random as rng

#
# Simulation Test
#

print("\n\nSimulation Test\n")

sim = mm.simulation(time_per_run=50, runs=10, logAllData=True)

print("Regular Run\n")

sim.run(arrival_rate=rng.randint(50),
        average_success_prob=lambda: rng.random(),
        discount=lambda: rng.random_sample(),
        typeGenerator=lambda x: rng.randint(x),
        numTypes=5)

sim.stats()

print("\n\nSingle Run tests \n")

arrival_r = rng.randint(1, 5)
print("Arrival rate: ", arrival_r)

lossTest = sim.single_run(0,
                          arrival_rate=arrival_r,
                          average_success_prob=lambda: rng.random(),
                          discount=lambda: rng.random_sample(),
                          typeGenerator=lambda x: rng.randint(x),
Example #51
 def move(self):
     '''create a state change to the original state'''
     amplitude = (max(self.interval) -
                  min(self.interval)) * self.fraction / 10
     delta = (-amplitude / 2.) + amplitude * rn.random_sample()
     return delta
Example #52
            targetRow = random.randint(0, matrix_dim)

            targetPos = int(targetRow + math.sqrt(N) * targetCol)

            #            colors = np.ones([N,3])
            newOris = drawOris(N, dmean, block['dtype'], block['dsd'],
                               targetPos, targetOri)
            for i in range(N):
                if i != targetPos:
                    trial['d_ori_%i' % i] = round(newOris[i], 1)
                else:
                    trial['d_ori_%i' % i] = ''
                    trial['correctResponse'] = 'up' if coordinates[i][
                        1] > 0 else 'down'
                coordinates[i] = coordinates[i] + (
                    -0.5 + random.random_sample(),
                    -0.5 + random.random_sample())
                trial['stim_pos_x_%i' % i] = round(coordinates[i][0], 3)
                trial['stim_pos_y_%i' % i] = round(coordinates[i][1], 3)
#            colors[targetPos,:] = (1, 0,0)
            coordinates_by_trial[curTrialN] = coordinates
            oris_by_trial[curTrialN] = newOris - 45
            set_sizes[curTrialN] = N
            #            colors_by_trial[curTrialN] = colors

            trial['targetDist'] = targetDist
            trial['distrMean'] = dmean

            trial['targetOri'] = round(targetOri, 1)

            trial['targetCol'] = targetCol
Example #53
def mutacija(populacija,
             p_mut):  # mutates a given percentage of bits in the population
    br_jed = len(populacija)
    m = len(populacija[0])
    for i in range(br_jed):
        br_prom = random.binomial(m, p_mut)
        if br_prom != 0:
            indeksi = random.choice(m, br_prom, replace=False)
            for j in indeksi:
                populacija[i] = izvrni_bit(populacija[i], j)
    return populacija


def genetski_algoritam(populacija, p_elit, p_mut):
    populacija, fitness = np.split(populacija, [-1], axis=1)
    populacija = elitizam(populacija, p_elit)
    populacija = selekcija(populacija, fitness)
    populacija = ukrstanje(populacija)
    populacija = mutacija(populacija, p_mut)
    # print(populacija.shape)
    return np.concatenate((populacija, fitness), axis=1)


if __name__ == "__main__":
    pop = [[
        random.random_integers(0, 1, podaci.chromosome_len),
        random.random_sample()
    ] for i in range(podaci.pop_size)]
    t = time.process_time()
    genetski_algoritam(pop, 0.05, 0.03)
    print(time.process_time() - t)
Example #54
from numpy import random

random.seed(0)

totals = {20: 0, 30: 0, 40: 0, 50: 0, 60: 0, 70: 0}
purchases = {20: 0, 30: 0, 40: 0, 50: 0, 60: 0, 70: 0}
totalPurchases = 0
for _ in range(100000):
    ageDecade = random.choice([20, 30, 40, 50, 60, 70])
    purchaseProbability = float(ageDecade) / 100.0
    totals[ageDecade] += 1
    test = random.random_sample()
    if test < purchaseProbability:
        totalPurchases += 1
        purchases[ageDecade] += 1

print("Totals:", totals)
Example #55
    def rvs(self, samples=1):
        ''' generate random samples from the distribution

        Parameters
        ----------
        samples : integer, number of random samples to draw

        Returns
        -------
        samples x 3 array with x, y, z velocities of each sample
        '''

        # take only 100,000 samples at a time for memory reasons
        if samples > 100000:
            vel = []
            remaining = samples
            while remaining > 0:
                vel.append(self.rvs(min(remaining, 100000)))
                remaining -= 100000
            return np.vstack(vel)

        # first get the x-coordinates
        x_uniform = random_sample(samples)
        ix = self._xcdf.searchsorted(x_uniform)
        dx = ((-self._xpdf[ix - 1] +
               np.sqrt(self._xpdf[ix - 1]**2 + 2 *
                       (self._xpdf[ix] - self._xpdf[ix - 1]) *
                       (x_uniform - self._xcdf[ix - 1]))) /
              (self._xpdf[ix] - self._xpdf[ix - 1]))
        # fix divisions by zero (e.g. if clipping of distribution)
        zerofix = np.isclose(self._xpdf[ix],
                             self._xpdf[ix - 1],
                             rtol=1e-12,
                             atol=1e-12)
        dx[zerofix] = random_sample(zerofix.sum())
        x = self._x[ix - 1] + dx * (self._x[ix] - self._x[ix - 1])

        # interpolate the y-direction cdf and get y-coordinate
        interp_ycdf = (self._ycdf[ix - 1, :] * (1.0 - dx[:, newaxis]) +
                       dx[:, newaxis] * (self._ycdf[ix, :]))
        interp_ypdf = (self._ypdf[ix - 1, :] * (1.0 - dx[:, newaxis]) +
                       dx[:, newaxis] * (self._ypdf[ix, :]))
        y_uniform = np.random.random_sample(samples)
        jy = np.empty(y_uniform.shape, dtype=int)
        for j, sample in enumerate(y_uniform):
            jy[j] = interp_ycdf[j, :].searchsorted(sample)
        iy = np.arange(0, samples)
        dy = ((-interp_ypdf[iy, jy - 1] +
               np.sqrt(interp_ypdf[iy, jy - 1]**2 + 2 *
                       (interp_ypdf[iy, jy] - interp_ypdf[iy, jy - 1]) *
                       (y_uniform - interp_ycdf[iy, jy - 1]))) /
              (interp_ypdf[iy, jy] - interp_ypdf[iy, jy - 1]))
        # fix divisions by zero (e.g. if clipping of distribution)
        zerofix = np.isclose(interp_ypdf[iy, jy],
                             interp_ypdf[iy, jy - 1],
                             rtol=1e-12,
                             atol=1e-12)
        dy[zerofix] = np.random.random_sample(zerofix.sum())
        y = self._y[jy - 1] + dy * (self._y[jy] - self._y[jy - 1])

        # finally, interpolate in the z-direction
        interp_zcdf = (
            (1.0 - dx[:, newaxis] - dy[:, newaxis] +
             dx[:, newaxis] * dy[:, newaxis]) * self._zcdf[ix - 1, jy - 1, :] +
            dx[:, newaxis] *
            (1.0 - dy[:, newaxis]) * self._zcdf[ix, jy - 1, :] +
            dy[:, newaxis] *
            (1.0 - dx[:, newaxis]) * self._zcdf[ix - 1, jy, :] +
            dx[:, newaxis] * dy[:, newaxis] * self._zcdf[ix, jy, :])
        interp_zpdf = (
            (1.0 - dx[:, newaxis] - dy[:, newaxis] +
             dx[:, newaxis] * dy[:, newaxis]) * self._zpdf[ix - 1, jy - 1, :] +
            dx[:, newaxis] *
            (1.0 - dy[:, newaxis]) * self._zpdf[ix, jy - 1, :] +
            dy[:, newaxis] *
            (1.0 - dx[:, newaxis]) * self._zpdf[ix - 1, jy, :] +
            dx[:, newaxis] * dy[:, newaxis] * self._zpdf[ix, jy, :])
        z_uniform = random_sample(samples)
        kz = np.empty(z_uniform.shape, dtype=int)
        for k, sample in enumerate(z_uniform):
            kz[k] = interp_zcdf[k, :].searchsorted(sample)
        iz = np.arange(0, samples)
        dz = ((-interp_zpdf[iz, kz - 1] +
               np.sqrt(interp_zpdf[iz, kz - 1]**2 + 2 *
                       (interp_zpdf[iz, kz] - interp_zpdf[iz, kz - 1]) *
                       (z_uniform - interp_zcdf[iz, kz - 1]))) /
              (interp_zpdf[iz, kz] - interp_zpdf[iz, kz - 1]))
        # fix divisions by zero (e.g. if clipping of distribution)
        zerofix = np.isclose(interp_zpdf[iz, kz],
                             interp_zpdf[iz, kz - 1],
                             rtol=1e-12,
                             atol=1e-12)
        dz[zerofix] = random_sample(zerofix.sum())
        z = self._z[kz - 1] + dz * (self._z[kz] - self._z[kz - 1])

        return np.column_stack((x, y, z))
Example #56
File: gui.py  Project: hfurhoff/exjobb
	def processSearch(self):
		startTime = str(datetime.datetime.now())
		self.readInputFields()
		randCourse = False
		try:
			c = self.entries[self._COURSEINDEX].get()
			f = float(c)
			i = int(c)
		except:
			randCourse = True
			
		try:
			tx = self.entries[self._TARGETXINDEX].get()
			ty = self.entries[self._TARGETYINDEX].get()
			f = float(tx)
			i = int(tx)
			f = float(ty)
			i = int(ty)
		except:
			self.values[self._TARGETXINDEX] = 'r'
			self.values[self._TARGETYINDEX] = 'r'
		ackSimLength = dict()
		ackProcTime = dict()
		gridsizes = dict()
		targets = []
		originalTargetX = self.values[self._TARGETXINDEX]
		originalTargetY = self.values[self._TARGETYINDEX]
		for strat in self.strategies:
			ackSimLength[strat] = 0
			ackProcTime[strat] = 0
		runs = int(self.values[self._RUNSINDEX])
		executedRuns = 0
		latestDTO = None
		try:
			gc.disable()
			for i in range(runs):
				print(repr(i) + '.0')
				if randCourse:
					self.values[self._COURSEINDEX] = int(random.random_sample() * 360)
				self.values[self._TARGETXINDEX] = originalTargetX
				self.values[self._TARGETYINDEX] = originalTargetY
				strat = self.strategies[0]
				premises = self.getPremises(strat)
				self.contr.simulate(premises)
				sa = self.contr.getSearcharea()
				latestDTO = self.contr.getAckumulatedSearch()
				self.processedSearches[latestDTO.toString()] = latestDTO
				self.searchSelector.insert(0, latestDTO.toString())
				ackSimLength[strat] = ackSimLength[strat] + latestDTO.len()
				rt = latestDTO.getRTS() + (0.000001 * latestDTO.getRTMS())
				print('running time: ' + repr(rt) + '\n')
				ackProcTime[strat] = ackProcTime[strat] + rt
				target = sa.getTarget()
				x, y = target.getX(), target.getY()
				orTar = Point(x, y)
				targets.append(orTar)
				if i == 0:
					gridsizes[strat] = latestDTO.getGridsize()
				if len(self.strategies) > 1:
					self.values[self._TARGETXINDEX] = x
					self.values[self._TARGETYINDEX] = -y
					for j in range(1, len(self.strategies)):
						print(repr(i) + '.' + repr(j))
						strat = self.strategies[j]
						premises = self.getPremises(strat)
						self.contr.simulate(premises)
						latestDTO = self.contr.getAckumulatedSearch()
						rt = latestDTO.getRTS() + (0.000001 * latestDTO.getRTMS())
						print('running time: ' + repr(rt) + '\n')
						self.processedSearches[latestDTO.toString()] = latestDTO
						self.searchSelector.insert(0, latestDTO.toString())
						ackSimLength[strat] = ackSimLength[strat] + latestDTO.len()
						ackProcTime[strat] = ackProcTime[strat] + rt
						if i == 0:
							gridsizes[strat] = latestDTO.getGridsize()
				executedRuns += 1
				if executedRuns % 3 == 0 and not executedRuns > runs - 4:
					self.searchSelector.delete(0, END)
					self.processedSearches.clear()
					gc.collect()
					print(gc.garbage)
		except Exception:
			print("Exception in user code:")
			print('-' * 60)
			traceback.print_exc(file=sys.stdout)
			print('-' * 60)
		finally:
			if not gc.isenabled():
				gc.collect()
				gc.enable()
		self.values[self._TARGETXINDEX] = originalTargetX
		self.values[self._TARGETYINDEX] = originalTargetY
		totDist = 0
		origo = Point(0, 0)
		for p in targets:
			totDist = totDist + p.distTo(origo)
		
		mttds = dict()
		averageProcTime = dict()
		h = str(latestDTO.getHeight())
		w = str(latestDTO.getWidth())
		filename = "../../mttds/" + h + 'x' + w
		for strat in self.strategies:
			mttds[strat] = ackSimLength[strat] / float(executedRuns)
			averageProcTime[strat] = ackProcTime[strat] / float(executedRuns)
			tmp = strat[:-3]
			filename = filename + '_' + tmp[:2] + tmp[-2:]
		with open(filename + ".txt", "a") as outfile:
			outfile.write('*************************************************************************\n')
			outfile.write('STARTED: \t' + startTime + '\n')
			endTime = str(datetime.datetime.now())
			outfile.write('FINISHED:\t' + endTime + '\n')
			outfile.write('PARAMETERS: ')
			if randCourse:
				self.values[self._COURSEINDEX] = 'r'
			for i in range(len(self.values)):
				if i % 2 == 0:
					outfile.write('\n' + self.textFields[i] + ' : ' + str(self.values[i]))
				else:
					outfile.write('\t' + self.textFields[i] + ' : ' + str(self.values[i]))
			outfile.write('\n\nUNITS : SECONDS AND METERS')
			outfile.write('\nAVERAGE DISTANCE TO TARGET FROM ORIGO: ' + str(totDist / executedRuns))
			outfile.write('\nRUNS PROCESSED: ' + str(executedRuns))
			outfile.write('\nHEIGHTxWIDTH: ' + h + 'x' + w + '\n')
			outfile.write('-------------------------------------------------------------------------\n')
			outfile.write('Strategy\t|MTTD\t\t|MEAN PROC TIME\t\t|GRIDSIZE\t|\n')
			outfile.write('-------------------------------------------------------------------------\n')
			for strat in self.strategies:
				stratname = strat[:2] + strat[-5:-3]
				apt = str(averageProcTime[strat])
				mttd = str(round(mttds[strat], 2))
				if len(mttd) < 7:
					mttd = mttd + '\t'
				if len(apt) < 7:
					apt = apt + '\t'
				gs = str(gridsizes[strat])
				if len(gs) < 7:
					gs = gs + '\t'
				outfile.write(stratname + '\t\t|' + mttd + '\t|' + apt + '\t\t|' + gs + '\t|\n')
			outfile.write('*************************************************************************\n')
		print('processing done')
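A detail worth keeping from processSearch is its garbage-collector discipline: gc.disable() before the batch, a manual gc.collect() every few runs to bound memory growth, and a finally block that re-enables collection even if a run raises. A minimal sketch of that pattern in isolation (do_run is a stand-in, not from the project):

import gc

def run_batch(runs, do_run):
    # Run a batch with the GC paused, collecting manually at intervals.
    try:
        gc.disable()             # avoid collector pauses inside the hot loop
        for i in range(runs):
            do_run(i)
            if i % 3 == 0:
                gc.collect()     # bound memory growth between collections
    finally:
        if not gc.isenabled():
            gc.collect()
            gc.enable()          # never leave the GC disabled on exit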
Example #57
0
def get_rand_data(col):
    rng = col.max() - col.min()
    return pd.Series(random_sample(len(col)) * rng + col.min())
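get_rand_data draws uniform values spanning the same range as an existing column, which is useful for building a random baseline against real data. A usage sketch, assuming the imports the snippet relies on (pandas and numpy.random.random_sample) are in scope; the column name is illustrative:

import pandas as pd
from numpy.random import random_sample

df = pd.DataFrame({'height': [150.0, 160.0, 175.0, 182.0]})
fake = get_rand_data(df['height'])   # uniform draws over [150.0, 182.0)
assert fake.between(df['height'].min(), df['height'].max()).all()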
Example #58
0
        print("La simulation n'est pas supportée avec ces paramètres :")
        print(params_values)
        df = pd.DataFrame([['default'], [0], ['failed']],
                          columns=['Parameter', 'score', 'status'])

else:
    print("La simulation n'est pas supportée avec ces paramètres :")
    print(params_values)

# Loop over all chosen parameters for the hill climbing
rng = np.random.default_rng(12345)  # seeded generator (the loop below still draws via random_sample)
for i in range(len(params)):
    for j in range(n_iterations):
        # take a step
        present_param = (params[i][2] -
                         params[i][1]) * random_sample() + params[i][1]
        print("***Starting hill climbing on parameter " +
              str(params[i][0]) + " with value: " + str(present_param))
        present_param_name = params[i][0]
        present_path = present_param_name + str(present_param)
        present_params_values = params_values  # alias: the update below also mutates params_values
        present_params_values[present_param_name] = present_param

        # training
        present_v, present_spikes, present_weight_input, present_weight_inter = sim.lancement_sim(
            cellSourceSpikes,
            max_time,
            path,
            TIME_STEP=params_values["time_step"],
            input_n=params_values["input_n"],
            nb_neuron_int=params_values["nb_neuron_int"],
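The snippet is truncated mid-call, but the sampling line shows the search strategy: each candidate value is drawn uniformly from the parameter's [lower, upper] range, so this variant behaves more like per-parameter random search than incremental hill climbing. A self-contained sketch of the sample-and-score loop, with a stand-in objective in place of sim.lancement_sim (all names illustrative):

from numpy.random import random_sample

# (name, lower, upper) triples, mirroring the params list above
params = [('time_step', 0.01, 1.0), ('input_n', 10.0, 100.0)]
n_iterations = 20

def objective(values):                   # stand-in for the simulation score
    return -sum(v * v for v in values.values())

best = {name: lo for name, lo, hi in params}
best_score = objective(best)
for name, lo, hi in params:
    for _ in range(n_iterations):
        candidate = dict(best)
        candidate[name] = (hi - lo) * random_sample() + lo  # uniform in [lo, hi)
        score = objective(candidate)
        if score > best_score:           # keep improvements only
            best, best_score = candidate, score
print(best, best_score)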
Example #59
0
File: uncGA.py Project: jomorlier/KADAL
def uncGA(fitnessfcn,
          lb,
          ub,
          opt='min',
          disp=False,
          npop=300,
          maxg=200,
          args=None,
          initialization=None):
    # only required for multi-objective fcn
    if isinstance(ub, int) or isinstance(ub, float):
        nvar = 1
        ub = np.array([ub])
        lb = np.array([lb])
    else:
        nvar = len(ub)
    pmut = 0.1  # mutation probability
    pcross = 0.95  # crossover probability
    history = np.zeros(shape=[maxg, 2])  # per-generation record: [generation, best fitness]

    #Initialize population
    # samplenorm = haltonsampling.halton(nvar, npop)
    if initialization is None:
        samplenorm = sobol_points(npop, nvar)
    else:
        if initialization.ndim == 1:
            samplenorm = np.random.normal(
                initialization,
                np.std(args[1][0].KrigInfo['X_norm'], 0) * 0.2, (npop, nvar))
        else:
            n_init = np.size(initialization, 0)
            nbatch = int(npop / n_init)
            samplenorm = np.zeros((npop, nvar))
            for ij in range(n_init - 1):
                samplenorm[ij * nbatch:(ij + 1) *
                           nbatch, :] = np.random.normal(
                               initialization[ij, :],
                               np.std(args[1][0].KrigInfo['X_norm'], 0) * 0.2,
                               (nbatch, nvar))
            samplenorm[(ij + 1) * nbatch:, :] = np.random.normal(
                initialization[(ij + 1), :],
                np.std(args[1][0].KrigInfo['X_norm'], 0) * 0.2,
                (np.size(samplenorm[(ij + 1) * nbatch:, :], 0), nvar))
            samplenorm[samplenorm < 0] = 0
            samplenorm[samplenorm > 1] = 1

    population = np.zeros(shape=[npop, nvar + 1])
    for i in range(0, npop):
        for j in range(0, nvar):
            population[i, j] = (samplenorm[i, j] * (ub[j] - lb[j])) + lb[j]
            # for i in range (0,npop):
            #     for j in range (0,nvar):
            #         population[i,j] = lb[j] + (ub[j]-lb[j])*random_sample()
        if args is None:
            temp = fitnessfcn(population[i, 0:nvar])
        else:
            temp = fitnessfcn(population[i, 0:nvar], *args)
        population[i, nvar] = deepcopy(temp)

    #Evolution loop
    generation = 1
    oldFitness = 0
    while generation <= maxg:
        # for generation 1:1
        tempopulation = deepcopy(population)

        #Tournament Selection
        matingpool = np.zeros(shape=[npop, nvar])
        for kk in range(0, npop):
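            # Note: int(np.ceil(npop * random_sample())) yields indices in 1..npop;
            # the guards below reject npop, so row 0 is almost never drawn.
            # np.random.randint(0, npop) would sample all rows uniformly.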
            ip1 = int(np.ceil(npop * random_sample()))  #random number 1
            ip2 = int(np.ceil(npop * random_sample()))  #random number 2
            while ip1 >= npop or ip2 >= npop:
                ip1 = int(np.ceil(npop * random_sample()))
                ip2 = int(np.ceil(npop * random_sample()))
            if ip2 == ip1:  #In case random number 1 = random number 2
                while ip2 == ip1 or ip2 >= npop:
                    ip2 = int(np.ceil(npop * random_sample()))

            lst = np.arange(0, nvar)
            Ft1 = population[ip1, lst]
            Ft2 = population[ip2, lst]
            Fit1 = population[ip1, nvar]
            Fit2 = population[ip2, nvar]

            # Python has no switch-case; use if/elif instead
            if opt == "max":
                if Fit1 > Fit2:
                    matingpool[kk, :] = Ft1
                else:
                    matingpool[kk, :] = Ft2
            elif opt == "min":
                if Fit1 < Fit2:
                    matingpool[kk, :] = Ft1
                else:
                    matingpool[kk, :] = Ft2
            else:
                pass

        # Crossover with tournament selection
        child = np.zeros(shape=[2, nvar])
        lst = np.arange(0, nvar)
        for jj in range(0, npop, 2):
            idx1 = int(np.ceil(npop * random_sample()))
            idx2 = int(np.ceil(npop * random_sample()))
            while idx1 >= npop or idx2 >= npop or idx1 == idx2:
                idx1 = int(np.ceil(npop * random_sample()))
                idx2 = int(np.ceil(npop * random_sample()))
            if (random_sample() < pcross):
                child = SBX.SBX(matingpool[idx1, :], matingpool[idx2, :], nvar,
                                lb, ub)
                tempopulation[jj, 0:nvar] = child[0, :]
                tempopulation[jj + 1, 0:nvar] = child[1, :]
            else:
                tempopulation[jj, 0:nvar] = matingpool[idx1, :]
                tempopulation[jj + 1, 0:nvar] = matingpool[idx2, :]
            if args is None:
                tempopulation[jj, nvar] = fitnessfcn(tempopulation[jj, lst])
                tempopulation[jj + 1, nvar] = fitnessfcn(tempopulation[jj + 1,
                                                                       lst])
            else:
                tempopulation[jj, nvar] = fitnessfcn(tempopulation[jj, lst],
                                                     *args)
                tempopulation[jj + 1,
                              nvar] = fitnessfcn(tempopulation[jj + 1, lst],
                                                 *args)

        #Combined Population for Elitism
        compopulation = np.vstack((population, tempopulation))

        #Sort Population based on their fitness value
        if opt == 'max':
            i = np.argsort(compopulation[:, nvar])[::-1]
            compopulation = compopulation[i, :]
        elif opt == 'min':
            i = np.argsort(compopulation[:, nvar])
            compopulation = compopulation[i, :]

        #Record Optimum Solution
        bestFitness = compopulation[0, nvar]
        bestx = compopulation[0, 0:nvar]

        #Mutation
        for kk in range(1, (2 * npop)):
            compopulation[kk, 0:nvar] = mutation.gaussmut(
                compopulation[kk, 0:nvar], nvar, pmut, ub, lb)
            if args is None:
                compopulation[kk, nvar] = fitnessfcn(compopulation[kk, 0:nvar])
            else:
                compopulation[kk, nvar] = fitnessfcn(compopulation[kk, 0:nvar],
                                                     *args)

        history[generation - 1, 0] = generation
        history[generation - 1, 1] = bestFitness

        fiterr = 100 * (abs(bestFitness - oldFitness)) / bestFitness  # percent change; assumes bestFitness != 0
        if disp:
            print("Done, generation ", generation, " | Best X = ", bestx,
                  " | Fitness Error (%)= ", fiterr)
        generation = generation + 1
        if fiterr <= 10**(-2) and generation >= 50:
            break

        oldFitness = bestFitness
        #Next Population
        for i in range(0, npop):
            population[i, :] = compopulation[i, :]

    #Show Best Fitness and Design Variables
    # print("Best Fitness = ",bestFitness)
    # for i in range (0,nvar):
    #     print("X",i+1," = ",bestx[i])

    return (bestx, bestFitness, history)
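A usage sketch for uncGA, assuming the module and its helpers (sobol_points, SBX, mutation) are importable; the sphere objective and bounds are illustrative:

import numpy as np
from uncGA import uncGA

def sphere(x):               # illustrative objective: minimum at the origin
    return float(np.sum(np.asarray(x) ** 2))

best_x, best_fit, history = uncGA(sphere,
                                  lb=np.array([-5.0, -5.0]),
                                  ub=np.array([5.0, 5.0]),
                                  opt='min', disp=True, npop=50, maxg=100)
print(best_x, best_fit)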
Example #60
0
File: Query.py Project: mcorreaiz/CORETest
def _has_kleene(self):
    kleene = random.random_sample() < KLEENE_PROB
    return "+" if kleene else ""