Example no. 1
    def makePoint():
        """Return a random point and its satellite information.

        Satellite is 'blue' if point is in the circle, else 'red'."""
        point = random.random((2,)) * 10
        vectorLength = lambda x: dot(x.T, x)
        return point, 'blue' if vectorLength(point - center) < 25 else 'red'
Example no. 2
def gen_IC(sigma, rn, outfile="workfile", icdir="ICs", M=5, N=50, lapfile="Laplacian.txt", tries=10, iclist=[]):
    lap = loadtxt(lapfile)
    spa = sparse.csr_matrix(lap)
    success = 0
    attempts = 0
    while success == 0 and attempts < tries:
        try:
            tag = 's%.2fr%.3d' % (sigma, rn)
            tag = tag.replace(".", "")

            parameters = [35.0, 16.0, 9.0, 0.4, 0.12, sigma]
            x0 = 10 * (random.random(2 * N) - 0.5)

            tic = time.time()
            trajectory = integrate.odeint(mimura, x0, range(0, 1000), args=(parameters, spa))
            print "integration took", time.time() - tic, "seconds"

            x1 = trajectory[-1]

            sol = fsolve(mimura3, x1, args=(parameters, spa), full_output=True)
            x2 = sol[0]
            if x2 not in iclist:
                savetxt(icdir + '/init_cond_' + tag + '.txt', x2)
                write_mimu(lap, par=parameters, ic=x2, outfile=outfile)
                iclist.append(x2)
                success = 1
            attempts += 1  # count this attempt (the original incremented tries, which never ends the loop)
        except:
            pass
    return iclist
Example no. 3
    def __init__(self, dim, nNeurons, name=None, outputFullMap=False):
        if outputFullMap:
            outdim = nNeurons ** 2
        else:
            outdim = 2
        Module.__init__(self, dim, outdim, name)

        # switch modes
        self.outputFullMap = outputFullMap

        # create neurons
        self.neurons = random.random((nNeurons, nNeurons, dim))
        self.difference = zeros(self.neurons.shape)
        self.winner = zeros(2)
        self.nInput = dim
        self.nNeurons = nNeurons
        self.neighbours = nNeurons
        self.learningrate = 0.01
        self.neighbourdecay = 0.9999

        # distance matrix
        distx, disty = mgrid[0:self.nNeurons, 0:self.nNeurons]
        self.distmatrix = zeros((self.nNeurons, self.nNeurons, 2))
        self.distmatrix[:, :, 0] = distx
        self.distmatrix[:, :, 1] = disty
Example no. 4
    def __call__(self, shape):
        from scipy import random

        if self.index >= len(self.cache):
            self.cache += [random.random(shape)]
        x = self.cache[self.index]
        self.index += 1
        return x
Example no. 5
 def __init__(self, input_length, hidden_length, out_lenght):
     self.input_length = input_length
     self.out_lenght = out_lenght
     self.hidden_length = hidden_length
     self.centers = []
     for i in xrange(hidden_length):
         self.centers.append(random.uniform(-1, 1, input_length))
     self.variance = 1
     self.W = random.random((self.hidden_length, self.out_lenght))
Example no. 6
 def drawSample(self):
     sum = 0.0
     rndFakt = random.random()
     for g in range(self.numOGaus):
         sum += self.sigmo(self.alpha[g])
         if rndFakt < sum:
             if self.sigma[g] < self.minSig: self.sigma[g] = self.minSig
             x = random.normal(self.mue[g], self.sigma[g])
             break
     return x
Example no. 7
 def drawSample(self):
     sum = 0.0
     rndFakt = random.random()
     for g in range(self.numOGaus):
         sum += self.sigmo(self.alpha[g])
         if rndFakt < sum:
             if self.sigma[g] < self.minSig: self.sigma[g] = self.minSig
             x = random.normal(self.mue[g], self.sigma[g])
             break
     return x
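The two drawSample snippets above pick a mixture component by accumulating sigmoid-squashed weights and then draw from that component's Gaussian. As a standalone illustration of the same idea (plain NumPy, with explicit weight/mean/sigma lists instead of the class attributes assumed above):

import numpy as np

def draw_mixture_sample(weights, means, sigmas, min_sigma=1e-3):
    # weights are assumed non-negative and summing to (at most) 1,
    # mirroring the sigmoid-squashed alphas in the snippets above
    r = np.random.random()
    acc = 0.0
    for w, mu, sig in zip(weights, means, sigmas):
        acc += w
        if r < acc:
            return np.random.normal(mu, max(sig, min_sigma))
    # fall back to the last component if round-off left acc slightly below 1
    return np.random.normal(means[-1], max(sigmas[-1], min_sigma))

# example: three components with weights 0.2 / 0.5 / 0.3
print(draw_mixture_sample([0.2, 0.5, 0.3], [0.0, 5.0, 10.0], [1.0, 0.5, 2.0]))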
Example no. 8
    def __roulette(self, vec):
        #vec is a probability distribution
        assert (np.isclose(vec.sum(), 1.0))
        r = random.random()
        i = 0
        t = 0
        while (t < r):
            t += vec[i]
            i += 1

        return i - 1
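A standalone sketch of the roulette-wheel selection used by __roulette above, with a small sanity check that the empirical frequencies track the given distribution (assumes only NumPy and the standard random module):

import random
import numpy as np

def roulette(vec):
    # vec is a probability distribution; return an index drawn with those probabilities
    assert np.isclose(vec.sum(), 1.0)
    r = random.random()
    t = 0.0
    for i, p in enumerate(vec):
        t += p
        if t >= r:
            return i
    return len(vec) - 1  # guard against floating-point round-off

counts = np.zeros(3)
for _ in range(10000):
    counts[roulette(np.array([0.2, 0.3, 0.5]))] += 1
print(counts / counts.sum())  # roughly [0.2, 0.3, 0.5]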
Example no. 9
def ic_binary(G, fr):
    data = {}
    for n in G.nodes():
        rval = random.random()
        if rval <= fr:
            G.node[n]['state'] = 1.0
            data[n] = 1.0
        else:
            data[n] = 0.0
            G.node[n]['state'] = 0.0
    return data
Example no. 10
def ic_binary(G,fr):
	data={}
	for n in G.nodes():
		rval=random.random()
		if rval <= fr:
			G.node[n]['state']=1.0
			data[n]=1.0
		else: 
			data[n] = 0.0
			G.node[n]['state']=0.0
	return data
Example no. 11
 def _forwardImplementation(self, inbuf, outbuf):
     """ Draws a random number between 0 and 1. If the number is less
         than epsilon, the action is selected according to the Boltzmann exploration strategy. If it is equal or
         larger than epsilon, the greedy action is returned.
     """
     assert self.module
     if random.random() < self.epsilon:
         values = self.module.getActionValues(self._state)
         action = drawGibbs(values, self.tau)
         outbuf[:] = array([action])
     else:
         outbuf[:] = inbuf
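The Boltzmann branch above relies on drawGibbs(values, self.tau), which is not part of the snippet. A minimal sketch of that kind of softmax (Gibbs) sampling, not PyBrain's actual implementation:

import numpy as np

def draw_gibbs_sketch(values, tau):
    # softmax over action values: higher values get exponentially more
    # probability mass; the temperature tau controls how greedy the draw is
    values = np.asarray(values, dtype=float)
    probs = np.exp((values - values.max()) / tau)  # subtract max for numerical stability
    probs /= probs.sum()
    return np.random.choice(len(values), p=probs)

# with a low temperature the highest-valued action dominates
print(draw_gibbs_sketch([1.0, 2.0, 5.0], tau=0.5))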
Example no. 12
 def getAction(self):
     """ activates the module with the last observation and stores the result as last action. """
     # get greedy action
     action = LearningAgent.getAction(self)
     
     # explore by chance
     if random.random() < self.epsilon:
         action = array([random.randint(self.module.numActions)])
     
     # reduce epsilon
     self.epsilon *= self.epsilondecay
     
     return action
Example no. 13
def filter_sbs(n=10000, split=5000):
    data = random.random(n)
    result = []
    b = signal.firwin(150, 0.004)
    z = signal.lfilter_zi(b, 1)
    for i in range(n // split):
        test_data = data[split * i:split * (i + 1)]
        test_result = zeros(test_data.size)
        for i, x in enumerate(test_data):
            test_result[i], z = signal.lfilter(b, 1, [x], zi=z)
        result.append(test_result)

    return data, result
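A quick consistency check for the sample-by-sample filtering above (a sketch, assuming filter_sbs and its scipy imports are available in the current namespace): feeding samples one at a time while carrying the filter state should match a single lfilter call started from the same initial conditions.

import numpy as np
from scipy import signal

data, result = filter_sbs()                        # function defined above
b = signal.firwin(150, 0.004)
zi = signal.lfilter_zi(b, 1)
expected, _ = signal.lfilter(b, 1, data, zi=zi)    # one-shot filtering, same initial state
print(np.allclose(np.concatenate(result), expected))  # expected: True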
Example no. 14
    def _forwardImplementation(self, inbuf, outbuf):
        """ Draws a random number between 0 and 1. If the number is less
            than epsilon, a random action is chosen. If it is equal or
            larger than epsilon, the greedy action is returned.
        """
        assert self.module

        if random.random() < self.epsilon:
            outbuf[:] = array([random.randint(self.module.numActions)])
        else:
            outbuf[:] = inbuf

        self.epsilon *= self.decay
Example no. 15
    def _forwardImplementation(self, inbuf, outbuf):
        """ Draws a random number between 0 and 1. If the number is less
            than epsilon, a random action is chosen. If it is equal or
            larger than epsilon, the greedy action is returned.
        """
        assert self.module

        if random.random() < self.epsilon:
            outbuf[:] = array([random.randint(self.module.numActions)])
        else:
            outbuf[:] = inbuf

        self.epsilon *= self.decay
Example no. 16
def pos_fans_fr_al_CI(inputmlist, preoutputmatrix, netDim):
    pos2change = []
    choosed = []

    outputmatrixDim = preoutputmatrix.shape[0]
    fans = inputmlist[0]
    fri = inputmlist[1]
    al = inputmlist[2]
    fafr = al * 1.0 * fans / fri
    fafr += 0.0000001 if fafr.all() == 0 else fafr

    fansum = float(sp.sum(fafr))
    fansratio = [i / fansum for i in fafr]

    dislist = distance_network(preoutputmatrix, hubnodeindex=0)
    chance2selected = []
    for afansr, adis in zip(*(fansratio, dislist)):
        chance2selected.append(afansr * 10**-adis[0])
    problist = chance2selected / (sp.sum(chance2selected))
    nonzerorange = findindexofvalues(
        preoutputmatrix.any(axis=1).getA1(),
        True)  #choosePatMatrix(preoutputmatrix,mode='IN')
    #     nonzerorange = nonzerorange.choose()
    outputmatrixDim = len(nonzerorange)
    #     print outputmatrixDim,len(problist)

    lena = len(problist)
    choosed.append(sp.random.choice(nonzerorange, 1, p=problist))

    "add mention"
    mention = inputmlist[3]
    mentionsum = float(sp.sum(mention))
    mentionsum = mentionsum if mentionsum > 0 else 1
    #     print mentionsum,mention
    mentionratio = [i / mentionsum for i in mention]

    mentioncnt = int(mention[-1])
    #     print mentioncnt
    for i in range(1, mentioncnt + 1):
        signin = (outputmatrixDim - 1) / float(netDim)
        if random.random() >= signin:
            #             preoutputmatrix = addrowcol_matrix(preoutputmatrix,1,1)
            pos2change.append((outputmatrixDim - 1, outputmatrixDim + i - 1))
        else:
            problist = mentionratio
            #             print outputmatrixDim,len(problist),problist
            choosed.append(sp.random.choice(outputmatrixDim, 1, p=problist))

    for cha in choosed:
        pos2change.append((nonzerorange[-1] + 1, cha[0]))
    return pos2change
Example no. 17
def chenSIR(graph, iIn, y, runs):
	"""
	In: graph, indices of an initial set of infected nodes, recovery parameter (usually ~3), total number of simulation runs
		- returns the average number of infected nodes under the SIR model specified in the
		  Chen 2012 paper "Identifying influential nodes in complex networks"
		- the recovery parameter is the average number of steps until a node recovers
	"""
	sumI = 0.0
	n = []
	for x in range(runs):
		i = []
		for restock in iIn:
			i.append(restock)
		r = []
		while len(i) > 0:
			infect = -1
			newI = []
			newR = []
			for j in i:
				if random.random() < (1.0/y):
					newR.append(j)
				else:
					n = graph.neighborss(j)
					for e in r:
						if e in n:
							n.remove(e)
					infect = int(len(n)*random.random())
					if len(n) > 0:
						if n[infect] not in i:
							newI.append(n[infect])
			for l in newR:
				i.remove(l)
				r.append(l)
			for k in newI:
				if k not in i:
					i.append(k)
		sumI = sumI + (len(r)*1.0)
	return (sumI/(runs*1.0))
Example no. 18
    def _forwardImplementation(self, inbuf, outbuf):
        """ Draws a random number between 0 and 1. If the number is less
            than epsilon, a random action is chosen. If it is equal or
            larger than epsilon, the greedy action is returned.
        """
        assert self.module

#        print "Getting moves"
        if random.random() < self.epsilon:
            outbuf[:] = array(pyrand.sample(self.env.valid_moves, 1))
        else:
            outbuf[:] = inbuf

        self.epsilon *= self.decay
Example no. 19
 def drawSample(self, dm):
     sum = 0.0
     rndFakt = random.random()
     if dm == "max":
         for g in range(self.numOGaus):
             sum += self.sigmo(self.alpha[g])
             if rndFakt < sum:
                 if self.sigma[g] < self.minSig: self.sigma[g] = self.minSig
                 x = random.normal(self.mue[g], self.sigma[g])
                 break
         return x
     if dm == "dist":
         return rndFakt * self.distRange + self.rangeMin
     return 0.0
Example no. 20
    def _forwardImplementation(self, inbuf, outbuf):
        """ Draws a random number between 0 and 1. If the number is less
            than epsilon, a random action is chosen. If it is equal or
            larger than epsilon, the greedy action is returned.
        """
        assert self.module

        #        print "Getting moves"
        if random.random() < self.epsilon:
            outbuf[:] = array(pyrand.sample(self.env.valid_moves, 1))
        else:
            outbuf[:] = inbuf

        self.epsilon *= self.decay
Example no. 21
def initialize(input):
    # to initialize the MLE parameters
    a = random.randint(0, 150)  # integer column indices; uniform() would give floats, unusable for indexing below
    b = random.randint(0, 150)
    c = random.randint(0, 150)
    miu = [
        input[:, a],
        input[:, b],
        input[:, c],
    ]  # set the initial mu to be the same as randomly chosen input from the original inputs
    cov = [
        np.matrix(np.eye(4)),
        np.matrix(np.eye(4)),
        np.matrix(np.eye(4)),
    ]  # set the initial covariances to be identities
    a = random.random()
    b = random.random()
    c = random.random()
    a1 = a / (a + b + c)
    b1 = b / (a + b + c)
    c1 = c / (a + b + c)
    pai = [a1, b1, c1]  # set the initial pi to be three normalized random figure, and sums up to 1
    return [pai, miu, cov]
Example no. 22
 def drawSample(self, dm):
     sum = 0.0
     rndFakt = random.random()
     if dm == "max":
         for g in range(self.numOGaus):
             sum += self.sigmo(self.alpha[g])
             if rndFakt < sum:
                 if self.sigma[g] < self.minSig: self.sigma[g] = self.minSig
                 x = random.normal(self.mue[g], self.sigma[g])
                 break
         return x
     if dm == "dist":
         return rndFakt * self.distRange + self.rangeMin
     return 0.0
Example no. 23
def pos_fansmention(inputmlist, preoutputmatrix, outputmatrixDim, netDim):
    pos2change = []
    choosed = []

    outputmatrixDim = preoutputmatrix.shape[0]
    fans = inputmlist[0]
    fansum = float(sp.sum(fans))
    fansratio = [i / fansum for i in fans]

    #     adjmatrix = mx('0,0,0;1,0,0;1,0,0')#'0,0,0;1,0,0;1,0,0'#
    #     adjmatrix = inputmatrix#random.randint(low=0,high=2,size=(30,30))
    #     print adjmatrix
    dislist = distance_network(preoutputmatrix, hubnodeindex=0)
    chance2selected = []
    for afansr, adis in zip(*(fansratio, dislist)):
        chance2selected.append(afansr * 10**-adis[0])
    problist = chance2selected / (sp.sum(chance2selected))
    nonzerorange = findindexofvalues(
        preoutputmatrix.any(axis=1).getA1(),
        True)  #choosePatMatrix(preoutputmatrix,mode='IN')
    #     nonzerorange = nonzerorange.choose()
    outputmatrixDim = len(nonzerorange)
    #     print outputmatrixDim,len(problist)

    lena = len(problist)
    choosed.append(sp.random.choice(nonzerorange, 1, p=problist))

    "add mention"
    mention = inputmlist[2]
    mentionsum = float(sp.sum(mention))
    mentionsum = mentionsum if mentionsum > 0 else 1
    #     print mentionsum,mention
    mentionratio = [i / mentionsum for i in mention]

    mentioncnt = mention[-1]
    #     print mentioncnt
    for i in range(1, mentioncnt + 1):
        signin = (outputmatrixDim - 1) / float(netDim)
        if random.random() >= signin:
            #             preoutputmatrix = addrowcol_matrix(preoutputmatrix,1,1)
            pos2change.append((outputmatrixDim - 1, outputmatrixDim + i - 1))
        else:
            problist = mentionratio
            #             print outputmatrixDim,len(problist),problist
            choosed.append(sp.random.choice(outputmatrixDim, 1, p=problist))

    for cha in choosed:
        pos2change.append((nonzerorange[-1] + 1, cha[0]))
    return pos2change
Example no. 24
 def initPopulation(self):
     if self.xBound is not None:
         self.initBoundaries()
     if self.initialPopulation is not None:
         self.currentpop = self.initialPopulation
     else:
         self.currentpop = [self._initEvaluable]
         for _ in range(self.populationSize-1):
             if self.xBound is None:
                 self.currentpop.append(self._initEvaluable+randn(self.numParameters)
                                    *self.mutationStdDev*self.initRangeScaling)
             else:
                 position = rd.random(self.numParameters)
                 position *= (self.maxs-self.mins)
                 position += self.mins
                 self.currentpop.append(position)
Example no. 25
def kohonen():
    som = KohonenMap(2, 10)

    pylab.ion()
    p = pylab.plot(som.neurons[:, :, 0].flatten(),
                   som.neurons[:, :, 1].flatten(), 's')

    for i in range(25000):
        # one forward and one backward (training) pass
        som.activate(random.random(2))
        som.backward()

        # plot every 100th step
        if i % 100 == 0:
            p[0].set_data(som.neurons[:, :, 0].flatten(),
                          som.neurons[:, :, 1].flatten())
            pylab.draw()
Example no. 26
    def _forwardImplementation(self, inbuf, outbuf):
        """ Draws a random number between 0 and 1. If the number is less
            than epsilon, a random action is chosen. If it is equal or
            larger than epsilon, the greedy action is returned.
        """
        assert self.module

        if random.random() < self.epsilon:
            #only the actions that have a Q value > -infinity are valid
            actionValues = self.module.getActionValues(self.state)
            #print(actionValues)
            actions = [a for a in xrange(len(actionValues)) if actionValues[a] > float("-inf")]
            outbuf[:] = random.choice(actions)
        else:
            outbuf[:] = self.module.getMaxAction(self.state)

        self.epsilon *= self.decay
Example no. 27
def make(**kw):
    step, noise, decay = [float(kw[k]) for k in ['--step','--noise','--decay']]
    X, Y = [int(kw[k]) for k in ['--width', '--height']]
    H = X/2

    gray = ones((Y,X),dtype=float)
    for x in range(H):
        dI = step * exp(-decay*float(H-x))
        gray[:,0+x+0] += dI
        gray[:,X-x-1] -= dI

    scatter = ((random.random((Y,X))*2)-1) * noise
    image = (gray+scatter)*127

    saved = fromarray(image)
    name = 'egray/egray.s.%f.n.%f.d.%f.gif' % (step, noise, decay)
    saved.save(name)
    toimage(saved)
Example no. 28
    def __init__(self, indim, numCenters, outdim):
        """Radial Basis Function
        
        :param indim: input dimension
        :type indim: int
        :param numCenters: hidden dimension
        :type numCenters: int
        :param outdim: output dimension
        :type outdim: int
        """

        self.indim = indim
        self.outdim = outdim
        self.numCenters = numCenters
        self.centers = [random.uniform(-1, 1, indim)
                        for i in range(numCenters)]
        self.beta = 8
        self.W = random.random((self.numCenters, self.outdim))
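The constructor above only sets up the centers, beta and the output weights W; the forward pass is not shown. A sketch of how such an RBF layer is typically activated (a hypothetical helper, not necessarily the method of this class):

import numpy as np

def rbf_activate(x, centers, beta, W):
    # Gaussian basis function around each center, followed by a linear readout
    x = np.asarray(x, dtype=float)
    G = np.array([np.exp(-beta * np.linalg.norm(x - np.asarray(c)) ** 2)
                  for c in centers])   # shape: (numCenters,)
    return G.dot(W)                    # shape: (outdim,)

# example with 3 centers in 2-D and a 1-D output
centers = [np.array([-1.0, 0.0]), np.array([0.0, 1.0]), np.array([1.0, -1.0])]
W = np.random.random((3, 1))
print(rbf_activate([0.2, 0.3], centers, beta=8, W=W))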
Example no. 29
    def run(self):
        raw_file = open('res_sirs.txt', 'w+')
        #same as while I > 0
        while len(self.iAgentList) > 0:
            tempIAgentList = []
            newI = 0

            for iAgent in self.iAgentList:
                for agent in self.adjacencyList[iAgent]:
                    if agent in self.sAgentList:
                        if random.random() < self.b:
                            newI += self.infectAgent(agent)
                            tempIAgentList.append(agent)

            # then get the list of who is recovering
            recoverList = self.recoverAgents()
            susceptibleList = self.susceptibleAgent()
            # for recoveries
            for recoverAgent in recoverList:
                self.iAgentList.remove(recoverAgent)
                self.rAgentList.append(recoverAgent)
                self.susAgent(recoverAgent)

            for susceptibleAgent in susceptibleList:
                self.rAgentList.remove(susceptibleAgent)
                self.sAgentList.append(susceptibleAgent)

            self.iAgentList.extend(tempIAgentList)

            self.sList.append(len(self.sAgentList))
            self.iList.append(len(self.iAgentList))
            self.rList.append(len(self.rAgentList))  # track recovered agents (was sAgentList, a copy-paste slip)
            self.newIList.append(newI)


            self.t += 1

            print('t', self.t, 'numS', len(self.sAgentList), 'numI', len(self.iAgentList), 'numR', len(self.rAgentList))
            raw_file.write(str(self.t) + ' ' + str(len(self.sAgentList)) + ' ' + str(len(self.iAgentList)) +
                           ' ' + str(len(self.rAgentList)) + '\n')

            random.shuffle(self.iAgentList)

        return [self.sList, self.iList, self.rList, self.newIList]
Example no. 30
    def _forwardImplementation(self, inbuf, outbuf):
        """ Draws a random number between 0 and 1. If the number is less
            than epsilon, a random action is chosen. If it is equal or
            larger than epsilon, the greedy action is returned.
        """
        assert self.module

        if random.random() < self.epsilon:
            #only the actions that have a Q value > -infinity are valid
            actionValues = self.module.getActionValues(self.state)
            #print(actionValues)
            actions = [
                a for a in xrange(len(actionValues))
                if actionValues[a] > float("-inf")
            ]
            outbuf[:] = random.choice(actions)
        else:
            outbuf[:] = self.module.getMaxAction(self.state)

        self.epsilon *= self.decay
Example no. 31
def make(**kw):
    step  = kw.get( 'step', 3e-2)
    scale = kw.get('scale', 3e-1)
    shape = (Y, X) = [kw.get(edge, 512) for edge in ['Y', 'X']]
    X0, X1, X2, X3 = 0, X/2-1, X/2, X-1

    gray = ones(shape, dtype=float)
    gray[:,X0:X1] -= step
    gray[:,X2:X3] += step

    noise = ((random.random((Y,X))*2)-1) * scale

    image = (gray+noise)*127
    print gray.max(), noise.max(), image.max()
    print gray.min(), noise.min(), image.min()

    saved = fromarray(image)
    name = 'gray/gray.step.%f.scale.%f.gif' % (step, scale)
    print name
    saved.save(name)
Example no. 32
    def _forwardImplementation(self, inbuf, outbuf):
        """ Draws a random number between 0 and 1. If the number is less
            than epsilon, a random action is chosen. If it is equal or
            larger than epsilon, the greedy action is returned.
        """
        assert self.module

        # Choose action from allowed actions.
        if random.random() < self.epsilon:

            # Only select allowed actions
            allowed_actions = np.where(
                np.array(self.env.visited_states) == 0)[0]
            act = array([random.choice(allowed_actions)])
            outbuf[:] = act

        else:
            outbuf[:] = inbuf

        self.epsilon *= self.decay
Example no. 33
def gen_IC(sigma,
           rn,
           outfile="workfile",
           icdir="ICs",
           M=5,
           N=50,
           lapfile="Laplacian.txt",
           tries=10,
           iclist=[]):
    lap = loadtxt(lapfile)
    spa = sparse.csr_matrix(lap)
    success = 0
    attempts = 0
    while success == 0 and attempts < tries:
        try:
            tag = 's%.2fr%.3d' % (sigma, rn)
            tag = tag.replace(".", "")

            parameters = [35.0, 16.0, 9.0, 0.4, 0.12, sigma]
            x0 = 10 * (random.random(2 * N) - 0.5)

            tic = time.time()
            trajectory = integrate.odeint(mimura,
                                          x0,
                                          range(0, 1000),
                                          args=(parameters, spa))
            print "integration took", time.time() - tic, "seconds"

            x1 = trajectory[-1]

            sol = fsolve(mimura3, x1, args=(parameters, spa), full_output=True)
            x2 = sol[0]
            if x2 not in iclist:
                savetxt(icdir + '/init_cond_' + tag + '.txt', x2)
                write_mimu(lap, par=parameters, ic=x2, outfile=outfile)
                iclist.append(x2)
                success = 1
            attempts += 1  # count this attempt (the original incremented tries, which never ends the loop)
        except:
            pass
    return iclist
Example no. 34
    def _forwardImplementation(self, inbuf, outbuf):
        """ Draws a random number between 0 and 1. If the number is less
            than epsilon, a random action is chosen. If it is equal or
            larger than epsilon, the greedy action is returned.
        """
        assert self.module

        #print np.nonzero(self.env.actions_index[self.env.current_state])[0]

        # Choose action from allowed actions.
        if random.random() < self.epsilon:
            act = array(
                [random.choice(self.env.mods[self.env.current_state]) - 1])
            outbuf[:] = act
            #print "current_state", self.env.current_state, "exploration action", act
            #print "allowed actions", self.env.mods[self.env.current_state]
            #outbuf[:] = array([random.randint(self.module.numActions)])
        else:
            outbuf[:] = inbuf

        self.epsilon *= self.decay
Example no. 35
    def __init__(self, shape=(10, 20), spacing=(1., 1.), origin=(0., 0.),
                 alpha=1.):
        """Create a new heat model.

        Parameters
        ----------
        shape : array_like, optional
            The shape of the solution grid as (*rows*, *columns*).
        spacing : array_like, optional
            Spacing of grid rows and columns.
        origin : array_like, optional
            Coordinates of lower left corner of grid.
        alpha : float
            Alpha parameter in the heat equation.
        """
        self._shape = shape
        self._spacing = spacing
        self._origin = origin
        self._time = 0.
        self._alpha = alpha
        self._time_step = min(spacing) ** 2 / (4. * self._alpha)

        self._temperature = random.random(self._shape)
        self._next_temperature = np.empty_like(self._temperature)
Example no. 36
 def _initializeGrids(self):
     offset = self.omega * self.radius
     radius_offset = ones(self.dim) * self.radius
     self.gridBalls = random.random((self._findAmountOfGrids(), self.dim))
     self.gridBalls *= offset
     self.gridBalls += radius_offset
Example no. 37
 def _forwardImplementation(self, inbuf, outbuf):
     outbuf[:] = inbuf <= random.random(inbuf.shape)
Example no. 38
            agent.learn()
            agent.reset()
        print "Parameters:", agent.learner.original, "Sigmas:", agent.learner.sigList
        print "Episode:", runs, "/", (
            updates + 1
        ) * prnts * batch, "Best:", agent.learner.best, "Base:", agent.learner.baseline, "Reward:", agent.learner.reward
        print ""
        rl.append(float(agent.learner.baseline))
        pr.append(float(agent.learner.original[0]))

    if save:
        fnStart = "dataSimple"
        fnExp = repr(int(agent.learner.gd.alpha * 100)) + "m" + repr(
            int(agent.learner.gdSig.alpha * 100)) + "s" + repr(
                batch / 2) + "b" + repr(int(agent.learner.epsilon * 10)) + "e"
        fnIdent = "SPLA" + repr(int(random.random() * 1000000.0))
        filename = fnStart + fnExp + fnIdent + ".dat"
        file = open(filename, "w")
        rlLen = len(rl)
        for i in range(rlLen):
            file.write(repr((i + 1) * batch * prnts) + "\n")
            file.write(repr(rl[i]) + "\n")
        file.close()

        fnStart = "dataSimplePara"
        fnExp = repr(int(agent.learner.gd.alpha * 100)) + "m" + repr(
            int(agent.learner.gdSig.alpha * 100)) + "s" + repr(
                batch / 2) + "b" + repr(int(agent.learner.epsilon * 10)) + "e"
        fnIdent = "SPLA" + repr(int(random.random() * 1000000.0))
        filename = fnStart + fnExp + fnIdent + ".dat"
        file = open(filename, "w")
Example no. 39
 def _forwardImplementation(self, inbuf, outbuf):
     outbuf[:] = inbuf <= random.random(inbuf.shape)
Example no. 40
    pr=[]
    for updates in range(epis):
        for i in range(prnts):
            experiment.doEpisodes(batch)
            agent.learn()
            agent.reset()
        print "Parameters:", agent.learner.original, "Sigmas:", agent.learner.sigList 
        print "Episode:", runs, "/", (updates+1)*prnts*batch, "Best:", agent.learner.best, "Base:", agent.learner.baseline, "Reward:", agent.learner.reward
        print ""    
        rl.append(float(agent.learner.baseline))
        pr.append(float(agent.learner.original[0]))

    if save:
        fnStart="dataSimple"
        fnExp=repr(int(agent.learner.gd.alpha*100))+"m"+repr(int(agent.learner.gdSig.alpha*100))+"s"+repr(batch/2)+"b"+repr(int(agent.learner.epsilon*10))+"e"
        fnIdent="SPLA"+repr(int(random.random()*1000000.0))
        filename=fnStart+fnExp+fnIdent+".dat"
        file = open(filename,"w")
        rlLen=len(rl)
        for i in range(rlLen):
            file.write(repr((i+1)*batch*prnts)+"\n")
            file.write(repr(rl[i])+"\n")
        file.close()       
     
        fnStart="dataSimplePara"
        fnExp=repr(int(agent.learner.gd.alpha*100))+"m"+repr(int(agent.learner.gdSig.alpha*100))+"s"+repr(batch/2)+"b"+repr(int(agent.learner.epsilon*10))+"e"
        fnIdent="SPLA"+repr(int(random.random()*1000000.0))
        filename=fnStart+fnExp+fnIdent+".dat"
        file = open(filename,"w")
        rlLen=len(pr)
        for i in range(rlLen):
Example no. 41
import initExample


from scipy import random
from numpy import linspace
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from pyqtgraph import MultiPlotWidget
try:
    from metaarray import *
except:
    print("MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)")
    exit()
    
app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
mw.resize(800,800)
pw = MultiPlotWidget()
mw.setCentralWidget(pw)
mw.show()

ma = MetaArray(random.random((3, 1000)), info=[{'name': 'Signal', 'cols': [{'name': 'Col1'}, {'name': 'Col2'}, {'name': 'Col3'}]}, {'name': 'Time', 'vals': linspace(0., 1., 1000)}])
pw.plot(ma)

## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()

Example no. 42
             'ShowerFalls','SnakeRiver','SnakeRiverStation','SylvanLake',
             'SylvanRoad','ThumbDivide','Togwotee','TowerFalls',
             'TwoOceanPlateau','WhiskeyCreek','WhiteMill','Wolverine',
             'YNPMammoth','YountsPeak']
             
elevationindex = [7333,9380,7851,8176,9865,6732,7887,9196,7460,8160,9068,9134,5280,9268,6814,7346,
                  8796,6319,6650,7835,7881,6896,8786,6798,7434,7320,9449,8084,8087,6896,6900,8491,7172,
                  8018,9610,6266,9281,6814,8700,7644,6300,8366]
pc1 = score[0]
pc2 = score[1]
pc3 = score[2]
fig = pyplot.figure()

ax = fig.add_subplot(121,projection='3d')
for i in range(len(stationindex)-2):
    C1 = random.random()
    C2 = random.random()
    C3 = random.random()
    #pyplot.scatter(pc1[stationindex[i]:stationindex[i+1]],pc2[stationindex[i]:stationindex[i+1]], s = (elevationindex[i]/1000)**2, c = [[C1,C2,C3],[C1,C2,C3]])    
    p = ax.scatter(pc1[stationindex[i]:stationindex[i+1]],pc2[stationindex[i]:stationindex[i+1]],yeararray[stationindex[i]:stationindex[i+1]], 
                   s=(elevationindex[i]/3000)**4, c=str((elevationindex[i]/10000.)**2), marker = "o")
ax.set_xlabel("PC1")
ax.set_ylabel("PC2")
ax.set_zlabel("year")

ax2 = fig.add_subplot(233)
for i in range(len(stationindex)-2):
    p2 = ax2.scatter(pc1[stationindex[i]:stationindex[i+1]],pc2[stationindex[i]:stationindex[i+1]], 
                   s=(elevationindex[i]/3000)**4, c=str((elevationindex[i]/10000.)**2), marker = "o")
ax2.set_xlabel("PC1")
ax2.set_ylabel("PC2")
Example no. 43
 # create controller network
 net = buildNetwork(task.outdim, task.indim, outclass=TanhLayer)
 # create agent with controller and learner
 agent = FiniteDifferenceAgent(net, SPLA())
 # learning options
 agent.learner.gd.alpha = 0.3 #step size of \mu adaption
 agent.learner.gdSig.alpha = 0.15 #step size of \sigma adaption
 agent.learner.gd.momentum = 0.0
 batch=2 #number of samples per gradient estimate (was: 2; more here due to stochastic setting)
 #create experiment
 experiment = EpisodicExperiment(task, agent)
 prnts=1 #frequency of console output
 epis=2000/batch/prnts
 
 #actual roll outs
 filename="dataSPLA08NoRew"+repr(int(random.random()*1000000.0))+".dat"
 wf = open(filename, 'wb')
 for updates in range(epis):
     for i in range(prnts):
         experiment.doEpisodes(batch) #execute #batch episodes
         agent.learn() #learn from the gather experience
         agent.reset() #reset agent and environment
     #print out related data
     stp = (updates+1)*batch*prnts
     print "Step: ", runs, "/", stp, "Best: ", agent.learner.best, "Base: ", agent.learner.baseline, "Reward: ", agent.learner.reward   
     wf.write(repr(stp)+"\n") 
     wf.write(repr(agent.learner.baseline[0])+"\n") 
     if useGraphics:
         pl.addData(0,float(stp),agent.learner.baseline)
         pl.addData(1,float(stp),agent.learner.best)
         pl.update()
Example no. 44
##################################################
# Example for Kohonen Map
#
# Clusters random 2D coordinates in range [0,1]
# with a Kohonen Map of 5x5 neurons.
#
# Note: you need pylab to show the results
##################################################

__author__ = "Thomas Rueckstiess, [email protected]"

import pylab
from scipy import random
from pybrain.structure.modules import KohonenMap

som = KohonenMap(2, 5)

pylab.ion()
p = pylab.plot(som.neurons[:, :, 0].flatten(), som.neurons[:, :, 1].flatten(), "s")

for i in range(25000):
    # one forward and one backward (training) pass
    som.activate(random.random(2))
    som.backward()

    # plot every 100th step
    if i % 100 == 0:
        p[0].set_data(som.neurons[:, :, 0].flatten(), som.neurons[:, :, 1].flatten())
        pylab.draw()
Example no. 45
    n = len(y)  # length of the signal
    k = arange(n)
    T = n / Fs
    frq = k / T  # two sides frequency range
    frq = frq[range(int(n / 2))]  # one side frequency range

    Y = fft(y) / n  # fft computing and normalization
    Y = Y[range(int(n / 2))]

    plot(frq, abs(Y), 'r')  # plotting the spectrum
    xlabel('Freq (Hz)')
    ylabel('|Y(freq)|')


Fs = 150.0
# sampling rate
Ts = 1.0 / Fs
# sampling interval
t = arange(0, 1, Ts)  # time vector

ff = 5
# frequency of the signal
y = random.random(150)

subplot(2, 1, 1)
plot(t, y)
xlabel('Time')
ylabel('Amplitude')
subplot(2, 1, 2)
plotSpectrum(y, Fs)
show()
Example no. 46
from PyQt4 import QtGui, QtCore
import pyqtgraph as pg
from pyqtgraph.MultiPlotWidget import MultiPlotWidget
try:
    from metaarray import *
except:
    print "MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)"
    exit()

app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
pw = MultiPlotWidget()
mw.setCentralWidget(pw)
mw.show()

ma = MetaArray(random.random((3, 1000)),
               info=[{
                   'name':
                   'Signal',
                   'cols': [{
                       'name': 'Col1'
                   }, {
                       'name': 'Col2'
                   }, {
                       'name': 'Col3'
                   }]
               }, {
                   'name': 'Time',
                   'vals': linspace(0., 1., 1000)
               }])
pw.plot(ma)
Example no. 47
 def _initializeGrids(self):
     offset = self.omega * self.radius
     radius_offset = ones(self.dim) * self.radius
     self.gridBalls = random.random((self._findAmountOfGrids(), self.dim))
     self.gridBalls *= offset
     self.gridBalls += radius_offset
Example no. 48
 def pertGlasPos(self, num):
     self.env.pert = asarray(
         [random.random() * 2.0 - 1.0, 0.0,
          random.random() * 0.5 + 0.5])
Example no. 49
from methods import *
from scipy import random

# allow the user to specify the matrix size
matrixSize = int(input("Please enter the size of the testing matrix: "))
n = int(
    input(
        "Please enter the multiplier of the random function(since random only generates value from [0, 1)): "
    ))
A = symmetricMatrix(matrixSize, n)
x = [n * random.random() for i in range(matrixSize)]
b = multiplyMatrix(A, x)
print('the x we generated is:')
for line in x:
    print(line)
print('A = ')
for line in A:
    print(line)
print('b =')
for line in b:
    print(line)
choleskiOutput = choleski(A, b)
solution = backwardElim(choleskiOutput[0], choleskiOutput[1])
print('solution = ')
for line in solution:
    print(line)
Example no. 50
dtype = np.float64

if 1:
    from scipy import random
    import _generated_tripleproduct
    from _generatedtripleproductexpansions import *
    import genericcomponent
    import convolvespecial
    import timer

    t = timer.timer()

    N1 = 10
    N2 = 1024
    a = np.ones((2 * N1 + 1, N2), dtype)
    b = random.random((2 * N1 + 1, N2)).astype(dtype)
    c = np.zeros((N1, N2), dtype)

    print ":MP:"
    t.start("mp")
    for ii in range(100):
        y1 = direct_triple_product_abc10(a, a, b)
    t.stop("mp")

    print ":Python:"
    t.start("python")
    t.stop("python")

    print ":convolve:"
    t.start("convolve")
    for ii in range(100):
Example no. 51
from sklearn.cluster import KMeans
# from sklearn import datasets
from scipy import random

r = random.random()

# iris = datasets.load_iris()
X = [[1.,  0.75,  1.125],
     [1.,  1.75,  1.125],
     [-1., -1.25, -0.875],
     [-1., -1.25, -1.375]]
# X = [[1, 1, 1], [1, 2, 1], [-1, -1, -1], [-1, -1, -1.5]]
print(X)
kmeans = KMeans(n_clusters=2, random_state=10,
                max_iter=1).fit(X)
# print(kmeans.cluster_centers_)
Example no. 52
File: pca.py Project: boegel/yaff
def pca_convergence(f,
                    eq_time=0 * picosecond,
                    n_parts=None,
                    step=1,
                    fn='PCA_convergence',
                    n_bootstrap=50,
                    mw=True):
    """
    Calculates the convergence of the simulation by calculating the pca
    similarity for different subsets of the simulation.

    **Arguments:**

    f
        An h5.File instance containing the trajectory data.

    **Optional arguments:**

    eq_time
        Equilibration time, discarded from the simulation.

    n_parts
        Array containing the number of parts in which
        the total simulation is divided.

    step
        Stepsize used in the trajectory.

    fn
        Filename containing the convergence plot.

    n_bootstrap
        The number of bootstrapped trajectories.

    mw
        If mass_weighted is True, the covariance matrix is mass-weighted.
    """

    # Configure n_parts, the array containing the number of parts in which the total simulation is divided
    if n_parts is None:
        n_parts = np.array([1, 3, 10, 30, 100, 300])

    # Read in the timestep and the number of atoms
    time = f['trajectory/time']
    timestep = time[1] - time[0]
    time_length = len(time)

    # Determine the equilibration size
    eq_size = int(eq_time / timestep)

    ### ---PART A: SIMILARITY OF THE TRUE TRAJECTORY--- ###

    # Calculate the covariance matrix of the whole production run as golden standard
    covar_total, q_ref = calc_cov_mat(f, start=eq_size, step=step, mw=mw)

    # Initialize the average similarity vector of the divided trajectories
    sim_block = np.zeros(len(n_parts))
    # Calculate this average similarity vector
    for j in xrange(len(n_parts)):
        # Determine in how many parts the trajectory should be divided and the corresponding block size
        n_part = n_parts[j]
        block_size = (time_length - eq_size) / n_part
        # Calculate the n_part covariance matrices and compare with the total covariance matrix
        tot_sim_block = 0
        for i in xrange(n_part):
            start = eq_size + i * block_size
            covars, tmp = calc_cov_mat(f,
                                       start=start,
                                       end=start + block_size + 1,
                                       step=step,
                                       mw=mw)
            tot_sim_block += pca_similarity(covars, covar_total)
        # Determine the average similarity
        sim_block[j] = tot_sim_block / n_part

    ### ---PART B: SIMILARITY OF BOOTSTRAPPED TRAJECTORIES --- ###

    # Read in the positions, which will be used to generate bootstrapped trajectories
    pos = f['trajectory/pos'][eq_size:, :, :]
    pos = pos.reshape(pos.shape[0], -1)

    if mw:
        # Read in the masses of the atoms, and replicate them d times (d=dimension)
        masses = f['system/masses']
        masses = np.repeat(masses, 3)

        # Create the mass-weighted positions matrix, on which the bootstrapping will be based
        pos *= np.sqrt(masses)

    # Initialize the vector containing the average similarity over all the bootstrapped, divided trajectories
    sim_bt_all = np.zeros(len(n_parts))

    for k in xrange(n_bootstrap):
        with log.section('PCA'):
            log('Processing %s of %s bootstrapped trajectories' %
                (k + 1, n_bootstrap))
            # Create a bootstrapped trajectory bt
            pos_bt = np.zeros(pos.shape)
            random_time = random.random(time_length) * time_length
            for h in np.arange(time_length):
                pos_bt[h, :] = pos[random_time[h], :]

            # Covariance matrix of the total bootstrapped trajectory
            covar_bt_total, tmp = calc_cov_mat_internal(pos_bt)

            # Initialize the vector containing the average similarity over the different blocks,
            # for the given bootstrapped trajectory
            sim_bt = np.zeros(len(n_parts))

            for j in xrange(len(n_parts)):
                # Calculate the number of blocks, as well as the block size
                n_part = n_parts[j]
                block_size = (len(time) - eq_size) / n_part
                tot_sim_bt = 0
                # Calculate the total similarity of this number of blocks, for this bootstrapped trajectory
                for i in xrange(n_part):
                    start = eq_size + i * block_size
                    pos_bt_block = pos_bt[start:start + block_size:step]
                    covars_bt, tmp = calc_cov_mat_internal(pos_bt_block)
                    tot_sim_bt += pca_similarity(covars_bt, covar_bt_total)
                # Calculate the average similarity for this number of blocks, for this bootstrapped trajectory
                sim_bt[j] = tot_sim_bt / n_part
            sim_bt_all += sim_bt
    # Calculate the average similarity over all bootstrapped trajectories
    sim_bt_all /= n_bootstrap

    ### ---PART C: PROCESSING THE RESULTS --- ###

    pt.clf()
    pt.semilogx((time[-1] - time[0]) / n_parts / picosecond,
                sim_block / sim_bt_all, 'r-')
    pt.semilogx((time[-1] - time[0]) / n_parts / picosecond,
                sim_block / sim_bt_all, 'rs')
    pt.xlabel('Block size [ps]')
    pt.ylabel('PCA similarity (1=perfectly similar)')
    pt.title('Convergence assessment via PCA: ' + fn)
    pt.ylim([0, 1])
    pt.savefig(fn + '.png')
    pt.savefig(fn + '.pdf', format='pdf')
    return sim_block / sim_bt_all
Example no. 53
def randomRotation(dim):
    """Return a random rotation matrix of rank dim."""
    return orth(random.random((dim, dim)))
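A quick check of what the snippet above actually guarantees: orth() returns a matrix with orthonormal columns, but it does not constrain the determinant, so the result may be a reflection (det = -1) rather than a proper rotation (det = +1).

import numpy as np
from scipy.linalg import orth
from scipy import random

R = orth(random.random((3, 3)))
print(np.allclose(R.T.dot(R), np.eye(3)))  # True: columns are orthonormal
print(np.linalg.det(R))                    # +1 or -1; a proper rotation needs +1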
Example no. 54
def randomRotation(dim):
    """Return a random rotation matrix of rank dim."""
    return orth(random.random((dim, dim)))
Example no. 55
File: pca.py Project: molmod/yaff
def pca_convergence(f, eq_time=0*picosecond, n_parts=None, step=1, fn='PCA_convergence', n_bootstrap=50, mw=True):
    """
    Calculates the convergence of the simulation by calculating the pca
    similarity for different subsets of the simulation.

    **Arguments:**

    f
        An h5.File instance containing the trajectory data.

    **Optional arguments:**

    eq_time
        Equilibration time, discarded from the simulation.

    n_parts
        Array containing the number of parts in which
        the total simulation is divided.

    step
        Stepsize used in the trajectory.

    fn
        Filename containing the convergence plot.

    n_bootstrap
        The number of bootstrapped trajectories.

    mw
        If mass_weighted is True, the covariance matrix is mass-weighted.
    """

    # Configure n_parts, the array containing the number of parts in which the total simulation is divided
    if n_parts is None:
        n_parts = np.array([1,3,10,30,100,300])

    # Read in the timestep and the number of atoms
    time = f['trajectory/time']
    timestep = time[1] - time[0]
    time_length = len(time)

    # Determine the equilibration size
    eq_size = int(eq_time/timestep)

    ### ---PART A: SIMILARITY OF THE TRUE TRAJECTORY--- ###

    # Calculate the covariance matrix of the whole production run as golden standard
    covar_total, q_ref = calc_cov_mat(f, start=eq_size, step=step, mw=mw)

    # Initialize the average similarity vector of the divided trajectories
    sim_block = np.zeros(len(n_parts))
    # Calculate this average similarity vector
    for j in range(len(n_parts)):
        # Determine in how many parts the trajectory should be divided and the corresponding block size
        n_part = n_parts[j]
        block_size = (time_length-eq_size)//n_part
        # Calculate the n_part covariance matrices and compare with the total covariance matrix
        tot_sim_block=0
        for i in range(n_part):
            start = eq_size + i*block_size
            covars, tmp = calc_cov_mat(f, start=start, end=start+block_size+1, step=step, mw=mw)
            tot_sim_block += pca_similarity(covars, covar_total)
        # Determine the average similarity
        sim_block[j] = tot_sim_block/n_part


    ### ---PART B: SIMILARITY OF BOOTSTRAPPED TRAJECTORIES --- ###

    # Read in the positions, which will be used to generate bootstrapped trajectories
    pos = f['trajectory/pos'][eq_size:,:,:]
    pos = pos.reshape(pos.shape[0], -1)

    if mw:
        # Read in the masses of the atoms, and replicate them d times (d=dimension)
        masses = f['system/masses']
        masses = np.repeat(masses,3)

        # Create the mass-weighted positions matrix, on which the bootstrapping will be based
        pos *= np.sqrt(masses)

    # Initialize the vector containing the average similarity over all the bootstrapped, divided trajectories
    sim_bt_all = np.zeros(len(n_parts))

    for k in range(n_bootstrap):
        with log.section('PCA'):
            log('Processing %s of %s bootstrapped trajectories' %(k+1,n_bootstrap))
            # Create a bootstrapped trajectory bt
            pos_bt = np.zeros(pos.shape)
            random_time = random.random(time_length)*time_length
            for h in np.arange(time_length):
                pos_bt[h,:] = pos[random_time[h],:]

            # Covariance matrix of the total bootstrapped trajectory
            covar_bt_total, tmp = calc_cov_mat_internal(pos_bt)

            # Initialize the vector containing the average similarity over the different blocks,
            # for the given bootstrapped trajectory
            sim_bt = np.zeros(len(n_parts))

            for j in range(len(n_parts)):
                # Calculate the number of blocks, as well as the block size
                n_part = n_parts[j]
                block_size = (len(time)-eq_size)//n_part
                tot_sim_bt = 0
                # Calculate the total similarity of this number of blocks, for this bootstrapped trajectory
                for i in range(n_part):
                    start = eq_size + i*block_size
                    pos_bt_block = pos_bt[start:start+block_size:step]
                    covars_bt, tmp = calc_cov_mat_internal(pos_bt_block)
                    tot_sim_bt += pca_similarity(covars_bt, covar_bt_total)
                # Calculate the average similarity for this number of blocks, for this bootstrapped trajectory
                sim_bt[j] = tot_sim_bt/n_part
            sim_bt_all += sim_bt
    # Calculate the average similarity over all bootstrapped trajectories
    sim_bt_all /= n_bootstrap

    ### ---PART C: PROCESSING THE RESULTS --- ###

    pt.clf()
    pt.semilogx((time[-1]-time[0])/n_parts/picosecond, sim_block/sim_bt_all, 'r-')
    pt.semilogx((time[-1]-time[0])/n_parts/picosecond, sim_block/sim_bt_all, 'rs')
    pt.xlabel('Block size [ps]')
    pt.ylabel('PCA similarity (1=perfectly similar)')
    pt.title('Convergence assessment via PCA: ' + fn)
    pt.ylim([0,1])
    pt.savefig(fn+'.png')
    pt.savefig(fn+'.pdf', format='pdf')
    return sim_block/sim_bt_all
                print("State: {0}".format(state_t))
                print("Angle to Goal: {0}".format(environmental_data.raw_angle_to_goal))

                # --------- GET ACTION AND PERFORM IT ---------
                # Compute the deterministic (??? deterministic if its an approximation ???) action
                state_t_sub_features = basis_functions.computeFeatures(state_t.values())
                state_t_full_features = np.r_[state_t_sub_features,
                                     np.array([0.0 for i in range(len(state_t_sub_features))])]
                action_t_deterministic = policy.computeOutput(state_t_sub_features)
                # action_t_deterministic = policy.computeOutput(np.array(state_t.values()))
                # action_t_deterministic = policy.computeOutput(state_t_full_features)
                # Add noise/exploration to the action in the form of a Gaussian/Normal distribution
                # if state_t["yaw_velocity"] > 0.6 or state_t["yaw_velocity"] < 0.4:
                if step_number % (3*CONFIG["spin_rate"]) == 0:
                    if random.random() < CONFIG["epsilon"]:
                        exploration = np.random.normal(0.0, CONFIG["exploration_sigma"])
                        CONFIG["exploration_sigma"] = CONFIG["exploration_sigma"] - CONFIG["exploration_sigma"]
                    else:
                        exploration = 0.0
                # else:
                #     exploration = np.random.normal(0.0, 0.1)
                action_t = action_t_deterministic + exploration
                print("Deterministic Action: {0}".format(action_t_deterministic))
                print("Stochastic Action: {0}".format(action_t))
                if prev_action is None:
                    prev_action = deepcopy(action_t)

                # Log the deterministic action chosen for each state according to the policy LA
                # use the deterministic action just so that it is cleaner to look at for debugging
                logging_list = state_t.values()
Example no. 57
 def pertGlasPos(self, num):
     self.env.pert = asarray([random.random()*2.0 - 1.0, 0.0, random.random()*0.5 + 0.5])
Example no. 58
def gigrnd(p, a, b):
    # setup -- sample from the two-parameter version gig(lam,omega)
    p = float(p)
    a = float(a)
    b = float(b)
    lam = p
    omega = math.sqrt(a * b)

    if lam < 0:
        lam = -lam
        swap = True
    else:
        swap = False

    alpha = math.sqrt(math.pow(omega, 2) + math.pow(lam, 2)) - lam

    # find t
    x = -psi(1, alpha, lam)
    if (x >= 1 / 2) and (x <= 2):
        t = 1
    elif x > 2:
        t = math.sqrt(2 / (alpha + lam))
    elif x < 1 / 2:
        t = math.log(4 / (alpha + 2 * lam))

    # find s
    x = -psi(-1, alpha, lam)
    if (x >= 1 / 2) and (x <= 2):
        s = 1
    elif x > 2:
        s = math.sqrt(4 / (alpha * math.cosh(1) + lam))
    elif x < 1 / 2:
        if alpha == 0:
            s = 1 / lam
        else:
            if lam == 0:
                s = math.log(1 + 1 / alpha +
                             math.sqrt(1 / math.pow(alpha, 2) + 2 / alpha))
            else:
                s = min(
                    1 / lam,
                    math.log(1 + 1 / alpha +
                             math.sqrt(1 / math.pow(alpha, 2) + 2 / alpha)))

    # find auxiliary parameters
    eta = -psi(t, alpha, lam)
    zeta = -dpsi(t, alpha, lam)
    theta = -psi(-s, alpha, lam)
    xi = dpsi(-s, alpha, lam)

    p = 1 / xi
    r = 1 / zeta

    td = t - r * eta
    sd = s - p * theta
    q = td + sd

    # random variate generation
    while True:
        U = random.random()
        V = random.random()
        W = random.random()
        if U < q / (p + q + r):
            rnd = -sd + q * V
        elif U < (q + r) / (p + q + r):
            rnd = td - r * math.log(V)
        else:
            rnd = -sd + p * math.log(V)

        f1 = math.exp(-eta - zeta * (rnd - t))
        f2 = math.exp(-theta + xi * (rnd + s))
        if W * g(rnd, sd, td, f1, f2) <= math.exp(psi(rnd, alpha, lam)):
            break

    # transform back to the three-parameter version gig(p,a,b)
    rnd = math.exp(rnd) * (
        lam / omega + math.sqrt(1 + math.pow(lam, 2) / math.pow(omega, 2)))
    if swap:
        rnd = 1 / rnd

    rnd = rnd / math.sqrt(a / b)
    return rnd
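gigrnd relies on the helpers psi, dpsi and g, which are not included in the snippet. A plausible reconstruction of these helpers for Devroye-style ratio-of-uniforms GIG sampling (treat the exact forms as an assumption; they follow common implementations such as the one in PRS-CS):

import math

def psi(x, alpha, lam):
    # log of the transformed GIG density used by the rejection sampler (assumed form)
    return -alpha * (math.cosh(x) - 1.0) - lam * (math.exp(x) - x - 1.0)

def dpsi(x, alpha, lam):
    # derivative of psi (assumed form)
    return -alpha * math.sinh(x) - lam * (math.exp(x) - 1.0)

def g(x, sd, td, f1, f2):
    # piecewise envelope: 1 on the central interval, f1 / f2 on the right / left tail (assumed form)
    if -sd <= x <= td:
        return 1.0
    elif x > td:
        return f1
    else:
        return f2

With these helpers (and the standard random module) in place, a call such as gigrnd(0.5, 1.0, 2.0) returns a single draw from the three-parameter GIG(0.5, 1, 2) distribution.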