Example #1
def transitionAndWeight(states, y, parameters, t):
    Nx = states.shape[0]
    Ntheta = states.shape[2]
    weights = zeros((Nx, Ntheta))
    newstates = zeros_like(states)
    poissonparameters1 = parameters[6, :] * parameters[4, :] * (parameters[2, :]**2) / parameters[3, :]
    poissonparameters2 = (1 - parameters[6, :]) * (parameters[4, :] + \
            parameters[5, :]) * (parameters[2, :]**2) / parameters[3, :]
    poissonparameters1 = repeat(poissonparameters1[:,newaxis], Nx, axis = 1)
    poissonparameters2 = repeat(poissonparameters2[:,newaxis], Nx, axis = 1)
    for indextheta in range(Ntheta):
        allK1 = array(random.poisson(lam = array(poissonparameters1[indextheta,:]))).reshape(Nx)
        allK1[allK1 > 10**4] = 10**4
        allK1 = array(allK1).reshape(Nx)
        sumK1 = numpysum(allK1)
        allK2 = array(random.poisson(lam = poissonparameters2[indextheta,:])).reshape(Nx)
        allK2[allK2 > 10**4] = 10**4
        allK2 = array(allK2).reshape(Nx)
        sumK2 = numpysum(allK2)
        alluniforms1 = random.uniform(size = 2 * sumK1)
        alluniforms2 = random.uniform(size = 2 * sumK2)
        subresults = subtransitionAndWeight(states[..., indextheta], y, parameters[:, indextheta], \
                         alluniforms1, allK1, alluniforms2, allK2)
        newstates[..., indextheta] = subresults["states"]
        weights[..., indextheta] = subresults["weights"]
    return {"states": newstates , "weights": weights}
Example #2
    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        
        # Scalar: amount of effort a scientist is born with (M)
        self.start_effort = poisson(lam=10)

        # Scalar: rate of decay for starting_effort (lambda_e)
        self.start_effort_decay = 1

        # Scalar: amount of effort a scientist has left
        self.avail_effort = self.start_effort  # scalar draw, no copy needed
        
        # Investment cost for each idea for the scientist
        self.k = poisson(lam=2, size=model.total_ideas)
        
        # Parameters determining perceived returns for ideas
        self.sds = poisson(4, model.total_ideas)
        self.means = poisson(50, model.total_ideas)
        
        # Create the ideas/returns matrix
        self.returns_matrix = \
            create_return_matrix(model.total_ideas, max(model.max_investment), self.sds, self.means)
        
        # Records when the scientist was born
        self.birth_time = model.schedule.time
        
        # Array keeping track of how much effort this scientist has invested in each idea
        self.effort_invested = np.zeros(model.total_ideas)
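A small illustrative sketch (an assumption, not code from either project above): without a size argument numpy.random.poisson returns a single scalar draw, while size=k returns an array of k independent draws, which is the difference between start_effort and k above.

from numpy.random import poisson

effort = poisson(lam=10)          # single scalar draw (one scientist's effort)
costs = poisson(lam=2, size=5)    # array of 5 independent draws (one per idea)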
  def update_with_poisson(self, l, t):
    self.net.layer[0].I = rn.poisson(l, INHIB_NEURONS) * EXTRA_I 

    for i in range(1, EXCIT_MODULES + 1):
      self.net.layer[i].I = rn.poisson(l, EXCIT_NEURONS_PER_MODULE) * EXTRA_I 
    
    self.net.Update(t)
Example #4
def tst_robust_glm():
  from scitbx.glmtbx import robust_glm
  from scitbx.array_family import flex
  from numpy.random import poisson, seed
  from math import exp

  seed(0)

  n_obs = 100

  for c in range(1, 100):

    # Test for a constant value
    X = flex.double([1 for i in range(n_obs)])
    X.reshape(flex.grid(n_obs, 1))
    Y = flex.double(list(poisson(c, n_obs)))
    B = flex.double([0])
    result = robust_glm(X, Y, B, family="poisson", max_iter=100)
    assert(abs(c - exp(result.parameters()[0])) < 0.1*c)

  # Now test with a massive outlier
  for c in range(1, 100):

    # Test for a constant value
    X = flex.double([1 for i in range(n_obs)])
    X.reshape(flex.grid(n_obs, 1))
    Y = flex.double(list(poisson(c, n_obs)))
    Y[n_obs//2] = c * 100
    B = flex.double([0])
    result = robust_glm(X, Y, B, family="poisson", max_iter=100)
    assert(abs(c - exp(result.parameters()[0])) < 0.1*c)

  print 'OK'
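A quick numpy-only check (a sketch, independent of scitbx) of the property the assertions above rely on: for a constant Poisson model the fitted rate is essentially the sample mean, which converges to the true rate c.

import numpy as np

c, n_obs = 10, 100
y = np.random.poisson(c, n_obs)
assert abs(c - y.mean()) < 0.1 * c + 3 * np.sqrt(c / n_obs)  # loose tolerance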
    def setUp(self):
        # produce a dictionary of numbers n
        # and their factorizations
        # d[n] = [(p1,e1),(p2,e2)...(pk,ek)]
        # where p1..pk are the distinct prime
        # factors of n in order    

        # for example:
        # d[12] = [(2,2),(3,1)]
        
        # j is the number of examples to generate
        j = 50

        # d is a dictionary 
        self.d = {}

        # pp is the average number of distinct prime factors
        pp = 5

        # ee is the average exponent 
        ee = 5
        
        for i in range(j):
            p = poisson(pp)
            shuffled_primes = copy.deepcopy(ppp)
            shuffle(shuffled_primes)
            factors = shuffled_primes[:p]  # use the Poisson-drawn count from above
            factors.sort()
            pairs = []
            for factor in factors:
                # make sure e != 0
                e = poisson(ee) + 1 
                pairs.append((factor,e))
            n = self.unfactor(pairs)
            self.d[n] = pairs
Example #6
 def reproduce_pop_by_rec_probability (self):
     '''
     reproduces the population by choosing two pools of genotypes from it
     2 organisms are paired and their sex_locus probabilities are averaged
     if a random number is lower than this value, they will recombine and that value
     is assigned as the new organism's sex_locus value. otherwise, one will be
     cloned.
     '''
     new_pop = Population()
     chosen_genotypes1 = Population.choose_genotype(self) #choose 2 pools of genotypes from current pop
     chosen_genotypes2 = Population.choose_genotype(self)
     for i in range(0,len(chosen_genotypes1)):
         if len(new_pop.organisms) < self.population_size:
             offspring_sex_probability = (chosen_genotypes1[i].sex_locus + chosen_genotypes2[i].sex_locus)/2.0 #calc the avg of each genotype's sex_locus
             if random.random() < offspring_sex_probability: 
                 recombined = (genotype.Genotype.recombine(chosen_genotypes1[i], chosen_genotypes2[i]))
                 recombined.mutate_random(rnd.poisson(recombined.mutation_rate))
                 recombined.last_repro_mode = "sexual" #organism will be labeled as product of sexual reproduction
                 recombined.sex_locus = offspring_sex_probability #avg of parents' sex_locus assigned to offspring
                 new_pop.organisms.append(recombined)
             else:
                 chosen_genotypes1[i].mutate_random(rnd.poisson(chosen_genotypes1[i].mutation_rate))
                 chosen_genotypes1[i].last_repro_mode = "asexual" #organism labeled as product of asexual reproduction
                 chosen_genotypes1[i].sex_locus = offspring_sex_probability
                 new_pop.organisms.append(chosen_genotypes1[i])
     Population.get_population_fitness(new_pop) 
     return new_pop                   
def underdogwin(gpm1, gpm2, elapsed_time, thresh, total_time):
    remaining_time = total_time - elapsed_time
    ok1 = [poisson(gpm1, remaining_time).sum() for i in range(100)]
    ok2 = [poisson(gpm2, remaining_time).sum() for i in range(100)]
    okdiff = np.array(ok1) - np.array(ok2)
    downbyx = thresh * elapsed_time / float(total_time)
    over5 = okdiff > downbyx
    return len(okdiff[over5]), downbyx
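A sketch of the equivalence the sampler above exploits (an assumption, not from the original code): summing remaining_time independent Poisson(gpm) draws has the same distribution as one Poisson(gpm * remaining_time) draw, because sums of independent Poissons are Poisson.

import numpy as np

gpm, remaining_time = 0.4, 25                        # hypothetical goals/minute, minutes left
total_a = np.random.poisson(gpm, remaining_time).sum()
total_b = np.random.poisson(gpm * remaining_time)    # same distribution as total_a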
Example #8
 def generate_actions(self):
     # get feed
     ba   = self.ba
     feed = ba['feed'][ba['venue_id']][0]
     best_ask = feed['BEST_ASK1'] if feed['BEST_ASK1'] != None else self['start_price']
     best_bid = feed['BEST_BID1'] if feed['BEST_BID1'] != None else self['start_price']
     # generate actions
     while len(self.actions) == 0:
         # initialize an empty list for actions
         actions    = []
         actions_LO = []
         actions_MO = []
         # loop on the side
         for side in [Order.Sell, Order.Buy]:
             # draw a poisson sample
             n = poisson(self['alpha']*self['timestep']/(2*self['sigma']))
             # build actions and generate timestamp
             for i in range(n):
                 if side == Order.Buy:
                     price       = best_ask - self['tick_size']*int(self['price_range']*random()+1)
                 else:                        
                     price       = best_bid + self['tick_size']*int(self['price_range']*random()+1)
                 #draw a half-normal sample
                 qty = gauss(self['sigma'], self.order_size_std)
                 qty = abs(int(qty))
                 actions_LO.append({'timestamp'   : self.start_time_interval+long(self['timestep']*random()),
                                    'type'        : Order.Limit,
                                    'quantity'    : qty,
                                    'side'        : side,
                                    'price'       : self['tick_size']*round(price/self['tick_size'])})
         if feed['TIME_NUM'] >= self['execution_start_time']:
             m = poisson(self['mu']*self['timestep']/(2*self['sigma']))
             side = int(2*random())
             for i in range(m):
                 #draw a half-normal sample
                 qty = gauss(self['sigma'], self.order_size_std)
                 qty = abs(int(qty))
                 actions_MO.append({'timestamp'   : self.start_time_interval+long(self['timestep']*random()),
                                    'type'        : Order.Market,
                                    'quantity'    : qty,
                                    'side'        : side,
                                    'price'       : 0.0})
         # concatenate the two action lists: market orders and limit orders
         actions.extend(actions_MO)
         actions.extend(actions_LO)   
         # if there is no action, just skip this part
         if len(actions) > 0:
             # we sort the actions list
             actions.sort(key = lambda x: x['timestamp'])
             # make sure timestamps are different !
             for i in range(len(actions)-1):
                 if actions[i]['timestamp'] == actions[i+1]['timestamp']:
                     actions[i+1]['timestamp'] += 1L
         # save actions
         self.actions = actions
         # increment the start_time_interval
         self.start_time_interval += self['timestep']
Example #9
    def generate_next_packet(self, period, destination=None, data=None, *args, **kwargs):
        """
        Lowest-count node gets a message indicating what number packet it is that
        is addressed to it with a particular stream length.
        The counter counts the 'actual' number of packets while the packet.data
        carries the zero-indexed 'packet id'

        :param period:
        :param destination:
        :param data:
        :param args:
        :param kwargs:
        """
        if destination is not None:
            raise RuntimeWarning(
                "This isn't the kind of application you use with a destination bub")

        # Update packet_counters with information from the routing layer
        indirect_nodes = filter(
            lambda n: n not in self.total_counter.keys(), self.layercake.net.keys())
        if len(indirect_nodes):
            self.logger.debug("Inferred new nodes: {0}".format(indirect_nodes))
            for node in indirect_nodes:
                self.total_counter[node] = 0

        # DOES NOT MERGE TARGET COUNTER
        self.merge_counters()

        if self.current_target is None:
            self.current_target = self.select_target()

        destination = self.current_target
        packet_id = self.layercake.hostname + str(self.stats['packets_sent'])
        packet = {"ID": packet_id,
                  "dest": destination,
                  "source": self.layercake.hostname,
                  "route": [], "type": "DATA",
                  "time_stamp": Sim.now(),
                  "length": self.layercake.packet_length,
                  "data": self.test_packet_counter[destination]}
        self.sent_counter[destination] += 1
        self.test_packet_counter[destination] += 1

        if self.test_packet_counter[destination] % self.test_stream_length:
            # In Stream
            period = poisson(float(self.period * self.stream_period_ratio))
        else:
            # Finished Stream
            period = poisson(float(self.period))
            if DEBUG:
                self.logger.info("Finished Stream {0} for {1}, sleeping for {2}".format(
                    self.test_packet_counter[destination] / self.test_stream_length,
                    destination,
                    period
                ))
            self.current_target = None
        return packet, period
def RobotStep(args):
  global t

  # Input from Sensors
  SL, SR = Env.GetSensors(x[t], y[t], w[t])

  RL_spikes = 0.0
  RR_spikes = 0.0
  for t2 in xrange(dt):

    # Deliver stimulus as a Poisson spike stream to the sensor neurons and
    # noisy base current to the motor neurons
    I = np.hstack([rn.poisson(SL*15, N1), rn.poisson(SR*15, N2),
                   5*rn.randn(N3), 5*rn.randn(N4)])

    # Update network
    net.setCurrent(I)
    fired = net.update()

    RL_spikes += np.sum(np.logical_and(fired > (N1+N2), fired < N1+N2+N3))
    RR_spikes += np.sum(fired > (N1+N2+N3))

    # Maintain record of membrane potential
    v[t2,:],_ = net.getState()

  # Output to motors
  # Calculate motor firing rates in Hz
  RL = 1.0*RL_spikes/(dt*N3)*1000.0
  RR = 1.0*RR_spikes/(dt*N4)*1000.0

  # Set wheel velocities (as fractions of Umax)
  UL = (Umin/Umax + RL/Rmax*(1 - Umin/Umax))
  UR = (Umin/Umax + RR/Rmax*(1 - Umin/Umax))

  # Update Environment
  x[t+1], y[t+1], w[t+1] = RobotUpdate(x[t], y[t], w[t], UL, UR,
                                       Umax, dt, xmax, ymax)

  ## PLOTTING
  for i in range(Ns):
    pl11[i].set_data(range(dt), v[:,i])
    pl12[i].set_data(range(dt), v[:,i+Ns])

  for i in range(Nm):
    pl21[i].set_data(range(dt), v[:,2*Ns+i])
    pl22[i].set_data(range(dt), v[:,2*Ns+Nm+i])

  ax2.scatter(x, y)
  manager1.canvas.draw()
  manager2.canvas.draw()

  t += 1

  if t == len(x)-1:
    print 'Terminating simulation'
    StopSimulation()
def test_mixture_random_shape():
    # test the shape broadcasting in mixture random
    y = np.concatenate([nr.poisson(5, size=10),
                        nr.poisson(9, size=10)])
    with pm.Model() as m:
        comp0 = pm.Poisson.dist(mu=np.ones(2))
        w0 = pm.Dirichlet('w0', a=np.ones(2))
        like0 = pm.Mixture('like0',
                           w=w0,
                           comp_dists=comp0,
                           observed=y)

        comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),
                                shape=(20, 2))
        w1 = pm.Dirichlet('w1', a=np.ones(2))
        like1 = pm.Mixture('like1',
                           w=w1,
                           comp_dists=comp1,
                           observed=y)

        comp2 = pm.Poisson.dist(mu=np.ones(2))
        w2 = pm.Dirichlet('w2',
                          a=np.ones(2),
                          shape=(20, 2))
        like2 = pm.Mixture('like2',
                           w=w2,
                           comp_dists=comp2,
                           observed=y)

        comp3 = pm.Poisson.dist(mu=np.ones(2),
                                shape=(20, 2))
        w3 = pm.Dirichlet('w3',
                          a=np.ones(2),
                          shape=(20, 2))
        like3 = pm.Mixture('like3',
                           w=w3,
                           comp_dists=comp3,
                           observed=y)

    rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],
                                             point=m.test_point,
                                             size=100)
    assert rand0.shape == (100, 20)
    assert rand1.shape == (100, 20)
    assert rand2.shape == (100, 20)
    assert rand3.shape == (100, 20)

    with m:
        ppc = pm.sample_posterior_predictive([m.test_point], samples=200)
    assert ppc['like0'].shape == (200, 20)
    assert ppc['like1'].shape == (200, 20)
    assert ppc['like2'].shape == (200, 20)
    assert ppc['like3'].shape == (200, 20)
Example #12
File: tests.py Project: gic888/gdblocks
 def r(self, sid):
     b = 2.0 * float(sid + 1) / (self.nstims + 2)
     first = b * self.exp / 2.0
     scnd = self.exp - first
     if hasattr(self, 'perfect'):
         first = int(round(first))
         scnd = int(round(scnd))
     else:
         first = npr.poisson(first)
         scnd = npr.poisson(scnd)
     r = np.concatenate([npr.normal(self.m1, self.sd, first), npr.normal(self.m2, self.sd, scnd)])
     self.hist = [r]
Example #13
def axc_ndarr(navg, dims, bgs, flucfs1, flucfs2, flucas1, flucas2, t):
    '''
    Routine for generating a two dimensional array of averaged cross
    correlations.

    INPUTS:
    navg        :       Number of cross correlations to generate and average at
    each point.
    dims        :       Dimensions of the array to generate xcorrs for.
                        Averaged xcorrs will be generated for the last point
    bgs         :       Background count rate. Assumed constant for all
                        elements.
    flucfs1     :       Array of dimensions dims with fluctuation frequencies.
                        Each element is a tuple with (flucf1, flucf2).
    flucfs2     :       Array of dimensions dims with fluctuation frequencies
                        for PMT 2.
    flucas1     :       Array of dimensionality dims with fluctuation
                        amplitudes for PMT 1. Each element is a tuple with (fluca1,
                        fluca2)
    flucas2     :       Array of dimensionality dims with fluctuation
                        amplitudes for PMT 2.

    As an example, if I wanted to generate a 2x2 matrix of cross correlations,
    each of which had been averaged over 50 runs measuring a constant 1200 Hz
    fluctuation, I'd say:

    bgs = 10000
    flucfs = np.array([1200,1200])
    dims = (2, 2)
    navg = 50
    xc2d = axc_2darr(50, dims, bgs, flucfs, flucas, t)
    '''
    #Generate all the background arrays.
    dims = dims + (navg,)
    bg1 = rand.poisson(bgs, size=dims + (np.size(t),))
    bg2 = rand.poisson(bgs, size=dims + (np.size(t),))
    #Generate the ndimensional PMT1 and PMT2 matrices.
    #Generate meshgrids for each of the desired coordinates (t, flucas1,
    #flucas2, flucfs1, flucfs2)
    #Each one of these meshgrid lines generates an ndimensional grid of what we
    #want to use for making the function.
    fa1g, fs1g, xcpref, tt = np.meshgrid(*[flucfs1, flucas1, np.ones((navg,)), t])
    fa2g, fs2g, xcpref, tt = np.meshgrid(*[flucfs2, flucas2, np.ones((navg,)), t])
    PMT1 = fa1g * np.sin(2 * np.pi * fs1g * tt) + bg1
    PMT2 = fa2g * np.sin(2 * np.pi * fs2g * tt) + bg2
    #Add the background noise to it.
    #Cross correlate the two along each last dimension.
    #Use a helper function which cross correlates the corresponding 1d arrays
    #in a certain dimension.
    uxc12 = uxcorr_ndim(PMT1, PMT2, axis=-1)
    return (uxc12, PMT1, PMT2)
Example #14
def add_zingers(imageset, params):

  from dxtbx.format.FormatCBF import FormatCBF
  assert issubclass(imageset.reader().get_format_class(), FormatCBF), (
    "Only CBF format images supported")

  from cbflib_adaptbx import compress
  import binascii
  from numpy.random import poisson
  from random import sample
  import os.path

  for i in range(len(imageset)):
    image_data = imageset[i]

    num = poisson(params.zingers.average_per_image)
    index = sample(range(len(image_data)), num)
    value = list(poisson(params.zingers.average_intensity, num))
    for j, v in zip(index, value):
      image_data[j] += v
    out_image = os.path.join(params.output.directory, "image_%04i.cbf" % i)

    start_tag = binascii.unhexlify('0c1a04d5')

    data = open(imageset.get_path(i), 'rb').read()
    data_offset = data.find(start_tag)
    cbf_header = data[:data_offset]

    new_header = []
    compressed = compress(image_data)

    old_size = 0

    for record in cbf_header.split('\n')[:-1]:
      if 'X-Binary-Size:' in record:
        old_size = int(record.split()[-1])
        new_header.append('X-Binary-Size: %d\r\n' % len(compressed))
      elif 'Content-MD5' in record:
        pass
      else:
        new_header.append('%s\n' % record)

    tailer = data[data_offset + 4 + old_size:]

    with open(out_image, 'wb') as f:
      f.write(''.join(new_header) + start_tag + compressed + tailer)
      print '%s written with %d zingers' % (out_image, num)

  return
Example #15
def test_glm_any_model():
    nbin = 10
    align = 'hold'
    lag = 0.1 # s

    ds = datasets['small']
    dc = DataCollection(ds.get_files())
    dc.add_unit(ds.get_units()[0], lag)
    bnd = dc.make_binned(nbin=nbin, align=align)
    ntask, nrep, nunit, nbin = bnd.shape

    # make perfect test counts
    # direction-only model
    tp = radians([20, 10]) # degrees
    b0 = log(10) # log(Hz)
    pd = pol2cart(tp)

    drn = kinematics.get_idir(bnd.pos, axis=2)
    rate = exp(dot(drn, pd) + b0)
    window_size = diff(bnd.bin_edges, axis=2)
    count_mean = rate * window_size
    count = poisson(count_mean)
    count = count[:,:,None]
    bnd.set_PSTHs(count)

    # now fit data for pd
    count, pos, time = bnd.get_for_regress()
    bnom, bse_nom = glm_any_model(count, pos, time, model='kd')
    pd_exp = unitvec(bnom['d'])
    tp_exp = cart2pol(pd_exp)
    
    acceptable_err = 0.05 # about 3 degrees absolute error
    err = abs(tp - tp_exp)
    assert_array_less(err, acceptable_err)
Example #16
  def submitPilotsForTaskQueue( self, taskQueueDict, waitingPilots ):

    from numpy.random import poisson
    from DIRAC.WorkloadManagementSystem.private.Queues import maxCPUSegments

    taskQueueID = taskQueueDict['TaskQueueID']
    maxCPU = maxCPUSegments[-1]
    extraPilotFraction = self.am_getOption( 'extraPilotFraction' )
    extraPilots = self.am_getOption( 'extraPilots' )

    taskQueuePriority = taskQueueDict['Priority']
    self.log.verbose( 'Priority for TaskQueue %s:' % taskQueueID, taskQueuePriority )
    taskQueueCPU = max( taskQueueDict['CPUTime'], self.am_getOption( 'lowestCPUBoost' ) )
    self.log.verbose( 'CPUTime  for TaskQueue %s:' % taskQueueID, taskQueueCPU )
    taskQueueJobs = taskQueueDict['Jobs']
    self.log.verbose( 'Jobs in TaskQueue %s:' % taskQueueID, taskQueueJobs )

    # Determine number of pilots to submit, boosting TaskQueues with low CPU requirements
    pilotsToSubmit = poisson( ( self.pilotsPerPriority * taskQueuePriority +
                                self.pilotsPerJob * taskQueueJobs ) * maxCPU / taskQueueCPU )
    # limit the number of pilots according to the number of waiting job in the TaskQueue
    # and the number of already submitted pilots for that TaskQueue
    pilotsToSubmit = min( pilotsToSubmit,
                          int( ( 1 + extraPilotFraction ) * taskQueueJobs ) + extraPilots - waitingPilots )

    if pilotsToSubmit <= 0: return S_OK( 0 )
    self.log.verbose( 'Submitting %s pilots for TaskQueue %s' % ( pilotsToSubmit, taskQueueID ) )

    return self.__submitPilots( taskQueueDict, pilotsToSubmit )
Example #17
def poissonize(h,tol):
    """
    Monte-Carlo sample the mass function, using a Poisson distribution in each bin.
    h = halo model instance
    tol = threshold beyond which we don't bother with the Monte Carlism.  If there's a high number of
          haloes in a bin (>~1000?), Poissonizing will make little difference
    """
    
    poissonnmz = h.nmz*0.
    arraytopoissonize = h.nmz*(h.volume*h.m*h.dlogm)
    #pylab.clf()
    #pylab.loglog(h.m,1.+arraytopoissonize)
    #print arraytopoissonize
    tolm2 = tol**(-2)
    for j in range(len(h.nmz)):
        if (arraytopoissonize[j] < tolm2) and (arraytopoissonize[j]>1e-45):
            arraytopoissonize[j] = RandomArray.poisson(arraytopoissonize[j])
            #poissonnmz[j] = jimmy
            #print arraytopoissonize[j],poissonnmz[j]
        elif (arraytopoissonize[j] <= 1e-45):
            arraytopoissonize[j] = 0.

    #pylab.loglog(h.m,1.+poissonnmz)
    ans = arraytopoissonize/(h.volume*h.m*h.dlogm)

    #pylab.savefig('poissonize.ps')

    return(ans)
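A minimal numpy sketch of the same idea (an assumption, not the halo-model code): replace each expected bin count with a Poisson draw, but only in bins where the expected count is below tol**-2, since shot noise is negligible in well-populated bins.

import numpy as np

expected = np.array([3.0, 40.0, 2.0e3, 1.0e7])   # hypothetical expected counts per bin
tol = 0.03
noisy = expected.copy()
small = expected < tol**(-2)                     # bins with fewer than ~tol**-2 haloes
noisy[small] = np.random.poisson(expected[small])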
Example #18
def generate_toy_MC_from_distribution(distribution):
    initial_values = list(distribution)
    new_values = [rnd.poisson(value) for value in initial_values]
    #statistical errors
    new_errors = [sqrt(value) for value in new_values]
    toy_MC = value_error_tuplelist_to_hist(zip(new_values, new_errors), list(distribution.xedges()))
    return toy_MC
Example #19
	def tgen(self):
		poiss = random.poisson(lam = 1000, size = 200) # 1000 = 1 day = expected time between measuremens
		poiss = poiss / 1000.0
		x_axis = ones(len(poiss),dtype=float)
		for i in arange(len(x_axis))[:-1]:
			x_axis[i+1] = x_axis[i] + poiss[i]
		return x_axis
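An equivalent vectorised sketch (an assumption, not from the original project): the loop above is a cumulative sum of Poisson-distributed inter-measurement gaps, offset so the first time stamp is 1.

import numpy as np

gaps = np.random.poisson(lam=1000, size=200) / 1000.0
x_axis = 1.0 + np.concatenate(([0.0], np.cumsum(gaps[:-1])))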
Example #20
def generate_toy_MC_from_values( values ):
    values = [ 0 if i < 0 else i for i in values ]

    new_values = [poisson( value ) for value in values]
    # statistical errors
    new_errors = [sqrt( value ) for value in new_values]
    return new_values, new_errors
Example #21
 def diffuse_ribosomes_to_initiation_site(self, mRNA, deltat, time):
     """Perform Poisson experiment to test how many ribosomes make it initiation site and try to attach."""
     if self.ribo_free > 0:
         # k = npr.binomial(self.ribo_free, mRNA.init_rate*deltat, 1)[0]  # number of ribosomes that diffuse to the initiation site during deltat
         k = npr.poisson(
             self.ribo_free * mRNA.init_rate * deltat)  # number of ribosomes that diffuse to the initiation site during deltat
         # log.debug('update_initiation: %s ribosomes out of %s diffused to init site at mRNA %s with probability %s', k, self.ribo_free, mRNA.index, self.init_rate*deltat)
         for i in range(k):  # currently k>1 will not attach k ribosomes, TODO:
             if not mRNA.ribosomes or not mRNA.first_position_occupied():
                 # log.debug("update_initiation: found mRNA with free first position")
                 if self.GTP > 0 and self.ATP > 0:
                     if mRNA.attach_ribosome_at_start():
                         # log.debug("update_initiation: attaching ribosome at start of mRNA %s", mRNA.index)
                         self.ribo_bound += 1
                         self.ribo_free -= 1
                         self.GTP -= 1
                         self.GDP += 1
                         self.ATP -= 2
                         self.AMP += 2
                         if not mRNA.tic:  # no time measurement ongoing on this mRNA
                             mRNA.tic = time
                             mRNA.toc = len(mRNA.ribosomes) + 1  # number of ribos + 1 to countdown to end of time measurement
                     else:
                         log.warning("update_initiation: unsuccessful attempt to attach ribosome")
                 else:
                     log.warning("update_initiation: no GTP or no ATP")
             else:
                 # log.warning("update_initiation: unsuccessful attempt to attach ribosome: first position occupied")
                 pass
     else:
         # log.warning("update_initiation: no free ribosomes left")
         pass
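A side-by-side sketch of the two samplers mentioned above (an assumption, not project code): when the per-ribosome attachment probability is small, the Binomial(n, p) count in the commented-out line is well approximated by the Poisson(n*p) draw that is actually used.

import numpy.random as npr

n, p = 10000, 1e-4                 # hypothetical free ribosomes and attachment probability
k_binomial = npr.binomial(n, p)    # exact count
k_poisson = npr.poisson(n * p)     # Poisson approximation, as in the code above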
Example #22
def sexer_pestieau_poisson(n):  #TODO TODO chk
	from numpy.random import poisson
	nkidsvec = poisson(2,n)
	for nkids in nkidsvec:
		result = 'F'*nkids
		print result,
		yield result
Example #23
    def sample_topic_freqs(word_topic_rates, mask):
        """Given observed_word_freqs, that each word was held out (if mask is
        0) or not held out (if mask is 1), and each word w was produced via
        topic i according to a Poisson with rate word_topic_rates[w, i],
        returns a sample from the posterior distribution on the number of times
        a word was generated from each topic.
        """

        # First, sample the contribution from words where mask was 0.  Each
        # such word is sampled from a poisson distribution.  The sum of poisson
        # distributions is poisson, so we only need to sample one poisson for
        # each topic.
        topic_freqs = \
            poisson(np.sum((1 - mask)[:, np.newaxis] * word_topic_rates, 0))

        # For each word where mask was 1, we sample from a multinomial
        # distribution with probabilities proportional to the word-topic rates
        # for that word.  For efficiency, we skip over words that didn't occur
        # at all: multinomial_words is an array of the indices of words for
        # which we do need to take a sample.
        multinomial_words = np.arange(vocab_size)[
            np.array(mask * observed_word_freqs, dtype = bool)]
        for word in multinomial_words:
            topic_rates = word_topic_rates[word, :]
            topic_freqs += multinomial(observed_word_freqs[word],
                                       topic_rates / sum(topic_rates))
        return topic_freqs
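A tiny numpy check of the property the first sampling step relies on (a sketch with made-up rates): the sum of independent Poisson counts is Poisson with the summed rate, so one draw per topic covers all masked-out words.

import numpy as np

rates = np.array([0.2, 1.5, 3.0])           # hypothetical per-word rates for one topic
one_draw = np.random.poisson(rates.sum())   # single Poisson draw with the summed rate
per_word = np.random.poisson(rates).sum()   # distributed identically to one_draw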
Example #24
 def run(self):
   for t in range(1, self.runtime):
     for neuron in self.neurons:
       neuron.I = 15 if rn.poisson(0.01, 1)[0] > 0 else 0
     self.updateNeurons(t)
       
   return
def sim_lemonade(num, mean = 600, sd = 30, pois = False):
  ## Simulate the profits and tips for
  ## a lemonade stand.
  import numpy.random as nr
  
  ## number of customer arrivals
  if pois:
    arrivals = nr.poisson(lam = mean, size = num)
  else:
    arrivals = nr.normal(loc = mean, scale = sd, size = num)

  print(dist_summary(arrivals, 'customer arrivals per day'))
  
  ## Compute distribution of average profit per arrival
  proft = gen_profits(num)
  print(dist_summary(proft, 'profit per arrival'))
  
  ## Total profits are profit per arrival 
  ## times number of arrivals.
  total_profit = arrivals * proft 
  print(dist_summary(total_profit, 'total profit per day'))
  
  ## Compute distribution of average tips per arrival
  tps = gen_tips(num)
  print(dist_summary(tps, 'tips per arrival'))
  
  ## Compute average tips per day
  total_tips = arrivals * tps
  print(dist_summary(total_tips, 'total tips per day'))
  
  ## Compute total profits plus total tips.
  total_take = total_profit + total_tips 
  return(dist_summary(total_take, 'total net per day'))
Example #26
def main1(i0b,re,eb,n,point,bpa,background):    
    
    parms=np.zeros(11)
    parms[0]=i0b
    parms[1]=re
    parms[2]=eb
    parms[3]=n
    parms[7]=point
    parms[8]=bpa
    parms[10]=background
    generate_model_galaxy(parms)
    imag1=fits.open('test_bulge1.fits')
    model_galaxy=imag1[0].data
    
    imag= fits.open('test_bulge.fits') # make 1d array from 2d galaxy array
    galaxy=imag[0].data
    z= galaxy.flat # make 1d array from 2d galaxy array
    numra.seed(120980)# Set random number seed
    #zerr = np.sqrt(1+0.0*((abs(numra.poisson(z)-z)*c.gain)**2.0+c.rdnoise**2.0)) 
    zerr = np.sqrt((abs(numra.poisson(z)-z)*1.0)**2.0+0.0**2.0) 
   #  print zerr
    
   
    validpix = np.where(zerr != 0.0)
    #if (c.use_mask):
    # numpoints = ma.count(c.maskedgalaxy)
    #else:
    #numpoints = 51 * 51
    
    #chi2= np.sum(((z[validpix]-model_galaxy.flat[validpix])/zerr[validpix])**2.0)/numpoints 
    #print chi2
    avg=np.mean(galaxy-model_galaxy)
    return avg   
Example #27
    def simOneGenRec(self,newNHap,recRate,verbose=False):
        """for now, just assume uniform recombination"""
        nRecEvents=rng.poisson(recRate,newNHap)
        if verbose: print nRecEvents
        newData=np.empty((newNHap,self.nSegsites))

        for i,nre in enumerate(nRecEvents):
            if nre==0:
                newData[i]      =   self.data[rng.randint(0,self.nHap)]
                if verbose: print i,"zero"
            else:
                v               =   rng.randint(0,self.nHap,nre+1)
                recPos          =   np.empty(nre+2);recPos[0]=0;recPos[nre+1]=1
                recPos[1:nre+1] =   np.sort(rng.random(nre))

                nChr            =   np.empty(self.nSegsites)
                for j in range(nre+1):
                    pos         =   np.logical_and(self.segSites>recPos[j],
                                    self.segSites<recPos[j+1])
                    nChr[pos]   =   self.data[v[j]][pos]
                newData[i]      =   nChr
                if verbose: print i,v,recPos

        self.nHap   =   newNHap
        self.data   =   newData
Example #28
def makePoissonNoiseImage(im):
    """Return a Poisson noise image based on im

    Parameters
    ----------
    im : `lsst.afw.image.Image`
        image; the output image has the same dtype, dimensions, and shape
        and its expectation value is the value of ``im`` at each pixel

    Returns
    -------
    noiseIm : `lsst.afw.image.Image`
        Newly constructed image instance, same type as ``im``.

    Notes
    -----
    - Warning: This uses an undocumented numpy API (the documented API
        uses a single float expectation value instead of an array).

    - Uses numpy.random; you may wish to call numpy.random.seed first.
    """
    import numpy.random as rand
    imArr = im.getArray()
    noiseIm = im.Factory(im.getBBox())
    noiseArr = noiseIm.getArray()

    with np.errstate(invalid='ignore'):
        intNoiseArr = rand.poisson(imArr)

    noiseArr[:, :] = intNoiseArr.astype(noiseArr.dtype)
    return noiseIm
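A minimal standalone sketch of the per-pixel call above (an assumption, plain numpy): passing an array as the expectation gives an independent Poisson draw for every pixel, each with that pixel's value as its mean.

import numpy as np

im_arr = np.full((4, 4), 100.0)      # hypothetical expectation image
noise = np.random.poisson(im_arr)    # same shape, one Poisson draw per pixel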
Example #29
def mixture_process(nu, P, tauc, t):
    '''
    Generate correlated spike trains from a mixture process.
    nu = rates of source spike trains
    P = mixture matrix
    tauc = correlation time constant
    t = duration
    Returns a list of (neuron_number,spike_time) to be passed to SpikeGeneratorGroup.
    '''
    n = array(poisson(nu * t)) # number of spikes for each source spike train
    if n.ndim == 0:
        n = array([n])
    # Only non-zero entries:
    nonzero = n.nonzero()[0]
    n = n[nonzero]
    P = array(P.take(nonzero, axis=1))
    nik = binomial(n, P) # number of spikes from k in i
    result = []
    for k in xrange(P.shape[1]):
        spikes = rand(n[k]) * t
        for i in xrange(P.shape[0]):
            m = nik[i, k]
            if m > 0:
                if tauc > 0:
                    selection = sample(spikes, m) + array(exponential(tauc, m))
                else:
                    selection = sample(spikes, m)
                result.extend(zip([i] * m, selection))
    result = [(i,t*second) for i,t in result]
    return result
Example #30
File: fd.py Project: poneill/amic
def rfd_poisson(ps,n):
    """Sample n configs by conditioning on chromosomal occupancy via LeCam's theorem"""
    lam = sum(ps)
    G = len(ps)
    sample_q = lambda:nprandom.poisson(lam) # chromosomal occupancy approximately poisson.
    sampler = make_sampler(ps)
    return [direct_sampling_ps(ps,sample_q(),sampler) for i in xrange(n)]
Example #31
def ucuc_grad_solver(bandit,
                     steps,
                     lam=0.1,
                     init_reward=1.5,
                     uc=0,
                     ucuc=15,
                     heads=100,
                     bs2_heads=100,
                     p=0.3,
                     uc_var=False,
                     ucuc_var=False,
                     bernouli=False):
    avg_reward = init_reward * torch.randn(
        (bandit.batches, bandit.size, heads)).cuda()
    avg_reward = VG(avg_reward)
    pulled_arms = torch.zeros(bandit.batches, steps)
    br = torch.arange(0, bandit.batches).long().cuda()
    sr = torch.arange(0, bandit.size).long().cuda()
    sgd = optim.SGD([avg_reward], lr=lam)
    rand_idx_store = torch.LongTensor(64 * 1024 * 1024).random_(heads).cuda()

    if ucuc == 0:
        bs2_std = V(torch.zeros(1).cuda())

    def get_rand_idx(*sizes):
        count = np.prod(sizes)
        start = rng.randint(0, len(rand_idx_store) - count)
        return rand_idx_store[start:start + count].view(*sizes)

    for i in range(steps):
        r_std = (torch.var if uc_var else torch.std)(avg_reward, -1)
        r_mean = torch.mean(avg_reward, -1)
        if ucuc != 0:
            bs2_idx = get_rand_idx(bandit.batches, bandit.size,
                                   heads * bs2_heads)
            bs2 = avg_reward.data.gather(-1, bs2_idx)
            bs2 = bs2.view(bandit.batches, bandit.size, heads, bs2_heads)
            bs2_std = V((torch.var if ucuc_var else torch.std)(bs2.mean(-1),
                                                               -1))
        arm = (r_mean + uc * r_std + ucuc * bs2_std).data.max(dim=1)[1]

        r = V(FTC(bandit.step(NP(arm))))
        std_poisson = rng.binomial(1, p, (bandit.batches,
                                          heads)) if bernouli else rng.poisson(
                                              p, (bandit.batches, heads))
        std_poisson = V(FTC(std_poisson))
        avg_reward_err = (avg_reward[br, arm] - r.view(-1, 1))**2 * std_poisson

        err = avg_reward_err.sum()
        sgd.zero_grad()
        err.backward()
        sgd.step()

        pulled_arms[:, i] = arm.cpu()

        if i == 0 or (i + 1) % 100 == 0:
            print(i, NP(r_std.mean()), NP(bs2_std.mean()))

    r_mean = torch.mean(avg_reward, -1)
    avg_err = NP(r_mean) - bandit.mean
    print('mae', np.mean(np.abs(avg_err)), 'mse', np.mean(avg_err**2))
    return NP(pulled_arms)
Example #32
from math import e, factorial,log, gamma, sqrt
from matplotlib import pyplot as pt
from numpy.random import geometric, poisson, exponential
from scipy.stats import ks_2samp


f = open("cbdata.txt")
v = [int(x)+1 for x in f.readlines()]
f.close()
lam = 0.5275924670273324
poidata = poisson(lam ,len(v))
expdata = exponential(lam,len(v))
geodata = geometric(lam,len(v))
e= ks_2samp(v,expdata)
d = ks_2samp(v,poidata)
g = ks_2samp(v,geodata)
print('exponential',e)
print('poisson',d)
print('geometric',g)


Example #33
lista = []  #Result of each simulated experiment

for experiment in range(1):

    bins = np.linspace(lead_min_e, ct.muonmass / 2, 10 + 1)
    measured = np.zeros(len(bins) - 1)
    prediction = np.zeros(len(bins) - 1)

    # Here we simulate experimental data ("mock" data)
    for i in range(len(bins) - 1):
        if i == 6:
            measured[i] = rn.poisson(
                [
                    integrate.quad(
                        lambda Enu1: evt.dNdEvee(Enu1, ct.Ue4_2, ct.DelM2),
                        bins[i],
                        bins[i + 1],
                        epsabs=int_err)
                ][0][0] +
                evt.NMuOriginCC(ct.NuMuenergy, ct.Ue4_2, ct.Umu4_2, ct.DelM2))
        else:
            measured[i] = rn.poisson([
                integrate.quad(
                    lambda Enu1: evt.dNdEvee(Enu1, ct.Ue4_2, ct.DelM2),
                    bins[i],
                    bins[i + 1],
                    epsabs=int_err)
            ][0][0])

#Here we define the Chi^2
Example #34
Copyright (c) 2008 University of Otago. All rights reserved.
"""

# Import statements
from pymc3 import *
from numpy import random, array, arange, ones
import theano.tensor as t
# Sample size
n = 100000
# True mean count, given occupancy
theta = 2.1
# True occupancy
pi = 0.4

# Simulate some data
y = array([(random.random() < pi) * random.poisson(theta) for i in range(n)])

model = Model()
with model:
    # Estimated occupancy
    p = Beta('b', 1, 1)

    # Latent variable for occupancy
    z = Bernoulli('z', p, shape=y.shape)

    # Estimated mean count
    theta = Uniform('theta', 0, 100)

    # Poisson likelihood
    z = ZeroInflatedPoisson('z', theta, z)
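A vectorised sketch of the simulation line above (an assumption, not from the original script): a zero-inflated Poisson sample is a Bernoulli occupancy indicator multiplied elementwise by a Poisson count.

import numpy.random as npr

n, pi_true, theta_true = 100000, 0.4, 2.1
y = npr.binomial(1, pi_true, n) * npr.poisson(theta_true, n)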
Example #35
 def _sample_obs(self, mean):
     return poisson(mean)
Example #36
def generate_image(image_parameters):
    """Generate image with particles.
    
    Input:
    image_parameters: list with the values of the image parameters in a dictionary:
        image_parameters['Particle Center X List']
        image_parameters['Particle Center Y List']
        image_parameters['Particle Radius List']
        image_parameters['Particle Bessel Orders List']
        image_parameters['Particle Intensities List']
        image_parameters['Image Half-Size']
        image_parameters['Image Background Level']
        image_parameters['Signal to Noise Ratio']
        image_parameters['Gradient Intensity']
        image_parameters['Gradient Direction']
        image_parameters['Ellipsoid Orientation']
        image_parameters['Ellipticity']
        
    Note: image_parameters is typically obtained from the function get_image_parameters()
        
    Output:
    image: image of the particle [2D numpy array of real numbers between 0 and 1]
    """

    from numpy import meshgrid, arange, ones, zeros, sin, cos, sqrt, clip, array
    from scipy.special import jv as bessel
    from numpy.random import poisson as poisson

    particle_center_x_list = image_parameters['Particle Center X List']
    particle_center_y_list = image_parameters['Particle Center Y List']
    particle_radius_list = image_parameters['Particle Radius List']
    particle_bessel_orders_list = image_parameters[
        'Particle Bessel Orders List']
    particle_intensities_list = image_parameters['Particle Intensities List']
    image_half_size = image_parameters['Image Half-Size']
    image_background_level = image_parameters['Image Background Level']
    signal_to_noise_ratio = image_parameters['Signal to Noise Ratio']
    gradient_intensity = image_parameters['Gradient Intensity']
    gradient_direction = image_parameters['Gradient Direction']
    ellipsoidal_orientation_list = image_parameters['Ellipsoid Orientation']
    ellipticity = image_parameters['Ellipticity']

    ### CALCULATE IMAGE PARAMETERS
    # calculate image full size
    image_size = image_half_size * 2 + 1

    # calculate matrix coordinates from the center of the image
    image_coordinate_x, image_coordinate_y = meshgrid(
        arange(-image_half_size, image_half_size + 1),
        arange(-image_half_size, image_half_size + 1),
        sparse=False,
        indexing='ij')

    ### CALCULATE BACKGROUND
    # initialize the image at the background level
    image_background = ones((image_size, image_size)) * image_background_level

    # add gradient to image background
    if gradient_intensity != 0:
        image_background = image_background + gradient_intensity * (
            image_coordinate_x * sin(gradient_direction) + image_coordinate_y *
            cos(gradient_direction)) / (sqrt(2) * image_size)

    ### CALCULATE IMAGE PARTICLES
    image_particles = zeros((image_size, image_size))
    for particle_center_x, particle_center_y, particle_radius, particle_bessel_orders, particle_intensities, ellipsoidal_orientation in zip(
            particle_center_x_list, particle_center_y_list,
            particle_radius_list, particle_bessel_orders_list,
            particle_intensities_list, ellipsoidal_orientation_list):
        # calculate the radial distance from the center of the particle
        # normalized by the particle radius
        radial_distance_from_particle = sqrt(
            (image_coordinate_x - particle_center_x)**2 +
            (image_coordinate_y - particle_center_y)**2 +
            .001**2) / particle_radius

        # for elliptical particles
        rotated_distance_x = (image_coordinate_x - particle_center_x) * cos(
            ellipsoidal_orientation) + (image_coordinate_y - particle_center_y
                                        ) * sin(ellipsoidal_orientation)
        rotated_distance_y = -(image_coordinate_x - particle_center_x) * sin(
            ellipsoidal_orientation) + (image_coordinate_y - particle_center_y
                                        ) * cos(ellipsoidal_orientation)

        elliptical_distance_from_particle = sqrt(
            (rotated_distance_x)**2 +
            (rotated_distance_y / ellipticity)**2 + .001**2) / particle_radius

        # calculate particle profile
        for particle_bessel_order, particle_intensity in zip(
                particle_bessel_orders, particle_intensities):
            image_particle = 4 * particle_bessel_order**2.5 * (
                bessel(particle_bessel_order,
                       elliptical_distance_from_particle) /
                elliptical_distance_from_particle)**2
            image_particles = image_particles + particle_intensity * image_particle

    # calculate image without noise as background image plus particle image
    image_particles_without_noise = clip(image_background + image_particles, 0,
                                         1)

    ### ADD NOISE
    image_particles_with_noise = poisson(
        image_particles_without_noise *
        signal_to_noise_ratio**2) / signal_to_noise_ratio**2

    return image_particles_with_noise
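A short sketch of the noise model used in the final step above (an assumption, plain numpy): scaling the clean image by SNR**2 before the Poisson draw and dividing it back afterwards gives shot noise whose signal-to-noise ratio equals the requested value at unit pixel intensity.

import numpy as np

clean = np.full((8, 8), 0.5)      # hypothetical noiseless image, values in [0, 1]
snr = 20.0
noisy = np.random.poisson(clean * snr**2) / snr**2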
Example #37
def nsbh_population(rate, t_min, t_max, f_online, d_min, d_max, h_0,\
                    q_0, m_min_1, m_max_1, m_mean_1, m_std_1, m_min_2, \
                    m_max_2, m_mean_2, m_std_2, a_min_1, a_max_1, \
                    a_min_2, a_max_2, seed=None, sample_z=False, \
                    redshift_rate=False, uniform_bh_masses=False, \
                    uniform_ns_masses=False, fixed_count=None, \
                    aligned_spins=False):

    # constrained realisation if desired
    if seed is not None:
        npr.seed(seed)

    # first draw number of events: a Poisson process, with rate 
    # given by the number of events per year per Gpc^3, the 
    # duration of observations and the volume
    if fixed_count is None:
        if sample_z:
            z_min = d2z(d_min, h_0, q_0)
            z_max = d2z(d_max, h_0, q_0)
            vol = volume_z(z_max, z_min, h_0, q_0, \
                           redshift_rate=redshift_rate)
        else:
            vol = volume(d_max, d_min, h_0, q_0)
        n_per_sec = rate * vol / 365.0 / 24.0 / 3600.0 * f_online
        n_exp = n_per_sec * (t_max - t_min)
        n_inj = npr.poisson(n_exp)
    else:
        if sample_z:
            z_min = d2z(d_min, h_0, q_0)
            z_max = d2z(d_max, h_0, q_0)
        n_inj = fixed_count
        n_per_sec = fixed_count / (t_max - t_min)

    # draw merger times consistent with the expected rate. add a 
    # check to ensure that the latest merger time is within the 
    # observation window
    times = np.zeros(n_inj)
    times[0] = t_min
    times[-1] = t_max + 1.0
    while times[-1] >= t_max:
        delta_times = npr.exponential(1.0 / n_per_sec, n_inj - 1)
        for i in range(1, n_inj):
            times[i] = times[i - 1] + delta_times[i - 1]

    # draw distances via an interpolated CDF
    if sample_z:

        z_grid = np.linspace(z_min, z_max, 10000)
        p_z_grid = volume_z(z_grid, z_min, h_0, q_0, \
                            redshift_rate=redshift_rate) / \
                   volume_z(z_max, z_min, h_0, q_0, \
                            redshift_rate=redshift_rate)
        interp = si.interp1d(p_z_grid, z_grid)
        redshifts = interp(npr.uniform(size=n_inj))
        distances = z2d(redshifts, h_0, q_0)

    else:

        d_grid = np.linspace(d_min, d_max, 10000)
        p_d_grid = volume(d_grid, d_min, h_0, q_0) / \
                   volume(d_max, d_min, h_0, q_0)
        interp = si.interp1d(p_d_grid, d_grid)
        distances = interp(npr.uniform(size=n_inj))

    # draw inclinations, colatitudes and longitudes
    incs = np.arccos(-npr.uniform(-1.0, 1.0, size=n_inj))
    colats = np.arcsin(-npr.uniform(-1.0, 1.0, size=n_inj))
    longs = npr.uniform(0.0, 2.0 * np.pi, size=n_inj)

    # draw masses
    if uniform_bh_masses:
        m_1s = npr.uniform(m_min_1, m_max_1, size=n_inj)
    else:
        dist = ss.truncnorm((m_min_1 - m_mean_1) / m_std_1, \
                            (m_max_1 - m_mean_1) / m_std_1, \
                            loc=m_mean_1, scale=m_std_1)
        m_1s = dist.rvs(n_inj)
    if uniform_ns_masses:
        m_2s = npr.uniform(m_min_2, m_max_2, size=n_inj)
    else:
        dist = ss.truncnorm((m_min_2 - m_mean_2) / m_std_2, \
                            (m_max_2 - m_mean_2) / m_std_2, \
                            loc=m_mean_2, scale=m_std_2)
        m_2s = dist.rvs(n_inj)

    # now draw spins: isotropic in direction, uniform in magnitude
    spin_amps = npr.uniform(a_min_1, a_max_1, size=n_inj)
    spin_colats = np.arccos(-npr.uniform(-1.0, 1.0, size=n_inj))
    spin_longs = npr.uniform(0.0, 2.0 * np.pi, size=n_inj)
    a_1_xs = spin_amps * np.sin(spin_colats) * np.cos(spin_longs)
    a_1_ys = spin_amps * np.sin(spin_colats) * np.sin(spin_longs)
    a_1_zs = spin_amps * np.cos(spin_colats)
    if aligned_spins:
        a_1_xs = 0.0
        a_1_ys = 0.0
    spin_amps = npr.uniform(a_min_2, a_max_2, size=n_inj)
    spin_colats = np.arccos(-npr.uniform(-1.0, 1.0, size=n_inj))
    spin_longs = npr.uniform(0.0, 2.0 * np.pi, size=n_inj)
    a_2_xs = spin_amps * np.sin(spin_colats) * np.cos(spin_longs)
    a_2_ys = spin_amps * np.sin(spin_colats) * np.sin(spin_longs)
    a_2_zs = spin_amps * np.cos(spin_colats)
    if aligned_spins:
        a_2_xs = 0.0
        a_2_ys = 0.0

    # finally draw isotropic coa_phase and polarization angles
    coa_phases = npr.uniform(0.0, 2.0 * np.pi, size=n_inj)
    pols = npr.uniform(0.0, 2.0 * np.pi, size=n_inj)

    # store in structured array
    dtypes = [('simulation_id', 'U256'), ('mass1', float), \
              ('mass2', float), ('spin1x', float), ('spin1y', float), \
              ('spin1z', float), ('spin2x', float), \
              ('spin2y', float), ('spin2z', float), ('redshift', float), \
              ('distance', float), ('inclination', float), \
              ('coa_phase', float), ('polarization', float), \
              ('longitude', float), ('latitude', float), \
              ('geocent_end_time', int), ('geocent_end_time_ns', int)]
    data = np.empty((n_inj, ), dtype=dtypes)
    data['simulation_id'] = \
        ['sim_inspiral:simulation_id:{:d}'.format(i) for i in range(n_inj)]
    data['mass1'] = m_1s
    data['mass2'] = m_2s
    data['spin1x'] = a_1_xs
    data['spin1y'] = a_1_ys
    data['spin1z'] = a_1_zs
    data['spin2x'] = a_2_xs
    data['spin2y'] = a_2_ys
    data['spin2z'] = a_2_zs
    data['redshift'] = redshifts
    data['distance'] = distances
    data['inclination'] = incs
    data['coa_phase'] = coa_phases
    data['polarization'] = pols
    data['longitude'] = longs
    data['latitude'] = colats
    data['geocent_end_time'] = [int(math.modf(t)[1]) for t in times]
    data['geocent_end_time_ns'] = [int(math.modf(t)[0] * 1e9) for t in times]

    return 1.0 / n_per_sec, data
    def _sampleFromModel(self,
                         D=200,
                         T=100,
                         K=10,
                         F=12,
                         P=8,
                         avgWordsPerDoc=500):
        '''
        Create a test dataset according to the model
        
        Params:
            T - Vocabulary size, the number of "terms". Must be a square number
            K - Observed topics
            P - Latent features
            F - Observed features
            D - Sample documents (each with associated features)
            avgWordsPerDoc - average number of words per document generated (Poisson)
        
        Returns:
            modelState - a model state object configured for training
            tpcs       - the matrix of per-document topic distribution
            vocab      - the matrix of per-topic word distributions
            docLens    - the vector of document lengths
            X          - the DxF side information matrix
            W          - The DxW word matrix
        '''

        # Generate vocab
        beta = 0.1
        betaVec = np.ndarray((T, ))
        betaVec.fill(beta)
        vocab = np.zeros((K, T))
        for k in range(K):
            vocab[k, :] = rd.dirichlet(betaVec)

        # Generate the shared covariance matrix
        sigT = rd.random((K, K))
        sigT = sigT.dot(sigT)
        sigT.flat[::K + 1] += rd.random((K, )) * 4

        # Just link two topics
        sigT[K // 2, K // 3] = 3
        sigT[K // 3, K // 2] = 3

        sigT[4 * K // 5, K // 5] = 4
        sigT[K // 5, 4 * K // 5] = 4

        # Generate Y, then V, then A
        lfv = 0.1  # latent feature variance (for Y)
        fv = 0.1  # feature variance (for A)

        Y = matrix_normal(np.zeros((K, P)), lfv * np.eye(P), sigT)
        V = matrix_normal(np.zeros((P, F)), fv * np.eye(F), lfv * np.eye(P))
        A = matrix_normal(Y.dot(V), fv * np.eye(F), sigT)

        # Generate the input features. Assume the features are multinomial and sparse
        # (not quite a perfect match for the twitter example: twitter is binary, this
        # may not be)
        featuresDist = [1. / F] * F
        maxNonZeroFeatures = 3

        X = np.zeros((D, F), dtype=np.float32)
        for d in range(D):
            X[d, :] = rd.multinomial(maxNonZeroFeatures, featuresDist)
        X = ssp.csr_matrix(X)

        # Use the features and the matrix A to generate the topics and documents
        tpcs = rowwise_softmax(X.dot(A.T))

        docLens = rd.poisson(avgWordsPerDoc, (D, )).astype(np.float32)
        W = tpcs.dot(vocab)
        W *= docLens[:, np.newaxis]
        W = np.array(W, dtype=np.int32)  # truncate word counts to integers
        W = ssp.csr_matrix(W)

        # Return the initialised model, the true parameter values, and the
        # generated observations
        return tpcs, vocab, docLens, X, W
Example #39
# Program below the class
import numpy as np
from numpy.random import poisson
from matplotlib import pyplot as plt
import bucketHydrology as bh
#plt.ion()

rain = poisson(.2, 100)  # Convert to an import for real data
dt = 1.  # day

# Change these parameters with an input file or script, eventually
# Arbitrary units; will be made real in an actual landscape
res_surface = bh.reservoir(t_efold=1., f_to_discharge=0.5, Hmax=10.)
res_deep = bh.reservoir(t_efold=10., f_to_discharge=1., Hmax=np.inf)

strat_column = [res_surface, res_deep]

watershed = bh.buckets(reservoir_list=strat_column, dt=dt)
watershed.run(rain)
watershed.plot()
Example #40
plt.xlim(0, 1)
plt.ylim(0, 1)

plt.show()
fig.savefig("foo1.pdf", bbox_inches='tight')


def isInUnitSquare(x):
    return (x[0] > 0 and x[0] < 1 and x[1] > 0 and x[1] < 1)


nsLoc = np.empty(shape=(0, 2))
allocs = np.empty(shape=(0), dtype=int)
j = 0
for i in nhPPP:
    N = random.poisson(alpha)
    jitter = random.normal(loc=0, scale=sigma, size=(N, 2))
    nsLoc = np.concatenate((nsLoc, i + jitter))
    allocs = np.concatenate((allocs, np.full(N, j)))
    j += 1
index = [isInUnitSquare(x) for x in nsLoc]
nsLoc = nsLoc[index]
allocs = allocs[index]

fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect('equal')

plt.plot(pointpo.loc[:, 0], pointpo.loc[:, 1], 'o', c="#00743F", alpha=0.5)
plt.plot(nhPPP[:, 0], nhPPP[:, 1], 'o', c="#1E65A7")
plt.plot(nsLoc[:, 0], nsLoc[:, 1], 'ro', ms=3)
Example #41
        #     print
        #     "   <T> in {1, 3, 5}"
        #     print
        #     "  <mu> in {0.01, 0.02, ..., 0.20}"
        #     sys.exit()

        # Related coefficients
        theta = np.ceil(mu * n) / T
        CI_coef = norm.ppf(0.5 + CI_perc / 200.0)
        '''
        Importance sampling
        '''

        # Store variables
        Y_total = np.zeros(n_MC)
        m_MC = rd.poisson(np.ceil(mu * n), n_MC)
        #t_start = time.time()

        print('here. note n_Mc = {}'.format(n_MC))

        # Do Monte Carlo IS
        for i_MC in range(n_MC):

            # # Print progress
            # if (i_MC + 1) % (n_MC / 100) == 0:
            #     print
            #     "{0: >3}% done in {1}".format(
            #         int(100 * (i_MC + 1) / n_MC), timedelta(seconds=time.time() - t_start))

            # Step 1 - Generate S
Example #42
          │                   │   │     │                       │   │
7: ───────█───────────────────█───╱7╲───█───────────────────────█───3↦1───
    """.strip()
    assert actual_text_diagram == expected_text_diagram


def test_swap_network_init_error():
    with pytest.raises(ValueError):
        SwapNetworkGate(())
    with pytest.raises(ValueError):
        SwapNetworkGate((3, ))


@pytest.mark.parametrize(
    'part_lens, acquaintance_size',
    [[[l + 1 for l in poisson(size=n_parts, lam=lam)],
      poisson(4)] for n_parts, lam in product(range(2, 20, 3), range(1, 4))])
def test_swap_network_permutation(part_lens, acquaintance_size):
    n_qubits = sum(part_lens)
    gate = SwapNetworkGate(part_lens, acquaintance_size)

    expected_permutation = {
        i: j
        for i, j in zip(range(n_qubits), reversed(range(n_qubits)))
    }
    assert gate.permutation(n_qubits) == expected_permutation


def test_swap_network_permutation_error():
    gate = SwapNetworkGate((1, 1))
    with pytest.raises(ValueError):
    def _testInferenceFromHandcraftedExampleWithKEqualingQ(self):
        print("Fully handcrafted example, K=Q")
        rd.seed(0xC0FFEE)  # Global init for repeatable test

        T = 100  # Vocabulary size, the number of "terms". Must be a square number
        Q = 6  # Topics: This cannot be changed without changing the code that generates the vocabulary
        K = 6  # Observed topics
        P = 8  # Features
        F = 12  # Observed features
        D = 200  # Sample documents (each with associated features)

        avgWordsPerDoc = 500

        # The vocabulary. Presented graphically there are two with horizontal bands
        # (upper, lower); two with vertical bands (left, right); and two with
        # concentric bands (inside, outside)
        vocab = makeSixTopicVocab(T)

        # Create our (sparse) features X, then our topic proportions ("tpcs")
        # then our word counts W
        lmda = np.zeros((D, K))
        X = np.zeros((D, F))
        for d in range(D):
            for _ in range(3):
                lmda[d, rd.randint(K)] += 1. / 3
            for _ in range(int(F / 3)):
                X[d, rd.randint(F)] += 1

        A = rd.random((K, F))
        X = lmda.dot(la.pinv(A).T)
        X = ssp.csr_matrix(X)

        tpcs = lmda

        docLens = rd.poisson(avgWordsPerDoc, (D, ))
        W = tpcs.dot(vocab)
        W *= docLens[:, np.newaxis]
        W = np.array(W, dtype=np.int32)  # truncate word counts to integers
        W = ssp.csr_matrix(W)

        #
        # Now finally try to train the model
        #
        modelState = newVbModelState(K, Q, F, P, T)

        (trainedState, queryState) = train(modelState,
                                           X,
                                           W,
                                           logInterval=1,
                                           iterations=1)
        tpcs_inf = rowwise_softmax(safe_log(queryState.expLmda))
        W_inf = np.array(tpcs_inf.dot(trainedState.vocab) *
                         queryState.docLen[:, np.newaxis],
                         dtype=np.int32)
        priorReconsError = np.sum(np.square(W - W_inf)) / D

        (trainedState, queryState) = train(modelState,
                                           X,
                                           W,
                                           logInterval=1,
                                           plotInterval=100,
                                           iterations=130)
        tpcs_inf = rowwise_softmax(safe_log(queryState.expLmda))
        W_inf = np.array(tpcs_inf.dot(trainedState.vocab) *
                         queryState.docLen[:, np.newaxis],
                         dtype=np.int32)

        print("Model Driven: Prior Reconstruction Error: %f" %
              (priorReconsError, ))
        print("Model Driven: Final Reconstruction Error: %f" %
              (np.sum(np.square(W - W_inf)) / D, ))

        print("End of Test")
    def _testInferenceFromHandcraftedExample(self):
        print("Partially hand-crafted example")
        rd.seed(0xC0FFEE)  # Global init for repeatable test

        T = 100  # Vocabulary size, the number of "terms". Must be a square number
        Q = 6  # Topics: This cannot be changed without changing the code that generates the vocabulary
        K = 10  # Observed topics
        P = 8  # Features
        F = 12  # Observed features
        D = 200  # Sample documents (each with associated features)

        avgWordsPerDoc = 500

        # Determine what A, U, Y and V should be
        U = rd.random((K, Q))
        Y = rd.random((Q, P))
        V = rd.random((F, P))
        A = U.dot(Y).dot(V.T)

        # The vocabulary. Presented graphically there are two with horizontal bands
        # (upper, lower); two with vertical bands (left, right); and two with
        # concentric bands (inside, outside)
        vocab = makeSixTopicVocab(T)

        # Create our (sparse) features X, then our topic proportions ("tpcs")
        # then our word counts W
        X_low = np.array([1 if rd.random() < 0.3 else 0
                          for _ in range(D * P)]).reshape(D, P)
        X = ssp.csr_matrix(X_low.dot(V.T))

        lmda_low = X_low.dot(Y.T)
        print("lmda_low.mean() = %f" % (lmda_low.mean()))
        tpcs = rowwise_softmax(lmda_low)

        docLens = rd.poisson(avgWordsPerDoc, (D, ))
        W = tpcs.dot(vocab)
        W *= docLens[:, np.newaxis]
        W = np.array(W, dtype=np.int32)  # truncate word counts to integers
        W = ssp.csr_matrix(W)

        #
        # Now finally try to train the model
        #
        modelState = newVbModelState(K, Q, F, P, T)
        (trainedState, queryState) = train(modelState,
                                           X,
                                           W,
                                           logInterval=1,
                                           plotInterval=10,
                                           iterations=10)

        tpcs_inf = rowwise_softmax(np.log(queryState.expLmda))
        W_inf = np.array(tpcs_inf.dot(trainedState.vocab) *
                         queryState.docLen[:, np.newaxis],
                         dtype=np.int32)

        print("Handcrafted Test-Case")
        print(
            "====================================================================="
        )
        print(
            "Average, squared, per-element difference between true and estimated:"
        )
        print("    Topic Distribution:    %f" %
              (np.sum(np.square(tpcs.dot(U.T) - tpcs_inf)) / len(tpcs), ))
        print("    Vocab Distribution:    %f" %
              (np.sum(np.square(U.dot(vocab) - trainedState.vocab)) /
               len(vocab), ))
        print(
            "Average absolute difference between true and reconstructed documents"
        )
        print("    Documents:             %f" %
              (np.sum(np.abs(W.todense() - W_inf)) / np.sum(W.todense()), ))

        print("End of Test")
Example #45
0
    def game_loop(self):
        '''This is called every game tick.  You call this in a loop
        until it returns false, which means you hit a tree trunk, fell
        off the bottom of the screen, or jumped off the top of the
        screen.  It calls the action and reward callbacks.'''

        # Render the background.
        self.screen.blit(self.background_img, (self.iter, 0))
        if self.iter < self.background_img.get_width() - self.screen_width:
            self.screen.blit(self.background_img,
                             (self.iter + self.background_img.get_width(), 0))

        # Perhaps generate a new tree.
        if self.next_tree <= 0:
            self.next_tree = self.tree_img.get_width() + int(
                npr.geometric(1.0 / self.tree_mean))
            self.trees.append({
                'x':
                self.screen_width + 1,
                'y':
                int((0.3 + npr.rand() * 0.65) *
                    (self.screen_height - self.tree_gap)),
                's':
                False
            })
        # Process input events.
        for event in pg.event.get():
            if event.type == pg.QUIT:
                sys.exit()
            elif self.action_fn is None and event.type == pg.KEYDOWN:
                self.vel = npr.poisson(self.impulse)
                self.hook = self.screen_width

        # Perhaps take an action via the callback.
        if self.action_fn is not None and self.action_fn(self.get_state()):
            self.vel = npr.poisson(self.impulse)
            self.hook = self.screen_width

        # Eliminate trees that have moved off the screen.
        self.trees = [x for x in self.trees
                      if x['x'] > -self.tree_img.get_width()]

        # Monkey dynamics
        self.monkey_loc -= self.vel
        self.vel -= self.gravity

        # Current monkey bounds.
        monkey_top = self.monkey_loc - self.monkey_img.get_height() / 2
        monkey_bot = self.monkey_loc + self.monkey_img.get_height() / 2

        # Move trees to the left, render and compute collision.
        self.next_tree -= self.horz_speed
        edge_hit = False
        tree_hit = False
        pass_tree = False
        for tree in self.trees:
            tree['x'] -= self.horz_speed

            # Render tree.
            self.screen.blit(self.tree_img, (tree['x'], self.tree_offset))

            # Render gap in tree.
            self.screen.blit(self.background_img, (tree['x'], tree['y']),
                             (tree['x'] - self.iter, tree['y'],
                              self.tree_img.get_width(), self.tree_gap))
            if self.iter < self.background_img.get_width() - self.screen_width:
                self.screen.blit(
                    self.background_img, (tree['x'], tree['y']),
                    (tree['x'] - (self.iter + self.background_img.get_width()),
                     tree['y'], self.tree_img.get_width(), self.tree_gap))

            trunk_left = tree['x'] + 215
            trunk_right = tree['x'] + 290
            trunk_top = tree['y']
            trunk_bot = tree['y'] + self.tree_gap

            # Compute collision.
            if (((trunk_left <
                  (self.monkey_left + 15)) and (trunk_right >
                                                (self.monkey_left + 15)))
                    or ((trunk_left < self.monkey_right) and
                        (trunk_right > self.monkey_right))):
                #pg.draw.rect(self.screen, (255,0,0), (trunk_left, trunk_top, trunk_right-trunk_left, trunk_bot-trunk_top), 1)
                #pg.draw.rect(self.screen, (255,0,0), (self.monkey_left+15, monkey_top, self.monkey_img.get_width()-15, monkey_bot-monkey_top), 1)
                if (monkey_top < trunk_top) or (monkey_bot > trunk_bot):
                    tree_hit = True

            # Keep score.
            if not tree['s'] and (self.monkey_left + 15) > trunk_right:
                tree['s'] = True
                self.score += 1
                global maxscore
                if self.score > maxscore:
                    maxscore = self.score
                pass_tree = True
                if self.sound:
                    self.blop_snd.play()

        # Monkey swings down on a vine.
        if self.vel < 0:
            pg.draw.line(self.screen, (92, 64, 51),
                         (self.screen_width / 2 + 20, self.monkey_loc - 25),
                         (self.hook, 0), 4)

        # Render the monkey.
        self.screen.blit(self.monkey_img, (self.monkey_left, monkey_top))

        # Fail on hitting top or bottom.
        if monkey_bot > self.screen_height or monkey_top < 0:
            edge_hit = True

        # Render the score
        score_text = self.font.render("Score: %d" % (self.score), 1,
                                      (230, 40, 40))
        self.screen.blit(score_text, score_text.get_rect())
        global maxscore
        maxscore_text = self.font.render("MaxScore: %d" % (maxscore), 1,
                                         (230, 40, 40))
        textpos = maxscore_text.get_rect()
        self.screen.blit(
            maxscore_text,
            (self.screen_width - 2.6 * textpos[2], 0, textpos[2], textpos[3]))

        if self.text is not None:
            text = self.font.render(self.text, 1, (230, 40, 40))
            textpos = text.get_rect()
            self.screen.blit(
                text,
                (self.screen_width - textpos[2], 0, textpos[2], textpos[3]))

        # Render the display.
        pg.display.update()

        # If failed, play sound and exit.  Also, assign rewards.
        if edge_hit:
            if self.sound:
                ch = self.screech_snd.play()
                while ch.get_busy():
                    pg.time.delay(500)
            if self.reward_fn is not None:
                self.reward_fn(self.edge_penalty)
            if self.action_fn is not None:
                self.action_fn(self.get_state())
            return False
        if tree_hit:
            if self.sound:
                ch = self.screech_snd.play()
                while ch.get_busy():
                    pg.time.delay(500)
            if self.reward_fn is not None:
                self.reward_fn(self.tree_penalty)
            if self.action_fn is not None:
                self.action_fn(self.get_state())
            return False

        if self.reward_fn is not None:
            if pass_tree:
                self.reward_fn(self.tree_reward)
            else:
                self.reward_fn(0.0)

        # Wait just a bit.
        pg.time.delay(self.tick_length)

        # Move things.
        self.hook -= self.horz_speed
        self.iter -= self.horz_speed
        if self.iter < -self.background_img.get_width():
            self.iter += self.background_img.get_width()

        return True
Example #46
0
 def randomPCR_with_ErrorsAndBias_FASTv2(self, slctdSeqs, seqLength,
                                         pcrCycleNum, pcrYld, errorRate,
                                         aptamerSeqs, alphabetSet,
                                         distance):
     # initialize Mutation object from class
     mut = Mutation(seqLength=seqLength,
                    errorRate=errorRate,
                    pcrCycleNum=pcrCycleNum,
                    pcrYld=pcrYld)
     totalseqs = 0
     uniqSeqs = 0
     #compute total seq num, unique seq num, and transfer info to x
     for i, seqIdx in enumerate(slctdSeqs):
         uniqSeqs += 1
         totalseqs += int(slctdSeqs[seqIdx][0])
     #initialize matrix to hold info for amplified pool
     x = np.zeros((uniqSeqs, pcrCycleNum + 4))
     for i, seqIdx in enumerate(slctdSeqs):
         x[i][0] = seqIdx
         x[i][1] = slctdSeqs[seqIdx][0]
         x[i][2] = slctdSeqs[seqIdx][1]
         x[i][3] = slctdSeqs[seqIdx][2]
     print(
         "number of unique seqs in selected pool prior to amplification: " +
         str(uniqSeqs))
     print("number of seqs in selected pool prior to amplification: " +
           str(totalseqs))
     # calculate probabilities of different possible mutation numbers
     mutNumProbs = mut.get_mutation_probabilities_original()
     # compute a discrete distribution of mutation numbers
     mutDist = mut.get_mutation_distribution_original()
     print("Discrete Mutation Distribution has been computed")
     # PCR Amplification
     totalseqs = 0
     # initialize dictionary to keep info on seqs to be mutated
     mutatedPool = {}
     #initialize matrix to hold info for mutation pool
     y = np.zeros((uniqSeqs, seqLength + 1))
     # keep track of sequence count after each pcr cycle (except last one)
     seqPop = np.zeros(pcrCycleNum)
     # compute cycle number probabilities for this seq
     cycleNumProbs = np.zeros(pcrCycleNum)
     print("Amplification has started...")
     # for each sequence in the selected pool
     for i, seqIdx in enumerate(slctdSeqs):
         # random PCR with bias using brute force
         for n in xrange(pcrCycleNum):
             # sequence count after n cycles
             seqPop[n] = x[i][1]
             # amplify count using initial count, polymerase yield, and bias score
             x[i][1] += int(binom(x[i][1], pcrYld + x[i][3]))
         # compute cycle number probabilities
         for s, seqNum in enumerate(seqPop):
             cycleNumProbs[s] = seqNum / np.sum(seqPop)
         # transfer info to x
         for j, cycleNumProb in enumerate(cycleNumProbs):
             x[i][j + 4] = cycleNumProb
         # update total num of seqs
         totalseqs += x[i][1]
         # transfer info from x to selection pool
         slctdSeqs[int(x[i][0])] = x[i][1:]
         #transfer seq index to matrix y
         y[i][0] = x[i][0]
         #if accumulated seq count is greater than 10,000
         if np.sum(seqPop) > 10000:
             # for each possible number of mutations in any seq copy (1-seqLength)
             for mutNum in xrange(seqLength):
                 #approximate the proportion of copies that will be mutated using
                 #corresponding probability p(M=mutNum)
                 y[i][mutNum + 1] = mutNumProbs[mutNum + 1] * np.sum(seqPop)
         # if seq count is less than 10,000
         else:
             # draw random mutNum from the mutation distribution for each seq copy
             muts = poisson(errorRate * seqLength,
                            int(np.sum(seqPop)))  #SLOW STEP
             # remove all drawn numbers equal to zero
             muts = muts[muts != 0]
             # for each non-zero mutation number
             for mutNum in muts:
                 #increment copy number to be mutated
                 y[i][mutNum + 1] += 1
     del (x)
     gc.collect()
     print("Amplification carried out")
     print("Sequence selection for mutation has started...")
     #remove all mutation numbers with zero copies to be mutated
     y = y[y[:, 1] != 0]
     #for each seq to be mutated
     for mutInfo in y:
         #add to mutation pool with its corresponding mutation info
         mutatedPool[int(mutInfo[0])] = mutInfo[1:][mutInfo[1:] != 0]
     del (y)
     gc.collect()
     print("Mutation selection has been carried out")
     print("Mutant generation has started...")
     if (distance == "hamming"):
         # generate mutants and add to the amplified sequence pool
         amplfdSeqs = mut.generate_mutants_1D(mutatedPool=mutatedPool,
                                              amplfdSeqs=slctdSeqs,
                                              aptamerSeqs=aptamerSeqs,
                                              alphabetSet=alphabetSet)
         del (slctdSeqs)
         del (mutatedPool)
         gc.collect()
     elif (distance == "basepair"):
         amplfdSeqs = mut.generate_mutants_2D(mutatedPool=mutatedPool,
                                              amplfdSeqs=slctdSeqs,
                                              aptamerSeqs=aptamerSeqs,
                                              alphabetSet=alphabetSet)
         del (slctdSeqs)
         del (mutatedPool)
         gc.collect()
     elif (distance == "loop"):
         amplfdSeqs = mut.generate_mutants_loop(mutatedPool=mutatedPool,
                                                amplfdSeqs=slctdSeqs,
                                                aptamerSeqs=aptamerSeqs,
                                                alphabetSet=alphabetSet)
         del (slctdSeqs)
         del (mutatedPool)
         gc.collect()
     else:
         print("argument given for distance is invalid")
         return
     print("Mutation has been carried out")
     return amplfdSeqs
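
# Small standalone sketch (toy values, not taken from the class above) of the mutation
# model used in the "else" branch: each amplified copy draws Poisson(errorRate * seqLength)
# mutations, and copies that draw zero are dropped because they need no further processing.
import numpy as np
from numpy.random import poisson

error_rate, seq_length, n_copies = 0.005, 40, 1000   # assumed toy values
muts = poisson(error_rate * seq_length, n_copies)
muts = muts[muts != 0]
print("copies carrying at least one mutation:", muts.size, "of", n_copies)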
Example #47
0
def zdist(zmin,
          zmax,
          time=365.25,
          area=1.,
          ratefunc=lambda z: 1.e-4,
          cosmo=FlatLambdaCDM(H0=70.0, Om0=0.3)):
    """Generate a distribution of redshifts.

    Generates the correct redshift distribution and number of SNe, given
    the input volumetric SN rate, the cosmology, and the observed area and
    time.

    Parameters
    ----------
    zmin, zmax : float
        Minimum and maximum redshift.
    time : float, optional
        Time in days (default is 1 year).
    area : float, optional
        Area in square degrees (default is 1 square degree). ``time`` and
        ``area`` are only used to determine the total number of SNe to
        generate.
    ratefunc : callable
        A callable that accepts a single float (redshift) and returns the
        comoving volumetric rate at each redshift in units of yr^-1 Mpc^-3.
        The default is a function that returns ``1.e-4``.
    cosmo : `~astropy.cosmology.Cosmology`, optional
        Cosmology used to determine volume. The default is a FlatLambdaCDM
        cosmology with ``Om0=0.3``, ``H0=70.0``.

    Examples
    --------

    Loop over the generator:

    >>> for z in zdist(0.0, 0.25):
    ...     print(z)
    ...
    0.151285827576
    0.204078030595
    0.201009196731
    0.181635472172
    0.17896188781
    0.226561237264
    0.192747368762

    This tells us that in one observer-frame year, over 1 square
    degree, 7 SNe occurred at redshifts below 0.25 (given the default
    volumetric SN rate of 10^-4 SNe yr^-1 Mpc^-3). The exact number is
    drawn from a Poisson distribution.

    Generate the full list of redshifts immediately:

    >>> zlist = list(zdist(0., 0.25))

    Define a custom volumetric rate:

    >>> def snrate(z):
    ...     return 0.5e-4 * (1. + z)
    ...
    >>> zlist = list(zdist(0., 0.25, ratefunc=snrate))

    """

    # Get comoving volume in each redshift shell.
    z_bins = 100  # Good enough for now.
    z_binedges = np.linspace(zmin, zmax, z_bins + 1)
    z_binctrs = 0.5 * (z_binedges[1:] + z_binedges[:-1])
    sphere_vols = cosmo.comoving_volume(z_binedges).value
    shell_vols = sphere_vols[1:] - sphere_vols[:-1]

    # SN / (observer year) in shell
    shell_snrate = np.array([
        shell_vols[i] * ratefunc(z_binctrs[i]) / (1. + z_binctrs[i])
        for i in range(z_bins)
    ])

    # SN / (observer year) within z_binedges
    vol_snrate = np.zeros_like(z_binedges)
    vol_snrate[1:] = np.add.accumulate(shell_snrate)

    # Create a ppf (inverse cdf). We'll use this later to get
    # a random SN redshift from the distribution.
    snrate_cdf = vol_snrate / vol_snrate[-1]
    snrate_ppf = Spline1d(snrate_cdf, z_binedges, k=1)

    # Total number of SNe to simulate.
    nsim = vol_snrate[-1] * (time / 365.25) * (area / WHOLESKY_SQDEG)

    for i in range(random.poisson(nsim)):
        yield float(snrate_ppf(random.random()))
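
# Minimal standalone sketch (a toy linear CDF, not the cosmology above) of the sampling
# pattern zdist() uses: draw the number of events from Poisson(expected count), then map
# uniform variates through an inverse-CDF spline to get each redshift.
import numpy as np
from numpy import random
from scipy.interpolate import InterpolatedUnivariateSpline as Spline1d

z_edges = np.linspace(0.0, 0.25, 101)
cdf = z_edges / z_edges[-1]                # assumed toy CDF, uniform in redshift
ppf = Spline1d(cdf, z_edges, k=1)
nsim = 7.0                                 # assumed expected number of SNe
zs = [float(ppf(random.random())) for _ in range(random.poisson(nsim))]
print(zs)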
Example #48
0
    def game_loop(self):
        '''This is called every game tick.  You call this in a loop
        until it returns false, which means you hit a tree trunk, fell
        off the bottom of the screen, or jumped off the top of the
        screen.  It calls the action and reward callbacks.'''

        # Perhaps generate a new tree.
        if self.next_tree <= 0:
            self.next_tree = self.tree_width * 5 + int(
                npr.geometric(1.0 / self.tree_mean))
            self.trees.append({
                'x':
                self.screen_width + 1,
                'y':
                int((0.3 + npr.rand() * 0.65) *
                    (self.screen_height - self.tree_gap)),
                's':
                False
            })

        # Perhaps take an action via the callback.
        if self.action_fn is not None and self.action_fn(self.get_state()):
            self.vel = npr.poisson(self.impulse)

        # Eliminate trees that have moved off the screen.
        self.trees = [x for x in self.trees if x['x'] > -self.tree_width]

        # Monkey dynamics
        self.monkey_loc -= self.vel
        self.vel -= self.gravity

        # Current monkey bounds.
        monkey_top = self.monkey_loc - self.monkey_height / 2
        monkey_bot = self.monkey_loc + self.monkey_height / 2

        # Move trees to the left, render and compute collision.
        self.next_tree -= self.horz_speed
        edge_hit = False
        tree_hit = False
        pass_tree = False
        for tree in self.trees:
            tree['x'] -= self.horz_speed

            trunk_left = tree['x']
            trunk_right = tree['x'] + self.tree_width
            trunk_top = tree['y']
            trunk_bot = tree['y'] + self.tree_gap

            # Compute collision.
            if (((trunk_left <
                  (self.monkey_left + 15)) and (trunk_right >
                                                (self.monkey_left + 15)))
                    or ((trunk_left < self.monkey_right) and
                        (trunk_right > self.monkey_right))):
                if (monkey_top < trunk_top) or (monkey_bot > trunk_bot):
                    tree_hit = True

            # Keep score.
            if not tree['s'] and (self.monkey_left + 15) > trunk_right:
                tree['s'] = True
                self.score += 1
                pass_tree = True

        # Fail on hitting top or bottom.
        if monkey_bot > self.screen_height or monkey_top < 0:
            edge_hit = True

        # If failed, exit.  Also, assign rewards.
        if edge_hit:
            if self.reward_fn is not None:
                self.reward_fn(self.edge_penalty)
            if self.action_fn is not None:
                self.action_fn(self.get_state())
            return False
        if tree_hit:
            if self.reward_fn is not None:
                self.reward_fn(self.tree_penalty)
            if self.action_fn is not None:
                self.action_fn(self.get_state())
            return False

        if self.reward_fn is not None:
            if pass_tree:
                self.reward_fn(self.tree_reward)
            else:
                self.reward_fn(0.0)

        return True


def get_image(image_parameters):
    """Generate image with particles.
    Input:
    image_parameters: list with the values of the image parameters in a dictionary:
        image_parameters['Particle Center X List']
        image_parameters['Particle Center Y List']
        image_parameters['Particle Radius List']
        image_parameters['Particle Bessel Orders List']
        image_parameters['Particle Intensities List']
        image_parameters['Image Size']
        image_parameters['Image Background Level']
        image_parameters['Signal to Noise Ratio']
        image_parameters['Gradient Intensity']
        image_parameters['Gradient Direction']
        image_parameters['Ellipsoid Orientation']
        image_parameters['Ellipticity']
        
    Note: image_parameters is typically obtained from the function get_image_parameters()
        
    Output:
    image: image of the particle [2D numpy array of real numbers between 0 and 1]
    """

    from numpy import meshgrid, arange, ones, zeros, sin, cos, sqrt, clip, array, ceil, mean, amax, asarray, amin
    from numpy.random import normal, poisson
    from math import e
    from scipy.special import jv as bessel
    import warnings

    particle_center_x_list = image_parameters['Particle Center X List']
    particle_center_y_list = image_parameters['Particle Center Y List']
    particle_radius_list = image_parameters['Particle Radius List']
    particle_bessel_orders_list = image_parameters[
        'Particle Bessel Orders List']
    particle_intensities_list = image_parameters['Particle Intensities List']
    image_size = image_parameters['Image Size']
    image_background_level = image_parameters['Image Background Level']
    signal_to_noise_ratio = image_parameters['Signal to Noise Ratio']
    gradient_intensity = image_parameters['Gradient Intensity']
    gradient_direction = image_parameters['Gradient Direction']
    ellipsoidal_orientation_list = image_parameters['Ellipsoid Orientation']
    ellipticity = image_parameters['Ellipticity']

    ### CALCULATE BACKGROUND
    # initialize the image at the background level
    image_background = ones((image_size, image_size)) * image_background_level

    # calculate matrix coordinates from the center of the image
    image_coordinate_x, image_coordinate_y = meshgrid(arange(0, image_size),
                                                      arange(0, image_size),
                                                      sparse=False,
                                                      indexing='ij')

    # add gradient to image background
    image_background = image_background + gradient_intensity * (
        image_coordinate_x * sin(gradient_direction) +
        image_coordinate_y * cos(gradient_direction)) / (sqrt(2) * image_size)

    ### CALCULATE IMAGE PARTICLES
    image_particles = zeros((image_size, image_size))
    particle_intensities_for_SNR = []

    # calculate the particle profiles of all particles and add them to image_particles

    for particle_center_x, particle_center_y, particle_radius, particle_bessel_orders, particle_intensities, ellipsoidal_orientation in zip(
            particle_center_x_list, particle_center_y_list,
            particle_radius_list, particle_bessel_orders_list,
            particle_intensities_list, ellipsoidal_orientation_list):
        # calculate coordinates of cutoff window
        start_x = int(max(ceil(particle_center_x - particle_radius * 3), 0))
        stop_x = int(
            min(ceil(particle_center_x + particle_radius * 3), image_size))
        start_y = int(max(ceil(particle_center_y - particle_radius * 3), 0))
        stop_y = int(
            min(ceil(particle_center_y + particle_radius * 3), image_size))

        # calculate matrix coordinates from the center of the image
        image_coordinate_x, image_coordinate_y = meshgrid(
            arange(start_x, stop_x),
            arange(start_y, stop_y),
            sparse=False,
            indexing='ij')

        # calculate the elliptical distance from the center of the particle normalized by the particle radius
        rotated_distance_x = (image_coordinate_x - particle_center_x) * cos(
            ellipsoidal_orientation) + (image_coordinate_y - particle_center_y
                                        ) * sin(ellipsoidal_orientation)
        rotated_distance_y = -(image_coordinate_x - particle_center_x) * sin(
            ellipsoidal_orientation) + (image_coordinate_y - particle_center_y
                                        ) * cos(ellipsoidal_orientation)

        # The factor 2 is because the particle radius is defined as the point where the intensity reaches 1/3 of
        # the intensity in the middle of the particle when Bessel order = 0. When Bessel order = 1, the middle of
        # the particle is black, and at the radius the intensity is approximately at its maximum. For higher
        # Bessel orders, there is no clear definition of the radius.
        elliptical_distance_from_particle = 2 * sqrt(
            (rotated_distance_x)**2 +
            (rotated_distance_y / ellipticity)**2 + .001**2) / particle_radius

        # calculate particle profile.
        for particle_bessel_order, particle_intensity in zip(
                particle_bessel_orders, particle_intensities):
            image_particle = 4 * particle_bessel_order**2.5 * (
                bessel(particle_bessel_order,
                       elliptical_distance_from_particle) /
                elliptical_distance_from_particle)**2
            image_particles[start_x:stop_x, start_y:stop_y] = image_particles[
                start_x:stop_x,
                start_y:stop_y] + particle_intensity * image_particle

    # calculate image without noise as background image plus particle image
    image_particles_without_noise = clip(image_background + image_particles, 0,
                                         1)

    ### ADD NOISE
    image_particles_with_noise = poisson(
        image_particles_without_noise *
        signal_to_noise_ratio**2) / signal_to_noise_ratio**2

    cut_off_pixels = tuple([image_particles_with_noise > 1])

    percentage_of_pixels_that_were_cut_off = image_particles_with_noise[
        cut_off_pixels].size / (image_size**2) * 100

    # warn if there is a pixel brighter than 1
    def custom_formatwarning(msg, *args, **kwargs):
        # ignore everything except the message
        return str(msg) + '\n'

    if percentage_of_pixels_that_were_cut_off > 0:
        warnings.formatwarning = custom_formatwarning
        warn_message = (
            "Warning: %.5f%% of the pixels in the generated image are brighter than the 1 (%d pixels)! "
            "These were cut-off to the max value 1. Consider adjusting your gradient intensity, particle "
            "intensity, background level, or signal to noise ratio." %
            (percentage_of_pixels_that_were_cut_off,
             image_particles_with_noise[cut_off_pixels].size))
        warnings.warn(warn_message)

    # print("After poisson: Min is %.4f, Max is %.4f" % (amin(image_particles_with_noise),
    #                                                    amax(image_particles_with_noise)))

    return clip(image_particles_with_noise, 0, 1)
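
# Minimal self-contained sketch (a toy 4x4 image and an assumed SNR of 10) of the noise
# model used above: scale the noise-free image by SNR**2, draw Poisson counts, and divide
# by SNR**2 again, so the per-pixel signal-to-noise is roughly the requested value.
import numpy as np
from numpy.random import poisson

toy_image = np.full((4, 4), 0.5)           # noise-free image with values in [0, 1]
snr = 10.0
noisy = poisson(toy_image * snr**2) / snr**2
print(np.clip(noisy, 0, 1))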
Example #50
0
 def run_simulation(self):
     simulation_time = int(
         input('For how long, in hours, should the simulation be run? '))
     simulation_limit = simulation_time * 3600
     q = queue_adt.Queue()
     nb_of_customers = 0
     cumulative_queue_length = 0
     cumulative_waiting_length = 0
     # A service time of -1 indicates that nobody is being served, which can only happen
     # if the queue is empty, possibly following the departure of the last served customer.
     service_time = -1
     cumulative_waiting_time = 0
     cumulative_waiting_and_serving_time = 0
     for simulation_tick in range(simulation_limit):
         new_customers = poisson(1 / self.average_time_between_two_arrivals)
         nb_of_customers += new_customers
         for _ in range(new_customers):
             customer = Customer()
             customer.arrival_time = simulation_tick
             customer.service_time = round(
                 exponential(self.average_service_time))
             q.enqueue(customer)
         # A new customer can now start being served.
         if service_time == -1 and not q.is_empty():
             service_time = q.peek_at_front().service_time
             cumulative_waiting_time += simulation_tick - q.peek_at_front(
             ).arrival_time
         # If the customer at the front has been served, then she should now quit the queue
         # and all customers that follow her and that need no time to be served should follow
         # her (these guys have been queuing for a while just to find out that they do not
         # have an indispensable piece of information when they are about to be served;
         # surely, they are pretty upset when they quit the queue).
         while service_time == 0:
             cumulative_waiting_and_serving_time += simulation_tick - q.dequeue(
             ).arrival_time
             if not q.is_empty():
                 service_time = q.peek_at_front().service_time
                 cumulative_waiting_time += simulation_tick - q.peek_at_front(
                 ).arrival_time
             else:
                 service_time = -1
         cumulative_queue_length += len(q)
         if not q.is_empty():
             service_time -= 1
             cumulative_waiting_length += len(q) - 1
     if nb_of_customers:
         print(
             f'Number of customers who have joined the queue: {round(nb_of_customers, 2)}'
         )
         print(
             'Average number of customers in queue including those being served: '
             f'{round(cumulative_queue_length / simulation_limit, 2)}')
         print('Average number of customers in queue waiting to be served: '
               f'{round(cumulative_waiting_length / simulation_limit, 2)}')
         print('Average waiting time, including serving time:', end='')
         self.display_time(cumulative_waiting_and_serving_time /
                           nb_of_customers)
         print('Average waiting time, excluding serving time:', end='')
         self.display_time(cumulative_waiting_time / nb_of_customers)
     else:
         print('No one has joined the queue; a very quiet day...')
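
# Standalone sketch (toy numbers, not the original class) of the arrival model used
# above: if a customer arrives every `mean_gap` ticks on average, the number of new
# customers in one tick is drawn as Poisson(1 / mean_gap).
from numpy.random import poisson

mean_gap = 30                               # assumed average ticks between two arrivals
arrivals = sum(poisson(1 / mean_gap) for _ in range(3600))
print(arrivals)                             # close to 3600 / mean_gap = 120 on average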
Example #51
0
# Seed the RNG
rng.seed(0)

# Publication time and current time
t_start = 2.2
t_end = 100.7
duration = t_end - t_start

# True parameter values
lambda_tips = 0.5
mu_tips = 1.0
sig_log_tips = 1.9

# Arrival times of tips from poisson process
expected_num_tips = lambda_tips * duration
num_tips = rng.poisson(expected_num_tips)

# Uniform distribution for times given number
times = t_start + duration * rng.rand(num_tips)
times = np.sort(times)

# Amounts of tips
amounts = mu_tips * np.exp(sig_log_tips * rng.randn(num_tips))

# Save data as YAML
f = open("example_data.yaml", "w")
f.write("---\n")
f.write("t_start: " + str(t_start) + "\n")
f.write("t_end: " + str(t_end) + "\n")
f.write("times:\n")
for i in range(num_tips):
    f.write("  - " + str(times[i]) + "\n")  # assumed loop body; the original snippet is truncated here

print('Starting simulation')

for t in xrange(len(T)):

    sl, sr = env.read_sensors(x[t], y[t], w[t])

    # Reset the neuron firings table, but carry over those firings that may not yet have
    # reached their target. Shift their times back by dt so they now appear at negative times.
    for layer_index, layer in vehicle.net.layers.items():
        firings = [[f[0] - dt, f[1]] for f in layer.firings
                   if f[0] > dt - max_conduction_delay]
        layer.firings = np.array(firings)

    for t2 in xrange(dt):
        vehicle.net.layers[0].I = rn.poisson(sl * 15, vehicle.net.layers[0].N)
        vehicle.net.layers[1].I = rn.poisson(sr * 15, vehicle.net.layers[1].N)

        vehicle.net.layers[2].I = 5 * rn.randn(vehicle.net.layers[2].N)
        vehicle.net.layers[3].I = 5 * rn.randn(vehicle.net.layers[3].N)

        vehicle.net.tick(t2)

        for layer_index, layer in vehicle.net.layers.items():
            membrane_potentials[layer_index][t2, :] = layer.V

    for layer_index, layer in vehicle.net.layers.items():
        layer.firings = np.array(filter(lambda f: f[0] > 0, layer.firings))

    rl = 1.0 * len(
        vehicle.net.layers[2].firings) / dt / vehicle.net.layers[2].N * 1000
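
# Standalone sketch (assumed sensor value and layer size) of the rate-coded drive used
# above: a sensor reading is converted into per-neuron input currents by drawing Poisson
# counts whose mean scales with the reading.
import numpy.random as rn

sensor_reading = 0.4                        # assumed reading in [0, 1]
n_neurons = 100
I = rn.poisson(sensor_reading * 15, n_neurons)
print(I.mean())                             # close to sensor_reading * 15 = 6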
Example #53
0
    def _testOnModelHandcraftedData(self):
        #
        # Create the vocab
        #
        T = 3 * 3
        K = 5
        
        # Horizontal bars
        vocab1 = ssp.coo_matrix(([1, 1, 1], ([0, 0, 0], [0, 1, 2])), shape=(3,3)).todense()
        #vocab2 = ssp.coo_matrix(([1, 1, 1], ([1, 1, 1], [0, 1, 2])), shape=(3,3)).todense()
        vocab3 = ssp.coo_matrix(([1, 1, 1], ([2, 2, 2], [0, 1, 2])), shape=(3,3)).todense()
        
        # Vertical bars
        vocab4 = ssp.coo_matrix(([1, 1, 1], ([0, 1, 2], [0, 0, 0])), shape=(3,3)).todense()
        #vocab5 = ssp.coo_matrix(([1, 1, 1], ([0, 1, 2], [1, 1, 1])), shape=(3,3)).todense()
        vocab6 = ssp.coo_matrix(([1, 1, 1], ([0, 1, 2], [2, 2, 2])), shape=(3,3)).todense()
        
        # Diagonals
        vocab7 = ssp.coo_matrix(([1, 1, 1], ([0, 1, 2], [0, 1, 2])), shape=(3,3)).todense()
        #vocab8 = ssp.coo_matrix(([1, 1, 1], ([2, 1, 0], [0, 1, 2])), shape=(3,3)).todense()
        
        # Put together
        T = vocab1.shape[0] * vocab1.shape[1]
        vocabs = [vocab1, vocab3, vocab4, vocab6, vocab7]
        
        # Create a single matrix with the flattened vocabularies
        vocabVectors = []
        for vocab in vocabs:
            vocabVectors.append (np.squeeze(np.asarray (vocab.reshape((1,T)))))
        
        vocab = normalizerows_ip(np.array(vocabVectors, dtype=DTYPE))
        
        # Plot the vocab
        ones = np.ones(vocabs[0].shape)
        for k in range(K):
            plt.subplot(2, 3, k + 1)  # subplot indices are 1-based
            plt.imshow(ones - vocabs[k], interpolation="none", cmap = cm.Greys_r)
        plt.show()
        
        #
        # Create the corpus
        #
        rd.seed(0xC0FFEE)
        D = 1000

        # Make sense (of a sort) of this by assuming that these correspond to
        # Kittens    Omelettes    Puppies    Oranges    Tomatoes    Dutch People    Basketball    Football
        #topicMean = np.array([10, 25, 5, 15, 5, 5, 10, 25])
#        topicCovar = np.array(\
#            [[ 100,    5,     55,      20,     5,     15,      4,      0], \
#             [ 5,    100,      5,      10,    70,      5,      0,      0], \
#             [ 55,     5,    100,       5,     5,     10,      0,      5], \
#             [ 20,    10,      5,     100,    30,     30,     20,     10], \
#             [ 5,     70,      5,     30,    100,      0,      0,      0], \
#             [ 15,     5,     10,     30,      0,    100,     10,     40], \
#             [ 4,      0,      0,     20,      0,     10,    100,     20], \
#             [ 0,      0,      5,     10,      0,     40,     20,    100]], dtype=DTYPE) / 100.0

        topicMean = np.array([25, 15, 40, 5, 15])
        self.assertEqual(100, topicMean.sum())
        topicCovar = np.array(\
            [[ 100,    5,     55,      20,     5     ], \
             [ 5,    100,      5,      10,    70     ], \
             [ 55,     5,    100,       5,     5     ], \
             [ 20,    10,      5,     100,    30     ], \
             [ 5,     70,      5,     30,    100     ], \
             ], dtype=DTYPE) / 100.0
 
        
        meanWordCount = 80
        wordCounts = rd.poisson(meanWordCount, size=D)
        topicDists = rd.multivariate_normal(topicMean, topicCovar, size=D)
        W = topicDists.dot(vocab) * wordCounts[:, np.newaxis]
        W = ssp.csr_matrix (W.astype(DTYPE))
        
        #
        # Train the model
        #
        model      = ctm.newModelAtRandom(W, K, dtype=DTYPE)
        queryState = ctm.newQueryState(W, model)
        trainPlan  = ctm.newTrainPlan(iterations=65, plot=True, logFrequency=1)
        
        self.assertTrue (0.99 < np.sum(model.topicMean) < 1.01)
        
        return self._doTest (W, model, queryState, trainPlan)
Example #54
0
# import the required libraries
import numpy as np
import matplotlib.pyplot as plt
import random
from numpy import random as rand

# store the random numbers in a list
nums = []
mu = 100
sigma = 25

for i in range(1000):
    #s = random.gauss(mu,sigma)
    s = rand.poisson(3, 1)  # returns a length-1 array, hence s[0] below
    print(s[0])
    nums.append(s[0])

# plotting a graph
plt.hist(nums, bins=200)
plt.show()
Example #55
0

D_sorted = np.zeros(np.shape(A))
np.fill_diagonal(D_sorted, sorted_lambda_A)
D_sorted_inv = 1 / D_sorted                    # off-diagonal zeros become inf here ...
D_sorted_inv[np.isinf(D_sorted_inv)] = 0       # ... and are zeroed, giving the pseudo-inverse
U_sorted = swap(U, swaps)
U_sorted_inv = U_sorted.T

#A_ = U_sorted @ D_sorted @ U_sorted_inv
#A_[A_<0.01]=0
#print(A_-A<0.01)

# d)
rand.seed(230)
# Draw one Poisson-smeared realisation of the 20-bin spectrum g (simulated measurement noise).
g_mess = rand.poisson(g, size=(1, 20))
print("g_{mess} = ")
print(np.mean(g_mess, axis=0))

b_wahr = U_sorted_inv @ f_wahr
b_mess = D_sorted_inv @ U_sorted_inv @ g_mess.T

c = U_sorted_inv @ g
c_mess = U_sorted_inv @ g_mess.T

V_g_mess = np.zeros((20, 20))
np.fill_diagonal(V_g_mess, np.mean(g_mess, axis=0))

V_c_mess = U_sorted_inv @ V_g_mess @ U_sorted

V_b_mess = D_sorted_inv @ V_c_mess @ D_sorted_inv


# The result, accuracy and computational time
def table(rewardM):
    n_sample = rewardM.shape[0]
    game_size = rewardM.shape[1]

    LP_record = np.zeros((n_sample, 2), dtype=float)
    NN_record = np.zeros((n_sample, 2), dtype=float)
    for i in range(n_sample):
        LP_record[i, 0], LP_record[i, 1] = LP_NE(rewardM[i])
        NN_record[i, 0], NN_record[i, 1] = CNN_NE(rewardM[i])

    LP_mv, LP_mt = LP_record[:, 0].mean(), LP_record[:, 1].mean()
    NN_mv, NN_mt = NN_record[:, 0].mean(), NN_record[:, 1].mean()
    difference_NN = np.abs(LP_record[:, 0] - NN_record[:, 0]).mean()
    gap_NN = np.abs(
        (LP_record[:, 0] - NN_record[:, 0]) / NN_record[:, 0]).mean()

    print(f"** number sample: {n_sample}, game size: {game_size} **\n")
    print(
        f"Linear programming         : mean time: {LP_mt:.4f}, mean value: {LP_mv:.4f} "
    )
    print(
        f"Convolutinal Neural Network: mean time: {NN_mt:.4f}, mean value: {NN_mv:.4f},  mean gap: {gap_NN*100:.2f}%"
    )


rewardM = (uniform(-10, 100, (100, 35, 35)) + normal(25, 3, (100, 35, 35))
           + poisson(35, (100, 35, 35)))
table(rewardM)
Example #57
0
def sleeptime(lmbda=MEAN_TIME, use_poisson=True):
    """Random time until next message."""
    if use_poisson:
        return poisson(lmbda)
    else:
        return lmbda
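
# Hedged usage sketch (MEAN_TIME and the poisson import live elsewhere in the original
# module, so an explicit mean and import are given here): draw three random delays.
from numpy.random import poisson
print([sleeptime(lmbda=5) for _ in range(3)])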
Example #58
0
 def __call__(self):
     return nr.poisson(lam=self.s, size=np.size(self.s)) - self.s

    def _sampleFromModel(self,
                         D=200,
                         T=100,
                         K=10,
                         Q=6,
                         F=12,
                         P=8,
                         avgWordsPerDoc=500):
        '''
        Create a test dataset according to the model
        
        Params:
            T - Vocabulary size, the number of "terms". Must be a square number
            Q - Latent Topics:
            K - Observed topics
            P - Latent features
            F - Observed features
            D - Sample documents (each with associated features)
            avgWordsPerDoc - average number of words per document generated (Poisson)
        
        Returns:
            modelState - a model state object configured for training
            tpcs       - the matrix of per-document topic distribution
            vocab      - the matrix of per-topic word distributions
            docLens    - the vector of document lengths
            X          - the DxF side information matrix
            W          - The DxT word matrix
        '''

        # Generate vocab
        beta = 0.1
        betaVec = np.ndarray((T, ))
        betaVec.fill(beta)
        vocab = np.zeros((K, T))
        for k in range(K):
            vocab[k, :] = rd.dirichlet(betaVec)

        # Generate U, then V, then A
        tau = 0.1
        tsq = tau * tau
        (vSdRow, vSdCol) = (5.0, 5.0)
        (uSdRow, uSdCol) = (5.0, tau**2)  # For the K-dimensions we use tsq
        (ySdRow, ySdCol) = (5.0, 5.0)
        (aSdRow, aSdCol) = (5.0, tau**2)

        U = matrix_normal(np.zeros((K, Q)), uSdRow * np.eye(Q),
                          uSdCol * np.eye(K))
        Y = matrix_normal(np.zeros((Q, P)), ySdRow * np.eye(P),
                          ySdCol * np.eye(Q))
        V = matrix_normal(np.zeros((F, P)), vSdRow * np.eye(P),
                          vSdCol * np.eye(F))
        A = matrix_normal(
            U.dot(Y).dot(V.T), aSdRow * np.eye(F), aSdCol * np.eye(K))

        # Generate the input features. Assume the features are multinomial and sparse
        # (not quite a perfect match for the twitter example: twitter is binary, this
        # may not be)
        featuresDist = [1. / P] * P
        maxNonZeroFeatures = 3

        X_low = np.zeros((D, P), dtype=np.float32)
        for d in range(D):
            X_low[d, :] = rd.multinomial(maxNonZeroFeatures, featuresDist)
        X = np.round(X_low.dot(V.T))
        X = ssp.csr_matrix(X)

        # Use the features and the matrix A to generate the topics and documents
        tpcs = rowwise_softmax(X.dot(A.T))

        docLens = rd.poisson(avgWordsPerDoc, (D, )).astype(np.float32)
        W = tpcs.dot(vocab)
        W *= docLens[:, np.newaxis]
        W = np.array(W, dtype=np.int32)  # truncate word counts to integers
        W = ssp.csr_matrix(W)

        # Initialise the model
        modelState = newVbModelState(K, Q, F, P, T)

        # Return the initialised model, the true parameter values, and the
        # generated observations
        return modelState, tpcs, vocab, docLens, X, W
Example #60
0
 def run(self):
     while True:
         beta = 1 / self.rate
         t = poisson(beta)
         time.sleep(t)
         self.callback()