Example #1
def init_fun(force_fresh=False):
    if args.init and not force_fresh:
        try:
            init_path = Path(args.fits_path) / args.init
            result = last_sample_as_dict(init_path, model_path)
        except Exception:
            print("Couldn't use last sample from previous fit to initialize")
            return init_fun(force_fresh=True)
        else:
            print("Using last sample from previous fit to initialize")
    else:
        print("Using default values to initialize fit")
        from numpy.random import gamma, exponential, lognormal
        result = {
            'f1': gamma(1.5, 2.),
            'f2': gamma(1.5, 1.5),
            'sigmar': gamma(2., .1 / 2.),
            'sigmad': gamma(2., .1 / 2.),
            'sigmau': gamma(2., .1 / 2.),
            'q': exponential(.1),
            'mbase': gamma(2., .1 / 2.),
            'mlocation': lognormal(np.log(stan_data['tm']), 1.),
            'extra_std': exponential(1.),
            'cbase': gamma(2., 1.),
            'clocation': lognormal(np.log(20.), 1.),
            'n_pop': lognormal(np.log(1e5), 1.),
            'sigmar1': gamma(2., .1 / 2.)
        }
    return result
Example #2
    def __init__(model, co, M, N, sigmax, Zrho_init, circuit_type):
        model.g_sig_init = 0.1  #initial log-variance parameter of wF and wL
        model.Zdelta_t_min = 100.0
        #minimum(initial) value of delta_t [trials]
        model.dTau = 1.0  #time step [ms]
        model.Taus = np.arange(0.0, 5001.0,
                               model.dTau)  #simulation time at each trial
        model.taur = 50.0  # timescale of firing rate dynamics [ms]
        model.mtsp = 5.0

        Zwinit = exp(0.5 * (model.g_sig_init * model.g_sig_init - 1.0))
        model.delta_t = 1.0 / model.Zdelta_t_min

        model.co = co
        model.M = M
        model.N = N
        model.sigmax2 = sigmax * sigmax
        model.wF = np.divide(nrnd.lognormal(0.0, model.g_sig_init, (M, N)),
                             co * M * Zwinit)
        model.wL = np.divide(nrnd.lognormal(0.0, model.g_sig_init, (N, M)),
                             co * M * Zwinit)
        model.wp = np.divide(nrnd.lognormal(0.0, model.g_sig_init, (M, N)),
                             co * M * Zwinit)
        model.rhoc = np.full((M), (co / model.sigmax2) / Zrho_init)
        model.rhop = np.full((M), (co / model.sigmax2) / Zrho_init)

        model.lambda_c = np.zeros((M))
        model.lambda_p = np.zeros((M))
        model.update_lambda_cp()

        model.ctype = circuit_type
        model.Jp = np.full((M, M), 0.02) - np.diag(np.full((M), 0.02))
Example #3
	def __call__(self, x, p=1.0):
		batch, channels, height, width = x.shape
		centering = translation(-0.5, -0.5, -0.5)
		affine = centering @ indentity
		condition = uniform(size=batch) < p
		brightness = normal(scale=self.brightness, size=batch)
		brightness_adjustment = stack([translation(b, b, b) if c else indentity for c, b in zip(condition, brightness)])
		affine = brightness_adjustment @ affine
		condition = uniform(size=batch) < p
		contrast = lognormal(sigma=self.contrast, size=batch)
		contrast_adjustment = stack([scaling(s, s, s) if c else indentity for c, s in zip(condition, contrast)])
		affine = contrast_adjustment @ affine
		condition = uniform(size=batch) < p / 2
		luminance_flip = stack([householder([1, 1, 1]) if c else indentity for c in condition])
		affine = luminance_flip @ affine
		condition = uniform(size=batch) < p
		theta = uniform(low=-self.hue_rotation, high=self.hue_rotation, size=batch)
		hue_rotation = stack([rotation([1, 1, 1], t) if c else indentity for c, t in zip(condition, theta)])
		affine = hue_rotation @ affine
		condition = uniform(size=batch) < p
		saturation = lognormal(sigma=self.saturation, size=batch)
		v = array([1, 1, 1, 0], dtype=float32) / sqrt(3)
		o = outer(v, v)
		saturation_adjustment = stack([o + (indentity - o) * array([s, s, s, 1], dtype=float32) if c else indentity for c, s in zip(condition, saturation)])
		affine = saturation_adjustment @ affine
		inverse_centering = translation(0.5, 0.5, 0.5)
		affine = inverse_centering @ affine
		affine = affine[:, 0:3].reshape(batch, 1, 3, 4)
		affine = self.xp.asarray(affine)
		ones = self.xp.ones((batch, height * width, 1, 1), dtype=self.xp.float32)
		h1 = x.transpose(0, 2, 3, 1).reshape(batch, height * width, channels, 1)
		h2 = concat((h1, ones), axis=2)
		h3 = (affine @ h2).reshape(batch, height, width, channels)
		return h3.transpose(0, 3, 1, 2)
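The color-space helpers used above (translation, scaling, rotation, householder and the consistently misspelled indentity) are not shown in this example. A minimal sketch of the two simplest ones, assuming 4x4 homogeneous matrices over the three color channels (hypothetical reconstructions, not the source's code):

import numpy as np

indentity = np.eye(4, dtype=np.float32)  # spelled as in the snippet

def translation(x, y, z):
    # homogeneous 4x4 shift of the three color channels
    m = np.eye(4, dtype=np.float32)
    m[0:3, 3] = (x, y, z)
    return m

def scaling(x, y, z):
    # homogeneous 4x4 per-channel scaling
    return np.diag(np.array([x, y, z, 1], dtype=np.float32))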
Example #4
    def sampleTheParameterFromPrior(self, sampled_models):
        ret = []

        for i in range(self.nbatch):
            #print "sampleTheParameterFromPrior", i, sampled_models[i], self.models[ sampled_models[i] ].name, self.models[ sampled_models[i] ].nparameters
            model = self.models[sampled_models[i]]
            nparams = model.nparameters

            reti = [0 for it in range(nparams)]
            mean_n = [0 for it in range(nparams)]
            var_n = [0 for it in range(nparams)]

            for n in range(nparams):
                prior = model.prior[n]
                if prior[0] == 0:
                    reti[n] = prior[1]

                if prior[0] == 1:
                    reti[n] = rnd.normal(loc=prior[1], scale=numpy.sqrt(prior[2]))

                if prior[0] == 2:
                    reti[n] = rnd.uniform(low=prior[1], high=prior[2])

                if prior[0] == 3:
                    reti[n] = rnd.lognormal(mean=prior[1], sigma=numpy.sqrt(prior[2]))

                if prior[0] == 4:
                    reti[n] = rnd.uniform(low=prior[1], high=prior[2])

                if prior[0] == 5:
                    reti[n] = prior[1]

                if prior[0] == 6:
                    reti[n] = rnd.uniform(low=prior[1], high=prior[2])

            # collect the sampled means (type 4) and scales (type 6)
            begincount = 0
            for n in range(nparams):
                if model.prior[n][0] == 4:
                    mean_n[begincount] = reti[n]
                    begincount = begincount + 1

            begincountv = 0
            for n in range(nparams):
                if model.prior[n][0] == 6:
                    var_n[begincountv] = reti[n]
                    begincountv = begincountv + 1

            # type-5 parameters are lognormal draws around the collected means
            count = 0
            for n in range(nparams):
                if model.prior[n][0] == 5:
                    #reti[n]= mean_n[count]
                    reti[n] = rnd.lognormal(mean_n[int(model.prior[n][2])], var_n[0], 1)[0]
                    count = count + 1

            ret.append(reti[:])

        return [x[:] for x in ret]
Example #5
def sample(community, n=100, growth=0.1, abundance=None, allocation=False, constraints=None,
           w_e=0.001, w_r=0.5, solver=None):

    if abundance:
        norm = sum(abundance.values())
        abundance = {org_id: abundance.get(org_id, 0) / norm for org_id in community.organisms}

    if not solver:
        solver = build_problem(community, growth=growth, abundance=abundance)

    if not allocation:
        w_e = 0
        w_r = 0

    enz_vars = allocation_constraints(community, solver, w_e=w_e, w_r=w_r, abundance=abundance)

    sols = []
    if abundance:
        w1 = {org_id: 1 / abundance[org_id] if abundance[org_id] > 0 else 0 for org_id in community.organisms}

    for _ in range(n):

        if not abundance:
            w1 = {org_id: lognormal(0, 1) for org_id in community.organisms}

        objective = {vi: w1[org_id] * lognormal(0, 1) for org_id, v_org in enz_vars.items() for vi in v_org}

        sol = solver.solve(objective, minimize=True, constraints=constraints)

        if sol.status == Status.OPTIMAL:
            sol = CommunitySolution(community, sol.values)
            sols.append(sol)

    return sols
Example #6
def lognormal_samples(incubation_period,
                      infectivity_period,
                      infection_to_death_period,
                      basic_reproduction_number,
                      runs):
  
    # Computes mean and std for a lognormal distribution
    alpha_inv_params = make_lognormal_params_95_ci(*incubation_period)
    gamma_inv_params = make_lognormal_params_95_ci(*infectivity_period)
    delta_inv_params = make_lognormal_params_95_ci(*infection_to_death_period)
    R0__params = make_lognormal_params_95_ci(*basic_reproduction_number)

    # samples for a lognormal distribution (Monte Carlo Method)
    # alpha
    incubation_rate = 1 / npr.lognormal(*map(np.log, alpha_inv_params), runs)
    # gamma
    infectivity_rate = 1 / npr.lognormal(*map(np.log, gamma_inv_params), runs)
    #infectivity_rate = 1/ npr.gamma(97.1875, 3.7187, runs)
    # beta = r0 * gamma
    contamination_rate = npr.lognormal(*map(np.log, R0__params), runs) * infectivity_rate
    infection_to_death_rate = 1 / npr.lognormal(*map(np.log, delta_inv_params), runs)

    ret = (incubation_rate, infectivity_rate, contamination_rate, infection_to_death_rate)

    return ret
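make_lognormal_params_95_ci is not shown. Since every call site takes np.log of its outputs before passing them to npr.lognormal, it must return parameters in natural units; a plausible sketch that recovers them from a 95% confidence interval (an assumption, not the source's definition):

import numpy as np

def make_lognormal_params_95_ci(lb, ub):
    # For X ~ lognormal(mu, sigma) the 95% CI is
    # [exp(mu - 1.96 sigma), exp(mu + 1.96 sigma)], hence:
    mean = np.sqrt(lb * ub)                # exp(mu), the geometric mean
    std = (ub / lb) ** (1 / (2 * 1.96))    # exp(sigma), the multiplicative spread
    return mean, std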
Example #7
    def matrices(UP_list, CF, variables_techno, variables_interv):

        MA = lil_matrix((len(UP_list), len(UP_list)))
        MB = lil_matrix((CF.shape[1], len(UP_list)))

        if len(variables_techno["lognormal"]) > 0:
            MA[array(variables_techno["lognormal"][:, 0], int),
               array(variables_techno["lognormal"][:, 1], int)] = sign(
                   variables_techno["lognormal"][:, 2]) * random.lognormal(
                       array(variables_techno["lognormal"][:, 3], float32),
                       array(variables_techno["lognormal"][:, 4], float32))
        if len(variables_techno["normal"]) > 0:
            MA[array(variables_techno["normal"][:, 0], int),
               array(variables_techno["normal"][:, 1], int)] = random.normal(
                   array(variables_techno["normal"][:, 2], float32),
                   array(variables_techno["normal"][:, 3], float32))
        if len(variables_techno["triangle"]) > 0:
            MA[array(variables_techno["triangle"][:, 0], int),
               array(variables_techno["triangle"][:, 1], int)] = sign(
                   variables_techno["triangle"][:, 2]) * random.triangular(
                       array(variables_techno["triangle"][:, 3], float32),
                       abs(array(variables_techno["triangle"][:, 2], float32)),
                       array(variables_techno["triangle"][:, 4], float32))
        if len(variables_techno["deterministe"]) > 0:
            MA[array(variables_techno["deterministe"][:, 0], int),
               array(variables_techno["deterministe"][:, 1], int
                     )] = variables_techno["deterministe"][:, 2]

        if len(variables_interv["lognormal"]) > 0:
            MB[array(variables_interv["lognormal"][:, 0], int),
               array(variables_interv["lognormal"][:, 1], int)] = sign(
                   variables_interv["lognormal"][:, 2]) * random.lognormal(
                       array(variables_interv["lognormal"][:, 3], float32),
                       array(variables_interv["lognormal"][:, 4], float32))
        if len(variables_interv["normal"]) > 0:
            MB[array(variables_interv["normal"][:, 0], int),
               array(variables_interv["normal"][:, 1], int)] = random.normal(
                   array(variables_interv["normal"][:, 2], float32),
                   array(variables_interv["normal"][:, 3], float32))
        if len(variables_interv["triangle"]) > 0:
            MB[array(variables_interv["triangle"][:, 0], int),
               array(variables_interv["triangle"][:, 1], int)] = sign(
                   variables_interv["triangle"][:, 2]) * random.triangular(
                       array(variables_interv["triangle"][:, 3], float32),
                       abs(array(variables_interv["triangle"][:, 2], float32)),
                       array(variables_interv["triangle"][:, 4], float32))
        if len(variables_interv["agregated"]) > 0:
            MB[array(variables_interv["agregated"][:, 0], int),
               array(
                   variables_interv["agregated"][:, 1], int
               )] = variables_interv["agregated"][:, 2] * random.lognormal(
                   array(variables_interv["agregated"][:, 3], float32),
                   array(variables_interv["agregated"][:, 4], float32))
        if len(variables_interv["deterministe"]) > 0:
            MB[array(variables_interv["deterministe"][:, 0], int),
               array(variables_interv["deterministe"][:, 1], int
                     )] = variables_interv["deterministe"][:, 2]

        return MA, MB
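The layout of the input tables is implicit in the indexing above: each entry of variables_techno / variables_interv is an array with one row per uncertain coefficient and columns [row index, column index, signed value (the mode, for triangles), first distribution parameter, second distribution parameter]. A tiny illustrative input (hypothetical values):

import numpy as np

variables_techno = {
    # one lognormal coefficient at MA[0, 1], negative sign, mu=0.0, sigma=0.1
    "lognormal": np.array([[0, 1, -1.0, 0.0, 0.1]]),
    "normal": np.array([]),
    "triangle": np.array([]),
    "deterministe": np.array([]),
}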
Example #8
    def solve(self, Halos):

        import numpy.random as npr
        from Objects import RtoD

        n_data = Halos.number_of_halos
        IP = Halos.InputParam

        # --- set up Lx , Tx scaling relations
        LxS = IP.LxScaling  # Lxm_Parameters
        TxS = IP.TxScaling  # Txm_Parameters

        # calculate evolution factor
        E = Evolution_factor(IP, Halos.Z_red)

        # calculate mean-log-Lx + scatter
        Ep = Evolution_factor(IP, LxS.z_p)
        Mp = LxS.M_p * IP.h_0

        Halos.lgLx = np.log(LxS.Norm) + LxS.M_slope * np.log(Halos.M500/Mp) +\
                     LxS.E_slope * np.log(E/Ep) + npr.normal(0.0, LxS.sig, n_data)
        Halos.lgLx /= np.log(10.0)
        Halos.lgLx += -44.

        # calculate mean-log-Tx + scatter
        Ep = Evolution_factor(IP, TxS.z_p)
        Mp = TxS.M_p * IP.h_0

        Halos.lgT = np.log(TxS.Norm) + TxS.M_slope*np.log(Halos.M500/Mp) +\
                    TxS.E_slope*np.log(E/Ep) + npr.normal(0.0,TxS.sig,n_data)
        Halos.lgT /= np.log(10.0)

        # calculate core radius, flux, and beta
        for i in range(n_data):

            fx = IP.xray_band.Luminosity2FluxWithCube(Halos.Z_red[i],
                                                      Halos.lgT[i],
                                                      Halos.lgLx[i])
            Halos.lgFx[i] = np.log10(fx + 1e-40)

            Rcbar = RtoD * (IP._param['xc_bar'] * Halos.R500[i]) * (
                1.0 + Halos.Z_red[i]) / Halos.pd[i]
            Halos.Rc[i] = npr.lognormal(np.log(Rcbar), IP._param['xc_sig'])

            betaBarC = np.log(IP._param['SB_beta_bar']) +\
                       IP._param["xc_Beta_r"] * IP._param['SB_beta_sig'] / IP._param['xc_bar'] *\
                       (np.log(Halos.Rc[i]) - np.log(Rcbar))

            sigBeta = np.sqrt(
                1.0 - IP._param["xc_Beta_r"]**2) * IP._param['SB_beta_sig']
            Halos.beta[i] = npr.lognormal(betaBarC, sigBeta)

        print("Fluxes, Temperatures, and Luminosities are assigned successfully!")
Example #9
def task(w,lines):

    #- error catcher
    retries = []
    flag = 0

    #- retrieve worker id:
    w_id = int(multiprocessing.current_process().name[-2:].lstrip("-"))

    #- randomly select connection and user agent:
    proxy, ua, ua_ind  = match_id(w_id,w) 

    #- message:
    print()
    print("HELLO I AM WORKER #" + str(w_id) + " WORKING WITH " + proxy + " AND UA #" + ua_ind)
    print("time: " + str(datetime.now().strftime("%H:%M:%S")))
    print()

    #- random big nap
    seed(w_id+random.randint(0,1000))
    time.sleep(min(14.3/w,lognormal(1.8/w,1.5)))

    #- get a cookie for multoproptax.com:
    cookie = make_cookie(proxy,ua)
    if cookie == "NO_COOKIE":
        print()
        print("I DIDN'T GET A COOKIE :( GOODBYE!")
        print()
        retries.extend(lines)
        return retries
  
    #- iterate through houses:
    for line in lines:

        #- random little nap
        time.sleep(min(7.4/w,lognormal(0.9/w,1.05)))

        house = get_house(line,cookie,proxy,ua)

        if house["NOTE"] == 'SUCCESS':
            filename = path + house["property_id"] + '.p'
            pickle.dump( house, open( filename, "wb" ) )
            print('house #' + str(line[17]) + ': ' + house["NOTE"])

        elif house["NOTE"] in ['CONNECTION_PROBLEM','SUSPICIOUS_RESP','OOPS']:
            retries.append(line)
            print('house #' + str(line[17]) + ': ' + house["NOTE"] + ' with proxy ' + proxy)
            flag +=1
            if flag > 7: return retries

        else:
            print('house #' + str(line[17]) + ' with id ' + str(line[0]) + ' : ' + house["NOTE"])

    return retries
Example #10
def simulate(R_pre, R_post, instant):
    print(".", end="")
    sys.stdout.flush()

    # empirical tau values to get ~3 day doubling

    tau = 10 if instant else 5

    # counts of infectious people and deaths per day

    infect = [0 for i in range(1000)]
    deaths = [0 for i in range(1000)]

    # remember how many new cases we created each day

    cases = [0 for i in range(50)]

    # dirty startup with 1000 cases at March 1
    # ringing appears to dissipate by day 10
    # TODO: check nothing horrid lurking here

    infect[0] = 1000

    # run for 50 days

    for t in range(50):
        # number of new cases to create: spread R evenly across tau days

        cases[t] = int(infect[t] * (R_pre if t < 23 else R_post) / tau)

        # create the cases

        for i in range(cases[t]):
            # pick infection-to-onset and onset-to-death from lognormal distributions
            # TODO: surely these aren't independent variables

            onset = lognormal(INCUBATION_MU, INCUBATION_SIGMA)
            death = lognormal(ONSETDEATH_MU, ONSETDEATH_SIGMA)

            # increment tau infectious people counters starting either at
            # t+1 (Cheianov model), or at t+onset-1

            for j in range(tau):
                infect[t + max(1, int(0 if instant else onset) - 1) + j] += 1

            # increment appropriate death counter

            deaths[t + int(onset + death)] += 1

    # return day of peak, and per-day case counts normalised to maximum

    return max([(deaths[i], i) for i in range(1000)
                ])[1], [float(n) / max(cases) for n in cases]
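The four lognormal constants are assumed to be defined at module level; a hypothetical setup with placeholder log-scale parameters (illustrative only, not taken from the source):

import sys
from numpy.random import lognormal

INCUBATION_MU, INCUBATION_SIGMA = 1.6, 0.4     # infection-to-onset, log scale
ONSETDEATH_MU, ONSETDEATH_SIGMA = 2.8, 0.45    # onset-to-death, log scale

peak_day, normalised_cases = simulate(R_pre=3.0, R_post=0.7, instant=False)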
Example #11
 def __call__(self, x, p=1.0):
     batch, _, height, width = x.shape
     centering = inverse_translation(-height / 2 + 0.5, -width / 2 + 0.5)
     affine = indentity @ centering
     condition = uniform(size=batch) < p
     scale = lognormal(sigma=self.scale, size=batch)
     isotropic_scaling = stack([
         inverse_scaling(s, s) if c else indentity
         for c, s in zip(condition, scale)
     ])
     affine = affine @ isotropic_scaling
     condition = uniform(size=batch) < 1 - sqrt(1 - p)
     theta = uniform(low=-self.rotation, high=self.rotation, size=batch)
     pre_rotation = stack([
         inverse_rotation(t) if c else indentity
         for c, t in zip(condition, theta)
     ])
     affine = affine @ pre_rotation
     condition = uniform(size=batch) < p
     scale = lognormal(sigma=self.scale, size=batch)
     anisotropic_scaling = stack([
         inverse_scaling(1 / s, s) if c else indentity
         for c, s in zip(condition, scale)
     ])
     affine = affine @ anisotropic_scaling
     condition = uniform(size=batch) < 1 - sqrt(1 - p)
     theta = uniform(low=-self.rotation, high=self.rotation, size=batch)
     post_rotation = stack([
         inverse_rotation(t) if c else indentity
         for c, t in zip(condition, theta)
     ])
     affine = affine @ post_rotation
     condition = uniform(size=batch) < p
     th = height * normal(scale=self.translation, size=batch)
     tw = width * normal(scale=self.translation, size=batch)
     translation = stack([
         inverse_translation(h, w) if c else indentity
         for c, h, w in zip(condition, th, tw)
     ])
     affine = affine @ translation
     inverse_centering = inverse_translation(height / 2 - 0.5,
                                             width / 2 - 0.5)
     affine = affine @ inverse_centering
     affine = self.xp.asarray(affine)
     indices = self.xp.indices((height, width), dtype=self.xp.float32)
     ones = self.xp.ones((1, height, width), dtype=self.xp.float32)
     coordinate = self.xp.concatenate((indices, ones)).transpose(1, 2, 0)
     resampling_coordinate = affine.reshape(
         batch, 1, 3, 3) @ coordinate.reshape(1, height * width, 3, 1)
     resampling_grid = resampling_coordinate.reshape(
         batch, height, width, 3).transpose(3, 0, 1, 2)[0:2]
     return self.lanczos_resampling(x, resampling_grid)
Example #12
def hilleret_model2(switch_no_increase_energy, Ngen, sigmafit, mufit, E_th, En_impact_eV, thresh_low_energy):

    if switch_no_increase_energy == 0:
        # draw all energies, then redraw any sample above the threshold E_th
        en_eV = lognormal(mufit, sigmafit, Ngen)
        flag_above_th = (en_eV > E_th)
        Nabove_th = sum(flag_above_th)

        while Nabove_th > 0:
            en_eV[flag_above_th] = lognormal(mufit, sigmafit, Nabove_th)
            flag_above_th = (en_eV > E_th)
            Nabove_th = sum(flag_above_th)

    if switch_no_increase_energy == 1:

        en_eV = 0 * En_impact_eV

        flag_low_energy = En_impact_eV < thresh_low_energy
        flag_high_energy = ~(flag_low_energy)
        N_low_ene = sum(flag_low_energy)
        N_high_ene = sum(flag_high_energy)

        # generate low energy: Gaussian truncated to [0, 4], rescaled per impact energy
        en_eV_le = randn(N_low_ene)   # in eV
        flag_negat = logical_or(en_eV_le < 0., en_eV_le > 4.)
        N_neg = sum(flag_negat)
        while N_neg > 0:
            en_eV_le[flag_negat] = randn(N_neg)  # in eV
            flag_negat = logical_or(en_eV_le < 0., en_eV_le > 4.)
            N_neg = sum(flag_negat)
        sigma_le = En_impact_eV[flag_low_energy] / 4.
        en_eV_le = (en_eV_le + 2.) * sigma_le

        # generate high energy: lognormal, rejecting samples above E_th or above the impact energy
        en_eV_he = lognormal(mufit, sigmafit, N_high_ene)
        flag_above_th = logical_or(en_eV_he > E_th, (en_eV_he - En_impact_eV[flag_high_energy]) > 0)
        Nabove_th = sum(flag_above_th)

        while Nabove_th > 0:
            en_eV_he[flag_above_th] = lognormal(mufit, sigmafit, Nabove_th)
            flag_above_th = logical_or(en_eV_he > E_th, (en_eV_he - En_impact_eV[flag_high_energy]) > 0)
            Nabove_th = sum(flag_above_th)

        en_eV[flag_high_energy] = en_eV_he
        en_eV[flag_low_energy] = en_eV_le

    return en_eV
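A minimal usage sketch for the first branch, with assumed parameter values; the rejection loop guarantees no sample exceeds E_th:

import numpy as np
from numpy.random import lognormal, randn
from numpy import logical_or

en = hilleret_model2(switch_no_increase_energy=0, Ngen=1000,
                     sigmafit=1.0, mufit=1.5, E_th=35.,
                     En_impact_eV=None, thresh_low_energy=None)
assert np.all(en <= 35.)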
Example #14
def run_SEIR_BAYES_model(
        N: 'population size', E0: 'init. exposed population',
        I0: 'init. infected population', R0: 'init. removed population',
        R0__params: 'repr. rate mean and std',
        gamma_inv_params: 'removal rate mean and std',
        alpha_inv_params: 'incubation rate mean and std',
        fator_subr: 'subreporting factor, multiplies I0 and E0',
        t_max: 'number of days to run', runs: 'number of runs'):

    I0 = fator_subr * I0
    E0 = fator_subr * E0
    S0 = N - (I0 + R0 + E0)
    t_space = np.arange(0, t_max)

    size = (t_max, runs)

    S = np.zeros(size)
    E = np.zeros(size)
    I = np.zeros(size)
    R = np.zeros(size)

    S[0, ], E[0, ], I[0, ], R[0, ] = S0, E0, I0, R0

    R0_ = npr.lognormal(*map(np.log, R0__params), runs)
    gamma = 1 / npr.lognormal(*map(np.log, gamma_inv_params), runs)
    alpha = 1 / npr.lognormal(*map(np.log, alpha_inv_params), runs)
    beta = R0_ * gamma

    for t in t_space[1:]:

        SE = npr.binomial(S[t - 1, ].astype('int'),
                          expon(scale=1 / (beta * I[t - 1, ] / N)).cdf(1))
        EI = npr.binomial(E[t - 1, ].astype('int'),
                          expon(scale=1 / alpha).cdf(1))
        IR = npr.binomial(I[t - 1, ].astype('int'),
                          expon(scale=1 / gamma).cdf(1))

        dS = 0 - SE
        dE = SE - EI
        dI = EI - IR
        dR = IR - 0

        S[t, ] = S[t - 1, ] + dS
        E[t, ] = E[t - 1, ] + dE
        I[t, ] = I[t - 1, ] + dI
        R[t, ] = R[t - 1, ] + dR

    return S, E, I, R, t_space
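A hypothetical call with illustrative values; the *_params pairs are medians and multiplicative standard deviations in natural units, since np.log is applied inside:

import numpy as np
import numpy.random as npr
from scipy.stats import expon

S, E, I, R, t_space = run_SEIR_BAYES_model(
    N=12_000_000, E0=50, I0=100, R0=0,
    R0__params=(1.9, 1.2),
    gamma_inv_params=(10.0, 1.3),   # infectious period, days
    alpha_inv_params=(5.1, 1.2),    # incubation period, days
    fator_subr=10, t_max=180, runs=100)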
Example #15
    def sampleTheParameterFromPrior(self, sampled_models):
        ret = []

        for i in range(self.nbatch):
            #print "sampleTheParameterFromPrior", i, sampled_models[i], self.models[ sampled_models[i] ].name, self.models[ sampled_models[i] ].nparameters

            reti = [
                0 for it in range(self.models[sampled_models[i]].nparameters)
            ]

            for n in range(self.models[sampled_models[i]].nparameters):
                if self.models[sampled_models[i]].prior[n][0] == 0:
                    reti[n] = self.models[sampled_models[i]].prior[n][1]

                if self.models[sampled_models[i]].prior[n][0] == 1:
                    reti[n] = rnd.normal(
                        loc=self.models[sampled_models[i]].prior[n][1],
                        scale=numpy.sqrt(
                            self.models[sampled_models[i]].prior[n][2]))

                if self.models[sampled_models[i]].prior[n][0] == 2:
                    reti[n] = rnd.uniform(
                        low=self.models[sampled_models[i]].prior[n][1],
                        high=self.models[sampled_models[i]].prior[n][2])

                if self.models[sampled_models[i]].prior[n][0] == 3:
                    reti[n] = rnd.lognormal(
                        mean=self.models[sampled_models[i]].prior[n][1],
                        sigma=numpy.sqrt(
                            self.models[sampled_models[i]].prior[n][2]))

            ret.append(reti[:])

        return [x[:] for x in ret]
Example #16
def life_dist(mean=0, sigma=50):
    # Sandberg: We used a lognormal distribution for the life emergence rate (log lambda ~ N(0,50)) and then
    # transformed it into a probability as fLife = 1-exp(-lambda).
    result = 0
    while result == 0:
        result = 1 - nexp(-lognormal(mean, sigma))
    return result
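With sigma=50 the draws of lambda span hundreds of orders of magnitude, so fLife is effectively bimodal: lambda is either astronomically large (fLife near 1) or vanishingly small. A quick check, assuming the same imports the snippet relies on (nexp as an alias for numpy's exp):

import numpy as np
from numpy import exp as nexp
from numpy.random import lognormal

draws = np.array([life_dist() for _ in range(10_000)])
# the median of lognormal(0, 50) is 1, so about half the draws
# have lambda > 1 and therefore fLife > 1 - 1/e
print((draws > 0.63).mean())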
Example #17
 def _create_data(self, *names):
     numpoints = self.numpoints
     plotdata = ArrayPlotData(times=create_dates(numpoints))
     for name in names:
         plotdata.set_data(
             name, cumprod(random.lognormal(0.0, 0.04, size=numpoints)))
     self.plotdata = plotdata
Example #18
def get_dist_num(args):
    dist = args[0]

    for i in range(len(args[1:])):
        args[i + 1] = float(args[1:][i])

    if dist == 'EXP':
        return exponential(args[1])
    elif dist == 'NOR':
        return normal(loc=args[1],
                      scale=args[2])  # loc = mean, scale = standard deviation
    elif dist == 'TRI':
        return triangular(args[1], args[2], args[3])
    elif dist == 'UNI':
        return uniform(low=args[1], high=args[2])
    elif dist == 'BET':
        return beta(args[1], args[2])
    elif dist == 'WEI':
        return weibull(args[1])
    elif dist == 'CAU':  # CAU: Cauchy
        return 0
    elif dist == 'CHI':
        return chisquare(args[1])
    elif dist == 'ERL':  # ERL: Erlang
        return 0
    elif dist == 'GAM':
        return gamma(args[1], scale=args[2])
    elif dist == 'LOG':
        return lognormal(mean=args[1], sigma=args[2])
    elif dist == 'PAR':
        return pareto(args[1])
    elif dist == 'STU':
        return standard_t(args[1])
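A usage sketch; note that the function mutates the args list in place, coercing everything after the distribution tag to float:

from numpy.random import (exponential, normal, triangular, uniform, beta,
                          weibull, chisquare, gamma, lognormal, pareto,
                          standard_t)

x = get_dist_num(['LOG', '0.0', '0.5'])    # lognormal(mean=0.0, sigma=0.5)
y = get_dist_num(['TRI', '1', '5', '10'])  # triangular(left=1, mode=5, right=10)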
Example #19
class defo_calibration:
    def __init__(self, econ_params, grid_params):
        # Tuple parameters in a signature are Python 2-only syntax; the two
        # packs are unpacked explicitly here (argument names are editorial).
        (beta, TFP, gamma, theta, costEXP, costTFP, mu, sigma,
         landtoemissions, paymentstodollars) = econ_params
        (gridLand, gridPayments, initialState, tol) = grid_params
        # Initialises code with parameters
        self.beta = beta  # discount rate
        self.TFP = TFP  # TFP on ag output
        self.gamma = gamma  # output share land
        self.theta = theta  # utility parameter
        self.costEXP = costEXP  # exponent on cost function
        self.costTFP = costTFP  # multiplicative constant on cost fn
        self.mu = mu  # mean of defo error
        self.sigma = sigma  # standard deviation of defo error
        # parameter conversion factors
        self.landtoemissions = landtoemissions
        self.paymentstodollars = paymentstodollars
        # constructing grid
        self.shocks = lognormal(mu, sigma, 50)
        self.grid1 = gridLand
        self.grid2 = gridPayments
        grid = meshgrid(self.grid2, self.grid1)
        grid[0], grid[1] = grid[1], grid[0]
        self.gridLand = grid[0]
        self.gridPayments = grid[1]
        self.state = array(grid).reshape(2, -1).T
        self.maxLand = max(gridLand)  # maximum amount of landholdings
        self.minLand = min(gridLand)  # minimum amount of landholdings
        self.initialState = initialState  # initial state value for sequences
        self.tol = tol  # tolerance for fitted value iteration
Example #20
    def account_id_formula(column_values):
        account_segment = data_gen.datasets['account_segment']
        account_id = account_segment['account_id']
        account_count = account_segment['account_count']
        current_account_cat = account_segment['current_account_cat']

        if column_values['account_cat'] == current_account_cat and account_count > 0:
            # continue with the current account_id if there are still any to take
            # but first decrement account count
            account_count += -1
            account_segment['account_count'] = account_count

            return account_id
        else:
            # use new account id
            account_id += 1
            # generate a random number of opportunities to associate to an account
            account_count = int(round(lognormal(1))) + randint(1, 7)
            current_account_cat = column_values['account_cat']

            # update account segment dataset for next iteration
            account_count += -1
            account_segment['account_id'] = account_id
            account_segment['account_count'] = account_count
            account_segment['current_account_cat'] = current_account_cat

            return account_id
Example #21
    def sampleTheParameterFromPrior(self, sampled_models):
        ret = []

        for i in range(self.nbatch):
            #print "sampleTheParameterFromPrior", i, sampled_models[i], self.models[ sampled_models[i] ].name, self.models[ sampled_models[i] ].nparameters
            model = self.models[sampled_models[i]]
            nparams = model.nparameters

            reti = [0 for it in range(nparams)]
            mean_n = [0 for it in range(nparams)]
            var_n = [0 for it in range(nparams)]

            for n in range(nparams):
                prior = model.prior[n]
                if prior[0] == 0:
                    reti[n] = prior[1]

                if prior[0] == 1:
                    reti[n] = rnd.normal(loc=prior[1], scale=numpy.sqrt(prior[2]))

                if prior[0] == 2:
                    reti[n] = rnd.uniform(low=prior[1], high=prior[2])

                if prior[0] == 3:
                    reti[n] = rnd.lognormal(mean=prior[1], sigma=numpy.sqrt(prior[2]))

                if prior[0] == 4:
                    reti[n] = rnd.uniform(low=prior[1], high=prior[2])

                if prior[0] == 5:
                    reti[n] = prior[1]

                if prior[0] == 6:
                    reti[n] = rnd.uniform(low=prior[1], high=prior[2])

            # collect the sampled means (type 4) and scales (type 6)
            begincount = 0
            for n in range(nparams):
                if model.prior[n][0] == 4:
                    mean_n[begincount] = reti[n]
                    begincount = begincount + 1

            begincountv = 0
            for n in range(nparams):
                if model.prior[n][0] == 6:
                    var_n[begincountv] = reti[n]
                    begincountv = begincountv + 1

            # type-5 parameters: truncated normal on [0, 1e6] around the sampled mean
            count = 0
            for n in range(nparams):
                if model.prior[n][0] == 5:
                    #reti[n]= mean_n[count]
                    pos = int(model.prior[n][2])
                    a = (0 - mean_n[pos]) / var_n[pos]
                    b = (1000000 - mean_n[pos]) / var_n[pos]
                    reti[n] = stats.truncnorm(a, b, loc=mean_n[pos], scale=var_n[pos]).rvs(1)
                    count = count + 1

            ret.append(reti[:])

        return [x[:] for x in ret]
Example #22
    def generate(self, reference_sequence, read_index):
        if self.distribution == "normal":
            self.fragment_length = int(random.gauss(self.mean, self.sigma))
        elif self.distribution == "uniform":
            self.fragment_length = int(random.uniform(self.min_size, self.max_size))
        elif self.distribution == "lognormal":
            self.fragment_length = int(
                lognormal(self.mean, self.sigma)
            )  # one sample at a time to conform with the implementation...

        if self.fragment_length >= len(reference_sequence):
            raise Exception(
                "Too short reference sequence length for \
                simulated read. \nRead fragment: {0}\nTranscript \
                length:{1}".format(
                    self.fragment_length, len(reference_sequence)
                )
            )

        self.start_pos = random.randrange(len(reference_sequence) - self.fragment_length)
        self.read1 = reference_sequence[self.start_pos : self.start_pos + self.read_length]
        self.read2 = reverse_complement(
            reference_sequence[
                self.start_pos + self.fragment_length - self.read_length : self.start_pos + self.fragment_length
            ]
        )
        self.reference_accession = "reference_genome"
        self.read_index = read_index
Example #23
    def sprink_noeff(self, name, property=None):
        ffile = rcsv('{}.ful'.format(name), sep=',')
        fire_site, ases = self.mc_rand(ffile)

        config = ffile.iloc[fire_site]

        fuel_xes = (config.XA, config.XB)
        fuel_yes = (config.YA, config.YB)
        fuel_zes = (config.ZA, config.ZB)

        hrrpua = triangular(config.hrrpua_min, config.hrrpua_max, mode=config.hrrpua_mode) * 1000  # [kW]

        if not property:
            alpha = triangular(config.alpha_min, config.alpha_max, mode=config.alpha_mode)  # [kW/s2]
        elif property == 'store':
            alpha = hrrpua * random.lognormal(-9.72, 0.97)   # [kW/s2]

        # q_0 = min(alpha * config.t_sprink ** 2, self.a_max * hrrpua)    # [kW]
        q_0 = alpha * config.t_sprink ** 2  # [kW]

        area = q_0 / hrrpua     # [m2]

        print('alpha:{}, hrrpua:{}'.format(alpha, hrrpua))
        hrr = []
        for t_frag in range(0, 120):
            t = self.t_end * t_frag/120

            if t >= config.t_sprink:
                #                                 [min], [kW/s2 * 1k * s2]=[MW]
                hrr.extend([round(i, 4) for i in [t/60, alpha / 1000 * (config.t_sprink ** 2)]])
            else:
                hrr.extend([round(i, 4) for i in [t/60, alpha / 1000 * (t ** 2)]])

        return hrr, area, fuel_zes, fuel_xes, fuel_yes, hrrpua, alpha
Example #24
def make_dataset1():
    '''Make a dataset of single samples with labels from which distribution they come from'''
    # now lets make some samples 
    lns      = min_max_scale(lognormal(size=bsize)) #log normal
    powers   = min_max_scale(power(0.1,size=bsize)) #power law
    norms    = min_max_scale(normal(size=bsize))    #normal
    uniforms = min_max_scale(uniform(size=bsize))    #uniform
    # add our data together
    data = np.concatenate((lns,powers,norms,uniforms))
    
    # concatenate our labels
    labels = np.concatenate((
        (np.repeat(LOGNORMAL,bsize)),
        (np.repeat(POWER,bsize)),
        (np.repeat(NORM,bsize)),
        (np.repeat(UNIFORM,bsize))))
    tsize = len(labels)
    
    # make sure dimensionality and types are right
    data = data.reshape((len(data),1))
    data = data.astype(np.float32)
    labels = labels.astype(np.int32)
    labels = labels.reshape((len(data),))
    
    return data, labels, tsize
Example #25
def imf(model, n):
    '''Initial mass function'''
    i = 0
    s = []
    M0 = 1.0
    M = [1, 10]
    beta = 2.35
    if model == 'A':
        #Uniform star mass
        return [M0 for _ in range(n)]
    elif model == 'B':
        # Salpeter mass distribution, rejection-sampled into (1, 10) M0
        while i != n:
            rnd = random()
            sM = (((1.0 - rnd) / (M[0]**(beta - 1.0))) +
                  (rnd / (M[1]**(beta - 1.0))))**((-1.0) / (beta - 1.0))
            if 10 > sM > 1:
                s.append(sM)
                i += 1
        return s
    elif model == 'C':
        # Lognormal mass distribution, rejection-sampled into (1, 10) M0
        while i != n:
            sM = lognormal() + 1
            if 10 > sM > 1:
                s.append(sM)
                i += 1
        return s
Example #26
 def broadcast(self, node, network):
     for receiver in network.getProcesses():
         event = ReceiveEvent(self, receiver, node, network)
         latency = (random.lognormal(
             0.8, 0.5)) * 10  # See README for latency explanation
         delay = self.skew + latency
         network.addToQueue(event, delay)
Example #27
def main():
    import sys
    from PyQt5.QtGui import QPainter
    from PyQt5.QtWidgets import QApplication, QMainWindow
    from PyQt5.QtChart import QChartView  # QtChart ships separately (PyQtChart)
    from numpy import random

    app = QApplication(sys.argv)

    rand = random.lognormal(size=100, mean=10, sigma=0.6)
    rand = rand.clip(min=1.1)

    chart = XChartProbit()
    chartView = QChartView(chart)

    chart.loadSeries(rand, "Log-Normal Rand")

    #chartView.chart.addLinearReg()

    chartView.setRenderHint(QPainter.Antialiasing)
    window = QMainWindow()
    window.setCentralWidget(chartView)
    window.resize(800, 600)
    window.show()

    #chartView.addLinearReg("Log-Normal Rand")

    sys.exit(app.exec_())
Example #28
    def alfa_t2(self, name, property=None):
        ffile = rcsv('{}.ful'.format(name), sep=',')
        fire_site = self.mc_rand(ffile)
        config = ffile.iloc[fire_site]

        fuel_xes = (config.XA, config.XB)
        fuel_yes = (config.YA, config.YB)
        fuel_zes = (config.ZA, config.ZB)

        hrrpua = triangular(config.hrrpua_min, config.hrrpua_max, mode=config.hrrpua_mode) * 1000     # kW/m2

        if not property:
            alpha = triangular(config.alpha_min, config.alpha_max, mode=config.alpha_mode)      # kW/s2
        elif property == 'store':
            alpha = hrrpua * random.lognormal(-9.72, 0.97)       # kW/s2

        # note: the second assignment discards the a_max cap applied above
        area = min(config.hrr_max / hrrpua * 1000, self.a_max)     # m2
        area = config.hrr_max / hrrpua * 1000  # m2

        print('alpha:{}, hrrpua:{}'.format(alpha, hrrpua))
        hrr = []
        for t_frag in range(0, 120):
            t = self.t_end * t_frag/119
            hrr.extend([round(i, 4) for i in [t/60, alpha / 1000 * (t ** 2)]])
            if hrr[-1] > area * hrrpua:
                hrr[-1] = area * hrrpua

        return hrr, area, fuel_zes, fuel_xes, fuel_yes, hrrpua, alpha
Example #29
 def alpha(self, hrrpua):
     if not self.occ:
         return triangular(self.config.alpha_min,
                           self.config.alpha_max,
                           mode=self.config.alpha_mode)  # [kW/s2]
     elif self.occ == 'store':
         return hrrpua * random.lognormal(-9.72, 0.97)  # [kW/s2]
Example #30
def resample_counts(X, depth, kappa=1):
    mu = depth * closure(X)
    n_samples = len(X)
    new_samples = np.vstack([
        poisson(lognormal(np.log(mu[i, :]), kappa)) for i in range(n_samples)
    ])
    return new_samples
Example #31
def get_value_from_distribution(params):
    params_distribution = params["distribution"]
    result = 0
    if params_distribution == "normal":
        loc = params["loc"]
        scale = params["scale"]
        result = ran.normal(loc, scale)
    elif params_distribution == "exponential":
        scale = params["scale"]
        result = ran.exponential(scale)
    elif params_distribution == "fixed":
        result = params["value"]
    elif params_distribution == "poisson":
        result = ran.poisson(params["mean"])
    elif params_distribution == "lognormal":
        result = ran.lognormal(params["mean"], params["sigma"])
    elif params_distribution == "binned_uniform":
        #binned uniform distribution
        #find bin
        ran_num = [ ran.random() ]
        bin_value = np.digitize(ran_num, params["cumulative_p"])[0]
        result = ran.uniform(params["bins"][bin_value], params["bins"][bin_value+1])
    elif params_distribution == "choice":
        result = ran.choice(params["selection"], p=params["p"])
    else:
        print("error: unknown distribution.")
        exit(1)
    return result
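A usage sketch, assuming numpy is imported as np and numpy.random as ran, as the snippet implies:

import numpy as np
import numpy.random as ran

params = {"distribution": "lognormal", "mean": 0.0, "sigma": 0.5}
value = get_value_from_distribution(params)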
Example #32
    def _container_default(self):
        self.plot = None

        # Create the data and datasource objects
        # In order for the date axis to work, the index data points need to
        # be in units of seconds since the epoch.  This is because we are using
        # the CalendarScaleSystem, whose formatters interpret the numerical values
        # as seconds since the epoch.
        numpoints = 500
        index = create_dates(numpoints)

        returns = random.lognormal(0.00, 0.04, size=numpoints)
        average = 100.0 * cumprod(returns)
        high = average + abs(random.normal(0, 20.0, size=numpoints))
        low = average - abs(random.normal(0, 20.0, size=numpoints))
        delta = high - low
        open = low + delta * random.uniform(0.05, 0.95, size=numpoints)
        close = low + delta * random.uniform(0.05, 0.95, size=numpoints)
        price = vstack((open, high, low, close, average))

        time_ds = ArrayDataSource(index)
        price_ds = PriceDataSource(price, sort_order="none")

        # Create the price plot
        price_plot = self._create_plot(time_ds, price_ds)
        self.plot = price_plot

        container = OverlayPlotContainer(padding=35)
        container.add(price_plot)
        return container
Example #33
def replace_text_with_uncertainties(input_text, lognormal_replacement_values=None,
                                    normal_replacement_values=None,
                                    switch_replacement_values=None):
    """
    takes text for a batch script with keywords for uncertainty and replaces them
    with random values.

    input_text = text to replace values
    lognormal_replacement_values = dict with key as text to replace and value as the sigma of the lognormal distribution
    normal_replacement_values = dict with key as text to replace and value as the sigma of the normal distribution
    switch_replacement_values = dict containing tuples of (string, probability) allowing weight-based categorical decisions

    Returns a tuple with the first item the input_text and the second item a list of the random values inserted.
    """
    random_list = {}
    if lognormal_replacement_values is None:
        lognormal_replacement_values = {'***u_sigma***': 0.2,
                                        '***u_ljalpha***': 0.5,
                                        '***u_negfreq***': 0.2,
                                        '***u_ilt***': 5,
                                        }
    if normal_replacement_values is None:
        normal_replacement_values = {'***u_E0***': 10,
                                     '***u_ljn***': 0.15,
                                     }
    if switch_replacement_values is None:
        switch_replacement_values = {'***u_method***': [('reservoir state', .75), ('modified strong collision', .25)]}
    # get all replaced values
    str_triggers = [key for keys in [lognormal_replacement_values.keys(), normal_replacement_values.keys(), switch_replacement_values.keys()] for key in keys]
    for key in str_triggers:
        random_list[key] = []
    for key, value in lognormal_replacement_values.items():
        while key in input_text:
            random_value = rd.lognormal(0, value)
            random_list[key].append(random_value)
            input_text=input_text.replace(key, str(random_value), 1)
            
    for key, value in normal_replacement_values.items():
        while key in input_text:
            random_value = rd.normal(0, value)
            random_list[key].append(random_value)
            input_text=input_text.replace(key, str(random_value), 1)
        
    for key, value in switch_replacement_values.items():
        # normalize cutoff values and use that to decide which string should be replaced
        cutoff_values = np.cumsum([_tuple[1] for _tuple in value])
        #normalize to one
        cutoff_values /= cutoff_values[-1]
        while key in input_text:
            random_value = rd.uniform()
            for index, cutoff_value in enumerate(cutoff_values):
                if random_value < cutoff_value:
                    random_value = value[index][0]
                    random_list[key].append(random_value)
                    input_text=input_text.replace(key, random_value, 1)
                    break
    return input_text, random_list
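A short usage sketch, assuming numpy is imported as np and numpy.random as rd, as the function body implies:

import numpy as np
import numpy.random as rd

text = "sigma = ***u_sigma***\nmethod = ***u_method***"
new_text, drawn = replace_text_with_uncertainties(text)
# new_text has each trigger replaced; drawn maps each trigger to the
# list of values substituted for it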
Example #35
    def sampleTheParameterFromPrior(self, sampled_models):
        ret = []
 
        for i in range(self.nbatch):
            #print "sampleTheParameterFromPrior", i, sampled_models[i], self.models[ sampled_models[i] ].name, self.models[ sampled_models[i] ].nparameters

            reti = [ 0 for it in range(self.models[ sampled_models[i] ].nparameters) ]

            for n in range(self.models[ sampled_models[i] ].nparameters):
                if self.models[ sampled_models[i] ].prior[n][0] == 0: 
                    reti[n]=self.models[ sampled_models[i] ].prior[n][1]

                if self.models[ sampled_models[i] ].prior[n][0] == 1: 
                    reti[n]=rnd.normal( loc=self.models[ sampled_models[i] ].prior[n][1],
                                        scale=numpy.sqrt(self.models[ sampled_models[i] ].prior[n][2]) )
                
                if self.models[ sampled_models[i] ].prior[n][0] == 2: 
                    reti[n]=rnd.uniform( low=self.models[ sampled_models[i] ].prior[n][1],
                                         high=self.models[ sampled_models[i] ].prior[n][2])

                if self.models[ sampled_models[i] ].prior[n][0] == 3: 
                    reti[n]=rnd.lognormal(mean=self.models[ sampled_models[i] ].prior[n][1],
                                          sigma=numpy.sqrt(self.models[ sampled_models[i] ].prior[n][2]) )
            
            ret.append( reti[:] )
            
        return [x[:] for x in ret]
Example #36
def main():
    import sys
    from PyQt5.QtCore import Qt
    from PyQt5.QtWidgets import QApplication, QMainWindow
    from numpy import random

    app = QApplication(sys.argv)

    rand = random.lognormal(size=50, mean=2, sigma=0.1)
    randn = random.normal(size=50, loc=10, scale=5)
    rand = rand.clip(min=1.1)
    randn = randn.clip(min=1.1)

    chartWid = widgetIDProbit()
    chartWid.chart.loadSeries(rand, "Log-Normal Rand")
    chartWid.chart.loadSeries(randn, "Normal Rand")
    chartWid.chart.redrawChart()

    #chartView.chart.addLinearReg()

    window = QMainWindow()
    window.setCentralWidget(chartWid)
    window.resize(800, 600)

    window.show()

    sys.exit(app.exec_())
Example #37
def MiLognormal(mean, sd):
    """LogNormal Distribution Function
        mean: Mean
        sd: Standard Deviation"""
    global manflag
    if not manflag:
        setManual()
    return np.random.lognormal(mean, sd, 1)
Example #38
def ScannC():  
	if not NUMEROS_ALEATORIOS_COMUNES:
		mu = 95.4
		sig = 84.7
		xi2 = log((sig/mu)**2+1)
		cambio_lambda = log(mu)-0.5*xi2
		return min(18 + lognormal(cambio_lambda, sqrt(xi2)), max_esperar_examen)
	else:
		return next(generador_ScannC)
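The two lines computing xi2 and cambio_lambda are the standard moment-matching identities: for a target mean mu and standard deviation sig, a lognormal(lam, sqrt(xi2)) with xi2 = log((sig/mu)^2 + 1) and lam = log(mu) - xi2/2 has exactly that mean and standard deviation. A quick numerical check:

import numpy as np

mu, sig = 95.4, 84.7
xi2 = np.log((sig / mu) ** 2 + 1)
lam = np.log(mu) - 0.5 * xi2
x = np.random.lognormal(lam, np.sqrt(xi2), 1_000_000)
print(x.mean(), x.std())  # both close to 95.4 and 84.7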
Example #39
def _create_prices(t):
    last_average = 100 if t==0 else source.data['average'][-1]
    returns = asarray(lognormal(mean.value, stddev.value, 1))
    average =  last_average * cumprod(returns)
    high = average * exp(abs(gamma(1, 0.03, size=1)))
    low = average / exp(abs(gamma(1, 0.03, size=1)))
    delta = high - low
    open = low + delta * uniform(0.05, 0.95, size=1)
    close = low + delta * uniform(0.05, 0.95, size=1)
    return open[0], high[0], low[0], close[0], average[0]
Example #40
 def mk_rnd_pfuncs(self):
     rndpars = self.pars.copy()
     for k,v in self.fuzz.items():
         if v[0] == 'exponential':
             rndpars[k] = rndpars[k] + random.exponential(v[1])
         elif v[0] == 'normal':
             rndpars[k] = random.normal(rndpars[k], v[1])
         elif v[0] == 'lognormal':
             rndpars[k] = rndpars[k] + random.lognormal(0, v[1])
     return self.mk_pfuncs(rndpars)
Example #41
    def sampleTheParameter(self, sampled_models):
        if self.debug == 2: print("\t\t\t***sampleTheParameter")
        ret = []

        for i in range(self.nbatch):
            model = self.models[sampled_models[i]]
            np = model.nparameters
            reti = [0 for it in range(np)]
            #print '\n\t\t\tsampleTheParameter, model np prior:', sampled_models[i], model.name, np, model.prior

            prior_prob = -1
            while prior_prob <= 0:

                # sample putative particle from previous population
                p = sample_particle(self.nparticles, sampled_models[i], self.margins_prev, self.model_prev, self.weights_prev)

                for nn in range(np):
                    reti[nn] = self.parameters_prev[p][nn]

                prior_prob = self.perturbfn(reti, model.prior, self.kernels[sampled_models[i]], self.kernel_type, self.special_cases[sampled_models[i]])
                mean_n = [0 for it in range(np)]
                var_n = [0 for it in range(np)]

                begincount = 0
                for n in range(np):
                    if model.prior[n][0] == 4:
                        mean_n[begincount] = reti[n]
                        begincount = begincount + 1

                begincountv = 0
                for n in range(np):
                    if model.prior[n][0] == 6:
                        var_n[begincountv] = reti[n]
                        begincountv = begincountv + 1

                count = 0
                for n in range(np):
                    if model.prior[n][0] == 5:
                        #reti[n]= mean_n[count]
                        pos = int(model.prior[n][2])
                        reti[n] = rnd.lognormal(mean_n[pos], abs(var_n[0]), 1)[0]
                        count = count + 1

                if self.debug == 2: print("\t\t\tsampled p prob:", prior_prob)
                if self.debug == 2: print("\t\t\tnew:", reti)
                if self.debug == 2: print("\t\t\told:", self.parameters_prev[p])

            ret.append(reti)

        return [x[:] for x in ret]
Example #42
 def landSequenceGenerator(self, sequenceLength, baselinePolicy):
     """
     takes an optimal policy and an initial pair.
     returns a path of landholdings
     """
     landsequence = [self.initialState[0]]
     shocksequence = lognormal(self.mu, self.sigma, sequenceLength)
     for t in range(sequenceLength):
         landsequence.append(
             self.newpolicy(landsequence[-1], shocksequence[t]))
     self.landsequence = array(landsequence)
     self.emissionssequence = self.landtoemissions * array(landsequence)
Example #43
def compute_sorption(sorption_dict, sorption_values, pocet):
    """
    computes new sorption values for substances stored in sorption dict
    """
    result = {}
    for subst_nr, subst_sorption in sorption_dict.items():
        f_subst_sorption = float(subst_sorption)
        sigma = sorption_values[subst_nr]
        values = lognormal(log(f_subst_sorption), sigma, pocet)
        result[subst_nr] = values

    return result
Example #44
 def install_lognormal(self):
     '''
     Installs the file system with a Lognormal distribution
     with the specified mean and standard deviation
     '''
     while opCounter <= osSettings.nOperations:
         fileSize = int(random.lognormal(fsDist.mean, fsDist.stdDev))
         #filesystem.create_file(fileSize)
         if fileSize > maxSize:  # TODO: add the condition for creating the file successfully
             fsSize = fsSize + fileSize
             opCounter = opCounter + 1
Example #45
def correlation():
    output_file("correlation.html", title="correlation.py example")

    hold()

    num_points = 300

    now = time.time()
    dt = 24*3600 # days
    dates = linspace(now, now + num_points*dt, num_points)
    acme = cumprod(random.lognormal(0.0, 0.04, size=num_points))
    choam = cumprod(random.lognormal(0.0, 0.04, size=num_points))

    line(dates, acme,
         x_axis_type = "datetime",
         color='#1F78B4', tools="pan,zoom,resize", legend='ACME')
    line(dates, choam, color='#FB9A99', legend='CHOAM')

    curplot().title = "Stock Returns"
    xgrid()[0].grid_line_dash=""
    xgrid()[0].grid_line_alpha=0.3
    ygrid()[0].grid_line_dash=""
    ygrid()[0].grid_line_alpha=0.3

    figure()

    scatter(
        acme, choam,
        color='#A6CEE3', radius=3,
        tools="pan,zoom,resize", legend='close',
        name="correlation"
    )

    curplot().title = "ACME / CHOAM Correlations"
    xgrid()[0].grid_line_dash=""
    xgrid()[0].grid_line_alpha=0.3
    ygrid()[0].grid_line_dash=""
    ygrid()[0].grid_line_alpha=0.3

    return curplot()
Beispiel #46
0
def _create_data(numpoints):
    index = arange(numpoints)

    returns = random.lognormal(0.00, 0.04, size=numpoints)
    average = 100.0 * cumprod(returns)
    high = average + abs(random.normal(0, 20.0, size=numpoints))
    low = average - abs(random.normal(0, 20.0, size=numpoints))
    delta = high - low
    open = low + delta * random.uniform(0.05, 0.95, size=numpoints)   # note: shadows the builtin open()
    close = low + delta * random.uniform(0.05, 0.95, size=numpoints)
    # the column-wise sort orders each time point min -> max, so the first
    # row can serve as the low and the last row as the high
    sorted_vals = vstack((open, high, low, close, average))
    sorted_vals.sort(0)
    return index, sorted_vals
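The column-wise sort is what makes the fake OHLC data self-consistent: after sorted_vals.sort(0), each column holds the order statistics of the five series at that time point. A quick check of the invariant:

import numpy as np

vals = np.vstack([np.random.normal(size=50) for _ in range(5)])
vals.sort(0)                                  # sort each column independently
assert (np.diff(vals, axis=0) >= 0).all()     # rows now run min -> max per column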
def compute_conductivity(material_type_spec, sigma, pocet):
    """
    Draws `pocet` new hydraulic conductivity values for every direction
    in material_type_spec, using a lognormal distribution whose median
    equals the current value (same parameterization as compute_sorption).
    """
    result = []

    for direction_value in material_type_spec:
        hydraulic_cond = float(direction_value)
        val = lognormal(log(hydraulic_cond), sigma, pocet)
        result.append(val)

    return result
Beispiel #48
0
    def UpdateLatents(self, this_model_parameters, sampled_models):
        ret = []

        for i in range(self.nbatch):
            nparameters = self.models[ sampled_models[i] ].nparameters
            prior = self.models[ sampled_models[i] ].prior

            reti = [ 0 for it in range(nparameters) ]
            mean_n = [ 0 for it in range(nparameters) ]
            var_n = [ 0 for it in range(nparameters) ]

            # collect prior type-4 parameters: the log-means of the latents
            begincount = 0
            for n in range(nparameters):
                if prior[n][0] == 4:
                    mean_n[begincount] = this_model_parameters[i][n]
                    begincount = begincount + 1

            # collect prior type-6 parameters: the variances of the latents
            begincountv = 0
            for n in range(nparameters):
                if prior[n][0] == 6:
                    var_n[begincountv] = this_model_parameters[i][n]
                    begincountv = begincountv + 1

            for n in range(nparameters):
                if prior[n][0] == 5:
                    # type 5: lognormal latent; its log-mean is the type-4
                    # parameter indexed by prior[n][2], its sigma the first
                    # type-6 entry
                    pos = int(prior[n][2])
                    reti[n] = rnd.lognormal( mean_n[pos], var_n[0], 1 )[0]
                elif prior[n][0] in (0, 1, 2, 3, 4, 6):
                    # all other prior types are passed through unchanged
                    reti[n] = this_model_parameters[i][n]

            ret.append( reti[:] )

        return [x[:] for x in ret]
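The prior encoding is easier to see in isolation: type-4 entries carry log-means, a type-6 entry carries the sigma, and each type-5 latent is drawn lognormally from the mean it points at via prior[n][2]. A stripped-down sketch (the prior table and values are made up for illustration):

import numpy as np

# (type, value, index-into-means) -- made-up prior table
prior = [(4, None, None), (4, None, None), (6, None, None), (5, None, 1)]
params = [0.2, 0.7, 0.3, 0.0]   # current values of the type-4/6 slots

means = [params[n] for n, p in enumerate(prior) if p[0] == 4]
sigma = next(params[n] for n, p in enumerate(prior) if p[0] == 6)
latents = [np.random.lognormal(means[int(p[2])], sigma)
           for p in prior if p[0] == 5]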
Beispiel #49
0
def draw_lensparam(n,thetaE):
    """Draw ellipticity, shear, shear angle and source positions.

    Draw n sets of lens parameters from the given distributions (see Oguri &
    Marshall 2010)
    Arguments:
    n -- number of sets to draw
    thetaE -- typical Einstein radius of the lens
    Outputs:
    e -- ellipticity
    gamma -- shear strength
    theta_g -- shear angle
    x,y -- position of the source in arcseconds
    """
    e = rand.normal(0.3,0.16,n)
    for i in range(n):
        while ((e[i]<0.0) or (e[i]>0.9)):
            e[i] = rand.normal(0.3,0.16)

    # Note that lognormal takes as parameters the mean and sigma of the
    # _underlying_ normal distribution. Matching the mean M and standard
    # deviation S of gamma (from the Oguri & Marshall article) gives the
    # usual moment conversion below.
    M = 0.05
    S = 0.2
    sigma = np.sqrt(np.log(S**2/M**2+1))
    mu = np.log(M)-sigma**2/2
    gamma = rand.lognormal(mu,sigma,n)
    for i in range(n):
        while (gamma[i]>1.0):
            gamma[i] = rand.lognormal(mu,sigma)

    theta_g = rand.uniform(0,180,n)

    x = rand.uniform(-thetaE,thetaE,n)
    y = rand.uniform(-thetaE,thetaE,n)

    return e,gamma,theta_g,x,y
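The moment conversion can be sanity-checked by drawing a large sample and comparing its mean and standard deviation against the targets M and S:

import numpy as np

M, S = 0.05, 0.2
sigma = np.sqrt(np.log(S**2 / M**2 + 1))
mu = np.log(M) - sigma**2 / 2
g = np.random.lognormal(mu, sigma, 1000000)
print(g.mean(), g.std())   # both should come out close to 0.05 and 0.2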
Beispiel #50
0
    def initPopularidad(self, numNoticiaInicio, numNoticiaFinal):
        self.noticias = []
        self.probabilidades = []
        mu = -0.10
        sigma = 2.43
        acumulacion = 0
        for i in range(0, numNoticiaFinal-numNoticiaInicio):
            self.noticias.append(random.randint(numNoticiaInicio,numNoticiaFinal))
            aux = rand.lognormal(mu,sigma)
            acumulacion = acumulacion + aux
            self.probabilidades.append(acumulacion)
        # normalize the running sums into a cumulative distribution over news items
        for i in range(0, numNoticiaFinal-numNoticiaInicio):
            self.probabilidades[i] = self.probabilidades[i]/acumulacion

        self.probabilidades.sort()
        return self.probabilidades,self.noticias
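The normalized running sums form a cumulative distribution, so drawing a news item reduces to an inverse-CDF lookup, e.g. with np.searchsorted (a sketch assuming the two lists returned above):

import numpy as np

def sample_noticia(probabilidades, noticias):
    # inverse-CDF sampling: first cumulative bin whose value exceeds u
    u = np.random.uniform()
    idx = np.searchsorted(probabilidades, u)
    return noticias[idx]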
def compute_single_property(method, coeficient, sigma, pocet):
    """
    Computes `pocet` new property values for storativity, porosity or
    geometry_spec, drawn from a lognormal distribution whose median is
    the current coefficient, then rounded by the matching solver_utils
    helper.
    """
    met_dic = {
        "storativity": solver_utils.round_storativity,
        "porosity": solver_utils.round_porosity,
        "geometry_spec": solver_utils.round_to_positive_zero,
    }
    f_coeficient = float(coeficient)
    values = lognormal(log(f_coeficient), sigma, pocet)
    result = []
    for val in values:
        result.append(met_dic[method](val))
    return result
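A usage sketch (the coefficient and sigma are arbitrary, and solver_utils must supply the rounding helpers listed in met_dic):

# hypothetical call: 20 porosity samples scattered around 0.3
porosities = compute_single_property("porosity", 0.3, 0.1, 20)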
Beispiel #52
0
    def _create_window(self):

        # Create the data and datasource objects
        # In order for the date axis to work, the index data points need to
        # be in units of seconds since the epoch.  This is because we are using
        # the CalendarScaleSystem, whose formatters interpret the numerical values
        # as seconds since the epoch.
        numpoints = 500
        index = create_dates(numpoints)
        returns = random.lognormal(0.01, 0.1, size=numpoints)
        price = 100.0 * cumprod(returns)
        volume = abs(random.normal(1000.0, 1500.0, size=numpoints) + 2000.0)

        time_ds = ArrayDataSource(index)
        vol_ds = ArrayDataSource(volume, sort_order="none")
        price_ds = ArrayDataSource(price, sort_order="none")

        # Create the price plots
        price_plot, mini_plot = self._create_price_plots(time_ds, price_ds)
        price_plot.index_mapper.domain_limits = (index[0], index[-1])
        self.price_plot = price_plot
        self.mini_plot = mini_plot

        # Create the volume plot
        vol_plot = self._create_vol_plot(time_ds, vol_ds)
        vol_plot.index_mapper.domain_limits = (index[0], index[-1])

        # Set the plot's bottom axis to use the Scales ticking system
        ticker = ScalesTickGenerator(scale=CalendarScaleSystem())
        for plot in price_plot, mini_plot, vol_plot:
            bottom_axis = PlotAxis(plot, orientation="bottom",
                                   tick_generator = ticker)
            plot.overlays.append(bottom_axis)
            plot.overlays.append(PlotAxis(plot, orientation="left"))
            hgrid, vgrid = add_default_grids(plot)
            vgrid.tick_generator = bottom_axis.tick_generator

        container = VPlotContainer(bgcolor = "lightgray",
                                   spacing = 40,
                                   padding = 50,
                                   fill_padding=False)
        container.add(mini_plot, vol_plot, price_plot)

        return Window(self, -1, component=container)
Beispiel #53
0
def make_widedataset(width=width):
    # build rows of `width` features drawn from four different distributions
    wlns      = min_max_scale(lognormal(size=(bsize,width)))  # log normal
    wpowers   = min_max_scale(power(0.1,size=(bsize,width)))  # power law
    wnorms    = min_max_scale(normal(size=(bsize,width)))     # normal
    wuniforms = min_max_scale(uniform(size=(bsize,width)))    # uniform

    wdata = np.concatenate((wlns,wpowers,wnorms,wuniforms))

    # concatenate our labels
    wlabels = np.concatenate((
        (np.repeat(LOGNORMAL,bsize)),
        (np.repeat(POWER,bsize)),
        (np.repeat(NORM,bsize)),
        (np.repeat(UNIFORM,bsize))))

    joint_shuffle(wdata,wlabels)
    wdata = wdata.astype(np.float32)
    wlabels = wlabels.astype(np.int32)
    wlabels = wlabels.reshape((len(wlabels),))
    return wdata, wlabels
def sample_lognormal(parameter_ic, size, cv=0.25):
    """
    Draws `size` lognormal samples around a parameter's initial value.

    :param parameter_ic: parameter whose .value is the nominal initial condition
    :param size: number of samples to draw
    :param cv: coefficient of variation; overridden for a few named species
    :return: array of sampled initial conditions
    """
    mean = np.log(parameter_ic.value)
    if parameter_ic.name == 'C3_0':
        cv = 0.282
    elif parameter_ic.name == 'XIAP_0' or parameter_ic.name == 'Bid_0':
        cv = 0.288
    elif parameter_ic.name == 'Bax_0':
        cv = 0.271
    # all other parameters (e.g. Bcl2_0) keep the default cv

    sd = cv  # for small cv, sigma = sqrt(log(1 + cv**2)) ~= cv
    return lognormal(mean, sd, size)
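Using sd = cv is the small-cv approximation; the exact log-space sigma for a target coefficient of variation is sqrt(log(1 + cv**2)). A quick comparison:

import numpy as np

for cv in (0.25, 0.282, 0.5):
    print(cv, np.sqrt(np.log(1 + cv**2)))   # 0.25 -> 0.246, 0.282 -> 0.277, 0.5 -> 0.472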
Beispiel #55
0
def main(mean, sigma):
    # SIZE and device are assumed module-level settings
    a = [str(int(x)) for x in rd.lognormal(mean, sigma, SIZE)]
    with open("/tmp/lognormal.txt", "w") as f:
        f.write(" ".join(a))

    make_table_cmd = "./iproute2/netem/maketable /tmp/lognormal.txt \
        > /usr/lib/tc/lognormal.dist"
    stats_cmd = "./iproute2/netem/stats /tmp/lognormal.txt | \
        awk '{if ($1 ==\"mu\" || $1 == \"sigma\") print $3}' \
        > /tmp/musigma.txt"

    os.system(make_table_cmd)
    os.system(stats_cmd)

    with open("/tmp/musigma.txt") as f:
        mu = f.readline().split()[0]
        sigma = f.readline().split()[0]
        print("mean = %s, sd = %s" % (mu, sigma))

    tc_cmd = "tc qdisc replace dev %s root netem delay \
        %sms %sms distribution lognormal" % (device, mu, sigma)
    print(tc_cmd)
    os.system(tc_cmd)
Beispiel #56
0
 def getInstance(self):
     dist = self.dist
     p = self.params
     # small jitter, presumably to keep samples strictly positive and untied
     small_correction = random.random() * 0.001
     if dist == 'exponential':
         return random.exponential(p[0]) + small_correction
     elif dist == 'normal':
         return random.normal(p[0],p[1]) + small_correction
     elif dist == 'uniform':
         return random.uniform(p[0],p[1]) + small_correction
     elif dist == 'poisson':
         return random.poisson(p[0]) + small_correction
     elif dist == 'binomial':
         return random.binomial(p[0],p[1]) + small_correction
     elif dist == 'geometric':
         return random.geometric(p[0]) + small_correction
     elif dist == 'weibull':
         return random.weibull(p[0]) + small_correction
     elif dist == 'gamma':
         return random.gamma(p[0],p[1]) + small_correction
     elif dist == 'beta':
         return random.beta(p[0],p[1]) + small_correction
     elif dist == 'lognormal':
         return random.lognormal(p[0],p[1]) + small_correction
     else:
         raise ValueError("unknown distribution: %s" % dist)
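The if/elif ladder maps distribution names to numpy.random callables one at a time; a table-driven variant (a sketch, not part of the original class) keeps the mapping in a single dict:

import numpy.random as random

DISTS = {
    'exponential': lambda p: random.exponential(p[0]),
    'normal':      lambda p: random.normal(p[0], p[1]),
    'uniform':     lambda p: random.uniform(p[0], p[1]),
    'lognormal':   lambda p: random.lognormal(p[0], p[1]),
}

def get_instance(dist, params):
    # same jitter as above, applied after the table lookup
    return DISTS[dist](params) + random.random() * 0.001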
Beispiel #57
0
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot

from numpy import cumprod, linspace, random
import time

from bokeh.plotting import *


num_points = 300

now = time.time()
dt = 24*3600 # one day in seconds
dates = linspace(now, now + num_points*dt, num_points)
acme = cumprod(random.lognormal(0.0, 0.04, size=num_points))
choam = cumprod(random.lognormal(0.0, 0.04, size=num_points))

output_server("correlation")

figure(x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave")

hold()

line(dates, acme, color='#1F78B4', legend='ACME')
line(dates, choam, color='#FB9A99', legend='CHOAM')

curplot().title = "Stock Returns"
grid().grid_line_alpha=0.3

figure(tools="pan,wheel_zoom,box_zoom,reset,previewsave")
def log_normal(nsamp):
    """Draws nsamp samples from the standard lognormal distribution."""
    return rand.lognormal(size=nsamp)
Beispiel #59
0
def static_traffic_matrix(topology, mean, stddev, max_u=0.9, 
                          origin_nodes=None, destination_nodes=None):
    """
    Return a TrafficMatrix object, i.e. a single traffic matrix, representing
    the traffic volume exchanged over a network at a specific point in time
    
    This matrix is generated by assigning traffic volumes drawn from a 
    lognormal distribution and assigned to specific origin-destination pairs
    using the Ranking Metrics Heuristic method proposed by Nucci et al. [1]_
    
    Parameters
    ----------
    topology : topology
        The topology for which the traffic matrix is calculated. This topology
        can either be directed or undirected. If it is undirected, this 
        function assumes that all links are full-duplex.
    
    mean : float
        The mean volume of traffic among all origin-destination pairs
    
    stddev : float
        The standard deviation of volumes among all origin-destination pairs.
    
    max_u : float, optional
        The maximum link utilization. If specified, traffic volumes are
        scaled so that the most utilized link of the network has a
        utilization equal to max_u. If None, volumes are not scaled, but in
        this case links may end up with a utilization factor greater than 1.0
    
    origin_nodes : list, optional
        A list of all nodes which can be traffic sources. If not specified,
        all nodes of the topology are traffic sources
    
    destination_nodes : list, optional
        A list of all nodes which can be traffic destinations. If not 
        specified, all nodes of the topology are traffic destinations
        
    Returns
    -------
    tm : TrafficMatrix
    
    References
    ----------
    .. [1] Nucci et al., The problem of synthetically generating IP traffic 
       matrices: initial recommendations, ACM SIGCOMM Computer Communication 
       Review, 35(3), 2005
    """
    try:
        mean = float(mean)
        stddev = float(stddev)
    except ValueError:
        raise ValueError('mean and stddev must be of type float')
    if mean < 0 or stddev < 0:
        raise ValueError('mean and stddev must not be negative')
    topology = topology.copy() if topology.is_directed() \
               else topology.to_directed()
    volume_unit = topology.graph['capacity_unit']
    # moment-match the lognormal: this mu and sigma of the underlying normal
    # reproduce the requested mean and standard deviation of the volumes
    mu = log(mean**2/sqrt(stddev**2 + mean**2))
    sigma = sqrt(log((stddev**2/mean**2) + 1))
    if origin_nodes is None and destination_nodes is None:
        od_pairs = od_pairs_from_topology(topology)
    else:
        all_nodes = topology.nodes()
        origins = origin_nodes if origin_nodes is not None \
                  else all_nodes
        destinations = destination_nodes if destination_nodes is not None \
                       else all_nodes
        od_pairs = [(o, d) for o in origins for d in destinations if o != d]
    nr_pairs = len(od_pairs)
    volumes = sorted(lognormal(mu, sigma, size=nr_pairs))
    #volumes = sorted([lognormvariate(mu, sigma) for _ in range(nr_pairs)])
    if any(isinf(vol) for vol in volumes):
        raise ValueError('Some volumes are too large to be handled by a '\
                         'float type. Set a lower mean and try again.')
    sorted_od_pairs = __ranking_metrics_heuristic(topology, od_pairs)
    # check if the matrix matches and scale if needed
    assignments = dict(zip(sorted_od_pairs, volumes))
    if max_u is not None:
        if origin_nodes is not None:
            shortest_path = dict(
                    (node, nx.single_source_dijkstra_path(topology,
                                                          node, 
                                                          weight='weight'))
                    for node in origin_nodes)
            # remove OD pairs not connected
            for o in shortest_path:
                for d in destinations:
                    if o != d and d not in shortest_path[o]:
                        od_pairs.remove((o, d))
        else:
            shortest_path = nx.all_pairs_dijkstra_path(topology, 
                                                       weight='weight')
        for u, v in topology.edges_iter():
            topology.edge[u][v]['load'] = 0.0
        # Find max u
        for o, d in od_pairs:
            path = shortest_path[o][d]
            if len(path) > 1:
                for hop in range(len(path) - 1):
                    topology.edge[path[hop]][path[hop + 1]]['load'] \
                            += assignments[(o, d)]
        # Calculate scaling
        current_max_u = max((float(topology.edge[u][v]['load']) \
                             /float(topology.edge[u][v]['capacity']) 
                             for u, v in topology.edges_iter()))
        norm_factor = max_u/current_max_u
        for od_pair in assignments:
            assignments[od_pair] *= norm_factor
            
    # write to traffic matrix
    traffic_matrix = TrafficMatrix(volume_unit=volume_unit)
    for (o, d), flow in assignments.items():
        traffic_matrix.add_flow(o, d, flow)
    return traffic_matrix
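A usage sketch, assuming this function ships with a library such as fnss that also provides the topology helpers below (treat the exact helper names as assumptions):

import fnss

topo = fnss.erdos_renyi_topology(50, 0.2)
fnss.set_capacities_constant(topo, 10, 'Mbps')  # sets edge 'capacity' and the graph's 'capacity_unit'
fnss.set_weights_constant(topo, 1.0)            # 'weight' attribute used by the Dijkstra calls
tm = static_traffic_matrix(topo, mean=0.5, stddev=0.05, max_u=0.9)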
Beispiel #60
0

def bin(row):    # note: shadows the builtin bin()
    return np.histogram(row,bins=len(row),range=(0.0,1.0))[0]/float(len(row))

print("Apply the histogram to all the data rows")
bdata = np.apply_along_axis(bin,1,wdata).astype(np.float32)
blabels = wlabels

# ensure we have our test data
test_bdata = np.apply_along_axis(bin,1,test_wdata).astype(np.float32)
test_blabels = test_wlabels

# helper data 
enum_funcs = [
    (LOGNORMAL,"log normal",lambda size: lognormal(size=size)),
    (POWER,"power",lambda size: power(0.1,size=size)),
    (NORM,"normal",lambda size: normal(size=size)),
    (UNIFORM,"uniforms",lambda size: uniform(size=size)),
]

# uses enum_funcs to evaluate PER CLASS how well our classifier operates
def classify_test(bnet,ntests=1000):
    for tup in enum_funcs:
        enum, name, func = tup
        samples = min_max_scale(func(size=(ntests,width)))  # draw from this class's distribution
        bsamples = np.apply_along_axis(bin,1,samples).astype(np.float32)
        bsample_labels = np.repeat(enum,ntests).astype(np.int32)
        classification = bnet.classify(bsamples)
        print("%s %s / %s ::: %s " % (name,sum(classification == bsample_labels),ntests,collections.Counter(classification)))