def param_grid(feat_selec, mod_type, hyper_selc, rand_search_iter):
    # Look into making PCA, KBest and L1 dynamic
    feat_param = {
        'PCA': {'PCA__n_components': [5, 20, 30] if hyper_selc == 'Grid' else unint(5, 30, rand_search_iter)},
        'KBest': {'KBest__k': [5, 20, 30] if hyper_selc == 'Grid' else unint(5, 30, rand_search_iter)},
        'L1': {'L1__max_features': [5, 20, 30] if hyper_selc == 'Grid' else unint(5, 30, rand_search_iter)}}

    mod_param = {'gbc': {'gbc__learning_rate': [0.1, 0.005, 0.001],
                         'gbc__n_estimators': [50, 100, 200],
                         'gbc__min_samples_split': [0.1, 0.3, 0.6, 0.8]},
                 'rf': {'rf__max_depth': [5, 8, 12, 15, 25] if hyper_selc == 'Grid' else unint(4, 25, rand_search_iter),
                        'rf__n_estimators': [30, 50, 80, 100, 130] if hyper_selc == 'Grid' else unint(30, 150,
                                                                                                      rand_search_iter),
                        'rf__max_features': [0.2, 0.5, 0.8] if hyper_selc == 'Grid' else roundnp(
                            uni(0.1, 1.0, rand_search_iter)),
                        'rf__min_samples_split': [2, 8, 10, 15, 20, 30] if hyper_selc == 'Grid' else unint(2, 30,
                                                                                                           rand_search_iter)},
                 'lr': {'lr__penalty': ['l2', 'l1', 'elasticnet'],
                        'lr__alpha': [0.0001, 0.0002, 0.0005],
                        'lr__max_iter': [3, 5, 7, 9]},
                 'svm': {'svm__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000] if hyper_selc == 'Grid' else roundnp(
                     uni(0.0001, 1000, rand_search_iter)),
                         'svm__gamma': [0.01, 0.1, 1, 10, 100] if hyper_selc == 'Grid' else roundnp(
                             uni(0.0001, 1000, rand_search_iter)),
                         'svm__kernel': ['linear', 'poly', 'rbf']}
                 }
    final_parm = feat_param[feat_selec]
    final_parm.update(mod_param[mod_type])
    return final_parm
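A minimal usage sketch (not part of the original snippet): it wires param_grid into a scikit-learn Pipeline whose step names match the 'PCA' and 'gbc' prefixes above. unint, the helper the random-search branch assumes, is defined here as a thin wrapper around numpy's randint; it is never called on the 'Grid' path.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

def unint(low, high, size):
    # assumed helper: 'size' random integers drawn uniformly from [low, high)
    return np.random.randint(low, high, size)

X, y = make_classification(n_samples=200, n_features=30, random_state=0)
pipe = Pipeline([('PCA', PCA()), ('gbc', GradientBoostingClassifier())])
search = GridSearchCV(pipe, param_grid('PCA', 'gbc', 'Grid', rand_search_iter=10), cv=3)
search.fit(X, y)
print(search.best_params_)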
def delta_EF_asym(mu, e, comp, f, p, alpha=None, max_ave_H=1):
    """computes the EF with asymptotic f, f(N) = f_i*H_i*N_i/(N_i+H_i)
    
    For more information see S10
    H_i is uniformly distributed in [0,2*ave_H]
    
    Input
        mu, e, comp, f, p:
            As in output of rand_par
        alpha: array
            competition parameter, as in output of rand_par
        max_ave_H: scalar, optional
            maximum for the average of H, maximum over all communities
        
    returns: 
        deltaEF/EF: array
            Array containing 100*deltaEF/EF, asymptotic contribution to EF"""
    num = len(alpha)  #number of communities
    # choose distributions of H: H ~u[0,2*ave]
    temp = uni(0, max_ave_H, 3)
    gam = {'avb': temp[0], 'avu': temp[1], 'avc': temp[2]}
    temp = uni(-1 / sqrt, 1 / sqrt, 3)
    gam.update({'tb': temp[0], 'tu': temp[1], 'tc': temp[2]})
    H = lambda x,t: gam['av'+t]*(1+gam['t'+t]*sqrt*x)\
                    *mu['av'+t]*(1+mu['t'+t]*x*sqrt)
    #asymptotic EF in N, f(N) = f_i*H_i*N_i/(N_i+H_i)
    #change to consider different contribution to function
    eco_fun = lambda x,t, N: f['av'+t]*(1+f['t'+t]*x*sqrt)*H(x,t)*N(x,t)\
                                   /(N(x,t)+H(x,t))

    # growthrates in different sites
    mu_ref = lambda x, t: mu['av' + t] * (1 + x * sqrt * mu['t' + t])
    mu_change = lambda x,t: mu['av'+t]*(1+x*sqrt*mu['t'+t])*\
                            (1-e['av'+t]*(1+e['t'+t]*sqrt*x))
    # computes the equilibrium densities of species N, in changed and ref site
    N = lambda x, t, mu, avmu: (mu(x, t) - comp * avmu) / (1 + alpha)
    N_ref = lambda x, t: N(x, t, mu_ref, p * mu['avb'] + (1 - p) * mu['avu'])
    N_change = lambda x, t: N(x, t, mu_change, mu['av_change'])

    # integrate over all species for EF
    x_simp = np.array(num * [np.linspace(-1, 1, 51)])  #x_axes
    y_ref = {
        'b': eco_fun(x_simp.T, 'b', N_ref).T,
        'u': eco_fun(x_simp.T, 'u', N_ref).T
    }  #y_values in ref
    y_cha = {
        'b': eco_fun(x_simp.T, 'b', N_change).T,
        'c': eco_fun(x_simp.T, 'c', N_change).T
    }  #y_values in change
    # compute the EF
    EF_ref = n * (p * simps(y_ref['b'], x_simp) +
                  (1 - p) * simps(y_ref['u'], x_simp))
    EF_change = n * (p * simps(y_cha['b'], x_simp) +
                     (1 - p) * simps(y_cha['c'], x_simp))
    return 100 * (EF_change - EF_ref) / EF_ref  #multiply by 100 for percent
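This function leans on module-level names that sit outside the excerpt (uni, simps, sqrt, n). A plausible reconstruction of that context, inferred rather than confirmed by the source:

import numpy as np
from numpy.random import uniform as uni
from scipy.integrate import simps

sqrt = np.sqrt(3)  # scales x ~ U(-1, 1) to unit variance, so each t_* reads as stdv/mean
n = 20             # assumed number of species per community, used in the EF sums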
def experiment_n(start, end, step, trials, eps, c, m, sigma):

    upper_bound = end + step
    error = np.zeros(shape=(int((end - start) / step) + 1, 4, trials))

    indices = np.zeros(shape=(int((upper_bound - start) / step), 1))

    for n in np.arange(start, upper_bound, step):
        print(n)
        row = int((n - start) / step)
        indices[row] = n
        n0 = int(n * c)
        n1 = int(n * (1 - c))

        for i in range(trials):
            l = uni(high=m)
            data = nor(loc=l, scale=m / 2.0, size=n)
            tcm_data = data[:n0]
            loc_data = data[n0:]
            mean = np.mean(data)
            full_loc_err1 = (mean - lapLM(data, eps, m))**2
            error[row][0][i] += full_loc_err1

            l = uni(high=m)
            data = nor(loc=l, scale=m / 6.0, size=n)
            tcm_data = data[:n0]
            loc_data = data[n0:]
            mean = np.mean(data)
            full_loc_err2 = (mean - lapLM(data, eps, m))**2
            error[row][1][i] += full_loc_err2

            l = uni(high=m)
            data = nor(loc=l, scale=m / 10.0, size=n)
            tcm_data = data[:n0]
            loc_data = data[n0:]
            mean = np.mean(data)
            full_loc_err3 = (mean - lapLM(data, eps, m))**2
            error[row][2][i] += full_loc_err3

            full_loc_ana = fullLM_err(eps, m, n)
            error[row][3][i] += full_loc_ana

    error = np.mean(error, axis=2)
    error = np.sqrt(error)
    error = np.hstack((indices, error))
    # error = np.log10(error)

    print(error)
    header = "n, k = 2, k = 6, k = 10, analyt"
    np.savetxt("fullLM_plot.csv", error, header=header, delimiter=",")
Example No. 4
def Thermalize(
):  # function to calculate number of collisions required to thermalize.
    NumCol = 0
    E_n = StartingE
    while E_n > ThermalEnergy:
        NumCol += 1
        E_loss_factor = uni(alpha, 1)
        E_n = E_n * E_loss_factor
    return NumCol
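A self-contained way to run this (all constants are assumptions, not from the source): uni is numpy's uniform, alpha is the minimum fraction of energy a neutron keeps in an elastic collision with a carbon nucleus, and the energies span a typical fission-to-thermal range.

from numpy.random import uniform as uni

StartingE = 2.0e6                   # eV, assumed initial neutron energy
ThermalEnergy = 0.025               # eV, assumed thermal cutoff
alpha = ((12 - 1) / (12 + 1)) ** 2  # ~0.716 for a carbon moderator

trials = [Thermalize() for _ in range(10000)]
print(sum(trials) / len(trials))    # roughly 115 collisions for carbon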
Example No. 5
def experiment_n(start, end, step, trials, eps, c, m, sigma):

    upper_bound = end + step
    error = np.zeros(shape=(int((end - start) / step) + 1, 3, trials))

    indices = np.zeros(shape=(int((upper_bound - start) / step), 1))

    for n in np.arange(start, upper_bound, step):
        print(n)
        c = np.log(n) / n
        row = int((n - start) / step)
        indices[row] = n
        n0 = int(n * c)
        n1 = int(n * (1 - c))

        for i in range(trials):
            l = uni(high=m)
            data = nor(loc=l, scale=sigma, size=n)
            data = m * (data / float(max(data)))
            # data = truncate(data, 0.0, 1.0)
            tcm_data = data[:n0]
            loc_data = data[n0:]
            mean = np.mean(data)

            only_tcm_err = (mean - tcm(tcm_data, eps, m))**2
            error[row][0][i] = only_tcm_err

            full_loc_err = (mean - lapLM(data, eps, m))**2
            error[row][1][i] += full_loc_err

            # closed-form weight for the hybrid estimator:
            # w1 = c^2*n*(eps^2*sigma^2 + 2*m^2) / (c*n*eps^2*sigma^2 + 2*m^2*(c^2*n + 1 - c))
            top_coeff = (c**2) * n
            top_par_first = (eps**2) * (sigma**2)
            top_par_sec = 2 * (m**2)
            bot_first = c * n * (eps**2) * (sigma**2)
            bot_coeff = 2 * (m**2)
            bot_par = ((c**2) * n) + 1 - c
            top = top_coeff * (top_par_first + top_par_sec)
            bot = bot_first + bot_coeff * bot_par
            w1 = top / bot

            hybrid_err2 = (mean - hybrid(tcm_data, loc_data, eps, m, c, w1))**2
            error[row][2][i] += hybrid_err2

    error = np.mean(error, axis=2)
    error = np.hstack((indices, error))
    # error = np.log10(error)

    print(error)
    header = "n, OnlyTCM, FullLM, Hybrid"
    np.savetxt("squared_error_" + str(eps) + "_" + str(c) + ".csv",
               error,
               header=header,
               delimiter=",")
def delta_EF_asym(ave, t_e, t_mu, comp, t_f, n, alpha=None, max_ave_H=1):
    """computes the EF with asymptotic f, f(N) = f_i*H_i*N_i/(N_i+H_i)
    
    For more information see S10
    H_i is uniformly distributed in [0,2*ave_H]
    
    Input
        ave, t_e, t_mu, comp, t_f, n:
            As in output of rand_par
        alpha: array
            competition parameter, as in output of rand_par
        max_ave_H: scalar, optional
            maximum for the average of H, over all communities
    returns:
        deltaEF/EF: array
            Array containing 100*deltaEF/EF"""
    num = len(ave)  #number of communities
    # choose distribution of H: H ~u[0,2*ave]
    ave_H = uni(0, max_ave_H, num)
    t_H = uni(-1 / sqrt, 1 / sqrt, num)  #stdv/mean of H
    H = lambda x: ave_H * (1 + t_H * sqrt * x
                           )  #H_i for each species in a community

    #asymptotic EF in N, EF(N) = f_i*H_i*N_i/(N_i+H_i)
    #change to consider different contribution to function
    eco_fun = lambda x, N: n * (1 + t_f * x * sqrt) * H(x) * N(x) / (N(x) + H(
        x))

    # computes the equilibrium densities of species N, in changed and ref site
    N_ref = lambda x: (1 + t_mu * sqrt * x - comp) / (1 + alpha)
    N_change = lambda x: ((1+x*t_mu*sqrt)*(1-ave*(1+t_e*sqrt*x))-\
                comp*(1-ave*(1+t_mu*t_e)))/(1+alpha)

    # integrate over all species for EF
    x_simp = np.array(num * [np.linspace(-1, 1, 51)])  #x_axes
    y_ref = eco_fun(x_simp.T, N_ref).T  #y_values in ref
    y_change = eco_fun(x_simp.T, N_change).T  #y values in changed site
    EF_ref = simps(y_ref, x_simp)
    EF_change = simps(y_change, x_simp)
    return 100 * (EF_change - EF_ref) / EF_ref  #multiply by 100 for percent
Example No. 7
    def _update_next_time(self, time):
        # Invalidate the current entry
        self._invalidate_queue_entry()

        # Calculate the reaction propensity
        p = self.rate
        for m, s in self.participants:
            N = s.number
            for n in range(m):
                p *= N - n

        # Update the queue with the next reaction time only if the reaction
        # actually occurs
        if p > 0.:
            self.next_time = time - log(uni()) / p
            self._update_queue()
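The update time - log(uni()) / p draws an exponential waiting time with rate p by inverse-CDF sampling, the standard move in next-reaction-style stochastic simulation. A quick sanity check (illustrative, not from the source):

import numpy as np

p = 2.5
waits = -np.log(np.random.uniform(size=100_000)) / p
print(waits.mean())  # ~ 1/p = 0.4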
Example No. 9
def hybrid_online(tcm_data, loc_data, n, eps, m, c):
    tcm_mean = tcm(tcm_data, c * n, eps, m)
    run_mean = tcm_mean
    ptrue = np.exp(eps) / (np.exp(eps) + 1)
    probs = uni(size=len(loc_data))
    for i in range(len(loc_data)):
        if loc_data[i] < run_mean:
            if probs[i] < ptrue:
                run_mean += m / (c * n + i)
            else:
                run_mean -= m / (c * n + i)
        elif loc_data[i] > run_mean:
            if probs[i] < ptrue:
                run_mean -= m / (c * n + i)
            else:
                run_mean += m / (c * n + i)
        # run_mean = truncate(run_mean, 0.0, 1.0)
    return run_mean
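The comparison bits here follow randomized response: each is reported truthfully with probability exp(eps)/(exp(eps) + 1), which tends to 1/2 (pure noise) as eps -> 0 and to 1 as eps grows. A quick illustration (not from the source):

import numpy as np

for eps in (0.1, 1.0, 5.0):
    print(eps, np.exp(eps) / (np.exp(eps) + 1))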
Example No. 10
    def __init__(self, robot: Robot):
        self.robot: Robot = robot

        # STATE MU
        self.mu = numpy.matrix(
            [[self.robot.x], [self.robot.y], [self.robot.theta]],
            dtype='float')
        self.mu_prediction = self.mu.copy()

        # MOTION MODEL VALUES
        self.u = numpy.matrix([[self.robot.v], [self.robot.w]], dtype='float')

        # STATE COVARIANCE ESTIMATE
        self.sigma = numpy.diag(
            (SETTINGS["INITIAL_COVARIANCE"], SETTINGS["INITIAL_COVARIANCE"],
             SETTINGS["INITIAL_COVARIANCE"]))
        self.sigma_prediction = self.sigma.copy()

        # UNCONTROLLED TRANSITION MATRIX A
        self.A = numpy.identity(3)

        # CONTROL TRANSITION MATRIX B
        self.B = numpy.matrix([[Robot.DELTA_T * math.cos(self.robot.theta), 0],
                               [Robot.DELTA_T * math.sin(self.robot.theta), 0],
                               [0, Robot.DELTA_T]],
                              dtype='float')

        # MOTION NOISE ESTIMATION
        self.R = numpy.matrix(
            [[uni(0, SETTINGS["MOTION_NOISE_ESTIMATION"]), 0, 0],
             [0, uni(0, SETTINGS["MOTION_NOISE_ESTIMATION"]), 0],
             [0, 0, uni(0, SETTINGS["MOTION_NOISE_ESTIMATION"])]],
            dtype='float')

        # MAPPING STATES TO OBSERVATIONS
        self.C = numpy.identity(3)

        # IDENTITY MATRIX
        self.I = numpy.identity(3)

        # SENSOR NOISE ESTIMATION
        self.Q = numpy.matrix(
            [[uni(0, SETTINGS["SENSOR_NOISE_ESTIMATION"]), 0, 0],
             [0, uni(0, SETTINGS["SENSOR_NOISE_ESTIMATION"]), 0],
             [0, 0, uni(0, SETTINGS["SENSOR_NOISE_ESTIMATION"])]],
            dtype='float')

        # STATE ESTIMATED FROM SENSOR DATA
        self.z = numpy.zeros((3, 1))

        # KALMAN GAIN
        self.K_trace = [0]
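The matrices above slot into the standard Kalman predict/correct cycle. A generic sketch of one step (the original class's method names are not shown in this excerpt, so this is an assumption):

import numpy

def kalman_step(mu, sigma, u, z, A, B, C, R, Q, I):
    # predict: propagate state and covariance through the motion model
    mu_bar = A @ mu + B @ u
    sigma_bar = A @ sigma @ A.T + R
    # correct: blend in the observation z via the Kalman gain K
    K = sigma_bar @ C.T @ numpy.linalg.inv(C @ sigma_bar @ C.T + Q)
    mu_new = mu_bar + K @ (z - C @ mu_bar)
    sigma_new = (I - K @ C) @ sigma_bar
    return mu_new, sigma_new, K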
Example No. 11
def spawn_random_tiles(tiles):
    dimension = len(tiles)
    empty_tiles = []
    for i in range(dimension):
        for j in range(dimension):
            if not tiles[i][j]:
                empty_tiles.append((i, j))
    if len(empty_tiles) == 0:
        return None

    global number_of_spawns

    for i in range(min(number_of_spawns, len(empty_tiles))):
        rand_index = randint(len(empty_tiles))
        i, j = empty_tiles.pop(rand_index)
        # set value 2 or 4
        tiles[i][j] = 2 if uni() < 0.9 else 4

    return tiles
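Minimal usage sketch; the imports and number_of_spawns are module-level assumptions here, matching how the function refers to them:

from numpy.random import uniform as uni, randint

number_of_spawns = 2
board = [[0] * 4 for _ in range(4)]
print(spawn_random_tiles(board))  # board with two tiles set to 2 or 4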
Example No. 12
def fit(confirmed0, death0, reopen_day_gov, n_0):
    np.random.seed()
    confirmed = confirmed0.copy()
    death = death0.copy()
    size = len(confirmed)
    # if metric2 != 0 or metric1 != 0:
    #     scale1 = pd.Series(np.random.normal(1, metric1, size))
    #     confirmed = [max(confirmed[i] * scale1[i], 1) for i in range(size)]
    #     scale2 = pd.Series(np.random.normal(1, metric2, size))
    #     death = [max(death[i] * scale2[i], 1) for i in range(size)]
    c_max = 0
    reopen_max = reopen_day_gov  # keep defined even if no candidate improves min_loss
    min_loss = 10000
    for reopen_day in range(reopen_day_gov, reopen_day_gov + 14):
        for c1 in np.arange(c1_range[0], c1_range[1], 0.01):
            # optimal = minimize(loss, [10, 0.05, 0.01, 0.1, 0.1, 0.1, 0.02], args=(c1, confirmed, death, n_0, SIDRG_sd),
            optimal = minimize(loss_combined, [uni(beta_range[0], beta_range[1]),
                                               uni(gammaE_range[0], gammaE_range[1]),
                                               uni(alpha_range[0], alpha_range[1]),
                                               uni(gamma_range[0], gamma_range[1]),
                                               uni(gamma2_range[0], gamma2_range[1]),
                                               uni(gamma3_range[0], gamma3_range[1]),
                                               uni(a1_range[0], a1_range[1]),
                                               uni(a2_range[0], a2_range[1]),
                                               uni(a3_range[0], a3_range[1]),
                                               uni(eta_range[0], eta_range[1]),
                                               uni(h_range[0], h_range[1]),
                                               uni(Hiding_init_range[0], Hiding_init_range[1]),
                                               uni(I_initial_range[0], I_initial_range[1])],
                               args=(c1, confirmed, death, n_0, reopen_day), method='L-BFGS-B',
                               bounds=[beta_range,
                                       gammaE_range,
                                       alpha_range,
                                       gamma_range,
                                       gamma2_range,
                                       gamma3_range,
                                       a1_range,
                                       a2_range,
                                       a3_range,
                                       eta_range,
                                       h_range,
                                       Hiding_init_range,
                                       I_initial_range])
            current_loss = loss_combined(optimal.x, c1, confirmed, death, n_0, reopen_day)
            if current_loss < min_loss:
                # print(f'updating loss={current_loss} with c1={c1}')
                min_loss = current_loss
                c_max = c1
                reopen_max = reopen_day
                beta = optimal.x[0]
                gammaE = optimal.x[1]
                alpha = optimal.x[2]
                gamma = optimal.x[3]
                gamma2 = optimal.x[4]
                gamma3 = optimal.x[5]
                a1 = optimal.x[6]
                a2 = optimal.x[7]
                a3 = optimal.x[8]
                eta = optimal.x[9]
                h = optimal.x[10]
                Hiding_init = optimal.x[11]
                I_initial = optimal.x[12]

    c1 = c_max
    reopen_day = reopen_max
    S = [n_0 * eta * (1 - Hiding_init)]
    E = [0]
    I = [n_0 * eta * I_initial * (1 - alpha)]
    A = [n_0 * eta * I_initial * alpha]
    IH = [0]
    IN = [I[-1] * gamma2]
    D = [death[0]]
    R = [0]
    G = [confirmed[0]]
    H = [n_0 * eta * Hiding_init]
    # H = [0]
    # Betas = [beta]

    result, [S, E, I, A, IH, IN, D, R, G, H, betas] \
        = simulate_combined(size, S, E, I, A, IH, IN, D, R, G, H, beta, gammaE, alpha, gamma, gamma2, gamma3, a1, a2,
                            a3, h, Hiding_init, eta, c1, n_0, reopen_day)
        

    # data1 = [(confirmed[i] - G[i]) / confirmed[i] for i in range(size)]
    # data2 = [(death[i] - D[i]) / death[i] for i in range(size)]

    size1 = reopen_day
    size2 = size - size1
    weights1 = [Geo ** n for n in range(size1)]
    weights1.reverse()
    weights2 = [Geo ** n for n in range(size2)]
    weights2.reverse()
    weights = weights1
    weights.extend(weights2)

    # weights = [Geo ** n for n in range(size)]
    # weights.reverse()

    # sum_wt = sum(weights)
    # metric1 = math.sqrt(sum([data1[i] ** 2 * weights[i] for i in range(size)])
    #                     /
    #                     ((size - 12) * sum_wt / size)
    #                     )
    # metric2 = math.sqrt(sum([data2[i] ** 2 * weights[i] for i in range(size)])
    #                     /
    #                     ((size - 12) * sum_wt / size)
    #                     )
    metric1 = weighted_relative_deviation(weights, confirmed, G, start_dev, num_para)
    metric2 = weighted_relative_deviation(weights, death, D, start_dev, num_para)

    r1 = r2_score(confirmed, G)
    r2 = r2_score(death, D)

    return [beta, gammaE, alpha, gamma, gamma2, gamma3, a1, a2, a3, eta, h, Hiding_init, c1, I_initial, metric1,
            metric2, r1, r2, reopen_day], min_loss
Example No. 13
def initialize(param_dict:dict=None):
    """
    Set up initial strains and phages
    Return: com (Community)
    """
    from Enums import Type

    from numpy.random import uniform as uni, randint
    
    if param_dict is None:
        param_dict = dict() # will contain parameters already specified at the command line
        
        for arg in params:
            if arg in globals() and not globals()[arg] is None:
                param_dict[arg] = globals()[arg]

    # for arg,val in globals().items():
    #     if arg in params and not val is None:
    #         param_dict[arg] = val


    # if parameters aren't specified, draw them from distributions
    pS = param_dict.get( "pS", 10**-( uni(6,9) ) ) # default: random float 10^-6 to 10^-9
    b = param_dict.get( "b", uni(0.9, 2) )
    a = param_dict.get( "a", 10**( uni(5,8) ) )
    c = param_dict.get( "c", 10**-( uni(1,3) ) )
    f = param_dict.get( "f", 10**-( uni(5,7) ) )
    
    beta = param_dict.get( "beta", randint(50,200) ) # default: random int from 50 to 200
    adsp = param_dict.get( "adsp", 10**-( uni(7,9) ) )
    d = param_dict.get( "d", uni(0.0, 0.3) )
    m = param_dict.get( "m", 10**-( uni(6,9) ) )
    l = param_dict.get( "l", uni(0.0, 1.0) )

    popinit = param_dict.get( "popinit", 10**( uni(5,7) ) )
    phageinit = param_dict.get( "phageinit", 10**( uni(5,7) ))

    receptor1 = PhageReceptor.PhageReceptor( name = "r1" ) # change numbering system

    crispr0 = Crispr.Crispr()


    strain1 = Strain.Strain(
        name = "s0001",
        type_ = 'init',
        a=a,b=b,c=c,y=y,f=f,pS=pS,
        crispr = crispr0,
        phReceptors = {
            receptor1.name:receptor1
            },
        pop = popinit,
        evoTraits=strain_evolved
    )

    # strain2 = Strain.Strain(
    #     name = "s2",
    #     a=a,b=b,c=c,y=y,f=f,
    #     crispr = crispr0,
    #     phReceptors = {
    #         receptor1.name:receptor1
    #         },
    #     pop = popinit/100
    # )

    nameGenerator = gen.NameGenerator()

    protospacers = set()

    for i in range(8):

        protospacers.add( nameGenerator.generateName(Type.PROTO))

    phage1 = Phage.Phage(
        name = "p0001",
        adsp = adsp,beta = beta, d = d, m = m,
        receptor = receptor1,
        pop = phageinit,
        protospacers = protospacers,
        evoTraits=phage_evolved

    )

    phage2 = Phage.Phage(
        name = "p2",
        adsp = adsp,beta = beta, d = d, m = m*10,
        receptor = receptor1,
        pop = phageinit,
        protospacers = protospacers,
        evoTraits=phage_evolved

    )

    # spacer = strain2.crispr.makeSpacer("AGTAGTAGTAGTAGTAGTAGTAGTAGTAGTAGTAGTAGTAGTAGTAGTAGT")
    # spacer = strain2.crispr.makeSpacer(phage1.genome)

    # strain2.crispr.addSpacer(spacer)

    # pop1 = Population.Population(
    #     name = "pop1",
    #     strains = {
    #         strain1.name: strain1
    #     } 
    # )

    com = Community.Community(
        c=c,l=l,
        strains = {
            strain1.name: strain1,
            # strain2.name: strain2
        },
        phages = {
            phage1.name: phage1,
            phage2.name: phage2
        },
        nameGenerator=nameGenerator,
        evoTraits=evolved_params,
        le = 0.2
    )

    com.summary = pd.Series(
        data = {
            "id": uuid.uuid1(),
            "pop":np.nan,
            "phage":np.nan,
            "immune":np.nan,
            "susceptible":np.nan,
            "richness":np.nan,
            "phageRichness":np.nan,
            "pS":pS,
            "b":b,
            "a":a, 
            "c":c,
            "f":f, 
            "beta":beta, 
            "adsp":adsp, 
            "d":d, 
            "m":m, 
            "l":l, 
            "popinit":popinit, 
            "phageinit":phageinit,
            "nodf":np.nan,
            "Q":np.nan,
            }        
    )
    return com 
def rand_par(e_min=-1,
             ave_min=-0.5,
             ave_max=0.5,
             e_max=1,
             p='rand',
             ad_com=0.005,
             num=100000):
    """ returns randomized parameters for num_com communities
    
    The function randomly generates num_com*(1+ad_com) communities until it
    finds num_com communities fullfilling all coexistence requirements. This
    method slightly shifts the distribution of alpha and e towards 0. The 
    error in the distribution is smaller than ad_com   
    Pleasse refer to supplementary data 7 to understand the code
    
    Input:
        num_com: scalar
            number of species to be generated
        p:  scalar or string
            Percent of species that are in ref and changed site (Type b species)
            p*n must be an integer or p ='rand'
            'rand will randomly generate values for p
            Note: p = 1 is NOT equivalent to coex.rand_par, because of the
            coexistence requirements.
        ave_max, ave_min: scalar<1, ave_min<=ave_max
            the maximum/minimum that are allowed for the
            average sensitivity of each species type
        e_max, e_min: scalar<1, e_min<=ave_min,ave_max<=e_max
            the maximum/minimum that are allowed for the sensitivity
            of each species (individually)
        ad_com: scalar
            Proportion of additionally computed communities.
    
    returns:
        mu: dict
            Contains all growth rates and some associated values
        e:  dict
            contains the sensitivity
        comp: array
            Relative competition
        f:  dict
            contains the per capita contributions
        p_ret:  array
            Percent of species that are in ref and changed site (Type b species)
            if p was a scalar, then p_ret = p*np.ones(num_com).
        alpha: array
            competition parameter        
        """
    #check input correctness
    if not (e_min <= ave_min <= ave_max <= e_max):
        raise InputError(
            "Please sort the input: e_min<=ave_min<=ave_max<=e_max")
    if e_max > 1:  #growth rate of that species would be negative
        raise InputError("e_max>1, effects above 1 are not allowed")
    if not (p == 'rand'
            or p * n == int(p * n)):  #a species goes extinct or not
        raise InputError("p must either be 'rand' or p*n must be an integer")

    #save the original number of communities
    num_com = num
    #number of communities to construct
    num = int(np.ceil(num_com * (1 + ad_com)))

    # fixed parameters, do not change to find communities
    e_fix = {
        'avb': dist(ave_min, ave_max, num),  #average effect on species b
        'avc': np.zeros(num),  #will be filled with data while running
        'tc': np.zeros(num),
        'tb': np.zeros(num)
    }
    mu_fix = {
        'avb': np.zeros(num),  #will be filled with data while running
        'avc': np.zeros(num),
        'tc': np.zeros(num),
        'avu': np.zeros(num),
        'tu': np.zeros(num),
        'tb': np.zeros(num)
    }
    alpha = uni(-0.95, -0.05, num)  # interaction coefficient
    comp_fix = -alpha * n / (1 - alpha *
                             (n - 1))  #effective competition, computed
    # Fixed communities fulfill coexistence requirements
    not_fix = np.array(num * [True])
    # percent of species of type b
    if p == 'rand':
        p_fix = np.random.randint(1, n - 1, num) / n
    else:
        p_fix = p * np.ones(num)

    #randomly generate communities, until num_com many fulfill coex. req.
    #attention, changing settings might turn this into an infinite loop
    while num > num_com * ad_com:
        #copy the predefined values into arrays to be used
        e = {'avb': e_fix['avb'][not_fix]}
        p = p_fix[not_fix]
        q = 1 - p
        comp = comp_fix[not_fix]

        # min(mu['avb'],mu['avu'])/(p*mu['avb']+q*mu['avu'])>comp
        mu = {'avb': dist(0, 10, num)}
        mu['avu'] = dist(
            mu['avb'] * comp * p / (1 - q * comp),
            np.amin(
                [mu['avb'] * (1 - p * comp) / (q * comp), 10 * np.ones(num)],
                axis=0))

        #coexistence limit, min(mu)/mean(mu)>comp
        tresh1 = comp * (p * mu['avb'] + q * mu['avu'])
        # chosen such that min (mu_u,mu_b) > tresh1, i.e. coexist
        mu['tb'] = dist(-(1 - tresh1 / mu['avb']) / sqrt,
                        (1 - tresh1 / mu['avb']) / sqrt)
        mu['tu'] = dist(-(1 - tresh1 / mu['avu']) / sqrt,
                        (1 - tresh1 / mu['avu']) / sqrt)

        # mu['avc']*(1-ave_min) must be able to coexist in changed site
        tresh2 = mu['avb'] * (1 - e['avb']) * p * comp / (1 - comp * q) / (
            1 - ave_min)
        # we always have threshold2 < threshold1
        mu['avc'] = dist(tresh2, tresh1)

        # ensure that min(mu_c) fulfills the same conditions as mu['avc']
        bound = np.amin([tresh1 / mu['avc'] - 1, 1 - tresh2 / mu['avc']],
                        axis=0) / sqrt
        mu['tc'] = dist(-bound, bound)

        # mu['avc']*(1-e['avc']) fulfills the coexistence conditions
        # choose min for e['avc']
        tresh1 = np.amax([1-mu['avb']/mu['avc']*(1-e['avb'])*(1-comp*p)\
                     /(q*comp),ave_min*np.ones(num)],axis = 0)
        # choose max for e['avc']
        tresh2 = np.amin([1-mu['avb']/mu['avc']*(1-e['avb'])/(1-comp*q)\
                     *(p*comp),ave_max*np.ones(num)],axis = 0)
        e['avc'] = dist(tresh1, tresh2)

        # choose borders so that the e_i lie within [e_min, e_max]
        minimum = np.amin([
            np.sign(e['avb']) * (e_max / e['avb'] - 1),
            np.sign(e['avb']) * (1 - e_min / e['avb'])
        ],
                          axis=0)
        e['tb'] = uni(-minimum / sqrt, minimum / sqrt)
        minimum = np.amin([
            np.sign(e['avc']) * (e_max / e['avc'] - 1),
            np.sign(e['avc']) * (1 - e_min / e['avc'])
        ],
                          axis=0)
        e['tc'] = dist(-minimum / sqrt, minimum / sqrt)

        # average growth rates in changed site of the species types
        mu['avb_change'] = mu['avb'] * e['avb'] * (1 / e['avb'] - 1 -
                                                   mu['tb'] * e['tb'])
        mu['avc_change'] = mu['avc'] * e['avc'] * (1 / e['avc'] - 1 -
                                                   mu['tc'] * e['tc'])
        # average growth rate of entire community in changed site
        mu['av_change'] = p * mu['avb_change'] + q * mu['avc_change']

        # reference types are assumed to have e_i = 1, always
        # if this part of the code is changed, please also change in coex_test
        # e['avu'] = 1 #change if desired differently
        # e['tu'] = 0

        #copy the parameters into the fixed parameters
        for k in e_fix.keys():
            if k == 'avb':  #do not copy into fixed 'avb'
                continue
            e_fix[k][not_fix] = e[k]
        for k in mu_fix.keys():
            mu_fix[k][not_fix] = mu[k]

        #check which species can coexist and update not_fix
        coex = coex_test(mu, e, comp)
        not_fix[not_fix] = np.logical_not(coex)
        num = np.count_nonzero(not_fix)  #number of not fixed communities

    fix = np.logical_not(not_fix)  #communities that are fixed, i.e. coex
    # choose only num_com coexisting communities
    comp_ret = comp_fix[fix][:num_com]
    alpha_ret = alpha[fix][:num_com]
    p_ret = p_fix[fix][:num_com]
    mu_ret = {key: mu_fix[key][fix][:num_com] for key in mu_fix.keys()}
    e_ret = {key: e_fix[key][fix][:num_com] for key in e_fix.keys()}

    # average growth rates in changed site of the species types
    mu_ret['avb_change'] = mu_ret['avb']*e_ret['avb']*\
                        (1/e_ret['avb']-1 - mu_ret['tb']*e_ret['tb'])
    mu_ret['avc_change'] = mu_ret['avc']*e_ret['avc']*\
                        (1/e_ret['avc']-1 - mu_ret['tc']*e_ret['tc'])
    # average growth rate of entire community
    mu_ret['av_change'] = p_ret*mu_ret['avb_change']+\
                            (1-p_ret)*mu_ret['avc_change']

    # generate distribution of per capita contributions for species types
    t_fb, t_fu, t_fc = uni(-1 / sqrt, 1 / sqrt, [3, num_com])  # stdv/mean
    avfb, avfu, avfc = uni(0.5, 1.5, [3, num_com])  #averages of f

    f = {'avb':avfb,'avu':avfu,'avc':avfc,\
         'tb':t_fb,'tu':t_fu,'tc':t_fc}
    # communities fulfill coexistence
    return mu_ret, e_ret, comp_ret, f, p_ret, alpha_ret
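How these pieces fit together (a sketch, assuming the module-level n and sqrt constants noted earlier):

mu, e, comp, f, p, alpha = rand_par(num=1000)
delta = delta_EF_asym(mu, e, comp, f, p, alpha)  # 100*deltaEF/EF per community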
Example No. 15
from numpy.random import uniform as uni

# Monte Carlo estimate of the Steinmetz solid: the volume common to two
# perpendicular cylinders of radius 3 (axes along z and x). Sample points
# uniformly in the bounding cube [-3, 3]^3 and count the hits.
qual = 1000000
result = 0
for i in range(qual):
    x = uni(-3, 3)
    y = uni(-3, 3)
    z = uni(-3, 3)

    if x**2 + y**2 < 9 and z**2 + y**2 < 9:
        result += 1

# cube volume (6**3) times the hit fraction; the exact value is 16*r**3/3 = 144
print(6**3 * result / qual)
    actual = [30.8,14.8,3.7, 6.0,3.0,0.8]
    simulated = list(map(lambda x: 100*x, GraphsAndData.number_of_partners_data(s, year = s.NUMBER_OF_YEARS-0)))
    sexual_partners += sum([abs(actual[i] - simulated[i]) for i in range(len(actual))])    

    #return result of test
    return age_disparate + sexual_partners

#MPI variables
name = MPI.Get_processor_name()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

#%% 0. Setup 
# Every prior distribution is a uniform and given by a bottom and top
prior = {
            1: lambda: uni(-0.5, -0.01),   # probability multiplier
            2: lambda: uni(-0.5, -0.01),   # preferred age difference
            3: lambda: uni(0.01, 2),       # preferred age difference growth
            
            4: lambda: uni(1, 4),          # DNP scale
            5: lambda: uni(0.05, 0.9),     # DNP shape 
            6: lambda: uni(15, 30),        # durations scale
            7: lambda: uni(0, 5),          # durations shape 
}
posterior = {i:[] for i in prior.keys()}
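These dicts support an ABC (approximate Bayesian computation) loop: draw a parameter vector from the priors, run the simulation, score it with a distance such as age_disparate + sexual_partners above, and keep draws that fall under a tolerance. A generic sketch, with run_simulation and tolerance as hypothetical stand-ins:

def abc_step(run_simulation, tolerance):
    theta = {i: draw() for i, draw in prior.items()}  # sample from priors
    if run_simulation(theta) < tolerance:             # accept close simulations
        for i, value in theta.items():
            posterior[i].append(value)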
    
##file to write
##rows 0-7
#print "#,ProbMult, PAD, PADgrowth, DNPscale, DNPshape, DURAscale, DURAshape,"  # parameters
##rows 8-11, 12-15    
#print "NonInterMale05,InterMale05,NonInterFemale05,InterFemale05,"  # intergernational sex 2005
            r_step = sqrt((x_new - x_prev)**2 + (y_new - y_prev)**2)
            NS = NS + 1
            n = n + 1
        else:
            break
    return x_new, y_new, NS


#Local_x,Local_y,NS=Localsearch(1,0,3)
'''solving with VNS'''
Ackley = []
X = []
Y = []
Time = []
for j in range(20):
    x, y = uni(-32.768, 32.768), uni(-32.768, 32.768)
    start_time = time.time()
    for i in range(1000):
        for r in (0.1, 0.5, 3, 5, 8, 13, 21, 34):
            improved = True
            while improved:
                a = neighborpoints(x, y, r)
                b = Localsearch(a[0], a[1])
                if ackley(b[0], b[1]) < ackley(x, y):
                    x, y = b[0], b[1]
                else:
                    improved = False
    end_time = time.time()
    Ackley.append(ackley(x, y))
    X.append(x)
    Y.append(y)