Code Example #1
    def otimizar(self):
        # Define the hyperparameter values to sample
        parametros = {
            'C': loguniform.rvs(2**-5, 2**15, size=self.tamanho_grid),
            'gamma': loguniform.rvs(2**-15, 2**3, size=self.tamanho_grid),
            'epsilon': uniform.rvs(0.0, 1, size=self.tamanho_grid)
        }

        cv_ = ShuffleSplit(n_splits=1, test_size=0.1, train_size=0.9)

        # Run the hyperparameter search
        self.iniciar_tempo()
        grid = GridSearchCV(estimator=SVR(kernel='rbf'),
                            param_grid=parametros,
                            scoring="neg_mean_absolute_error",
                            cv=cv_,
                            n_jobs=-1)
        grid.fit(self.X_treinamento, self.Y_treinamento)
        self.finalizar_tempo()

        # Extract the best hyperparameters
        C = grid.best_params_['C']
        gamma = grid.best_params_['gamma']
        epsilon = grid.best_params_['epsilon']

        # Train the final SVM with the parameters found
        self.svm = SVM(gamma, C, epsilon)
        self.svm.treinar(self.X_treinamento, self.Y_treinamento)
        self.svm.testar(self.X_teste, self.Y_teste)
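Note: because the lists above are pre-sampled, GridSearchCV still enumerates their full Cartesian product (tamanho_grid**3 fits). A minimal alternative sketch, not part of the original code: scikit-learn's RandomizedSearchCV accepts the scipy distributions directly and draws a fixed number of joint samples.

from scipy.stats import loguniform, uniform
from sklearn.model_selection import RandomizedSearchCV, ShuffleSplit
from sklearn.svm import SVR

distributions = {
    'C': loguniform(2**-5, 2**15),
    'gamma': loguniform(2**-15, 2**3),
    'epsilon': uniform(0.0, 1),
}
search = RandomizedSearchCV(SVR(kernel='rbf'),
                            param_distributions=distributions,
                            n_iter=100,  # number of joint samples to draw
                            scoring="neg_mean_absolute_error",
                            cv=ShuffleSplit(n_splits=1, test_size=0.1),
                            n_jobs=-1)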
Code Example #2
    def create_random_LR(
            penalty: tuple = ("none", "l1", "l2", "elasticnet"),
            solver: tuple = ("liblinear", "sag", "saga", "newton-cg", "lbfgs"),
            max_iter: tuple = (100, 1000),
            C: tuple = (1e-5, 10),
    ) -> LogisticRegression:
        """Create a Logistic Regression model using random hyperparameters.

        :param penalty: Tuple with the penalty hyperparameters, defaults to ("none", "l1", "l2", "elasticnet")
        :type penalty: tuple, optional
        :param solver: Tuple with the solver hyperparameters, defaults to ("liblinear", "sag", "saga", "newton-cg", "lbfgs")
        :type solver: tuple, optional
        :param max_iter: Tuple with the max_iter hyperparameters, defaults to (100, 1000)
        :type max_iter: tuple, optional
        :param C: Tuple with the C hyperparameters, defaults to (1e-5, 10)
        :type C: tuple, optional
        :return: A LogisticRegression model with the random hyperparameters.
        :rtype: LogisticRegression
        """
        model = LogisticRegression(
            penalty=random.choice(penalty),
            solver=random.choice(solver),
            max_iter=round(loguniform.rvs(max_iter[0], max_iter[1])),
            C=round(loguniform.rvs(C[0], C[1]), int(abs(math.log(C[0], 10)))),
        )
        return model
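A hedged usage sketch (the data and retry loop are illustrative, not from the original): each call returns a freshly randomized model, and since scikit-learn validates the penalty/solver pair at fit time (e.g. penalty="l1" requires liblinear or saga), a caller may need to retry on ValueError.

# X_train, y_train are assumed to exist
while True:
    model = create_random_LR()
    try:
        model.fit(X_train, y_train)  # invalid penalty/solver pairs raise here
        break
    except ValueError:
        continue  # e.g. lbfgs with l1 was drawn; sample a new combination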
Code Example #3
def generate_parameters(seed):
    np.random.seed(seed)
    out = {}
    out['nfeatures'] = np.random.randint(3, 25)
    out['lr'] = float(loguniform.rvs(0.001, 0.01, size=1))
    out['gamma'] = np.random.uniform(0.05, 0.75)
    out['penalty'] = float(loguniform.rvs(0.00001, 0.1, size=1))
    out['batch'] = np.random.choice([32, 64])
    return out
Code Example #4
def generate_p0(df_config, nwalkers, fix=None):
    config = df_config.copy()
    adopts_logprior = config.prior == "log"
    _a = config.lo.values
    _b = config.hi.values
    a = _a[adopts_logprior]
    b = _b[adopts_logprior]
    loc = _a[~adopts_logprior]
    scale = (_b - _a)[~adopts_logprior]

    #print(loc,scale)

    p0 = empty((nwalkers, len(config)))
    #print(p0.shape)
    p0[:, ~adopts_logprior] = uniform.rvs(size=(nwalkers,
                                                (~adopts_logprior).sum()),
                                          loc=loc,
                                          scale=scale)
    p0[:, adopts_logprior] = loguniform.rvs(size=(nwalkers,
                                                  adopts_logprior.sum()),
                                            a=a,
                                            b=b)

    if fix is not None:
        deleted_index = [list(config.name).index(pname) for pname in fix]
        p0 = np.delete(p0, deleted_index, axis=1)

    return p0
Code Example #5
    def gen_params_value(self):
        """
        Generates new values (samples randomly) for each parameter.

        Returns:
            dict: A dictionary containing the parameters whose values should be sampled.
        """
        np.random.seed(None)
        auto_params_val = {}
        for param in self.auto_params:
            if param["type"] == "fixed":
                auto_params_val.update({param["name"]: param["value"]})
            elif param["type"] == "choice":
                auto_params_val.update(
                    {param["name"]: np.random.choice(param["values"])})
            elif param["type"] == "range":
                if "log_scale" in param and param["log_scale"]:
                    auto_params_val.update(
                        {param["name"]: loguniform.rvs(*param["bounds"])})
                else:
                    auto_params_val.update(
                        {param["name"]: np.random.uniform(*param["bounds"])})
            elif param["type"] == "int":
                auto_params_val.update(
                    {param["name"]: np.random.randint(np.iinfo(np.int32).max)})

        return auto_params_val
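For illustration, a hypothetical auto_params specification with one entry per branch handled above (the keys "name", "type", "value", "values", "bounds", and "log_scale" follow the method's own lookups):

auto_params = [
    {"name": "dropout",       "type": "fixed",  "value": 0.5},
    {"name": "batch_size",    "type": "choice", "values": [32, 64, 128]},
    {"name": "learning_rate", "type": "range",  "bounds": [1e-5, 1e-1], "log_scale": True},
    {"name": "momentum",      "type": "range",  "bounds": [0.5, 0.99]},
    {"name": "seed",          "type": "int"},
]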
Code Example #6
    def create_random_SVC(
        kernel: tuple = ("linear", "poly", "rbf", "sigmoid", "precomputed"),
        gamma: tuple = ("scale", "auto"),
        C: tuple = (1e-5, 10),
        decision_function_shape: tuple = ("ovo", "ovr"),
        probability: bool = True,
    ) -> SVC:
        """Create a Support Vector Machine model using random hyperparameters.

        :param kernel: Tuple with the kernel hyperparameters, defaults to ("linear", "poly", "rbf", "sigmoid", "precomputed")
        :type kernel: tuple, optional
        :param gamma: Tuple with the gamma hyperparameters, defaults to ("scale", "auto")
        :type gamma: tuple, optional
        :param C: Tuple with C hyperparameters, defaults to (1e-5, 10)
        :type C: tuple, optional
        :param decision_function_shape: Tuple with the decision_function_shape hyperparameters, defaults to ("ovo", "ovr")
        :type decision_function_shape: tuple, optional
        :param probability: Boolean to enable probability prediction, defaults to True
        :type probability: bool, optional
        :return: A SVC model with the random hyperparameters.
        :rtype: SVC
        """
        model = SVC(
            kernel=random.choice(kernel),
            gamma=random.choice(gamma),
            C=round(loguniform.rvs(C[0], C[1]), int(abs(math.log(C[0], 10)))),
            decision_function_shape=random.choice(decision_function_shape),
            probability=probability,
        )
        return model
Code Example #7
def get_samples(args):
    """Get different samples of hyperparameters

    Arguments:
        args: Arguments read from command line

    returns list of samples for hyperparameters out of given hparam space
    """

    # Put values multiple times into list to increase probability to be chosen
    param_grid = {
        "pooling_type": ["avg"],
        "pool_size_y_factor": [i / 30 for i in range(1, 15)],
        "pool_size_x": [i for i in range(8, 22)],
        "learning_rate": loguniform.rvs(a=1e-3, b=5e-1, size=10000),
        "optimizer": ["Adam"],
        "regularizer": [i / 100 for i in range(0, 20)],
        "filter_rows_lower": [i for i in range(300)],
        "filter_cols_upper": [i for i in range(8, 20)],
        "filter_cols_lower": [i for i in range(10, 20)],
        "batch_size": [32, 64, 128, 256],
        "num_units_l1": [i for i in range(3, 10)],
        "num_units_l2": [i for i in range(1, 5)],
        "activation": ["relu"],
    }

    dynamic_params = list(ParameterSampler(param_grid, n_iter=args.n_runs))
    static_params = get_static_hparams(args)

    return [{**dp, **static_params} for dp in dynamic_params]
Code Example #8
    def sample(self, nsamples=None):
        """
        Samples points from the prior: uniform for the slope and intercept, log-uniform for the standard deviation.
        """
        if nsamples is None:
            nsamples = self.nsamples

        # Evaluate samples:
        beta_samples = np.random.uniform(self.beta1, self.beta2, nsamples)
        sigma_poisson_samples = loguniform.rvs(self.sigma_poisson1,
                                               self.sigma_poisson2,
                                               size=nsamples)
        sigma_flicker_samples = loguniform.rvs(self.sigma_flicker1,
                                               self.sigma_flicker2,
                                               size=nsamples)

        # Return them:
        return beta_samples, sigma_poisson_samples, sigma_flicker_samples
Code Example #9
    def sample(self, nsamples=None):
        """
        Samples points from the prior: uniform for the slope and intercept, log-uniform for the standard deviation.
        """
        if nsamples is None:
            nsamples = self.nsamples

        # Evaluate samples:
        a_samples = np.random.uniform(self.a1, self.a2, nsamples)
        b_samples = np.random.uniform(self.b1, self.b2, nsamples)
        sigma_samples = loguniform.rvs(self.sigma1, self.sigma2, size=nsamples)

        # Return them:
        return a_samples, b_samples, sigma_samples
Code Example #10
def sample_values():
    p2v = dict()
    for p in config['tune_params']:
        a = config['{}_algo'.format(p)]
        if a != 'choice':
            min_v, max_v = config['{}_range'.format(p)]
            if a == 'loguniform':
                p2v[p] = loguniform.rvs(min_v, max_v)
            elif a == 'uniform-integer':
                p2v[p] = np.random.randint(min_v, max_v + 1)
            elif a == 'uniform-float':
                p2v[p] = uniform.rvs(loc=min_v, scale=max_v - min_v)
            else:
                print("ERROR: sampling method specified as {}".format(a))
        else:  # a == 'choice'
            p2v[p] = np.random.choice(config['{}_range'.format(p)])
    return p2v
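The uniform-float branch above reflects scipy's calling convention: scipy.stats.uniform is parameterized by loc and scale and samples from [loc, loc + scale], whereas loguniform takes its endpoints a and b directly. A quick sanity check:

from scipy.stats import loguniform, uniform

draws = uniform.rvs(loc=2, scale=10 - 2, size=1000)  # covers [2, 10]
ldraws = loguniform.rvs(2, 10, size=1000)            # endpoints given directly
assert draws.min() >= 2 and draws.max() <= 10
assert ldraws.min() >= 2 and ldraws.max() <= 10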
Code Example #11
def get_samples(args):
    """Get random samples of hyperparameters

    Arguments:
        args: Arguments read from command line

    returns list of samples for hyperparameters out of given hparam space
    """
    from scipy.stats import loguniform

    # Put values multiple times into list to increase probability to be chosen
    param_grid = {
        "pooling_type": ["avg"],
        "kernel_size": [i for i in range(5, 6)],
        "kernel_number": [i for i in range(11, 15)],
        "pool_size_y": [2],
        "pool_size_x": [2],
        "learning_rate": loguniform.rvs(a=7e-6, b=5e-4, size=10000),
        "optimizer": ["Adam"],  # Nadam, Adagrad
        "layer_number": [5],
        "batch_normalization": [False, True],
        "regularization": [0],
        "filter_rows_lower": [0],
        "filter_cols_upper": [0],
        "filter_cols_lower": [0],
        "batch_size": [64, 128],
        "loss": ["mae"],
        "Recurrent_Celltype": ["GRU"],
        "units": [i for i in range(11, 14)],
        "squeeze_method": ["1x1_conv", "squeeze"],
        "1x1_conv_filters": [i + 1 for i in range(5, 11)],
    }

    randint = int(
        tf.random.uniform(shape=[],
                          minval=0,
                          maxval=10,
                          dtype=tf.dtypes.int32,
                          seed=None))
    dynamic_params = list(
        ParameterSampler(param_grid, n_iter=args.n_runs, random_state=randint))

    static_params = get_static_hparams(args)

    return [{**dp, **static_params} for dp in dynamic_params]
Code Example #12
def sample_values(trial_num):
    p2v = dict()
    for p in config['tune_params']:
        a = config['{}_algo'.format(p)]
        if a == 'selection':  #To select in order from a list of hyperparam values
            p2v[p] = config['{}_range'.format(p)][trial_num]
        elif a == 'choice':  #To randomly select any value from a list of hyperparam values
            p2v[p] = np.random.choice(config['{}_range'.format(p)])
        else:  #To randomly select a value from a given range
            min_v, max_v = config['{}_range'.format(p)]
            if a == 'loguniform':
                p2v[p] = loguniform.rvs(min_v, max_v)
            elif a == 'uniform-integer':
                p2v[p] = np.random.randint(min_v, max_v + 1)
            elif a == 'uniform-float':
                p2v[p] = uniform.rvs(loc=min_v, scale=max_v - min_v)
            else:
                print("ERROR: sampling method specified as {}".format(a))

    return p2v
Code Example #13
def get_samples(args):
    """Get different samples of hyperparameters

    Arguments:
        args: Arguments read from command line

    returns list of samples for hyperparameters out of given hparam space
    """
    from scipy.stats import loguniform

    # Put values multiple times into list to increase probability to be chosen
    param_grid = {
        "pooling_type": ["avg", "max"],
        "kernel_size": [i for i in range(3, 5)],
        "kernel_number": [i for i in range(2, 5)],
        "pool_size_y": [2],
        "pool_size_x": [2],
        "learning_rate": loguniform.rvs(a=1e-6, b=1e-4, size=100000),
        "optimizer": ["Adam"],
        "layer_number": [1, 2, 3],
        "batch_normalization": [False, True],
        "regularization": [0],
        "filter_cols_upper": [i for i in range(15, 35)],
        "filter_cols_lower": [i for i in range(15, 30)],
        "batch_size": [32, 64, 128],
        "units": [i for i in range(2, 14)],
        "loss": ["categorical_crossentropy"],
    }

    randint = int(
        tf.random.uniform(shape=[],
                          minval=0,
                          maxval=10,
                          dtype=tf.dtypes.int32,
                          seed=None))
    dynamic_params = list(
        ParameterSampler(param_grid, n_iter=args.n_runs, random_state=randint))

    static_params = get_static_hparams(args)

    return [{**dp, **static_params} for dp in dynamic_params]
Code Example #14
def generate_p0(config_fname, nwalkers):
    file_dir = os.path.dirname(__file__) + "/"
    path_config_fname = file_dir + config_fname

    config = LeptophilicDM(path_config_fname).config
    adopts_logprior = config.prior == "log"
    _a = config.lo.values
    _b = config.hi.values
    a = _a[adopts_logprior]
    b = _b[adopts_logprior]
    loc = _a[~adopts_logprior]
    scale = (_b - _a)[~adopts_logprior]

    #print(loc,scale)

    p0 = empty((nwalkers, len(config)))
    #print(p0.shape)
    p0[:, ~adopts_logprior] = uniform.rvs(size=(nwalkers,
                                                (~adopts_logprior).sum()),
                                          loc=loc,
                                          scale=scale)
    p0[:, adopts_logprior] = loguniform.rvs(size=(nwalkers,
                                                  adopts_logprior.sum()),
                                            a=a,
                                            b=b)

    return p0
Code Example #15
def generate_p0(df_config, nwalkers):
    config = df_config.copy()
    adopts_logprior = config.prior == "log"
    _a = config.lo.values
    _b = config.hi.values
    a = _a[adopts_logprior]
    b = _b[adopts_logprior]
    loc = _a[~adopts_logprior]
    scale = (_b - _a)[~adopts_logprior]

    #print(loc,scale)

    p0 = empty((nwalkers, len(config)))
    #print(p0.shape)
    p0[:, ~adopts_logprior] = uniform.rvs(size=(nwalkers,
                                                (~adopts_logprior).sum()),
                                          loc=loc,
                                          scale=scale)
    p0[:, adopts_logprior] = loguniform.rvs(size=(nwalkers,
                                                  adopts_logprior.sum()),
                                            a=a,
                                            b=b)

    return p0
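A hedged usage sketch for generate_p0 (the column names name, lo, hi, and prior follow the attribute accesses above; the rows are hypothetical):

import pandas as pd

df_config = pd.DataFrame({
    "name":  ["mass", "coupling", "phase"],
    "lo":    [1e2,    1e-3,       0.0],
    "hi":    [1e4,    1.0,        6.28],
    "prior": ["log",  "log",      "flat"],  # non-"log" rows get uniform draws
})
p0 = generate_p0(df_config, nwalkers=20)  # initial walker positions, shape (20, 3)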
Code Example #16
    def create_random_LDA(
            solver: tuple = ("svd", "lsqr", "eigen"),
            shrinkage: tuple = ("auto", round(random.uniform(1e-5, 1),
                                              5), None),
            tol: tuple = (1e-5, 1e-3),
    ) -> LinearDiscriminantAnalysis:
        """Create a Linear Discriminant model using random hyperparameters.

        :param solver: Tuple with the solver hyperparameters, defaults to ("svd", "lsqr", "eigen")
        :type solver: tuple, optional
        :param shrinkage: Tuple with the shrinkage hyperparameters, defaults to ("auto", round(random.uniform(1e-5, 1), 5), None)
        :type shrinkage: tuple, optional
        :param tol: Tuple with the tolerance hyperparameters, defaults to (1e-5, 1e-3)
        :type tol: tuple, optional
        :return: A LinearDiscriminantAnalysis model with the random hyperparameters.
        :rtype: LinearDiscriminantAnalysis
        """
        solver_ = random.choice(solver)
        model = LinearDiscriminantAnalysis(
            solver=solver_,
            # shrinkage is only supported by the lsqr and eigen solvers
            shrinkage=random.choice(shrinkage) if solver_ != "svd" else None,
            tol=round(loguniform.rvs(tol[0], tol[1]),
                      int(abs(math.log(tol[0], 10)))),
        )
        return model
Code Example #17
    def fit(self, spaceLocsMeas, timeInstants, y, optimize=False, fun=None):
        """
        INPUT:
            spaceLocsMeas: Spatial indices of measurements to be fitted
            timeInstants: Temporal indices of measurements to be fitted
            y: Target values for the given spatial and temporal indices
            optimize: Whether to optimize kernel parameters or not
            fun: Objective function to be minimized in case of optimization
        """
        def obj(theta):
            # Set hyperparameters to theta
            self.kernel_time.set_hyperparams(theta[:time_params_n])
            self.kernel_space.set_hyperparams(theta[time_params_n:])
            
            print(theta)
            
            # Calculate covariances for space kernel
            Kss = self.kernel_space.sample(spaceLocsMeas)
            self.Ks_chol = np.linalg.cholesky(Kss).conj().T
            
            self.a, self.c, self.v0, self.q = self.kernel_time.createDiscreteTimeSys(self.params.data['samplingTime'])

            # initialize quantities needed for kalman estimation
            A = np.kron(I, self.a)
            C = self.Ks_chol @ np.kron(I,self.c)
            V0 = np.kron(I, self.v0)
            Q = np.kron(I, self.q)
            
            score = None
            
            if fun == 'nll':
                x,V,xp,Vp,logMarginal = self.kalmanEst(A,C,Q,V0,R, y, computeLikelihood = True)
                score = logMarginal
            elif fun == 'RMSE':
                score = self.score(spaceLocsMeas, timeInstants, y, metric = 'RMSE')
            
            print(score)
            
            return score
        
        if self.normalize_y:
            self.y_train_mean = np.nanmean(y)
            self.y_train_std = np.nanstd(y)
            self.y_train = y - self.y_train_mean
        else:
            self.y_train_mean = 0
            self.y_train_std = 1
            self.y_train = y
        
        self.y_train_timeInstants = timeInstants
        
        if optimize is True and fun is not None:
            # Getting some useful values
            numSpaceLocs, numTimeInstants = y.shape
            time_params_n = len(self.kernel_time.kernel[:])
            space_params_n = len(self.kernel_space.kernel[:])

            # Create noise matrix
            self.noiseVar = (self.params.data['noiseStd']**2) * np.ones(y.shape)

            I = np.eye(numSpaceLocs)
            R = np.zeros((numSpaceLocs, numSpaceLocs, numTimeInstants))
            for t in np.arange(0, numTimeInstants):
                R[:,:,t] = np.diag(self.noiseVar[:,t]).copy()

            # Use ML to find best kernel parameters using initial guess
            res = minimize(obj, np.concatenate((self.kernel_time.kernel[:], self.kernel_space.kernel[:]), axis=0), 
                   bounds=[(1e-3, 1e+4) for i in range(time_params_n + space_params_n)],
                   method='L-BFGS-B')

            # Repeat for random guesses using loguniform
            for r in np.arange(0, self.n_restarts):
                print('Optimizer restart:', r+1, ' of ',  self.n_restarts)

                random_theta0 = loguniform.rvs(1e-4, 1e+4, size= time_params_n + space_params_n)

                new_res = minimize(obj, random_theta0, 
                   bounds=[(1e-3, 1e+4) for i in range(time_params_n + space_params_n)],
                   method='L-BFGS-B')

                # Store best values
                if new_res.fun < res.fun:
                    res = new_res

            # Update parameters with best results
            self.kernel_time.set_hyperparams(res.x[:time_params_n])
            self.kernel_space.set_hyperparams(res.x[time_params_n:])

            # Update state-space representation
            self.a, self.c, self.v0, self.q = self.kernel_time.createDiscreteTimeSys(self.params.data['samplingTime'])
            self.Ks_chol = np.linalg.cholesky(self.kernel_space.sample(spaceLocsMeas)).conj().T

            print('Best hyperparameters and marginal log value')
            print(res.x)
            print(res.fun)
Code Example #18
    def create_random_network(
        m: int,
        n_classes: int,
        metrics: str = "accuracy",
        layers: tuple = (1, 5),
        n: tuple = (10, 20),
        activation: tuple = ("relu", "sigmoid"),
        lr: tuple = (1e-5, 1e-3),
        optimizer: keras.optimizers.Optimizer = keras.optimizers.Adam,
        loss: str = "categorical_crossentropy",
    ) -> "tuple[keras.models.Sequential, keras.optimizers.Optimizer, str]":
        """Return a deep neural network model with pseudo-random hyperparameters according to its arguments, each time it is called.

        :param m: Number of input variables which will enter in the neural network
        :type m: int
        :param n_classes: Number of classes to be classified
        :type n_classes: int
        :param metrics: String with the metric to evaluate and compare the different random networks, defaults to "accuracy"
        :type metrics: str, optional
        :param layers: Tuple with the number of layers of the neural network as hyperparameter, defaults to (1, 5)
        :type layers: tuple, optional
        :param n: Tuple with the min and max number of neurons per layer, defaults to (10,20)
        :type n: tuple, optional
        :param activation: Tuple with the activation functions as hyperparameters, defaults to ("relu","sigmoid")
        :type activation: tuple, optional
        :param lr: Tuple with the min and max values for the learning rate as hyperparameter, defaults to (1e-5, 1e-3)
        :type lr: tuple, optional
        :param optimizer: A keras Optimizer to be used to train the neural network, defaults to keras.optimizers.Adam
        :type optimizer: keras.optimizers.Optimizer
        :param loss: String with the loss function used to measure the error, defaults to "categorical_crossentropy"
        :type loss: str
        :return: A tuple formed by the keras.models.Sequential, the keras.optimizers.Optimizer, and the loss function
        :rtype: tuple
        """
        # Defining the model class Sequential in order to add layers one by one
        model = keras.models.Sequential()
        # First layer of the network
        model.add(
            keras.layers.Dense(
                random.randint(n[0], n[1]),
                input_dim=m,
                activation=random.choice(activation),
            )
        )
        # Input normalization, acting as a StandardScaler() transformer for the input data
        model.add(keras.layers.BatchNormalization())
        # Loop which adds layers following a uniform random distribution
        for _ in range(random.randint(layers[0], layers[1])):
            model.add(
                keras.layers.Dense(
                    random.randint(n[0], n[1]),
                    activation=random.choice(activation),
                )
            )
        model.add(keras.layers.Dense(n_classes, activation="softmax"))
        # Choosing a learning rate from a logarithmic uniform distribution
        optimizer = optimizer(
            learning_rate=round(
                loguniform.rvs(lr[0], lr[1]), int(abs(math.log(lr[0], 10)))
            )
        )
        # Define some characteristics for the training process
        model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
        return model, optimizer, loss
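A hedged usage sketch (X_train, y_train, and the dimensions are hypothetical): the helper is built for a random-search loop that keeps the best-scoring network.

best_acc, best_model = 0.0, None
for _ in range(10):
    model, optimizer, loss = create_random_network(m=20, n_classes=3)
    history = model.fit(X_train, y_train, epochs=5,
                        validation_split=0.2, verbose=0)
    acc = history.history["val_accuracy"][-1]
    if acc > best_acc:
        best_acc, best_model = acc, model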
Code Example #19
eta_1_distr = lambda: random.uniform(0, 0.1)
eta_2_distr = lambda eta_1: random.uniform(0, eta_1)
delta_eta_distr = lambda eta_1: random.uniform(0, min(eta_1,1.7e-2))

gammap_distr = lambda gamma: random.uniform(gamma, 2*gamma)
gammapp_distr = lambda gammap: random.uniform(gammap, 2*gammap)

sigma_distr = lambda: random.uniform(1/5, 1/2)
sigma_1_distr = lambda: random.uniform(1/5, 1/2)
sigma_2_distr = lambda: random.uniform(1/5, 1/2)

IFR_distr = lambda: random.uniform(1e-3, 1e-1)
IFR1_distr = lambda IFR: random.uniform(1e-3, IFR)

I0_distr = lambda: loguniform.rvs(1e-7, 3e-1)
S0p_distr = lambda: random.uniform(1e-4, 0.1)
S0pp_distr = lambda: random.uniform(1e-4, 0.1)

td_distr = lambda: random.choice(np.arange(7,35))

samples = 1000

file = open("decision_tree_output.dat", "w")
file.write("#seed: %d \n"%seed)
file.write("beta,betap,betapp,beta_1,beta_1p,beta_1pp,\
beta_2,beta_2p,beta_2pp,nu_max,eta_1,eta_2,\
gamma,gammap,gammapp,sigma,sigma_1,sigma_2,IFR,IFR1,IFR2,td,\
S0,S0p,S0pp,E0,E0p,E0pp,I0,I0p,I0pp,R0,D0,d1,d2,D1,D2,\
delta < 0,Delta < 0\n")
Code Example #20
File: evolve.py  Project: clairesworld/exo-top
def bulk_planets_mc(n=100,
                    names=None,
                    mini=None,
                    maxi=None,
                    pl_kwargs={},
                    model_kwargs={},
                    t_eval=None,
                    log=False,
                    T_m0_options=None,
                    propagate_fit_err=False,
                    verbose=False,
                    beta_h=p.beta_h,
                    cov_beta_h=p.cov_beta_h,
                    **kwargs):
    """varying multiple parameters in 'names' between mini and maxi, use default values otherwise.
    update_kwargs can include any TerrestrialPlanet attribute
    initial_kwargs can include T_m0, T_c0, D_l0, t0, tf. names, mini, and maxi are in order and must have same lenghts"""
    # pl_kwargs and model_kwargs should already be taken from inputs file
    if T_m0_options is None:
        T_m0_options = np.array([1000, 1250, 1500, 1750, 2000
                                 ]) + 273  # [ 1273, 1523, 1773, 2023, 2273]

    planets = []
    ii = 0
    while ii < n:
        new_kwargs_pl = pl_kwargs.copy()
        new_kwargs_model = model_kwargs.copy()

        for iii, name in enumerate(names):
            if name == 'T_m0':
                # for initial temps do discrete values after johnny seales
                val = np.random.choice(T_m0_options, size=1)[0]
            elif log:
                val = loguniform.rvs(mini[iii], maxi[iii], size=1)
            else:
                val = rand.uniform(mini[iii], maxi[iii])

            if name in ['T_m0', 'T_c0', 'D_l0']:
                new_kwargs_model.update({name: val})
            else:
                new_kwargs_pl.update({name: val})
            if verbose:
                print(ii, '/', n - 1, ': drew random', name, val)
        if propagate_fit_err:
            beta_h_new = np.random.multivariate_normal(beta_h, cov_beta_h)
            new_kwargs_model.update({'beta_h': beta_h_new})
            if verbose:
                print('       & drew random errors on h scaling', beta_h_new)

        if t_eval is None and ii > 0:
            t_eval = planets[0].t
        pl = None
        while pl is None:  # returns None if numerical issue e.g. D_l0 negative
            pl = build_planet(planet_kwargs=new_kwargs_pl,
                              run_kwargs=new_kwargs_model,
                              t_eval=t_eval,
                              verbose=verbose,
                              **kwargs)
        planets.append(pl)
        ii += 1
    return planets
Code Example #21

	"""
	linxbins = np.linspace(10,1500,20)
	linxwidths = []
	for nlinx,linx in enumerate(linxbins):
		try:
			linxwidths.append(linxbins[nlinx+1] - linxbins[nlinx])
		except IndexError:
			pass
	linxwidths = np.array(linxwidths)
	#### for UNIFORM AREAS IN LINEAR SPACE, HEIGHTS NEED TO BE 1/linxwidths
	linyheights = 1/linxwidths
	"""

	uniform_draws = np.random.uniform(10,1500,size=10000)
	loguniform_draws = loguniform.rvs(10,1500,size=10000)
	logbins = np.logspace(np.log10(10), np.log10(1500),20)

	fig, (ax1, ax2) = plt.subplots(2)
	ax1.hist(uniform_draws, bins=np.linspace(10,1500,20))
	ax1.set_ylabel('uniform draws, uniform binning')
	ax2.hist(uniform_draws, bins=logbins)
	ax2.set_ylabel('uniform draws, log binning')
	plt.show()


	uniform_nperbin = plt.hist(uniform_draws, bins=logbins)[0]
	plt.xscale('log')
	plt.title('uniform nperbin')
	plt.show()
	uniform_nperbin_20x20 = np.zeros(shape=(19,19)) #### there are actually 19 bins on a side.
Code Example #22
    def sample_from_prior(self, size=1, width=1.0):
        ret = loguniform.rvs(*loguniform.interval(width, self.llim, self.rlim),
                             size=size)
        return ret if size != 1 else ret[0]
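For reference, loguniform.interval(width, a, b) returns the endpoints of the central interval containing a fraction width of the probability mass, so the method above draws log-uniformly from a symmetrically trimmed [llim, rlim]; with width=1.0 it reduces to plain loguniform.rvs(llim, rlim). A small illustration:

from scipy.stats import loguniform

lo, hi = loguniform.interval(0.9, 1e-3, 1e3)  # drops 5% of the mass from each tail
sample = loguniform.rvs(lo, hi)               # draw from the trimmed support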
Code Example #23
    
    min_[np.where(min_ == max_)] = 0

    return (data_ - min_) / (max_ - min_), min_, max_

def checkprior(theta, prior):
    flag = prior.pdf(theta) > 0
    print(f'inside of initial prior: {sum(flag)}')
    print(f'out of total: {len(theta)}')

    return theta[flag,:]

def loguniform_prior(Ndata=2_500, log=True):
    a0, b0 = 0.002, 2
    a1, b1 = 0.002, 2
    k1 = loguniform.rvs(a0,b0,size=Ndata)
    k2 = loguniform.rvs(a1,b1,size=Ndata)
    k3 = loguniform.rvs(a0,b0,size=Ndata)
    theta = np.vstack((k1,k2,k3)).T
    if log:
        return np.log(theta)
    return theta

from scipy.stats import uniform

class uniform_prior:
    
    def __init__(self, left = [0.002,0.002,0.002], right =[2,2,2]):
        self.left = np.asarray(left)
        self.right = np.asarray(right)
        self.m = (self.left+self.right)/2
Code Example #24
def run(fname_prefix,
        p0=None,
        nwalkers = 20,
        nsample = 1000,
        nburnin = 100,
        enable_vacuum_stability=False,
        enable_collider_const=False,
        enable_micromegas_likeli=False,
        enable_micromegas_prior=False,
        enable_gm2=False,
        use_pool=False,
        config_fname = "config.csv",
        project_name = "LeptophilicDM",
        dir_models = "/from_taisuke/models",
        overwrite=False
       ):
    """
    fname_prefix: 
        if like "test", save chains into
            test_chain.npy
        etc. if like "test/", save
            test/_chain.npy
        etc.
    """
    executed_time = datetime.datetime.now()
    
    file_dir = os.path.dirname(__file__) + "/"
    path_config_fname = file_dir + config_fname
    
    configs = [enable_vacuum_stability,
               enable_collider_const,
               enable_micromegas_likeli,
               enable_micromegas_prior,
               enable_gm2,
               use_pool]
    
    config_int = (2**arange(len(configs)) * configs).sum()
    
    fname_prefix += f"_nwalkers={nwalkers}_nsample={nsample}_nburnin={nburnin}_config={config_int}"
    print(f"Results will be exported to {fname_prefix}_.gz")

    model = LeptophilicDM(path_config_fname,
                          enable_vacuum_stability,
                          enable_collider_const,
                          enable_micromegas_likeli,
                          enable_micromegas_prior,
                          enable_gm2,
                          project_name,
                          dir_models
                         )
    #nwalkers = 20

    #loc = (model.config.hi.values + model.config.lo.values ) / 2
    #scale = (model.config.hi.values - model.config.lo.values ) * 0.1
    #p0 = norm.rvs(size=(nwalkers,len(model.config)),loc=loc,scale=scale)
    
    # Note: uniform(loc, scale) covers [loc, loc + scale],
    # so the flat-prior dimensions below use loc=lo, scale=hi-lo.
    

    if p0 is None:
        config = LeptophilicDM(path_config_fname).config
        adopts_logprior = config.prior=="log"
        _a = config.lo.values
        _b = config.hi.values
        a = _a[adopts_logprior]
        b = _b[adopts_logprior]
        loc = _a[~adopts_logprior]
        scale = (_b-_a)[~adopts_logprior]

        #print(loc,scale)

        p0 = empty((nwalkers,len(config)))
        #print(p0.shape)
        p0[:,~adopts_logprior]  = uniform.rvs(size=(nwalkers,(~adopts_logprior).sum()),loc=loc,scale=scale) 
        p0[:,adopts_logprior] = loguniform.rvs(size=(nwalkers,adopts_logprior.sum()),a=a,b=b) 

    #pnames = model.param_names
    #idx_mchi = pnames[pnames=="m_chi"].index[0]
    #loc[idx_mchi] = 500
    
    #idx_mchi = pnames[pnames=="m_phi_L"].index[0]
    #loc[idx_mchi] = 1000
    
    #idx_mchi = pnames[pnames=="m_phi_R"].index[0]
    #loc[idx_mchi] = 1000

    sampler = Sampler(model.lnposterior,p0,nwalkers)

    #nsample = 1000
    sampler.sample(nburnin,use_pool=use_pool,burnin=True)
    sampler.sample(nsample,use_pool=use_pool)

    sampler.save(fname_prefix)
    sampler.save_pickle(fname_prefix,overwrite=overwrite)
    
    log = dict(fname_prefix=fname_prefix,
        p0 = p0,
        nwalkers = nwalkers,
        nsample = nsample,
        nburnin = nburnin,
        enable_vacuum_stability = enable_vacuum_stability,
        enable_collider_const = enable_collider_const,
        enable_micromegas_likeli = enable_micromegas_likeli,
        enable_micromegas_prior = enable_micromegas_prior,
        enable_gm2 = enable_gm2,
        use_pool = use_pool,
        config_fname = config_fname,
        project_name = project_name,
        dir_models = dir_models,
        overwrite = overwrite,
        executed_time = executed_time)
    
    
    
    return sampler