Example no. 1
def sampling(phase_1_path, phase_2_path, phase_3_path):

    e = korali.Experiment()
    theta = korali.Experiment()
    psi = korali.Experiment()

    print(phase_1_path)
    print(phase_2_path)

    theta.loadState(phase_1_path)
    psi.loadState(phase_2_path)

    e["Problem"]["Type"] = "Hierarchical/Theta"
    e["Problem"]["Theta Experiment"] = theta
    e["Problem"]["Psi Experiment"] = psi

    e["Solver"]["Type"] = "Sampler/TMCMC"
    e["Solver"]["Population Size"] = 1000
    e["Solver"]["Default Burn In"] = 2
    e["Solver"]["Max Chain Length"] = 1
    e["Solver"]["Target Coefficient Of Variation"] = 0.6

    e["Console Output"]["Verbosity"] = "Detailed"
    e["File Output"]["Path"] = phase_3_path + '/_korali_samples/'

    # Starting Korali's Engine and running experiment
    k = korali.Engine()
    # k["Conduit"]["Type"] = "Concurrent"
    # k["Conduit"]["Concurrent Jobs"] = 12
    k.run(e)
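
A hypothetical invocation of this phase-3 helper (the paths below are placeholders, modeled on the directory layout used in Example no. 19) might look like:

if __name__ == '__main__':
    sampling('_setup/results_phase_1/000/latest',
             '_setup/results_phase_2/latest',
             '_setup/results_phase_3')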
Example no. 2
def model(p):
    x = p["Parameters"][0]
    # Starting Model's Korali Engine
    import korali
    k = korali.Engine()

    # Creating new experiment
    e = korali.Experiment()

    # Configuring Problem
    e["Random Seed"] = 0xC0FEE
    e["Problem"]["Type"] = "Optimization"
    e["Problem"]["Objective Function"] = lambda sampleData: subModel(
        sampleData, x)

    # Defining the problem's variables
    e["Variables"][0]["Name"] = "Y"
    e["Variables"][0]["Lower Bound"] = -10.0
    e["Variables"][0]["Upper Bound"] = +10.0

    # Configuring CMA-ES parameters
    e["Solver"]["Type"] = "Optimizer/CMAES"
    e["Solver"]["Population Size"] = 4
    e["Solver"]["Termination Criteria"][
        "Min Value Difference Threshold"] = 1e-15
    e["Solver"]["Termination Criteria"]["Max Generations"] = 100

    e["Console Output"]["Verbosity"] = "Silent"
    e["File Output"]["Enabled"] = False

    # Running Korali
    k.run(e)

    # Storing the best Y as result of evaluation
    p["F(x)"] = e["Results"]["Best Sample"]["F(x)"]
Example no. 3
    def trainPolicy(self, policy, env, numIterations):
        """Evaluate `numIterations` updating iterations on the given policy for
        the given environment."""
        returns = np.zeros([numIterations * self.populationSize])
        # Define the objective.
        i = 0

        def objective(p):
            """Update the policy with the given parameters, evaluate it, and store the mean reward."""
            nonlocal i  # write access to the sample counter defined above
            weights = p["Parameters"]
            overwriteNetworkParams(policy.layers, weights)

            meanReward = 0
            for ep in range(env.numEpisodesPerEvaluation):  # Each new episode starts here.
                states, actions, policyEvaluations, rewards = env.performOneEpisode(policy)
                meanReward += sum(rewards)
            meanReward /= env.numEpisodesPerEvaluation

            p["Evaluation"] = meanReward
            returns[i] = meanReward
            i += 1

        # Create Korali and problem objects.
        k = korali.Engine()
        e = korali.Experiment()

        # Configure the problem.
        e["Problem"]["Type"] = "Direct/Basic"  # Use Korali v1.0.1.
        e["Problem"]["Objective Function"] = objective

        # Define the problem variables.
        numParams = getNumNetworkParams(policy.layers)
        print("Number of policy parameters =", numParams)
        # Use a separate loop variable here so that the sample counter `i`
        # used inside `objective` is not clobbered before k.run(e).
        for v in range(numParams):
            e["Variables"][v]["Name"] = "X" + str(v)
            # Initial distribution of population samples.
            e["Variables"][v]["Initial Mean"] = 0.0
            e["Variables"][v]["Initial Standard Deviation"] = self.sigma
            # Bounds are necessary to avoid pathological cases generated by Gaussians.
            e["Variables"][v]["Lower Bound"] = -100.0
            e["Variables"][v]["Upper Bound"] = +100.0

        # Configure CMA-ES parameters.
        e["Solver"]["Type"] = "Optimizer/CMAES"
        e["Solver"]["Population Size"] = self.populationSize
        # e["Solver"]["Termination Criteria"]["Min Value Difference Threshold"] = 1e-7
        e["Solver"]["Termination Criteria"]["Max Generations"] = numIterations

        # Run Korali.
        k.run(e)

        maxR = np.max(returns.reshape(numIterations, self.populationSize),
                      axis=1)
        with open('CMAES.txt', 'a') as file_object:
            for i in range(numIterations):
                file_object.write('%e ' % maxR[i])
            file_object.write('\n')
Example no. 4
    def optimize(self, populationSize, maxiter=1000):

        self.nSamples = 1

        self.e = korali.Experiment()

        self.e['Problem']['Type'] = 'Bayesian/Reference'
        self.e['Problem']['Likelihood Model'] = self.likelihoodModel
        self.e['Problem']['Reference Data'] = list(
            map(float, self.data['Model']['y-data']))
        self.e['Problem']['Computational Model'] = self.computational_model

        self.e["Solver"]["Type"] = "Optimizer/CMAES"
        self.e["Solver"]["Population Size"] = populationSize
        self.e["Solver"]["Termination Criteria"]["Max Generations"] = maxiter
        self.e["Solver"]["Termination Criteria"][
            "Min Value Difference Threshold"] = 1e-9

        js = self.get_variables_and_distributions()
        self.set_variables_and_distributions(js)

        self.set_korali_output_files(self.saveInfo['korali samples'], maxiter)
        self.e['Console Output']['Verbosity'] = 'Detailed'

        if self.silent:
            self.e['Console Output']['Verbosity'] = 'Silent'

        k = korali.Engine()
        k['Conduit']['Type'] = 'Concurrent'
        k['Conduit']['Concurrent Jobs'] = self.nThreads

        k.run(self.e)

        printlog('Copy variables from Korali to Epidemics...')
        self.parameters = []
        myDatabase = self.e['Results']['Best Sample']['Parameters']
        for j in range(self.nParameters):
            self.parameters.append({})
            self.parameters[j]['Name'] = self.e['Variables'][j]['Name']
            self.parameters[j]['Values'] = np.asarray([myDatabase[j]])

        self.has_been_called['optimize'] = True
        self.has_been_called['propagate'] = False
        printlog('Done copying variables.')

        names = []
        best = []
        for j in range(self.nParameters):
            best.append(myDatabase[j])
            names.append(self.parameters[j]['Name'])

        js = {}
        js["Value"] = self.e['Results']['Best Sample']['F(x)']
        js["Parameter"] = best
        js["Names"] = names
        save_file(js, self.saveInfo['cmaes'], 'Optimum', fileType='json')
Example no. 5
def run_cmaes_with_termination_criterion(criterion, value):

    print("[Korali] Prepare CMAES run with Termination Criteria "\
            "'{0}'".format(criterion))

    e = korali.Experiment()

    e["Problem"]["Type"] = "Evaluation/Direct/Basic"
    e["Problem"]["Objective"] = "Maximize"
    e["Problem"]["Objective Function"] = evaluateModel

    e["Variables"][0]["Name"] = "X"
    e["Variables"][0]["Lower Bound"] = +1.0
    e["Variables"][0]["Upper Bound"] = +10.0

    e["Solver"]["Type"] = "Optimizer/CMAES"
    e["Solver"]["Population Size"] = 8
    e["Solver"]["Termination Criteria"][criterion] = value

    e["Random Seed"] = 1337

    k = korali.Engine()
    k.run(e)

    if (criterion == "Max Generations"):
        assert_value(e["Internal"]["Current Generation"], value)

    elif (criterion == "Max Infeasible Resamplings"):
        assert_greatereq(e["Solver"]["Internal"]["Infeasible Sample Count"],
                         value)

    elif (criterion == "Max Condition Covariance Matrix"):
        minEw = e["Solver"]["Internal"]["Minimum Covariance Eigenvalue"]
        maxEw = e["Solver"]["Internal"]["Maximum Covariance Eigenvalue"]
        assert_greatereq(maxEw / minEw, value)

    elif (criterion == "Max Value"):
        assert_greatereq(e["Solver"]["Internal"]["Best Ever Value"], value)

    elif (criterion == "Min Value Difference Threshold"):
        previous = e["Solver"]["Internal"]["Previous Best Ever Value"]
        current = e["Solver"]["Internal"]["Best Ever Value"]
        assert_smallereq(previous - current, value)

    elif (criterion == "Min Standard Deviation"):
        assert_smallereq(
            e["Solver"]["Internal"]["Current Min Standard Deviation"], value)

    elif (criterion == "Max Standard Deviation"):
        assert_greatereq(
            e["Solver"]["Internal"]["Current Max Standard Deviation"], value)

    else:
        print("Termination Criterion not recognized!")
        exit(-1)
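
This test relies on the helpers `assert_value`, `assert_greatereq`, and `assert_smallereq`, which are not shown here. Minimal sketches consistent with how they are used (these exact implementations are assumptions) could be:

def assert_value(val, expected):
    assert val == expected, "Value {0} != expected {1}".format(val, expected)

def assert_greatereq(val, lowerBound):
    assert val >= lowerBound, "Value {0} below bound {1}".format(val, lowerBound)

def assert_smallereq(val, upperBound):
    assert val <= upperBound, "Value {0} above bound {1}".format(val, upperBound)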
Example no. 6
    def sample(self, nSamples=1000, cov=0.4, maxiter=100):

        self.e = korali.Experiment()

        self.nSamples = nSamples

        self.e['Problem']['Type'] = 'Bayesian/Reference'
        self.e['Problem']['Likelihood Model'] = self.likelihoodModel
        self.e['Problem']['Reference Data'] = list(
            map(float, self.data['Model']['y-data']))
        self.e['Problem']['Computational Model'] = self.computational_model

        self.e['Solver']['Type'] = "Sampler/TMCMC"
        self.e['Solver']['Version'] = self.sampler
        self.e['Solver']['Step Size'] = 0.1
        self.e['Solver']['Population Size'] = self.nSamples
        self.e['Solver']['Target Coefficient Of Variation'] = cov
        self.e['Solver']['Termination Criteria']['Max Generations'] = maxiter

        js = self.get_variables_and_distributions()
        self.set_variables_and_distributions(js)

        self.set_korali_output_files(self.saveInfo['korali samples'], maxiter)
        self.e['Console Output']['Verbosity'] = 'Detailed'
        if (self.silent): self.e['Console Output']['Verbosity'] = 'Silent'

        k = korali.Engine()
        k['Conduit']['Type'] = 'Concurrent'
        k['Conduit']['Concurrent Jobs'] = self.nThreads

        k.run(self.e)

        js = {}
        js['Log Evidence'] = self.e['Solver']['LogEvidence']
        printlog(f"Log Evidence = {js['Log Evidence']}")
        save_file(js,
                  self.saveInfo['evidence'],
                  'Log Evidence',
                  fileType='json')

        printlog('Copy variables from Korali to Epidemics...')
        self.parameters = []
        myDatabase = self.e['Results']['Sample Database']
        for j in range(self.nParameters):
            self.parameters.append({})
            self.parameters[j]['Name'] = self.e['Variables'][j]['Name']
            self.parameters[j]['Values'] = np.asarray(
                [myDatabase[k][j] for k in range(self.nSamples)])

        self.has_been_called['sample'] = True
        self.has_been_called['propagate'] = False
        printlog('Done copying variables.')
Example no. 7
def propagate_uncertainty(args, jskSamples, jsData):
    sir.set_custom_params(reduction=args.reduction,
                          infer_reduction=args.infer_reduction,
                          duration=args.duration,
                          infer_duration=args.infer_duration)
    jsOde = sir.getReferenceData(jsData)

    Ns = jskSamples['Solver']['Population Size']
    Np = len(jskSamples['Samples'][0]['Parameters'])

    db = jskSamples['Results']['Sample Database']
    p = []
    for j in range(Np):
        tmp = []
        for k in range(Ns):
            tmp.append(db[k][j])
        p.append(tmp)

    # days of prediction = days in the data + future days
    T = jsOde['Time'][-1] + args.futureDays
    t = np.linspace(0, T, args.nPoints)
    jsOde['Time'] = t.tolist()

    e = korali.Experiment()

    e['Problem']['Type'] = 'Propagation'
    e['Problem']['Execution Model'] = \
        lambda modelData: sir.model_for_korali_execute(modelData, jsOde)

    for i, var in enumerate(sir.params_to_infer):
        e['Variables'][i]['Name'] = var
        e['Variables'][i]['Precomputed Values'] = p[i]

    e['Solver']['Type'] = 'Executor'

    e['File Output'] = tools.fileOutputDefaults(args, '_korali_propagation/')

    if (args.silent):
        e['Console Output']['Verbosity'] = 'Silent'

    e['Store Sample Information'] = True

    k = korali.Engine()

    k['Conduit']['Type'] = 'Concurrent'
    k['Conduit']['Concurrent Jobs'] = args.nThreads

    k.run(e)

    return e
Example no. 8
    def trainPolicy(self, policy, env, numIterations):
        """Evaluate `numIterations` updating iterations on the given policy for
        the given environment."""

        # Define the objective.
        def objective(p):
            """Update the policy with given parameters, evalute and return the reward."""
            weights = p["Parameters"]
            overwriteNetworkParams(policy.network, weights)

            meanTotalReward = 0
            for ep in range(env.numEpisodesPerEvaluation):  # Each new episode starts here.
                states, actions, policyEvaluations, rewards = env.performOneEpisode(policy)
                meanTotalReward += sum(rewards)
            meanTotalReward /= env.numEpisodesPerEvaluation
            print("meanTotalReward", meanTotalReward)

            p["Evaluation"] = meanTotalReward  # Korali v1.0.1.
            # p["F(x)"] = meanTotalReward        # Korali master branch (?)


        # Create Korali and problem objects.
        k = korali.Engine()
        e = korali.Experiment()

        # Configure the problem.
        e["Problem"]["Type"] = "Evaluation/Direct/Basic"  # Korali v1.0.1.
        # e["Problem"]["Type"] = "Optimization/Stochastic"  # Korali master branch (?)
        e["Problem"]["Objective Function"] = objective

        # Define the problem variables.
        numParams = getNumNetworkParams(policy.network)
        print("Number of policy parameters =", numParams)
        for i in range(numParams):
            e["Variables"][i]["Name"] = "X" + str(i)
            # Initial distribution of population samples.
            e["Variables"][i]["Initial Mean"] = 0.0
            e["Variables"][i]["Initial Standard Deviation"] = self.sigma
            # Bounds are necessary to avoid pathological cases generated by Gaussians.
            e["Variables"][i]["Lower Bound"]  = -100.0
            e["Variables"][i]["Upper Bound"]  = +100.0

        # Configure CMA-ES parameters.
        e["Solver"]["Type"] = "Optimizer/CMAES"
        e["Solver"]["Population Size"] = self.populationSize
        # e["Solver"]["Termination Criteria"]["Min Value Difference Threshold"] = 1e-7
        e["Solver"]["Termination Criteria"]["Max Generations"] = numIterations

        # Run Korali.
        k.run(e)
Example no. 9
def sample_parameters(args, jsData):
    sir.set_custom_params(reduction=args.reduction,
                          infer_reduction=args.infer_reduction,
                          duration=args.duration,
                          infer_duration=args.infer_duration)
    jsOde = sir.getReferenceData(jsData)

    # Creating new experiment
    e = korali.Experiment()

    # Setting up the reference likelihood for the Bayesian Problem
    e['Problem']['Type'] = 'Bayesian/Reference'
    e['Problem']['Likelihood Model'] = sir.likelihood
    e['Problem']['Reference Data'] = jsOde['Data']
    e['Problem'][
        'Computational Model'] = lambda sampleData: sir.model_for_korali_sample(
            sampleData, jsOde)

    # Configuring TMCMC parameters
    e['Solver']['Type'] = 'TMCMC'
    e['Solver']['Population Size'] = args.nSamples

    for i, var in enumerate(sir.params_to_infer):
        distname = 'Uniform ' + var
        e['Distributions'][i]['Name'] = distname
        e['Distributions'][i]['Type'] = 'Univariate/Uniform'
        minmax = sir.params_prior[var]
        e['Distributions'][i]['Minimum'] = minmax[0]
        e['Distributions'][i]['Maximum'] = minmax[1]
        e['Variables'][i]['Name'] = var
        e['Variables'][i]['Prior Distribution'] = distname

    if (args.silent):
        e['Console Output']['Verbosity'] = 'Silent'
    else:
        e['Console Output']['Verbosity'] = 'Detailed'

    e['File Output'] = tools.fileOutputDefaults(args, '_korali_samples/')
    e['File Output']['Frequency'] = 50
    e["Store Sample Information"] = True

    k = korali.Engine()

    k['Conduit']['Type'] = 'Concurrent'
    k['Conduit']['Concurrent Jobs'] = args.nThreads

    k.run(e)

    return e
Example no. 10
    def trainPolicy(self, policy, env, numIterations):
        """Evaluate `numIterations` updating iterations on the given policy for
        the given environment."""

        # Define the objective.
        def objective(p):
            """Update the policy with given parameters, evalute and return the reward."""
            weights = p["Parameters"]

            # TODO: Update the network weights with values given by Korali (see overwriteNetworkParams).
            #       Run env.numEpisodesPerEvaluation episodes and compute the mean total reward.
            #
            #
            #
            #
            meanTotalReward = 0


            p["Evaluation"] = meanTotalReward  # Korali v1.0.1.
            # p["F(x)"] = meanTotalReward        # Korali master branch (?)


        # Create Korali and problem objects.
        k = korali.Engine()
        e = korali.Experiment()

        # Configure the problem.
        e["Problem"]["Type"] = "Evaluation/Direct/Basic"  # Korali v1.0.1.
        # e["Problem"]["Type"] = "Optimization/Stochastic"  # Korali master branch (?)
        e["Problem"]["Objective Function"] = objective

        # TODO: Define the problem variables (see getNumNetworkParams).
        #
        #       Don't forget to set all parameter values: initial mean, variance, lower/upper bound!
        #
        #
        #
        #

        # TODO: Set up CMA-ES.
        #
        #
        #
        #

        # Run Korali.
        k.run(e)
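
One possible way to fill in the TODOs above, mirroring the completed version in Example no. 8 (the helpers `overwriteNetworkParams` and `getNumNetworkParams` are assumed to exist, as they do there):

        def objective(p):
            """Update the policy with the given parameters, evaluate it, and store the mean reward."""
            weights = p["Parameters"]
            overwriteNetworkParams(policy.network, weights)

            meanTotalReward = 0
            for ep in range(env.numEpisodesPerEvaluation):
                states, actions, policyEvaluations, rewards = env.performOneEpisode(policy)
                meanTotalReward += sum(rewards)
            meanTotalReward /= env.numEpisodesPerEvaluation

            p["Evaluation"] = meanTotalReward  # Korali v1.0.1.

        # One variable per network parameter.
        numParams = getNumNetworkParams(policy.network)
        for i in range(numParams):
            e["Variables"][i]["Name"] = "X" + str(i)
            e["Variables"][i]["Initial Mean"] = 0.0
            e["Variables"][i]["Initial Standard Deviation"] = self.sigma
            e["Variables"][i]["Lower Bound"] = -100.0
            e["Variables"][i]["Upper Bound"] = +100.0

        # CMA-ES solver.
        e["Solver"]["Type"] = "Optimizer/CMAES"
        e["Solver"]["Population Size"] = self.populationSize
        e["Solver"]["Termination Criteria"]["Max Generations"] = numIterations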
Example no. 11
def run_nested_with_termination_criterion(criterion, value):

    print("[Korali] Prepare Nested run with Termination Criteria "\
            "'{0}'".format(criterion))

    e = korali.Experiment()
    e["Problem"]["Type"] = "Bayesian/Custom"
    e["Problem"]["Likelihood Model"] = evaluateLogLikelihood

    e["Distributions"][0]["Name"] = "Uniform 0"
    e["Distributions"][0]["Type"] = "Univariate/Uniform"
    e["Distributions"][0]["Minimum"] = -10.0
    e["Distributions"][0]["Maximum"] = +10.0

    e["Variables"][0]["Name"] = "X"
    e["Variables"][0]["Prior Distribution"] = "Uniform 0"

    e["Solver"]["Type"] = "Sampler/Nested"
    e["Solver"]["Number Live Points"] = 1500
    e["Solver"]["Batch Size"] = 1
    e["Solver"]["Add Live Points"] = True
    e["Solver"]["Resampling Method"] = "Box"
    e["Solver"]["Termination Criteria"][criterion] = value
    e["File Output"]["Enabled"] = False

    e["Random Seed"] = 1337

    k = korali.Engine()
    k.run(e)

    if (criterion == "Max Generations"):
        assert_value(e["Current Generation"], value)

    elif (criterion == "Target Annealing Exponent"):
        assert_greatereq(e["Solver"]["Annealing Exponent"], value)

    elif (criterion == "Max Effective Sample Size"):
        assert_greatereq(e["Solver"]["Effective Sample Size"], value)

    else:
        print("Termination Criterion not recognized!")
        exit(-1)
Example no. 12
def run_dea_with_termination_criterion(criterion, value):

    print("[Korali] Prepare DEA run with Termination Criteria "\
            "'{0}'".format(criterion))

    e = korali.Experiment()
    e["Problem"]["Type"] = "Optimization"
    e["Problem"]["Objective Function"] = evaluateModel

    e["Variables"][0]["Name"] = "X"
    e["Variables"][0]["Lower Bound"] = +1.0
    e["Variables"][0]["Upper Bound"] = +10.0

    e["Solver"]["Type"] = "Optimizer/DEA"
    e["Solver"]["Population Size"] = 10
    e["Solver"]["Termination Criteria"][criterion] = value
    e["File Output"]["Enabled"] = False
    e["Random Seed"] = 1337

    k = korali.Engine()
    k.run(e)

    if (criterion == "Max Generations"):
        assert_value(e["Current Generation"], value)

    elif (criterion == "Max Infeasible Resamplings"):
        assert_greatereq(e["Solver"]["Infeasible Sample Count"], value)

    elif (criterion == "Max Value"):
        assert_greatereq(e["Solver"]["Best Ever Value"], value)

    elif (criterion == "Min Value Difference Threshold"):
        previous = e["Solver"]["Previous Best Ever Value"]
        current = e["Solver"]["Best Ever Value"]
        assert_smallereq(previous - current, value)

    else:
        print("Termination Criterion not recognized!")
        exit(-1)
Example no. 13
def run_tmcmc_with_termination_criterion(criterion, value):

    print("[Korali] Prepare DEA run with Termination Criteria "\
            "'{0}'".format(criterion))

    e = korali.Experiment()
    e["Problem"]["Type"] = "Bayesian/Custom"
    e["Problem"]["Likelihood Model"] = evaluateLogLikelihood

    e["Distributions"][0]["Name"] = "Uniform 0"
    e["Distributions"][0]["Type"] = "Univariate/Uniform"
    e["Distributions"][0]["Minimum"] = -10.0
    e["Distributions"][0]["Maximum"] = +10.0

    e["Variables"][0]["Name"] = "X"
    e["Variables"][0]["Prior Distribution"] = "Uniform 0"

    e["Solver"]["Type"] = "TMCMC"
    e["Solver"]["Population Size"] = 5000
    e["Solver"]["Covariance Scaling"] = 0.001
    e["Solver"]["Termination Criteria"][criterion] = value

    e["Random Seed"] = 1337

    k = korali.Engine()
    k.run(e)

    if (criterion == "Max Generations"):
        assert_value(e["Current Generation"], value)

    elif (criterion == "Target Annealing Exponent"):
        assert_greatereq(e["Solver"]["Annealing Exponent"], value)

    else:
        print("Termination Criterion not recognized!")
        exit(-1)
Example no. 14
def run_phase_2(phase_1_path, phase_2_path):

    # ---------------------------------------------------------------------------- #
    # ---------------------------------- Setup ----------------------------------- #
    # ---------------------------------------------------------------------------- #

    e = korali.Experiment()
    e["Problem"]["Type"] = "Hierarchical/Psi"
    e["Problem"]["Sub Problems"] = phase_1_path

    # ---------------------------------------------------------------------------- #
    # ------------------------------- Conditionals ------------------------------- #
    # ---------------------------------------------------------------------------- #
    # SIR nbin has 3 parameters: beta, gamma, [r]

    # Parameters of the conditional
    e["Variables"][0]["Name"] = "Psi 0"  # R0 mean
    e["Variables"][1]["Name"] = "Psi 1"  # R0 std
    e["Variables"][2]["Name"] = "Psi 2"  # gamma mean
    e["Variables"][3]["Name"] = "Psi 3"  # gamma std
    e["Variables"][4]["Name"] = "Psi 4"  # delta mean
    e["Variables"][5]["Name"] = "Psi 5"  # delta std
    e["Variables"][6]["Name"] = "Psi 6"  # td mean
    e["Variables"][7]["Name"] = "Psi 7"  # td std
    e["Variables"][8]["Name"] = "Psi 8"  # [r] mean
    e["Variables"][9]["Name"] = "Psi 9"  # [r] std

    e["Variables"][0]["Prior Distribution"] = "Uniform 0"  # R0 mean
    e["Variables"][1]["Prior Distribution"] = "Uniform 1"  # R0 std
    e["Variables"][2]["Prior Distribution"] = "Uniform 2"  # gamma mean
    e["Variables"][3]["Prior Distribution"] = "Uniform 3"  # gamma std
    e["Variables"][4]["Prior Distribution"] = "Uniform 4"  # delta mean
    e["Variables"][5]["Prior Distribution"] = "Uniform 5"  # delta std
    e["Variables"][6]["Prior Distribution"] = "Uniform 6"  # td mean
    e["Variables"][7]["Prior Distribution"] = "Uniform 7"  # td std
    e["Variables"][8]["Prior Distribution"] = "Uniform 8"  # [r] mean
    e["Variables"][9]["Prior Distribution"] = "Uniform 9"  # [r] std

    # Conditionals
    e["Problem"]["Conditional Priors"] = [
        "Conditional R0", "Conditional gamma", "Conditional delta",
        "Conditional td", "Conditional [r]"
    ]

    e["Distributions"][0]["Name"] = "Conditional R0"
    e["Distributions"][0]["Type"] = "Univariate/Normal"
    e["Distributions"][0]["Mean"] = "Psi 0"
    e["Distributions"][0]["Standard Deviation"] = "Psi 1"

    e["Distributions"][1]["Name"] = "Conditional gamma"
    e["Distributions"][1]["Type"] = "Univariate/Normal"
    e["Distributions"][1]["Mean"] = "Psi 2"
    e["Distributions"][1]["Standard Deviation"] = "Psi 3"

    e["Distributions"][2]["Name"] = "Conditional delta"
    e["Distributions"][2]["Type"] = "Univariate/Normal"
    e["Distributions"][2]["Mean"] = "Psi 4"
    e["Distributions"][2]["Standard Deviation"] = "Psi 5"

    e["Distributions"][3]["Name"] = "Conditional td"
    e["Distributions"][3]["Type"] = "Univariate/Normal"
    e["Distributions"][3]["Mean"] = "Psi 6"
    e["Distributions"][3]["Standard Deviation"] = "Psi 7"

    e["Distributions"][4]["Name"] = "Conditional [r]"
    e["Distributions"][4]["Type"] = "Univariate/Normal"
    e["Distributions"][4]["Mean"] = "Psi 8"
    e["Distributions"][4]["Standard Deviation"] = "Psi 9"

    # ---------------------------------------------------------------------------- #
    # ---------------------------------- Priors ---------------------------------- #
    # ---------------------------------------------------------------------------- #

    e["Distributions"][5]["Name"] = "Uniform 0"  # R0 mean
    e["Distributions"][5]["Type"] = "Univariate/Uniform"
    e["Distributions"][5]["Minimum"] = 0.5
    e["Distributions"][5]["Maximum"] = 1.5

    e["Distributions"][6]["Name"] = "Uniform 1"  # R0 std
    e["Distributions"][6]["Type"] = "Univariate/Uniform"
    e["Distributions"][6]["Minimum"] = 0.0
    e["Distributions"][6]["Maximum"] = 1

    e["Distributions"][7]["Name"] = "Uniform 2"  # gamma mean
    e["Distributions"][7]["Type"] = "Univariate/Uniform"
    e["Distributions"][7]["Minimum"] = 0.0
    e["Distributions"][7]["Maximum"] = 0.5

    e["Distributions"][8]["Name"] = "Uniform 3"  # gamma std
    e["Distributions"][8]["Type"] = "Univariate/Uniform"
    e["Distributions"][8]["Minimum"] = 0.0
    e["Distributions"][8]["Maximum"] = 0.1

    e["Distributions"][9]["Name"] = "Uniform 4"  # delta mean
    e["Distributions"][9]["Type"] = "Univariate/Uniform"
    e["Distributions"][9]["Minimum"] = 0.0
    e["Distributions"][9]["Maximum"] = 1.0

    e["Distributions"][10]["Name"] = "Uniform 5"  # delta std
    e["Distributions"][10]["Type"] = "Univariate/Uniform"
    e["Distributions"][10]["Minimum"] = 0.0
    e["Distributions"][10]["Maximum"] = 0.5

    e["Distributions"][11]["Name"] = "Uniform 6"  # td mean
    e["Distributions"][11]["Type"] = "Univariate/Uniform"
    e["Distributions"][11]["Minimum"] = 15.
    e["Distributions"][11]["Maximum"] = 30.

    e["Distributions"][12]["Name"] = "Uniform 7"  # td std
    e["Distributions"][12]["Type"] = "Univariate/Uniform"
    e["Distributions"][12]["Minimum"] = 0.0
    e["Distributions"][12]["Maximum"] = 4.0

    e["Distributions"][13]["Name"] = "Uniform 8"  # [r] mean
    e["Distributions"][13]["Type"] = "Univariate/Uniform"
    e["Distributions"][13]["Minimum"] = 2.0
    e["Distributions"][13]["Maximum"] = 10.0

    e["Distributions"][14]["Name"] = "Uniform 9"  # [r] std
    e["Distributions"][14]["Type"] = "Univariate/Uniform"
    e["Distributions"][14]["Minimum"] = 0.0
    e["Distributions"][14]["Maximum"] = 5.

    # ---------------------------------------------------------------------------- #
    # ---------------------------------- Solver ---------------------------------- #
    # ---------------------------------------------------------------------------- #

    e["Solver"]["Type"] = "TMCMC"
    e["Solver"]["Population Size"] = 2000
    e["Solver"]["Default Burn In"] = 3
    e["Solver"]["Target Coefficient Of Variation"] = 0.6
    e["Solver"]["Covariance Scaling"] = 0.01

    e["Console Output"]["Verbosity"] = "Detailed"
    e["File Output"]["Path"] = phase_2_path

    # Starting Korali's Engine and running experiment
    k = korali.Engine()
    k["Conduit"]["Type"] = "Concurrent"
    k["Conduit"]["Concurrent Jobs"] = 12
    k.run(e)
Example no. 15
#!/usr/bin/env python3

# Importing computational model
import sys
import os
import korali

# Creating hierarchical Bayesian problem from previous two problems
e = korali.Experiment()

e["Problem"]["Type"] = "Hierarchical/Psi"

for i in range(5):
    subProblem = korali.Experiment()
    subProblem.loadState('_setup/results_phase_1/' + str(i).zfill(3) +
                         '/latest')
    e["Problem"]["Sub Experiments"][i] = subProblem

# Add probability of theta given psi, one per subproblem variable.

e["Variables"][0]["Name"] = "Psi 1"
e["Variables"][1]["Name"] = "Psi 2"
e["Variables"][2]["Name"] = "Psi 3"
e["Variables"][3]["Name"] = "Psi 4"

e["Variables"][0]["Prior Distribution"] = "Uniform 0"
e["Variables"][1]["Prior Distribution"] = "Uniform 1"
e["Variables"][2]["Prior Distribution"] = "Uniform 2"
e["Variables"][3]["Prior Distribution"] = "Uniform 3"

e["Problem"]["Conditional Priors"] = ["Conditional 0", "Conditional 1"]
Example no. 16
    def sampleLatent(self, kSample):
        # Probability to sample from:
        #   p(d, z | theta) * p(theta) -- that's the (log-)posterior.
        # Use a "Sampling" problem, with our log-posterior as the probability
        # function, but with the current values for theta inserted.

        hyperparameters = kSample["Hyperparameters"]
        numberSamples = kSample["Number Samples"]
        if (kSample["Number Of Latent Variables"] != self.numberLatent):
            raise ValueError("Implementation error, number of latent variables at initialization does not fit to what was passed as variable")


        # Create one sampling experiment to sample all latent variables; after all,
        # the latent variables are correlated / have a joint distribution.

        k = korali.Engine()
        e = korali.Experiment()

        def probability_function(s):

            latent_vars = s["Parameters"]
            hparams = hyperparameters

            if (len(latent_vars) != self.numberLatent):
                raise ValueError("Implementation error, latent variable vector had wrong size")

            if (len(hparams) != self.numberHyperparameters):
                raise ValueError("Implementation error, hyperparameter vector had wrong size")

            s["Latent Variables"] = latent_vars
            s["Hyperparameters"] = hparams

            self.S_func(s)
            self.zeta_func(s)
            self.phi_func(s)

            # -> Assume these are set: sample["S"], sample["zeta"] and sample["phi"]

            _zetaValue = s["zeta"]
            _sValues = s["S"]
            _phiValues = s["phi"]
            logP_of_x = -_zetaValue + np.inner(_sValues, _phiValues)
            s["P(x)"] = logP_of_x

        # Based on tutorial a2-sampling
        e["Problem"]["Type"] = "Sampling"
        e["Problem"]["Probability Function"] = lambda s: probability_function(s)

        for i in range(self.numberLatent):

            # Defining problem's variables
            varName = "latent_" + str(i)

            e["Variables"][i]["Name"] = varName
            e["Variables"][i]["Initial Mean"] = self.previousSampleMeans[i]

            e["Variables"][i]["Initial Standard Deviation"] = 2.0

        # Configuring the MCMC sampler parameters
        e["Solver"]["Type"]  = "MCMC"
        e["Solver"]["Burn In"] = 400
        e["Solver"]["Termination Criteria"]["Max Samples"] = 1000

        # Configuring output settings
        e["File Output"]["Frequency"] = 0
        e["Console Output"]["Frequency"] = 0
        e["Console Output"]["Verbosity"] = "Silent"

        #k["Conduit"]["Type"] = "Concurrent"
        #k["Conduit"]["Concurrent Jobs"] = 2
        k.run(e)
        
        db = e["Solver"]["Sample Database"]
        samples = db[-numberSamples : ] # take samples from the end

        kSample["Samples"] = samples

        # set new "previous sample means"
        for i in range(self.numberLatent):
            self.previousSampleMeans[i] = np.sum(np.array(samples)[:, i]) / float(numberSamples)
Example no. 17
def main():

    # Initialize the distribution
    distrib1 = ExampleDistribution1()

    distrib1_S = lambda s: distrib1.S(s)
    distrib1_zeta = lambda s: distrib1.zeta(s)
    distrib1_phi = lambda s: distrib1.phi(s)

    d1_numberLatentVars = distrib1._p.nDimensions
    d1_numberHyperparams = 1

    initialSigma = 4
    d1_initialLatentValues = np.random.normal(0, 0.5, (d1_numberLatentVars, ))
    d1_initialHyperparams = np.array([initialSigma])

    gaussian_sampler_obj = MCMCLatentSampler(
        d1_numberLatentVars, d1_numberHyperparams, d1_initialLatentValues,
        d1_initialHyperparams, distrib1_zeta, distrib1_S, distrib1_phi)

    k = korali.Engine()
    e = korali.Experiment()

    e["Problem"]["Type"] = "Bayesian/Latent/ExponentialLatent"
    e["Problem"]["S Of Likelihood Model"] = distrib1_S
    e["Problem"]["Zeta Of Likelihood Model"] = distrib1_zeta
    e["Problem"]["Phi Of Likelihood Model"] = distrib1_phi
    e["Problem"]["S Dimension"] = 1
    e["Problem"][
        "Latent Variable Sampler"] = lambda sample: gaussian_sampler_obj.sampleLatent(
            sample)

    e["Solver"]["Type"] = "SAEM"
    e["Solver"]["Number Samples Per Step"] = 10
    e["Solver"]["Termination Criteria"]["Max Generations"] = 40

    e["Variables"][0]["Name"] = "sigma"
    e["Variables"][0]["Bayesian Type"] = "Hyperparameter"
    e["Variables"][0]["Prior Distribution"] = "Uniform 0"
    e["Variables"][0]["Initial Value"] = 5.0  # Initial hyperparameter value
    e["Variables"][0]["Upper Bound"] = 15
    e["Variables"][0]["Lower Bound"] = 0

    # define a variable for each coordinate of mu
    for i in range(distrib1._p.nDimensions):
        e["Variables"][1 + i]["Name"] = "mu" + str(i)
        e["Variables"][1 + i]["Bayesian Type"] = "Latent"
        e["Variables"][1 + i]["Prior Distribution"] = "Uniform 1"
        e["Variables"][1 + i]["Initial Value"] = 0

    e["Distributions"][0]["Name"] = "Uniform 0"
    e["Distributions"][0]["Type"] = "Univariate/Uniform"
    e["Distributions"][0]["Minimum"] = 0
    e["Distributions"][0]["Maximum"] = 5

    e["Distributions"][1]["Name"] = "Uniform 1"
    e["Distributions"][1]["Type"] = "Univariate/Uniform"
    e["Distributions"][1]["Minimum"] = -5
    e["Distributions"][1]["Maximum"] = 5

    # Configuring results path
    e["File Output"]["Path"] = '_korali_result_saem'
    e["Random Seed"] = 0xC0FFEE

    #k["Conduit"]["Type"] = "Concurrent"
    #k["Conduit"]["Concurrent Jobs"] = 4
    #k["Conduit"]["Type"] = "Distributed"

    k.run(e)
Example no. 18
def main():
    # Initialize the distribution
    distrib2 = ExampleDistribution2()

    distrib2_S = lambda s: distrib2.S(s)
    distrib2_zeta = lambda s: distrib2.zeta(s)
    distrib2_phi = lambda s: distrib2.phi(s)

    d2_numberLatentVars = distrib2._p.nPoints  # one for each datapoint
    d2_numberHyperparams = distrib2._p.nDimensions * distrib2._p.nClusters + 1

    d2_initialLatentValues = np.random.normal(0.5, 0.5, (d2_numberLatentVars))
    d2_initialHyperparams = np.random.normal(0, 1, (d2_numberHyperparams))

    gaussian_sampler_obj = MultimodalGaussianSampler(distrib2._p.points,
                                                     distrib2._p.nDimensions,
                                                     distrib2._p.nClusters)

    multimodal_gaussian_sampler = lambda s: gaussian_sampler_obj.sampleLatent(s)

    k = korali.Engine()
    e = korali.Experiment()

    e["Problem"]["Type"] = "Bayesian/Latent/ExponentialLatent"
    e["Problem"]["S Of Likelihood Model"] = distrib2_S
    e["Problem"]["Zeta Of Likelihood Model"] = distrib2_zeta
    e["Problem"]["Phi Of Likelihood Model"] = distrib2_phi
    e["Problem"]["S Dimension"] = distrib2.sufficientStatisticsDimension
    e["Problem"]["Latent Variable Sampler"] = multimodal_gaussian_sampler

    e["Solver"]["Type"] = "SAEM"
    e["Solver"]["Number Samples Per Step"] = 100
    e["Solver"]["Termination Criteria"]["Max Generations"] = 50

    e["Distributions"][0]["Name"] = "Uniform 0"
    e["Distributions"][0]["Type"] = "Univariate/Uniform"
    e["Distributions"][0]["Minimum"] = 0
    e["Distributions"][0]["Maximum"] = 5

    e["Distributions"][1]["Name"] = "Uniform 1"
    e["Distributions"][1]["Type"] = "Univariate/Uniform"
    e["Distributions"][1]["Minimum"] = -5
    e["Distributions"][1]["Maximum"] = 5

    e["Distributions"][2]["Name"] = "Multinomial 2"
    e["Distributions"][2]["Type"] = "Specific/Multinomial"

    # Define which hyperparameters we use (all mu, and sigma)
    variable_counter = 0
    for cluster_idx in range(distrib2._p.nClusters):
        for dim in range(distrib2._p.nDimensions):
            e["Variables"][variable_counter]["Name"] = "mu_" + str(
                cluster_idx) + "_" + str(dim)
            e["Variables"][variable_counter][
                "Bayesian Type"] = "Hyperparameter"
            e["Variables"][variable_counter][
                "Prior Distribution"] = "Uniform 1"  # not used (?) but required
            e["Variables"][variable_counter][
                "Initial Value"] = d2_initialHyperparams[variable_counter]
            e["Variables"][variable_counter]["Upper Bound"] = e[
                "Distributions"][1]["Maximum"]
            e["Variables"][variable_counter]["Lower Bound"] = e[
                "Distributions"][1]["Minimum"]

            variable_counter += 1

    e["Variables"][variable_counter]["Name"] = "sigma"
    e["Variables"][variable_counter]["Bayesian Type"] = "Hyperparameter"
    e["Variables"][variable_counter]["Prior Distribution"] = "Uniform 0"
    e["Variables"][variable_counter]["Initial Value"] = 2.0
    e["Variables"][variable_counter]["Upper Bound"] = 5
    e["Variables"][variable_counter]["Lower Bound"] = 0
    variable_counter += 1

    # Latent variables
    latent_counter = 0
    for point_idx in range(distrib2._p.nPoints):  # one latent assignment per data point
        e["Variables"][variable_counter]["Name"] = "cluster_assignment_" + str(
            latent_counter)
        e["Variables"][variable_counter]["Bayesian Type"] = "Latent"
        e["Variables"][variable_counter][
            "Prior Distribution"] = "Multinomial 2"
        e["Variables"][variable_counter][
            "Initial Value"] = d2_initialLatentValues[latent_counter]

        variable_counter += 1
        latent_counter += 1

    # Configuring results path
    e["File Output"]["Path"] = '_korali_result_saem-mixture'
    e["File Output"]["Frequency"] = 50
    e["Console Output"]["Frequency"] = 10
    e["Console Output"]["Verbosity"] = "Detailed"
    e["Random Seed"] = 0xC0FFEE

    #k["Conduit"]["Type"] = "Concurrent"
    #k["Conduit"]["Concurrent Jobs"] = 4
    k.run(e)
Example no. 19
#!/usr/bin/env python3

# Importing computational model
import sys
import os
import korali
sys.path.append('_setup/model')
from model import *

# Creating hierarchical Bayesian problem from previous two problems
e = korali.Experiment()
theta = korali.Experiment()
psi = korali.Experiment()

theta.loadState('_setup/results_phase_1/000/latest')
psi.loadState('_setup/results_phase_2/latest')

e["Problem"]["Type"] = "Hierarchical/Theta"
e["Problem"]["Theta Experiment"] = theta
e["Problem"]["Psi Experiment"] = psi

e["Solver"]["Type"] = "Sampler/TMCMC"
e["Solver"]["Population Size"] = 1000
e["Solver"]["Termination Criteria"]["Max Generations"] = 30
e["Solver"]["Default Burn In"] = 1
e["Solver"]["Target Coefficient Of Variation"] = 0.6

e["Random Seed"] = 0xC0FFEE
e["Console Output"]["Verbosity"] = "Detailed"
e["File Output"]["Path"] = "_setup/results_phase_3b/"
Example no. 20
    def propagate(self, nPropagate=1000):

        if not self.has_been_called['sample'] and not self.has_been_called[
                'optimize']:
            abort('[Error] Sample or Optimize before propagation')
            return

        self.e = korali.Experiment()

        self.nPropagate = nPropagate

        self.e['Problem']['Type'] = 'Propagation'
        self.e['Problem'][
            'Execution Model'] = self.computational_model_propagate

        for k in range(self.nParameters):
            self.e['Variables'][k]['Name'] = self.parameters[k]['Name']
            self.e['Variables'][k]['Precomputed Values'] = self.parameters[k][
                'Values'].tolist()

        self.e['Solver']['Type'] = 'Executor'

        self.set_korali_output_files(self.saveInfo['korali propagation'])

        if (self.silent): self.e['Console Output']['Verbosity'] = 'Silent'

        self.e['Store Sample Information'] = True

        k = korali.Engine()

        k.run(self.e)

        propagate_idx = random.sample(range(self.nSamples), nPropagate)

        Nv = self.e['Samples'][0]['Saved Results']['Number of Variables']
        Nt = self.e['Samples'][0]['Saved Results']['Length of Variables']
        varNames = []
        for k in range(Nv):
            varNames.append(
                self.e['Samples'][0]['Saved Results']['Variables'][k]['Name'])

        printlog('Copy variables from Korali to Epidemics...')
        self.propagatedVariables = {}
        for i, x in enumerate(varNames):
            self.propagatedVariables[x] = np.zeros((nPropagate, Nt))
            for k, idx in enumerate(propagate_idx):
                self.propagatedVariables[x][k] = np.asarray(
                    self.e['Samples'][idx]['Saved Results']['Variables'][i]
                    ['Values'])

        varNames = []
        if (self.likelihoodModel == 'Normal'
                or self.likelihoodModel == 'Positive Normal'):
            if self.useInfections:
                varNames.append('Standard Deviation Daily Incidence')
            if self.useDeaths:
                varNames.append('Standard Deviation Daily Deaths')

        elif (self.likelihoodModel == 'StudentT'
              or self.likelihoodModel == 'Positive StudentT'):
            if self.useInfections:
                varNames.append('Degrees Of Freedom Daily Incidence')
            if self.useDeaths:
                varNames.append('Degrees Of Freedom Daily Deaths')

        elif (self.likelihoodModel == 'Poisson'):
            pass

        elif (self.likelihoodModel == 'Geometric'):
            pass

        elif (self.likelihoodModel == 'Negative Binomial'):
            if self.useInfections:
                varNames.append('Dispersion Daily Incidence')
            if self.useDeaths:
                varNames.append('Dispersion Daily Deaths')

        else:
            abort('Likelihood not found in propagate.')

        for varName in varNames:
            self.propagatedVariables[varName] = np.zeros((nPropagate, Nt))
            for k in range(nPropagate):
                self.propagatedVariables[varName][k] = np.asarray(
                    self.e['Samples'][k]['Saved Results'][varName])

        printlog('Done copying variables.')

        # TODO clear variable?
        self.e = korali.Experiment()

        self.has_been_called['propagate'] = True
Example no. 21
    def sample_knested(self,
                       nLiveSamples=1500,
                       freq=1500,
                       maxiter=1e9,
                       dlogz=0.1,
                       batch=1):

        self.e = korali.Experiment()

        self.e['Problem']['Type'] = 'Bayesian/Reference'
        self.e['Problem']['Likelihood Model'] = self.likelihoodModel
        self.e['Problem']['Reference Data'] = list(
            map(float, self.data['Model']['y-data']))
        self.e['Problem']['Computational Model'] = self.computational_model

        self.e["Solver"]["Type"] = "Sampler/Nested"
        self.e["Solver"]["Resampling Method"] = "Multi Ellipse"
        self.e["Solver"]["Number Live Points"] = nLiveSamples
        self.e["Solver"]["Proposal Update Frequency"] = freq
        self.e["Solver"]["Ellipsoidal Scaling"] = 1.10
        self.e["Solver"]["Batch Size"] = batch

        self.e["Solver"]["Termination Criteria"]["Max Generations"] = maxiter
        self.e["Solver"]["Termination Criteria"][
            "Min Log Evidence Delta"] = dlogz
        self.e["Solver"]["Termination Criteria"][
            "Max Effective Sample Size"] = 25000

        js = self.get_variables_and_distributions()
        self.set_variables_and_distributions(js)

        self.set_korali_output_files(self.saveInfo['korali samples'], maxiter)
        self.e['Console Output']['Verbosity'] = 'Detailed'
        self.e["Console Output"]["Frequency"] = 100

        if (self.silent): self.e['Console Output']['Verbosity'] = 'Silent'

        k = korali.Engine()

        k['Conduit']['Type'] = 'Concurrent'
        k['Conduit']['Concurrent Jobs'] = self.nThreads

        k.run(self.e)

        js = {}
        js['Log Evidence'] = self.e['Solver']['LogEvidence']
        js['Error'] = self.e['Solver']['LogEvidence Var']
        printlog(f"Log Evidence = {js['Log Evidence']}")
        printlog(f"Variance     = {js['Error']}")
        save_file(js,
                  self.saveInfo['evidence'],
                  'Log Evidence',
                  fileType='json')

        printlog('Copy variables from Korali to Epidemics...')

        myDatabase = self.e['Results']['Posterior Sample Database']
        self.nSamples, _ = np.shape(myDatabase)

        self.parameters = []
        for j in range(self.nParameters):
            self.parameters.append({})
            self.parameters[j]['Name'] = self.e['Variables'][j]['Name']
            self.parameters[j]['Values'] = np.asarray(
                [myDatabase[k][j] for k in range(self.nSamples)])

        self.has_been_called['sample'] = True
        self.has_been_called['propagate'] = False
        printlog('Done copying variables.')
Example no. 22
#!/usr/bin/env python3

import korali
import numpy as np

# load the surrogate
surrogate = korali.Experiment()
found = surrogate.loadState("_korali_result_surrogate/latest")
assert found

def model(sample):
  x = sample["Parameters"][0]
  y = surrogate.getEvaluation([[x]])
  # minus because we maximize
  sample["F(x)"] = -y[0][0]

k = korali.Engine()
e = korali.Experiment()

e['Random Seed'] = 0xC0FFEE
e["Problem"]["Type"] = "Optimization"
e["Problem"]["Objective Function"] = model

e["Variables"][0]["Name"] = "X"
e["Variables"][0]["Lower Bound"] = -1.0
e["Variables"][0]["Upper Bound"] = +1.0

e["Solver"]["Type"] = "Optimizer/CMAES"
e["Solver"]["Population Size"] = 4
e["Solver"]["Termination Criteria"]["Min Value Difference Threshold"] = 1e-15
e["Solver"]["Termination Criteria"]["Max Generations"] = 100
Example no. 23
import korali


# NOTE: the original definition line is missing from this snippet; the signature
# below is inferred from the function body and from the calls further down.
def read_matrix_for_gp(fileName, lastColumnIsData=True):
    f = open(fileName, "r")
    data = [[float(n) for n in line.split()] for line in f]
    f.close()

    if lastColumnIsData:
        x = [row[:-1] for row in data]
        y = [row[-1] for row in data]
    else:
        x = data
        y = []

    return x, y


x, y = read_matrix_for_gp('data/sincos1d_train.dat')
e0 = korali.Experiment()
e0["Problem"]["Type"] = "Evaluation/GaussianProcess"
e0["Problem"]["Covariance Function"] = "CovSum ( CovSEiso, CovNoise)"
e0["Problem"]["X Data"] = x
e0["Problem"]["Y Data"] = y
e0["Solver"]["Type"] = "Optimizer/Rprop"
e0["Solver"]["Termination Criteria"]["Max Generations"] = 200
e0["Solver"]["Termination Criteria"]["Parameter Relative Tolerance"] = 1e-8
e0["Console"]["Verbosity"] = "Normal"
e0["Console"]["Frequency"] = 10
e0["Results"]["Frequency"] = 100
e0["Results"]["Path"] = "_korali_result_train"

x, y = read_matrix_for_gp('data/sincos1d_test.dat')
e1 = korali.Experiment()
e1["Problem"]["Type"] = "Execution/GaussianProcess"
Example no. 24
#!/usr/bin/env python3

# Importing computational model
import sys
import os
import korali
sys.path.append('_setup/model')
from model import *

# Creating hierarchical Bayesian problem from previous two problems
e = korali.Experiment()
sub = korali.Experiment()
psi = korali.Experiment()

# Loading previous results
sub.loadState('_setup/results_phase_1/000/latest')
psi.loadState('_setup/results_phase_2/latest')

# Specifying reference data
data = getReferenceData("_setup/data/", 0)
N = len(data)

# We need to redefine the subproblem's computational model
sub["Problem"]["Computational Model"] = lambda d: normal(N, d)

e["Problem"]["Type"] = "Hierarchical/Theta"
e["Problem"]["Sub Experiment"] = sub
e["Problem"]["Psi Experiment"] = psi

e["Solver"]["Type"] = "Sampler/TMCMC"
e["Solver"]["Population Size"] = 1000
Example no. 25
def run_ccmaes(constraint):

    print("[Korali] Prepare CMAES run with Termination Criteria "\
            "'{0}'".format(constraint))

    e = korali.Experiment()

    e["Problem"]["Type"] = "Optimization/Constrained"
    e["Problem"]["Objective Function"] = evaluateModel

    e["Variables"][0]["Name"] = "X"
    e["Variables"][0]["Lower Bound"] = -10.0
    e["Variables"][0]["Upper Bound"] = +10.0

    e["Variables"][1]["Name"] = "Y"
    e["Variables"][1]["Lower Bound"] = -10.0
    e["Variables"][1]["Upper Bound"] = +10.0

    e["Solver"]["Type"] = "CMAES"
    e["Solver"]["Population Size"] = 8
    e["Solver"]["Viability Population Size"] = 2
    e["Solver"]["Termination Criteria"]["Max Generations"] = 100
    e["Solver"]["Is Sigma Bounded"] = 1

    e["Console Output"]["Verbosity"] = "Detailed"
    e["Random Seed"] = 1337

    k = korali.Engine()
    
    if (constraint == "None"):
        k.run(e)
        assert_greatereq(e["Solver"]["Best Ever Value"],-6*1e-10)

    elif (constraint == "Inactive"):
        e["Problem"]["Constraints"] = [ inactive1, inactive2 ]
        k.run(e)
        assert_greatereq(e["Solver"]["Best Ever Value"],-1.8*1e-10)

    elif (constraint == "Active at Max 1"):
        e["Problem"]["Constraints"] = [ activeMax1, activeMax2 ]
        k.run(e)
        assert_greatereq(e["Solver"]["Best Ever Value"],-4.826824e+00)

    elif (constraint == "Active at Max 2"):
        e["Problem"]["Constraints"] = [ activeMax1, activeMax2, activeMax3, activeMax4 ]
        k.run(e)
        assert_greatereq(e["Solver"]["Best Ever Value"],-9.653645e+00)

    elif (constraint == "Inactive at Max 1"):
        e["Problem"]["Constraints"] = [ inactiveMax1, inactiveMax2 ]
        k.run(e)
        assert_greatereq(e["Solver"]["Best Ever Value"],-2.19963e-10)

    elif (constraint == "Inactive at Max 2"):
        e["Problem"]["Constraints"] = [ inactiveMax1, inactiveMax2, inactiveMax3, inactiveMax4 ]
        k.run(e)
        assert_greatereq(e["Solver"]["Best Ever Value"],-4.626392e-10)

    elif (constraint == "Mixed"):
        e["Problem"]["Constraints"] = [ activeMax1, activeMax2, activeMax3, activeMax4, inactiveMax1, inactiveMax2, inactiveMax3, inactiveMax4 ]
        k.run(e)
        assert_greatereq(e["Solver"]["Best Ever Value"],-7.895685e+01)

    elif (constraint == "Stress"):
        e["Problem"]["Constraints"] = [ activeMax1, activeMax2, activeMax3, activeMax4, inactiveMax1, inactiveMax2, inactiveMax3, inactiveMax4, stress1, stress2, stress3, stress4, stress5, stress6, stress7, stress8 ]
        k.run(e)
        assert_greatereq(e["Solver"]["Best Ever Value"],-7.895685e+01)

    else:
        print("Constraint not recognized!")
        exit(-1)
Example no. 26
#!/usr/bin/env python3

# In this example, we demonstrate how Korali finds values for the
# variables that maximize the objective function, given by a
# user-provided computational model, subject to a set of
# constraints.

# Importing the computational model and constraints
import sys
sys.path.append('model')
from model import *

# Creating new experiment
import korali
e = korali.Experiment()

# Selecting problem type
e["Problem"]["Type"] = "Optimization/Stochastic"
e["Problem"]["Objective Function"] = model

# Creating 10 variables and setting their CMA-ES bounds
for i in range(10) :
  e["Variables"][i]["Name"] = "X" + str(i)
  e["Variables"][i]["Initial Mean"] = 1.0
  e["Variables"][i]["Lower Bound"]  = -19.0
  e["Variables"][i]["Upper Bound"]  = +21.0

# We set some of them as discrete.
e["Variables"][0]["Granularity"] = 1.0
e["Variables"][1]["Granularity"] = 1.0
e["Variables"][3]["Granularity"] = 1.0
Example no. 27
def run_phase_2(phase_1_paths, phase_2_path, variables):

    # Problem
    e = korali.Experiment()
    e["Problem"]["Type"] = "Hierarchical/Psi"

    for i in range(len(phase_1_paths)):
        print(phase_1_paths[i])
        subProblem = korali.Experiment()
        subProblem.loadState(phase_1_paths[i])
        e["Problem"]["Sub Experiments"][i] = subProblem

    # Define conditionals
    e["Problem"]["Conditional Priors"] = [
        "Conditional " + str(var['name']) for var in variables
    ]

    for i, var in enumerate(variables):

        e["Distributions"][i]["Name"] = "Conditional " + str(var['name'])

        if var['cond_type'] == 'Normal':
            e["Distributions"][i]["Type"] = "Univariate/Normal"
            e["Distributions"][i]["Mean"] = var['name'] + ' Mean'
            e["Distributions"][i]["Standard Deviation"] = var['name'] + ' Std'
        else:
            print('not implemented yet')

    # Define hyperparameters
    distrib_counter = len(variables)
    i = 0
    for var in variables:
        cond_params = [
            ele for ele in list(var.keys())
            if ele not in ['name', 'cond_type']
        ]

        for cond_param in cond_params:
            var_name = var['name'] + ' ' + cond_param
            e["Variables"][i]["Name"] = var_name
            e["Variables"][i]["Prior Distribution"] = 'Uniform ' + var_name

            j = distrib_counter + i  # offset to take into account the prior distributions
            e["Distributions"][j]["Name"] = 'Uniform ' + var_name
            e["Distributions"][j]["Type"] = "Univariate/Uniform"
            e["Distributions"][j]["Minimum"] = var[cond_param][0]
            e["Distributions"][j]["Maximum"] = var[cond_param][1]
            i += 1

    # Solver
    # e["Solver"]["Type"] = "Sampler/TMCMC"
    # e["Solver"]["Population Size"] = 2000
    # e["Solver"]["Default Burn In"] = 3;
    # e["Solver"]["Target Coefficient Of Variation"] = 0.6
    # e["Solver"]["Covariance Scaling"] = 0.01

    e["Solver"]["Type"] = "Sampler/Nested"
    e["Solver"]["Resampling Method"] = "Multi Ellipse"
    e["Solver"]["Number Live Points"] = 1500
    e["Solver"]["Proposal Update Frequency"] = 1500
    e["Solver"]["Ellipsoidal Scaling"] = 1.10
    batch = 12
    e["Solver"]["Batch Size"] = batch

    e["Solver"]["Termination Criteria"]["Max Generations"] = 1e9
    e["Solver"]["Termination Criteria"]["Min Log Evidence Delta"] = 0.1
    e["Solver"]["Termination Criteria"]["Max Effective Sample Size"] = 25000

    e["Console Output"]["Verbosity"] = "Detailed"
    e["File Output"]["Path"] = phase_2_path
    e["File Output"]["Frequency"] = 5000
    create_folder(phase_2_path)

    # Starting Korali's Engine and running experiment
    k = korali.Engine()
    # k["Conduit"]["Type"] = "Concurrent"
    # k["Conduit"]["Concurrent Jobs"] = batch
    # print('Launching Korali')
    k.run(e)