Example #1
    def testmain(self):

        # Same specification as before
        generation_specification = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": [[10, 20, 30]]}
        specifications = SpecificationGenerator().generate(generation_specification)

        output_generation_specification = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": [10, 20, 30]}
        output_specifications = SpecificationGenerator().generate(output_generation_specification)

        name = "test"
        # This time we will run them all in parallel
        runner = ExperimentRunner()
        expr = SimpleExperiment()
        runner.run(name, specifications, expr, specification_runner=MultiprocessingRunner(),
                   use_dashboard=True, propagate_exceptions=True, context_type="spawn")
        # Every experiment in the run should have produced a non-empty log file
        log_base = os.path.join("experiment_runs", name, "logs")
        for root, dirs, files in os.walk(log_base):
            for file in files:
                with open(os.path.join(root, file), "r") as f:
                    lines = f.readlines()
                    self.assertNotEqual([], lines)

        # Every expected output specification should appear in the results exactly once
        for result in experiment_iterator(name):
            if result["result"] != []:
                output_specifications.remove(result["specification"])
        self.assertEqual([], output_specifications)
Example #2
    def testmain(self):

        # Same specification as before
        generation_specification = {
            "seed": [1, 2, 3, 4, 5, 6, 7, 8],
            "num_calls": [[10, 20, 30]]
        }
        specifications = SpecificationGenerator().generate(
            generation_specification)

        output_generation_specification = {
            "seed": [1, 2, 3, 4, 5, 6, 7, 8],
            "num_calls": [10, 20, 30]
        }
        output_specifications = SpecificationGenerator().generate(
            output_generation_specification)

        name = "test"
        # This time we will run them all in parallel
        runner = ExperimentRunner()
        runner.run(name,
                   specifications,
                   SimpleExperiment(),
                   specification_runner=MultiprocessingRunner(),
                   use_dashboard=False,
                   propagate_exceptions=True)
        for result in experiment_iterator(name):
            if result["result"] != []:
                output_specifications.remove(result["specification"])
        self.assertEqual([], output_specifications)
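Both tests above instantiate SimpleExperiment without showing its definition. As a rough sketch of the Experiment contract it has to satisfy (the import path and the body are assumptions; the real test class appears to handle a list-valued num_calls and emit one output per entry, while this sketch assumes a scalar num_calls as in Examples #4 and #5):

import random
import typing

from smallab.experiment_types.experiment import Experiment  # import path assumed


class SimpleExperiment(Experiment):
    # Hypothetical minimal implementation; the real class is not shown in this excerpt
    def main(self, specification: typing.Dict) -> typing.Dict:
        random.seed(specification["seed"])  # each specification carries its own seed
        r = 0.0
        for _ in range(specification["num_calls"]):
            r = random.random()
        # The returned dictionary is what experiment_iterator exposes as result["result"]
        return {"number": r}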
Example #3
        "beta_param": 1,
        "epsilon": 10,
        "delta": 0.1,
        "sample_observations": False,
        "use_expected_improvement": False,
        "planning_steps": [200],
        "rollout_allocation_method": ["fixed"],
        "waste_unused_rollouts": [False],
    }

    gen_baseline = base_specs.copy()
    gen_baseline.update({
        "plan_commitment_algorithm": "n_steps",
        "plan_threshold": [1],
    })
    specs_baseline = SpecificationGenerator().generate(gen_baseline)

    gen_ugapec = base_specs.copy()
    gen_ugapec.update({
        "plan_commitment_algorithm": "ugapec",
        # 1000 should allow more points to be added to self.current_traj, which means fewer rollouts
        "plan_threshold": [5, 10],  # [1, 5, 10, 100, 1000],
    })
    specs_ugapec = SpecificationGenerator().generate(gen_ugapec)

    gen_tTest = base_specs.copy()
    gen_tTest.update({
        "plan_commitment_algorithm": "tTest",
        # p-value
        "plan_threshold": [0.05, 0.1],  # [0.0, 0.25, 0.5],
    })
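The excerpt ends before the tTest specifications are generated and the lists are combined. Following the pattern in Example #5 below, a sketch of how the per-algorithm lists might be merged and run (PlanningExperiment and the run name are hypothetical stand-ins):

specs_tTest = SpecificationGenerator().generate(gen_tTest)

# Merge the per-algorithm specification lists into one batch, as Example #5 does
specifications = []
specifications += specs_baseline
specifications += specs_ugapec
specifications += specs_tTest

# PlanningExperiment is a hypothetical stand-in for this project's Experiment subclass
runner = ExperimentRunner()
runner.run("plan_commitment", specifications, PlanningExperiment(),
           specification_runner=MultiprocessingRunner())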
Example #4
            raise Exception(
                "Something bad happened, a moth flew into the computer!")
        if self.i >= self.num_calls:
            # Done with the experiment, return the results dictionary like normal
            return {"number": self.r}
        else:
            # This experiment isn't done, return the progress as a tuple to update the dashboard
            return (self.i, self.num_calls)


# Same specification as before
generation_specification = {
    "seed": [1, 2, 3, 4, 5, 6, 7, 8],
    "num_calls": [1, 2, 3]
}
specifications = SpecificationGenerator().generate(generation_specification)

name = "checkpointed_run"
# This time we will run them all in parallel
runner = ExperimentRunner()
runner.run(name,
           specifications,
           SimpleExperiment(),
           specification_runner=MultiprocessingRunner())

# Some of our experiments may have failed; let's call run again to retry them
runner.run(name,
           specifications,
           SimpleExperiment(),
           specification_runner=MultiprocessingRunner())
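Example #4 shows only the tail of the experiment class plus the driver code. A minimal complete sketch of such a class, assuming smallab's CheckpointedExperiment interface (initialize sets up state that is checkpointed between calls, and step returns either a progress tuple or the final results dictionary; the import path is an assumption):

import random

from smallab.experiment_types.checkpointed_experiment import CheckpointedExperiment  # import path assumed


class SimpleExperiment(CheckpointedExperiment):
    def initialize(self, specification):
        # Called once per specification; state set here survives checkpointing
        random.seed(specification["seed"])
        self.num_calls = specification["num_calls"]
        self.i = 0
        self.r = 0.0

    def step(self):
        self.i += 1
        self.r = random.random()
        if self.i >= self.num_calls:
            # Done with the experiment, return the results dictionary like normal
            return {"number": self.r}
        else:
            # Not done yet, return progress as a tuple to update the dashboard
            return (self.i, self.num_calls)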
Example #5
    for root, _, files in os.walk(get_experiment_save_directory("random_number")):
        for fname in files:
            if ".json" in fname:
                with open(os.path.join(root, fname), "r") as f:
                    results = json.load(f)
                    print(results["specification"]["seed"])
                    print(results["result"]["number"])

    from smallab.specification_generator import SpecificationGenerator

    # If you want to run a lot of experiments but not manually write out each one, use the specification generator.
    # Note: This is also JSON serializable, so you could store this in a json file
    generation_specification = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": [1, 2, 3]}

    # Call the generate method; it will create the cross product.
    specifications = SpecificationGenerator().generate(generation_specification)
    print(specifications)

    runner.run("random_number_from_generator", specifications, SimpleExperiment(), continue_from_last_run=True)

    # Read back our results
    for root, _, files in os.walk(get_save_directory("random_number_from_generator")):
        for fname in files:
            if ".pkl" in fname:
                with open(os.path.join(root, fname), "rb") as f:
                    results = dill.load(f)
                    print(results["specification"]["seed"])
                    print(results["result"]["number"])

    # If you have an experiment you want to run on a lot of computers you can use the MultiComputerGenerator
    # You assign each computer a number from 0..number_of_computers-1 and it gives each computer every number_of_computers-th specification
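The excerpt breaks off after this comment. A sketch of the usage it describes, assuming MultiComputerGenerator lives next to SpecificationGenerator and takes (computer_index, number_of_computers):

from smallab.specification_generator import MultiComputerGenerator  # location assumed

# Two machines split the same generation specification between them
g1 = MultiComputerGenerator(0, 2)  # this computer is index 0 of 2
g2 = MultiComputerGenerator(1, 2)  # the other computer is index 1 of 2

specifications_1 = g1.generate(generation_specification)
specifications_2 = g2.generate(generation_specification)

# Together the two lists should cover every specification exactly once
assert len(specifications_1) + len(specifications_2) == len(specifications)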
        "beta_param": 1,
        "epsilon": 10,
        "delta": 0.1,
        "sample_observations": False,
        "use_expected_improvement": False,
        "planning_steps": [num_steps],
    }

    gen_baseline = base_specs.copy()
    gen_baseline.update({
        "plan_commitment_algorithm": "n_steps",
        "plan_threshold": [1],
        "rollout_allocation_method": ["fixed"],
        "waste_unused_rollouts": [False],
    })
    specs_baseline = SpecificationGenerator().generate(gen_baseline)

    gen_our_best = base_specs.copy()
    gen_our_best.update({
        "plan_commitment_algorithm": "tTest",
        "plan_threshold": [0.05],
        "rollout_allocation_method": ["beta-ugapeb"],
        "waste_unused_rollouts": [True],
    })
    specs_our_best = SpecificationGenerator().generate(gen_our_best)

    specifications = []
    specifications += specs_baseline
    specifications += specs_our_best

    print(