Example #1
    def testmain(self):

        # Same specification as before
        generation_specification = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": [[10, 20, 30]]}
        specifications = SpecificationGenerator().generate(generation_specification)

        output_generation_specification = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": [10, 20, 30]}
        output_specifications = SpecificationGenerator().generate(output_generation_specification)

        name = "test"
        # This time we will run them all in parallel
        runner = ExperimentRunner()
        expr = SimpleExperiment()
        runner.run(name, specifications, expr, specification_runner=MultiprocessingRunner(),
                   use_dashboard=True, propagate_exceptions=True, context_type="spawn")
        log_base = os.path.join("experiment_runs", name, "logs")
        for root, dirs, files in os.walk(log_base):
            for file in files:
                with open(os.path.join(root, file), "r") as f:
                    lines = f.readlines()
                    self.assertNotEqual([], lines)

        for result in experiment_iterator(name):
            if result["result"] != []:
                output_specifications.remove(result["specification"])
        self.assertEqual([], output_specifications)
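Note the nesting difference between the two generation specifications above: a doubly nested list is treated as a single list value, while a flat list is cross-producted with the other list-valued keys. A quick illustration (the expected outputs in the comments follow the behavior this test relies on):

gen = {"seed": [1, 2], "num_calls": [[10, 20, 30]]}
# The inner list counts as one value, so every specification carries the whole list:
# [{"seed": 1, "num_calls": [10, 20, 30]}, {"seed": 2, "num_calls": [10, 20, 30]}]
print(SpecificationGenerator().generate(gen))

flat = {"seed": [1, 2], "num_calls": [10, 20, 30]}
# Flat lists are cross-producted: 2 * 3 = 6 specifications.
print(SpecificationGenerator().generate(flat))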
Example #2
 def test_pickle_serializable_experiment_success(self):
     experiment = PickleOnlySerializableExeperiment()
     runner = ExperimentRunner()
     specification = {"test": "test"}
     runner.run("test", [specification], experiment, specification_runner=MainRunner())
     self.assertIn("run.pkl", os.listdir(get_save_file_directory('test', specification)))
     self.assertIn("specification.json", os.listdir(get_save_file_directory('test', specification)))
Example #3
    def testmain(self):

        # Same specification as before
        generation_specification = {
            "seed": [1, 2, 3, 4, 5, 6, 7, 8],
            "num_calls": [[10, 20, 30]]
        }
        specifications = SpecificationGenerator().generate(
            generation_specification)

        output_generation_specification = {
            "seed": [1, 2, 3, 4, 5, 6, 7, 8],
            "num_calls": [10, 20, 30]
        }
        output_specifications = SpecificationGenerator().generate(
            output_generation_specification)

        name = "test"
        # This time we will run them all in parallel
        runner = ExperimentRunner()
        runner.run(name,
                   specifications,
                   SimpleExperiment(),
                   specification_runner=MultiprocessingRunner(),
                   use_dashboard=False,
                   propagate_exceptions=True)
        for result in experiment_iterator(name):
            if result["result"] != []:
                output_specifications.remove(result["specification"])
        self.assertEqual([], output_specifications)
Example #4
 def test_checkpoint_handler_rotates_checkpoints_properly(self):
     experiment = SerializableExperimentFailsAfter4Steps()
     runner = ExperimentRunner()
     specification = {"test": "test"}
     runner.run("test", [specification], experiment, specification_runner=MainRunner())
     self.assertEqual(3, len(os.listdir(get_partial_save_directory("test", specification))))
     partial_experiment = CheckpointedExperimentHandler().load_most_recent("test", specification)
     self.assertEqual(partial_experiment.j, 3)
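The experiment class is not shown either; a hedged guess at its shape, assuming smallab's checkpointed-experiment interface with initialize() and step() (names, import path, and failure logic are inferred from the assertions, not taken from the suite):

# Import path assumed; adjust to your smallab version.
from smallab.experiment_types.checkpointed_experiment import CheckpointedExperiment

# Hypothetical sketch: fails on the fourth step, so the handler's rotation
# leaves 3 checkpoints behind and the most recent one has j == 3.
class SerializableExperimentFailsAfter4Steps(CheckpointedExperiment):
    def initialize(self, specification):
        self.j = 0

    def step(self):
        self.j += 1
        if self.j >= 4:
            raise Exception("simulated failure")
        return (self.j, 10)  # (progress, max) tuple: still running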
Example #5
    })
    specs_tree_count = SpecificationGenerator().generate(gen_tree_count)

    specifications = []
    specifications += specs_baseline
    specifications += specs_ugapec
    specifications += specs_tTest

    print(
        f"Expt {name}:\t{len(specifications) // num_seeds} specs to run, over {num_seeds} seeds"
    )
    for spec in specifications:
        if spec["seed"] == 0:
            print(spec)

    runner = ExperimentRunner()
    map_memory(base_specs["file"], base_specs["state_space_dimensionality"])
    DEBUG = False

    if DEBUG:
        runner.run(name,
                   specifications,
                   PlanningExperiment(),
                   propagate_exceptions=True,
                   specification_runner=MainRunner(),
                   use_dashboard=False,
                   force_pickle=True,
                   context_type="fork")
    else:
        gpus = 4
        jobs_per_gpu = 2
Example #6
        if self.i >= self.num_calls:
            # Done with the experiment: return the results dictionary like normal
            return {"number": self.r}
        else:
            # This experiment isn't done: return the progress as a tuple to update the dashboard
            return (self.i, self.num_calls)


# Same specification as before
generation_specification = {
    "seed": [1, 2, 3, 4, 5, 6, 7, 8],
    "num_calls": [1, 2, 3]
}
specifications = SpecificationGenerator().generate(generation_specification)

name = "checkpointed_run"
# This time we will run them all in parallel
runner = ExperimentRunner()
runner.run(name,
           specifications,
           SimpleExperiment(),
           specification_runner=MultiprocessingRunner())

# Some of our experiments may have failed; calling run again retries only the incomplete ones
runner.run(name,
           specifications,
           SimpleExperiment(),
           specification_runner=MultiprocessingRunner())

# Cleanup example
delete_experiments_folder(name)
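Example #6 opens mid-method. For context, a minimal sketch of how the surrounding class might look, assuming a CheckpointedExperiment with initialize() and step() methods (the import path and method names are assumptions, not taken from the example):

import random

# Import path assumed; adjust to your smallab version.
from smallab.experiment_types.checkpointed_experiment import CheckpointedExperiment

class SimpleExperiment(CheckpointedExperiment):
    def initialize(self, specification):
        random.seed(specification["seed"])
        self.num_calls = specification["num_calls"]
        self.i = 0
        self.r = None

    def step(self):
        self.r = random.random()
        self.i += 1
        if self.i >= self.num_calls:
            return {"number": self.r}  # done: results dictionary as usual
        return (self.i, self.num_calls)  # still running: progress tuple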
Example #7
import random
import typing

from examples.example_utils import delete_experiments_folder
from smallab.experiment_types.experiment import Experiment
from smallab.runner.runner import ExperimentRunner
from smallab.specification_generator import SpecificationGenerator


# Same experiment as before
class SimpleExperiment(Experiment):
    def main(self, specification: typing.Dict) -> typing.Dict:
        random.seed(specification["seed"])
        for i in range(specification["num_calls"]):
            random.random()
        return {"number": random.random()}


# In the generation specification, keys whose values are lists are cross-producted with the other list-valued keys to create many specifications.
# In this instance there will be 8 * 3 = 24 specifications.
generation_specification = {
    "seed": [1, 2, 3, 4, 5, 6, 7, 8],
    "num_calls": [1, 2, 3]
}

# Call the generate method. Will create the cross product.
specifications = SpecificationGenerator().generate(generation_specification)
print(specifications)

name = "specification_generation_experiment"
runner = ExperimentRunner()
runner.run(name, specifications, SimpleExperiment())

delete_experiments_folder(name)
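For intuition, the cross product the generator performs over list-valued keys is equivalent to itertools.product over the value lists; a standalone sketch:

import itertools

gen = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": [1, 2, 3]}
# One dict per element of the Cartesian product of the value lists.
specs = [dict(zip(gen.keys(), combo)) for combo in itertools.product(*gen.values())]
assert len(specs) == 24  # 8 seeds * 3 num_calls values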
Example #8

# Write a simple experiment
import random
import typing

from examples.example_utils import delete_experiments_folder
from smallab.experiment_types.experiment import Experiment
from smallab.runner.runner import ExperimentRunner


class SimpleExperiment(Experiment):
    # Need to implement this method, will be passed the specification
    # Return a dictionary of results
    def main(self, specification: typing.Dict) -> typing.Dict:
        random.seed(specification["seed"])
        for i in range(specification["num_calls"]):  # Advance the random number generator some amount
            random.random()
        # Return the random number. This, along with the specification that generated it, will be saved
        return {"number": random.random()}

# The name describes what experiment you're doing
name = "simple_experiment1"
# The specifications are a list of dictionaries that will get passed to your experiment instance
specifications = [{"seed": 1, "num_calls": 1}, {"seed": 2, "num_calls": 2}]
# This object is the code to run your experiment, an instance of the Experiment class
experiment_instance = SimpleExperiment()

# The experiment runner handles cloning your experiment instance and giving it a specification to run with
runner = ExperimentRunner()
runner.run(name, specifications, experiment_instance)

# If I run it again, nothing will happen because smallab knows those experiments successfully completed
runner.run(name, specifications, experiment_instance)
delete_experiments_folder(name)
Example #9

import logging
import random
import typing

from examples.example_utils import delete_experiments_folder
from smallab.experiment_types.experiment import Experiment

# This is another simple experiment, this time with logging involved!
from smallab.runner.runner import ExperimentRunner


class SimpleExperiment(Experiment):
    def main(self, specification: typing.Dict) -> typing.Dict:
        # The logger name is set per experiment; everything logged here goes to a separate file named after the experiment, as well as to the main log
        logging.getLogger(self.get_logger_name()).info("Doing work!")
        random.seed(specification["seed"])
        for i in range(specification["num_calls"]):  # Advance the random number generator some amount
            random.random()
        return {"number": random.random()}


name = "simple_experiment2"
runner = ExperimentRunner()
runner.run(name, [{
    "seed": 1,
    "num_calls": 1
}, {
    "seed": 2,
    "num_calls": 2
}], SimpleExperiment())

delete_experiments_folder(name)
            specification["num_calls"] = self.i
            result = {"r": self.r}
            progress = self.i
            max_iterations = self.num_calls
            return OverlappingOutputCheckpointedExperimentReturnValue(should_continue, specification, result, progress,
                                                                      max_iterations)
        else:
            # This experiment isn't done, return the progress as a tuple to update the dashboard
            return (self.i, self.num_calls)
    # Tells the dashboard how many iterations this experiment will run for
    def max_iterations(self, specification):
        return specification["num_calls"]


# Same specification as before
generation_specification = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": (10, 20, 30)}
specifications = SpecificationGenerator().generate(generation_specification)

name = "overlapping_checkpointed_run"
# This time we will run them all in parallel
runner = ExperimentRunner()
runner.run(name, specifications, SimpleExperiment(), specification_runner=MultiprocessingRunner(), use_dashboard=False,
           propagate_exceptions=True)

# Some of our experiments may have failed; calling run again retries only the incomplete ones
runner.run(name, specifications, SimpleExperiment(), specification_runner=MultiprocessingRunner(), use_dashboard=False,
           propagate_exceptions=True)

# Cleanup example
delete_experiments_folder(name)
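Example #10's fragment begins mid-method, but it demonstrates the key API: the return value of an overlapping-output checkpointed experiment's step. The same call with its five positional fields labeled (field meanings are inferred from the fragment, not from library documentation):

# Inside step(); check smallab's source for the authoritative signature.
return OverlappingOutputCheckpointedExperimentReturnValue(
    should_continue,   # False once the final output point has been emitted
    specification,     # spec copy with "num_calls" rewritten to this output point
    result,            # the partial results dictionary to save for that spec
    progress,          # iterations completed so far
    max_iterations)    # total iterations, used by the dashboard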
Example #11
# Write a simple experiment
import random
import typing

from examples.example_utils import delete_experiments_folder
from smallab.experiment_types.experiment import Experiment
from smallab.runner.runner import ExperimentRunner


class SimpleExperiment(Experiment):
    # Need to implement this method, will be passed the specification
    # Return a dictionary of results
    def main(self, specification: typing.Dict) -> typing.Dict:
        random.seed(specification["seed"])
        for i in range(specification["num_calls"]):  # Advance the random number generator some amount
            random.random()
        # Return the random number. This, along with the specification that generated it, will be saved
        return {"number": random.random()}

# The name describes what experiment you're doing
name = "simple_experiment1"
# The specifications are a list of dictionaries that will get passed to your experiment instance
specifications = [{"seed": 1, "num_calls": 1}, {"seed": 2, "num_calls": 2}]
# This object is the code to run your experiment, an instance of the Experiment class
experiment_instance = SimpleExperiment()

# The experiment runner handles cloning your experiment instance and giving it a specification to run with
runner = ExperimentRunner()
runner.run(name, specifications, experiment_instance, use_dashboard=False)

# If I run it again, nothing will happen because smallab knows those experiments successfully completed
runner.run(name, specifications, experiment_instance, use_dashboard=False)
delete_experiments_folder(name)
Example #12
 def test_with_runner(self):
     experiment = SerializableExperiment()
     runner = ExperimentRunner()
     specification = {"test": "test"}
     runner.run("test", [specification], experiment, specification_runner=MainRunner())
     self.assertEqual(1, len(os.listdir(get_save_file_directory('test', specification))))
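SerializableExperiment is not shown; one plausible minimal form (hypothetical except for the name) is an experiment returning a plain JSON-serializable dict:

import typing

from smallab.experiment_types.experiment import Experiment

# Hypothetical sketch: a plain dict result serializes cleanly, and the test
# then expects a single file in the save directory.
class SerializableExperiment(Experiment):
    def main(self, specification: typing.Dict) -> typing.Dict:
        return {"test": specification["test"]}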
Example #13
 def test_un_serializable_experiment_failure(self):
     experiment = UnserializableExperiment()
     runner = ExperimentRunner()
     specification = {"test": "test"}
     runner.run("test", [specification], experiment, specification_runner=MainRunner())
     self.assertEqual(0, len(os.listdir(get_save_file_directory('test', specification, runner.diff_namer))))
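UnserializableExperiment is likewise not shown; a hypothetical way to build one is to give the experiment an attribute that pickle rejects:

import typing

from smallab.experiment_types.experiment import Experiment

# Hypothetical sketch: lambdas cannot be pickled, so saving fails and the
# save directory stays empty.
class UnserializableExperiment(Experiment):
    def __init__(self):
        self.fn = lambda x: x  # unpicklable attribute

    def main(self, specification: typing.Dict) -> typing.Dict:
        return {"ok": True}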
Example #14
# Same experiment as before
class SimpleExperiment(Experiment):
    def main(self, specification: typing.Dict) -> typing.Dict:
        random.seed(specification["seed"])
        for i in range(specification["num_calls"]):
            logging.getLogger(self.get_logger_name()).info("...")
            random.random()
        return {"number": random.random()}


# In the generation specification, keys whose values are lists are cross-producted with the other list-valued keys to create many specifications.
# In this instance there will be 100 * 3 = 300 specifications.
generation_specification = {
    "seed": list(range(100)),
    "num_calls": [100, 200, 300]
}

# Call the generate method. Will create the cross product.
specifications = SpecificationGenerator().generate(generation_specification)
print(specifications)

name = "specification_generation_experiment"
runner = ExperimentRunner()
runner.run(name,
           specifications,
           SimpleExperiment(),
           specification_runner=MultiprocessingRunner(),
           use_dashboard=False,
           use_diff_namer=False)

delete_experiments_folder(name)
Example #15

        visualizer.tSNE(prefix_str + 'tsne.png')
        logging.getLogger(self.get_logger_name()).info("Visualization complete.")

        return {"discriminative score mean": results[0], "predictive score mean": results[2]}

    def get_hash(self):
        return self.get_logger_name()


# In the generation specification, keys whose values are lists are cross-producted with the other list-valued keys to create many specifications.
# In this instance every key has a single value, so only one specification is generated.
generation_specification = {  # just trying out random values for testing
    "total_iterations": [1], 
    "sub_iterations": [2],
    "data_size": [300],
    "max_seq_length": [12],
    "iterations": [10001],
    "batch_size": [128],
    # "module_name": ['gru', 'lstm', 'lstmLN']
}

# Call the generate method. Will create the cross product.
specifications = SpecificationGenerator().generate(generation_specification)
print(specifications)

expt = TsganExperiment()
name = "tsgan_unseen_metrics" #+expt.get_hash()
runner = ExperimentRunner()
runner.run(name, specifications, expt, specification_runner=MainRunner(), propagate_exceptions=True)