Example #1
    def test_init_from_example_configs(self, tmpdir):
        current_directory = os.path.dirname(os.path.realpath(__file__))
        example_conf_path = os.path.join(current_directory, "..",
                                         "configurations")
        for conf_name in os.listdir(example_conf_path):
            print("Next configuration: " + conf_name)
            path = os.path.join(example_conf_path, conf_name)
            if os.path.isdir(path):
                print("Skipping, because its a directory")
                continue
            if conf_name == "temp.json":
                print("Skipping, because its a temp file")
                continue

            if "design" in conf_name:
                with open(path, "r") as read_file:
                    design_space = json.load(read_file)
                c = ConfigReader.config_from_dict(
                    sample_from_design_space(design_space))
            else:
                c = ConfigReader.config_from_file(path)

            if c.environment in ["ReacherMemory-v0"]:
                print("Skipping, because its a Mujoco environment")
                continue

            Experiment(configuration=c,
                       result_path=tmpdir,
                       from_checkpoint=None,
                       processing_framework="mp")
Example #2
    def test_run_procgen(self, tmpdir, config, mu_lambda_es_config):
        brain_cfg = evolve(config.brain,
                           v_mask="logarithmic",
                           v_mask_param=128,
                           w_mask="logarithmic",
                           w_mask_param=128,
                           t_mask="logarithmic",
                           t_mask_param=128)

        # ProcGen experiments need an explicit episode-runner config
        episode_runner_cfg = evolve(
            config.episode_runner,
            environment_attributes=ProcGenEnvAttributesCfg(type="ProcGenAttr"))

        # use mu_lambda_es_config because the genome is too large for CMA-ES
        config = evolve(config,
                        environment="procgen:procgen-heist-v0",
                        brain=brain_cfg,
                        optimizer=mu_lambda_es_config,
                        episode_runner=episode_runner_cfg)

        exp = Experiment(configuration=config,
                         result_path=tmpdir,
                         from_checkpoint=None,
                         processing_framework="mp")
        exp.run()
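
evolve here comes from the attrs package: the config objects are frozen, so they cannot be mutated in place, and evolve instead returns a copy with the given fields replaced. A minimal self-contained sketch (the BrainCfg class below is a hypothetical stand-in for the real brain config):

import attr
from attr import evolve


@attr.s(auto_attribs=True, frozen=True, slots=True)
class BrainCfg:
    # hypothetical stand-in for the real brain config class
    v_mask: str = "dense"
    v_mask_param: int = 0


base = BrainCfg()
changed = evolve(base, v_mask="logarithmic", v_mask_param=128)
assert base.v_mask == "dense"           # the original is untouched
assert changed.v_mask == "logarithmic"  # the copy carries the override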
Example #3
    def test_run(self, tmpdir, config):
        experiment_dask = Experiment(configuration=config,
                                     result_path=tmpdir.mkdir("dask"),
                                     from_checkpoint=None,
                                     processing_framework="dask")
        experiment_dask.run()

        # Update the expected results below when they change intentionally.
        # When you do, repeat an experiment known to yield good results to make
        # sure nothing broke when you changed the underlying algorithm.

        # Note that these values depend on whether the virtualenv was created with the standard
        # Python virtualenv tool or with Anaconda, and on whether standard NumPy or NumPy + MKL
        # is used. This list is probably not exhaustive; expand or change it as necessary.
        # For some reason the GitHub Actions runner also produces a different result than a
        # standard Python virtualenv.
        accepted_results = [
            # outdated: -103.4065390603272,   Python 3.8 + conda 4.9.2 + standard NumPy
            # outdated: -102.16727461334207,  # Python 3.8 + conda 4.9.2 + NumPy + MKL
            -96.44321002933644,  # Python 3.8 + Python virtualenv + standard NumPy, updated after 2021-02-07
            -100.79614768185776,  # Python 3.8 on a self-hosted GitHub Actions runner
            -110.90104247303623,  # Python 3.8 on a shared GitHub Actions runner
        ]

        assert experiment_dask.result_handler.result_log.chapters["fitness"][
            -1]["max"] in accepted_results

        experiment_mp = Experiment(configuration=config,
                                   result_path=tmpdir.mkdir("mp"),
                                   from_checkpoint=None,
                                   processing_framework="mp")
        experiment_mp.run()

        experiment_sequential = Experiment(
            configuration=config,
            result_path=tmpdir.mkdir("sequential"),
            from_checkpoint=None,
            processing_framework="sequential")
        experiment_sequential.run()

        assert (experiment_dask.result_handler.result_log.chapters["fitness"]
                [-1]["max"] == experiment_mp.result_handler.result_log.
                chapters["fitness"][-1]["max"])

        assert (experiment_dask.result_handler.result_log.chapters["fitness"]
                [-1]["max"] == experiment_sequential.result_handler.result_log.
                chapters["fitness"][-1]["max"])
Example #4
    def test_cnn_init_exp(self, tmpdir):
        config_location = os.path.join(os.getcwd(),
                                       "../configurations/cnn_ctrnn.json")

        config = ConfigReader.config_from_file(config_location)
        Experiment(configuration=config,
                   result_path=tmpdir,
                   from_checkpoint=None,
                   processing_framework="dask")
Example #5
    def test_basic_init(self, tmpdir):
        config_location = os.path.join(os.getcwd(),
                                       "tests/basic_test_config.json")
        config = ConfigReader.config_from_file(config_location)

        assert config.brain.number_neurons == 2
        Experiment(configuration=config,
                   result_path=tmpdir,
                   from_checkpoint=None,
                   processing_framework="dask")
Example #6
    def test_run(self, tmpdir, config):
        experiment_dask = Experiment(configuration=config,
                                     result_path=tmpdir.mkdir("dask"),
                                     from_checkpoint=None,
                                     processing_framework="dask")
        experiment_dask.run()

        # Update the expected results below when they change intentionally.
        # When you do, repeat an experiment known to yield good results to make
        # sure nothing broke when you changed the underlying algorithm.

        # Note: these values depend on the machine.
        accepted_results = [
            -99.11361202453168,  # result on Bjoern's notebook
            -98.95448135483025,  # result on Bjoern's desktop
            -92.24354731262838,  # result on Patrick's notebook
            -116.79799970080285,  # result on GitHub Actions public runner
            -99.78831700269642,  # result on se-catalpa
        ]

        assert experiment_dask.result_handler.result_log.chapters["fitness"][
            -1]["max"] in accepted_results

        experiment_mp = Experiment(configuration=config,
                                   result_path=tmpdir.mkdir("mp"),
                                   from_checkpoint=None,
                                   processing_framework="mp")
        experiment_mp.run()

        experiment_sequential = Experiment(
            configuration=config,
            result_path=tmpdir.mkdir("sequential"),
            from_checkpoint=None,
            processing_framework="sequential")
        experiment_sequential.run()

        assert (experiment_dask.result_handler.result_log.chapters["fitness"]
                [-1]["max"] == experiment_mp.result_handler.result_log.
                chapters["fitness"][-1]["max"])

        assert (experiment_dask.result_handler.result_log.chapters["fitness"]
                [-1]["max"] == experiment_sequential.result_handler.result_log.
                chapters["fitness"][-1]["max"])
Example #7
    def test_run_procgen(self, tmpdir, config, mu_lambda_es_config):
        brain_cfg = evolve(config.brain,
                           v_mask="logarithmic",
                           v_mask_param=128,
                           w_mask="logarithmic",
                           w_mask_param=128,
                           t_mask="logarithmic",
                           t_mask_param=128)
        # use mu_lambda_es_config because the genome is too large for CMA-ES
        config = evolve(config,
                        environment="procgen:procgen-heist-v0",
                        brain=brain_cfg,
                        optimizer=mu_lambda_es_config)

        exp = Experiment(configuration=config,
                         result_path=tmpdir,
                         from_checkpoint=None,
                         processing_framework="mp")
        exp.run()
Example #8
from brain_visualizer.brain_visualizer import BrainVisualizerHandler

from tools.configurations import ExperimentCfg, ContinuousTimeRNNCfg, StandardEpisodeRunnerCfg
from tools.helper import config_from_file
import os
import threading
from brains.continuous_time_rnn import ContinuousTimeRNN
from attr import s
import numpy as np
from tools.helper import transform
from experiment import Experiment  # assumed module path; the snippet instantiates Experiment below

cfg_path = os.path.join('configurations', 'reverse_fixed.json')
cfg_exp = config_from_file(cfg_path)

experiment = Experiment(configuration=cfg_exp,
                        result_path="",
                        from_checkpoint=None)


@s(auto_attribs=True, frozen=True, slots=True)
class BrainParam:
    V: np.ndarray
    W: np.ndarray
    T: np.ndarray
    y0: np.ndarray
    clip_min: np.ndarray
    clip_max: np.ndarray


def param_to_genom(param):
    # flatten all brain parameters into a single genome vector
    # (body reconstructed from the BrainParam fields; the original snippet was truncated here,
    # so the field order is an assumption)
    return np.concatenate([
        param.V.flatten(),
        param.W.flatten(),
        param.T.flatten(),
        param.y0.flatten(),
        param.clip_min.flatten(),
        param.clip_max.flatten(),
    ])
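
For round-tripping in tests, an inverse helper can rebuild a BrainParam from a flat genome. This is a hypothetical sketch that assumes the same field order as the reconstruction above and that all array shapes are known up front:

def genom_to_param(genome, shapes):
    # hypothetical helper; shapes maps field name -> array shape, e.g. {"V": (2, 3), ...}
    fields = ["V", "W", "T", "y0", "clip_min", "clip_max"]
    values, offset = {}, 0
    for name in fields:
        size = int(np.prod(shapes[name]))
        values[name] = np.asarray(genome[offset:offset + size]).reshape(shapes[name])
        offset += size
    return BrainParam(**values)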
Example #9

        # complex type and default
        self.add_argument("--result_path",
                          type=os.path.abspath,
                          default=os.path.join(
                              "..", "CTRNN_Simulation_Results", "data",
                              datetime.now().strftime("%Y-%m-%d_%H-%M-%S")))
        return self


if __name__ == "__main__":  # pragma: no cover
    """Everything outside this block will be executed by every worker-thread, while this block is only run on the 
    main thread. Every object that is later passed to a worker must be pickle-able, that's why we 
    initialise everything that is not pickle-able before this point. Especially the DEAP-toolbox's creator-object is not 
    pickle-able. 
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    args = TrainArgs(underscores_to_dashes=True).parse_args()

    experiment = Experiment(configuration=ConfigReader.config_from_file(
        args.configuration),
                            result_path=args.result_path,
                            from_checkpoint=args.from_checkpoint,
                            processing_framework=args.processing_framework,
                            number_of_workers=args.num_workers,
                            reset_hof=args.reset_hof,
                            write_final_checkpoint=args.write_final_checkpoint)

    os.mkdir(args.result_path)
    experiment.run()
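
The underscores_to_dashes argument and the self.add_argument call suggest TrainArgs is built on the typed-argument-parser (Tap) package. A minimal sketch of such a class; the field names are the ones read from args above, but their types and defaults are assumptions:

import os
from datetime import datetime
from typing import Optional

from tap import Tap


class TrainArgs(Tap):
    configuration: str                     # path to the experiment configuration file
    from_checkpoint: Optional[str] = None  # checkpoint to resume from, if any
    processing_framework: str = "mp"
    num_workers: int = os.cpu_count()
    reset_hof: bool = False
    write_final_checkpoint: bool = False

    def configure(self):
        # --result_path has a complex type and default, so it is registered explicitly,
        # mirroring the fragment shown above
        self.add_argument("--result_path",
                          type=os.path.abspath,
                          default=os.path.join(
                              "..", "CTRNN_Simulation_Results", "data",
                              datetime.now().strftime("%Y-%m-%d_%H-%M-%S")))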
Example #10
    def test_run_atari_setup(self, tmpdir, config):
        config = evolve(config, environment="Qbert-ram-v0")
        Experiment(configuration=config,
                   result_path=tmpdir,
                   from_checkpoint=None,
                   processing_framework="mp")
Example #11
def main():

    ########################
    # Define a Project
    ########################

    # project id
    project_id = config['projects']['panoptes_id']

    # project classes
    project_classes = cfg_model['classes']

    # create Project object
    project = Project(name=str(project_id),
                      panoptes_id=project_id,
                      classes=project_classes,
                      cfg_path=cfg_path,
                      config=config)

    # create Subject Sets
    project.createSubjectSet(mode=cfg_model['subject_mode'])

    ########################
    # Define Experiment
    ########################

    # map classes as specified by the current experiment,
    # choosing from the mappings defined in the create_class_mappings() function
    class_mapper = create_class_mappings(cfg_model['class_mapping'])

    # experiment object
    exp = Experiment(name=cfg_model['experiment_id'],
                     project=project,
                     class_mapper=class_mapper,
                     train_size=cfg_model['train_size'],
                     test_size=cfg_model['test_size'],
                     equal_class_sizes=bool(cfg_model['balanced_classes']),
                     random_state=cfg_model['random_seed'],
                     max_labels_per_subject=1)

    # create separate directories with image data for this experiment
    # use only links to original images to save space
    exp.createExpDataSet(link_only=bool(eval(config['general']['link_only'])),
                         clear_old_files=False,
                         splits=cfg_model['experiment_data'],
                         split_mode=cfg_model['split_mode'])

    ########################
    # Define Model
    ########################

    # create model object
    model = Model(train_set=exp.train_set,
                  test_set=exp.test_set,
                  val_set=exp.val_set,
                  mod_file=cfg_model['model_file'],
                  pre_processing=cfg_model['pre_processing'],
                  config=config,
                  cfg_path=cfg_path,
                  callbacks=cfg_model['callbacks'],
                  optimizer=cfg_model['optimizer'],
                  num_classes=len(set(class_mapper.values())))

    ########################
    # Train Model
    ########################

    # add model to experiment
    exp.addModel(model)

    # prepare / initialize model
    exp.prep_model()

    # train model
    exp.train()

    ########################
    # Evaluate Model
    ########################

    exp.evaluate()

    logging.info("Finished")
Example #12
        return self


args = RenderArgs(underscores_to_dashes=True).parse_args()
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
try:
    with open(os.path.join(args.dir, "Log.pkl"), "rb") as read_file_log:
        log = pickle.load(read_file_log)
except (FileNotFoundError, EOFError, pickle.UnpicklingError):
    # fall back to the JSON log when no readable pickle log exists
    with open(os.path.join(args.dir, "Log.json"), "r") as read_file_log:
        log = json.load(read_file_log)

config = ConfigReader.config_from_file(os.path.join(args.dir, "Configuration.json"))

experiment = Experiment(configuration=config,
                        result_path="/tmp/not-used",
                        from_checkpoint=None,
                        processing_framework="sequential")

with open(os.path.join(args.dir, "HallOfFame.pickle"), "rb") as read_file_hof:
    # creator is needed to unpickle HOF
    # creator is registered when loading experiment
    try:
        hall_of_fame = pickle.load(read_file_hof)
    except AttributeError:
        # workaround to render experiments that were created before this PR was merged:
        # https://github.com/neuroevolution-ai/NeuroEvolution-CTRNN_new/pull/48
        # feel free to remove this workaround once experiments from before February 2021 are no longer relevant
        creator.create("FitnessMax", base.Fitness, weights=(1.0,))
        creator.create("Individual", list, typecode='b', fitness=creator.FitnessMax)
        hall_of_fame = pickle.load(read_file_hof)
Example #13
        type=int,
        default=os.cpu_count(),
        help="Specify the amount of workers for the computation")
    parser.add_argument(
        "--checkpoint-to-result",
        default=False,
        action="store_true",
        help="Should the last checkpoint be stored in the result directory?")

    return parser.parse_args(args)


if __name__ == "__main__":  # pragma: no cover
    """Everything outside this block will be executed by every worker-thread, while this block is only run on the 
    main thread. Every object that is later passed to a worker must be pickle-able, that's why we 
    initialise everything that is not pickle-able before this point. Especially the DEAP-toolbox's creator-object is not 
    pickle-able. 
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    args = parse_args()
    experiment = Experiment(configuration=ConfigReader.config_from_file(
        args.configuration),
                            result_path=args.result_path,
                            from_checkpoint=args.from_checkpoint,
                            processing_framework=args.processing_framework,
                            number_of_workers=args.num_workers,
                            checkpoint_to_result=args.checkpoint_to_result)

    os.mkdir(args.result_path)
    experiment.run()
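
The docstring above reflects standard Python multiprocessing behaviour: with the spawn start method, every worker re-imports the module, so module-level code runs once per worker while the guarded block runs only in the main process. A minimal self-contained illustration:

import multiprocessing as mp


def square(x):
    # runs in the worker processes; arguments and return values must be picklable
    return x * x


if __name__ == "__main__":
    # without this guard, spawned workers would re-execute the pool setup
    # when they re-import the module on start-up
    with mp.Pool(processes=2) as pool:
        print(pool.map(square, range(4)))  # [0, 1, 4, 9]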