Example #1
    def setExperiment(self, experiment):
        parameters = Parameter.parametersToDict(experiment.parameters)

        parameters_linearly_spaced_vals = []

        # values for each parameter are generated from the lower bound to the
        # upper bound, then reversed so configs are enumerated high-to-low
        for idx, parameter in enumerate(experiment.parameters):
            ncpp = self.num_configs_per_param[idx]
            if ncpp == 1:
                # a single config pins the parameter at its minimum
                step_size = 0
            else:
                step_size = (parameter.maximum - parameter.minimum) / (ncpp - 1)
            parameter_linearly_spaced_vals = [
                parameter.minimum + (i * step_size) for i in range(ncpp)
            ]
            parameter_linearly_spaced_vals = list(
                reversed(parameter_linearly_spaced_vals))
            parameters_linearly_spaced_vals.append(
                parameter_linearly_spaced_vals)

        # get cartesian product of configs
        parameter_configs_product = itertools.product(
            *parameters_linearly_spaced_vals)
        # create collections of ParameterConfigs from config values
        for parameter_config_collection in parameter_configs_product:
            parameter_configs = []
            for parameter, value in zip(experiment.parameters,
                                        parameter_config_collection):
                parameter_configs.append(
                    ParameterConfig(parameter=parameter, value=value))
            self.grid_parameter_configs.append(parameter_configs)
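For reference, here is a minimal standalone sketch of the same grid construction in plain Python; the parameter names and bounds are borrowed from Example #7, and the per-parameter config counts are made-up illustration values:

import itertools

# hypothetical bounds (minimum, maximum) and per-parameter config counts
bounds = {'myParam': (5, 10), 'myParamB': (3, 5)}
num_configs = {'myParam': 3, 'myParamB': 2}

axes = []
for name, (lo, hi) in bounds.items():
    n = num_configs[name]
    step = 0 if n == 1 else (hi - lo) / (n - 1)
    axes.append([lo + i * step for i in range(n)])

# the Cartesian product enumerates every combination of per-parameter values
for config in itertools.product(*axes):
    print(dict(zip(bounds, config)))
# first few: {'myParam': 5.0, 'myParamB': 3.0}, {'myParam': 5.0, 'myParamB': 5.0}, ...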
Example #2
    def setExperiment(self, experiment):
        """
        This is called by the runner after the experiment is properly initialized
        """
        self.parameters_by_name = {
            parameter.name: parameter
            for parameter in experiment.parameters
        }
        self.optimizer = RandomSearchOptimizer(
            pbounds=Parameter.parametersToDict(experiment.parameters),
            random_seed=self.random_seed)
        self.experiment_id = experiment.id
        self.previous_trials = experiment.trials
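Example #4 passes the same Parameter.parametersToDict output to bayes_opt's BayesianOptimization as pbounds, so it presumably maps each parameter name to its (minimum, maximum) pair. A hedged equivalent in plain Python, in case you need the shape without paropt:

# assumed shape of Parameter.parametersToDict(...); the return type is an
# inference from how Example #4 feeds it to bayes_opt, not a documented API
def parameters_to_dict(parameters):
    return {p.name: (p.minimum, p.maximum) for p in parameters}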
Example #3
    @classmethod
    def dictToExperiment(cls, experiment_dict):
        """Returns dict as Experiment
        Args:
            experiment_dict(dict): dictionary representation of Experiment
        Returns:
            experiment(Experiment): constructed Experiment
        """
        experiment_params = [
            Parameter(**param) for param in experiment_dict.pop('parameters')
        ]
        # in_production is presumably a module-level flag selecting EC2 vs local compute
        if in_production:
            compute = EC2Compute(**experiment_dict.pop('compute'))
        else:
            compute = LocalCompute(**experiment_dict.pop('compute'))
        return Experiment(parameters=experiment_params,
                          compute=compute,
                          **experiment_dict)
Example #4
    def setExperiment(self, experiment):
        """
        This is called by the runner after the experiment is properly initialized
        """
        self.parameters_by_name = {
            parameter.name: parameter
            for parameter in experiment.parameters
        }
        # requires `from random import randint` at module level
        self.optimizer = BayesianOptimization(
            f=None,
            pbounds=Parameter.parametersToDict(experiment.parameters),
            verbose=2,
            random_state=randint(1, 100),
        )
        self.experiment_id = experiment.id
        self.previous_trials = experiment.trials
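With f=None, this is bayes_opt's ask-tell mode: the runner requests the next point and registers observed results itself. A minimal sketch of that loop using the classic UtilityFunction API (newer bayes_opt releases moved acquisition handling into a separate module, so treat this as version-dependent):

from bayes_opt import BayesianOptimization, UtilityFunction

optimizer = BayesianOptimization(
    f=None,
    pbounds={'myParam': (5, 10)},  # name/bounds borrowed from Example #7
    verbose=2,
    random_state=1,
)
utility = UtilityFunction(kind='ucb', kappa=2.5, xi=0.0)

for _ in range(3):
    next_point = optimizer.suggest(utility)  # ask: where to sample next
    target = -next_point['myParam']          # stand-in objective (e.g. minimize runtime)
    optimizer.register(params=next_point, target=target)  # tell: record the result

print(optimizer.max)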
Example #5
def dictToExperiment(experiment_dict):
    """Returns dict as Experiment
    Args:
        experiment_dict(dict): dictionary representation of Experiment
    Returns:
        experiment(Experiment): constructed Experiment
    """
    experiment_params = [
        Parameter(**param) for param in experiment_dict.pop('parameters')
    ]
    compute_type = experiment_dict['compute']['type']
    if compute_type == 'ec2':
        compute = EC2Compute(**experiment_dict.pop('compute'))
    elif compute_type == 'local':
        compute = LocalCompute(**experiment_dict.pop('compute'))
    elif compute_type == 'PBSPro':
        compute = PBSProCompute(**experiment_dict.pop('compute'))
    else:
        # without this branch, an unknown type leaves `compute` unbound
        raise ValueError(f'Unknown compute type: {compute_type}')

    return Experiment(parameters=experiment_params,
                      compute=compute,
                      **experiment_dict)
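The if/elif chain above grows with every compute backend. A common refactor is a dispatch table; this is a sketch, not paropt's actual code:

# hypothetical mapping from the 'type' field to its compute class
COMPUTE_CLASSES = {
    'ec2': EC2Compute,
    'local': LocalCompute,
    'PBSPro': PBSProCompute,
}

def computeFromDict(compute_dict):
    compute_cls = COMPUTE_CLASSES.get(compute_dict['type'])
    if compute_cls is None:
        raise ValueError(f"Unknown compute type: {compute_dict['type']}")
    # Example #8 shows EC2Compute accepting its 'type' field as a kwarg,
    # so the dict can be splatted unchanged
    return compute_cls(**compute_dict)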
Example #6
    def setExperiment(self, experiment):
        parameters = Parameter.parametersToDict(experiment.parameters)
        ncpp = self.num_configs_per_param
        parameters_linearly_spaced_vals = []
        for parameter in experiment.parameters:
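            # note: unlike Example #1, this divides by zero when ncpp == 1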
            step_size = (parameter.maximum - parameter.minimum) / (ncpp - 1)
            parameter_linearly_spaced_vals = [
                parameter.minimum + (i * step_size) for i in range(ncpp)
            ]
            parameters_linearly_spaced_vals.append(
                parameter_linearly_spaced_vals)

        # get cartesian product of configs
        parameter_configs_product = itertools.product(
            *parameters_linearly_spaced_vals)
        # create collections of ParameterConfigs from config values
        for parameter_config_collection in parameter_configs_product:
            parameter_configs = []
            for parameter, value in zip(experiment.parameters,
                                        parameter_config_collection):
                parameter_configs.append(
                    ParameterConfig(parameter=parameter, value=value))
            self.grid_parameter_configs.append(parameter_configs)
Example #7
paropt.setConsoleLogger()

# when running on the server, the experiment is fetched first before doing anything
# if the experiment isn't found then running the trial fails
command_template_string = """
#! /bin/bash
sleep ${myParam}
sleep ${myParamB}
sleep ${myParamC}
"""

experiment_inst = Experiment(
    tool_name='anothertoolaaa',
    parameters=[
        Parameter(name="myParam",
                  type=PARAMETER_TYPE_INT,
                  minimum=5,
                  maximum=10),
        Parameter(name="myParamB",
                  type=PARAMETER_TYPE_INT,
                  minimum=3,
                  maximum=5),
        Parameter(name="myParamC",
                  type=PARAMETER_TYPE_INT,
                  minimum=3,
                  maximum=5)
    ],
    command_template_string=command_template_string,
    # we use LocalCompute here b/c we don't want to launch jobs on EC2 like the server does
    compute=LocalCompute(max_threads=8))

# when run on the server, this doesn't change - we always connect to an AWS RDS postgres database
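The snippet is truncated here. Based on Example #8, the wiring that typically follows looks like the sketch below; the sqlite arguments mirror Example #8's ephemeral database, since the server-side postgres credentials are not shown in the source:

# ephemeral sqlite storage for a local run; the server would instead point
# RelationalDB at its AWS RDS postgres database (credentials omitted in source)
storage = RelationalDB(
    'sqlite',
    '',
    '',
    '',
    'tmpSqliteDB',
)

po = ParslRunner(parsl_app=timeCommand,
                 optimizer=BayesianOptimizer(n_init=1, n_iter=1),
                 storage=storage,
                 experiment=experiment_inst,
                 logs_root_dir='/var/log/paropt')
po.run(debug=True)
po.cleanup()
print(po.run_result)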
Example #8
def setupAWS():
    # launch a small parsl job on AWS to initialize parsl's AWS VPC stuff
    # If run successfully, it will create the awsproviderstate.json file on host in paropt-service/config/
    # Needs to be run each time the AWS credentials are changed for the server
    # Intended to be used with a `docker run ...` command before running production server
    import os

    import paropt
    from paropt.runner import ParslRunner
    from paropt.storage import RelationalDB
    from paropt.optimizer import BayesianOptimizer, GridSearch
    from paropt.runner.parsl import timeCommand
    from paropt.storage.entities import Parameter, PARAMETER_TYPE_INT, Experiment, LocalCompute, EC2Compute

    container_state_file_dir = os.getenv("CONTAINER_STATE_FILE_DIR")
    if not container_state_file_dir:
        raise Exception(
            "Missing required env var CONTAINER_STATE_FILE_DIR which is used for copying awsproviderstate.json to host"
        )

    paropt.setConsoleLogger()

    command_template_string = """
    #! /bin/bash

    sleep ${myParam}
    """

    experiment_inst = Experiment(
        tool_name='tmptool',
        parameters=[
            Parameter(name="myParam",
                      type=PARAMETER_TYPE_INT,
                      minimum=0,
                      maximum=10),
        ],
        command_template_string=command_template_string,
        compute=EC2Compute(
            type='ec2',
            instance_model="c4.large",  # c4 b/c previously had trouble with t2 spot instances
            instance_family="c4",
            ami="ami-0257427d05c8c18ac"  # parsl base ami - preinstalled apt packages
        ))

    # use an ephemeral database
    storage = RelationalDB(
        'sqlite',
        '',
        '',
        '',
        'tmpSqliteDB',
    )

    # run simple bayes opt
    bayesian_optimizer = BayesianOptimizer(
        n_init=1,
        n_iter=1,
    )

    po = ParslRunner(parsl_app=timeCommand,
                     optimizer=bayesian_optimizer,
                     storage=storage,
                     experiment=experiment_inst,
                     logs_root_dir='/var/log/paropt')

    po.run(debug=True)
    po.cleanup()

    # print result
    print(po.run_result)

    # move the awsproviderstate file into expected directory
    from shutil import copyfile
    copyfile("awsproviderstate.json",
             f'{container_state_file_dir}/awsproviderstate.json')
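If the module is executed directly, e.g. inside the docker container the comments mention, a conventional entry point would be:

if __name__ == '__main__':
    setupAWS()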