def test_bo_config(self):
    """A full BO config (metric, trials, utility function, matrix) round-trips through dicts."""
    payload = {
        "kind": "bo",
        "metric": OptimizationMetricConfig(
            name="loss", optimization=Optimization.MINIMIZE
        ).to_dict(),
        "n_initial_trials": 2,
        "n_iterations": 19,
        "utility_function": {
            "acquisition_function": AcquisitionFunctions.UCB,
            "kappa": 1.2,
            "gaussian_process": {
                "kernel": GaussianProcessesKernels.MATERN,
                "length_scale": 1.0,
                "nu": 1.9,
                "n_restarts_optimizer": 2,
            },
        },
        "matrix": {"lr": {"kind": "choice", "value": [[0.1], [0.9]]}},
    }
    parsed = BOConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_hyperband_config(self):
    """HyperbandConfig round-trips and rejects non-positive max_iter and negative eta."""
    payload = {
        'max_iter': 10,
        'eta': 3,
        'resource': {'name': 'steps', 'type': 'int'},
        'resume': False,
        'metric': SearchMetricConfig(
            name='loss', optimization=Optimization.MINIMIZE
        ).to_dict(),
    }
    parsed = HyperbandConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)

    # Zero and negative max_iter values must both fail validation.
    for bad_value in (0, -0.5):
        payload['max_iter'] = bad_value
        with self.assertRaises(ValidationError):
            HyperbandConfig.from_dict(payload)
    payload['max_iter'] = 3

    # A negative eta must fail validation.
    payload['eta'] = -0.5
    with self.assertRaises(ValidationError):
        HyperbandConfig.from_dict(payload)

    # A positive fractional eta is accepted.
    payload['eta'] = 2.9
    parsed = HyperbandConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_logging_config(self):
    """A logging config with an explicit level and no formatter round-trips."""
    payload = {'level': 'INFO', 'formatter': None}
    parsed = LoggingConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_global_step_wait_hook_config(self):
    """GlobalStepWaiterHookConfig preserves wait_until_step across a dict round-trip."""
    payload = {'wait_until_step': 10}
    parsed = GlobalStepWaiterHookConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_param_config_with_value(self):
    """V1Param treats a plain value of any type as a literal, never a ref or search."""
    literal_values = [
        "string_value",
        234,
        23.4,
        {"key": "value"},
        ["value1", "value2"],
    ]
    for literal in literal_values:
        payload = {"value": literal}
        param = V1Param.from_dict(payload)
        assert_equal_dict(param.to_dict(), payload)
        # Regardless of the value's type, the param classifies as a literal.
        assert param.is_literal is True
        assert param.is_ref is False
        assert param.is_search is False
def test_final_ops_hook_config(self):
    """FinalOpsHookConfig keeps its list of final ops through a dict round-trip."""
    payload = {'final_ops': ['loss', 'precision']}
    parsed = FinalOpsHookConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_hyperband_config(self):
    """V1Hyperband round-trips and rejects non-positive maxIterations and negative eta."""
    payload = {
        "kind": "hyperband",
        "maxIterations": 10,
        "eta": 3,
        "resource": {"name": "steps", "type": "int"},
        "resume": False,
        "metric": V1OptimizationMetric(
            name="loss", optimization=Optimization.MINIMIZE
        ).to_dict(),
        "params": {"lr": {"kind": "choice", "value": [[0.1], [0.9]]}},
    }
    parsed = V1Hyperband.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)

    # Zero and negative maxIterations must both fail validation.
    for bad_value in (0, -0.5):
        payload["maxIterations"] = bad_value
        with self.assertRaises(ValidationError):
            V1Hyperband.from_dict(payload)
    payload["maxIterations"] = 3

    # A negative eta must fail validation.
    payload["eta"] = -0.5
    with self.assertRaises(ValidationError):
        V1Hyperband.from_dict(payload)

    # A positive fractional eta is accepted.
    payload["eta"] = 2.9
    parsed = V1Hyperband.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_io_config_types(self):
    """IOConfig representations include the value only when one is supplied."""
    # Integer-typed input: repr with and without a value.
    payload = {
        "name": "input1",
        "description": "some text",
        "type": IOTypes.INT,
    }
    parsed = IOConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
    with_value = OrderedDict((("name", "input1"), ("type", "int"), ("value", 3)))
    assert parsed.get_repr_from_value(3) == with_value
    assert parsed.get_repr() == OrderedDict((("name", "input1"), ("type", "int")))

    # S3-path-typed input: same checks with a path value.
    payload = {
        "name": "input1",
        "description": "some text",
        "type": IOTypes.S3_PATH,
    }
    parsed = IOConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
    with_value = OrderedDict(
        (("name", "input1"), ("type", IOTypes.S3_PATH), ("value", "s3://foo"))
    )
    assert parsed.get_repr_from_value("s3://foo") == with_value
    assert parsed.get_repr() == OrderedDict(
        (("name", "input1"), ("type", IOTypes.S3_PATH))
    )
def test_failure_early_stopping_with_truncation_policy(self):
    """V1FailureEarlyStopping round-trips a kind/percent payload."""
    # NOTE(review): the test name mentions a truncation policy, but the payload
    # only carries `percent` — confirm whether a policy field was intended.
    payload = {"kind": "failure_early_stopping", "percent": 0.3}
    parsed = V1FailureEarlyStopping.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_latent_bridge_config(self):
    """BatchMemoryConfig round-trips size and batch_size."""
    # NOTE(review): the test name says "latent bridge" but the body exercises
    # BatchMemoryConfig — confirm which one was intended.
    payload = {'size': 500, 'batch_size': 500}
    parsed = BatchMemoryConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_container_resource_config(self):
    """ResourceRequirementsConfig round-trips every combination of limits/requests."""
    payloads = [
        {"limits": {"cpu": 0.1}},
        {"requests": {"cpu": 0.1}},
        {"requests": {"cpu": 0.1}, "limits": {"cpu": 0.1}},
        {
            "requests": {"cpu": 0.1, "memory": "10mi"},
            "limits": {"cpu": 0.1, "memory": 1024},
        },
        # Custom resource names (e.g. vendor GPUs) pass through untouched.
        {
            "requests": {"cpu": 0.1, "memory": "10Mi", "amd.com/gpu": 2},
            "limits": {"cpu": 0.1, "memory": 1024, "amd.com/gpu": 2},
        },
    ]
    for payload in payloads:
        parsed = ResourceRequirementsConfig.from_dict(payload)
        assert_equal_dict(parsed.to_dict(), payload)
def test_iterative_config(self):
    """V1Iterative round-trips and rejects negative or fractional maxIterations."""
    payload = {
        "kind": "iterative",
        "maxIterations": 10,
        "container": {"image": "my-matrix"},
    }
    parsed = V1Iterative.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)

    # Negative and fractional maxIterations must all fail validation.
    for bad_value in (-5, -0.5, 0.5):
        payload["maxIterations"] = bad_value
        with self.assertRaises(ValidationError):
            V1Iterative.from_dict(payload)

    # A small positive integer is accepted again.
    payload["maxIterations"] = 5
    parsed = V1Iterative.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_project_experiments_and_groups_config(self):
    """ProjectConfig round-trips nested groups/experiments; the light dict drops them."""
    shared_uuid = uuid.uuid4().hex
    payload = {
        'name': 'test',
        'description': '',
        'is_public': True,
        'experiment_groups': [
            ExperimentGroupConfig(
                content='content', uuid=shared_uuid, project=shared_uuid
            ).to_dict()
        ],
        'experiments': [
            ExperimentConfig(
                config={}, uuid=shared_uuid, project=shared_uuid
            ).to_dict()
        ],
    }
    parsed = ProjectConfig.from_dict(payload)
    assert_equal_dict(payload, parsed.to_dict())

    # The light dict omits description and the nested collections.
    for dropped in ('description', 'experiment_groups', 'experiments'):
        payload.pop(dropped)
    assert_equal_dict(payload, parsed.to_light_dict())
def test_random_search_config(self):
    """V1RandomSearch round-trips and rejects negative or fractional numRuns."""
    payload = {
        "kind": "random",
        "numRuns": 10,
        "params": {"lr": {"kind": "choice", "value": [[0.1], [0.9]]}},
    }
    parsed = V1RandomSearch.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)

    # Negative and fractional run counts must all fail validation.
    for bad_value in (-5, -0.5, 0.5):
        payload["numRuns"] = bad_value
        with self.assertRaises(ValidationError):
            V1RandomSearch.from_dict(payload)

    # A small positive integer is accepted again.
    payload["numRuns"] = 5
    parsed = V1RandomSearch.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_grid_search_config(self):
    """GridSearchConfig round-trips and rejects negative or fractional n_experiments."""
    payload = {
        "kind": "grid_search",
        "n_experiments": 10,
        "matrix": {"lr": {"kind": "choice", "value": [[0.1], [0.9]]}},
    }
    parsed = GridSearchConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)

    # Negative and fractional experiment counts must all fail validation.
    for bad_value in (-5, -0.5, 0.5):
        payload["n_experiments"] = bad_value
        with self.assertRaises(ValidationError):
            GridSearchConfig.from_dict(payload)

    # A small positive integer is accepted again.
    payload["n_experiments"] = 5
    parsed = GridSearchConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_generator_model_config(self):
    """GeneratorConfig round-trips; graph sections use structural comparison."""
    encoder_graph = {
        'input_layers': ['image'],
        'output_layers': ['encoded'],
        'layers': [{'Dense': {'units': 1, 'name': 'encoded'}}],
    }
    decoder_graph = {
        'input_layers': ['image'],
        'output_layers': ['encoded'],
        'layers': [{'Dense': {'units': 1, 'name': 'decoded'}}],
    }
    payload = {
        'bridge': NoOpBridgeConfig().to_schema(),
        'encoder': encoder_graph,
        'decoder': decoder_graph,
        'loss': MeanSquaredErrorConfig(
            input_layer=['image', 0, 0], output_layer=['decoded', 0, 0]
        ).to_schema(),
        'optimizer': AdamConfig(learning_rate=0.01).to_schema(),
        'metrics': [],
        'summaries': ['loss', 'gradients'],
        'clip_gradients': 0.5,
        'clip_embed_gradients': 0.,
        'name': 'model',
    }
    parsed = GeneratorConfig.from_dict(payload)
    result = parsed.to_dict()
    # Pull out the parts that need special comparison, then compare the rest exactly.
    assert payload.pop('bridge') == result.pop('bridge')
    assert_equal_graphs(payload.pop('encoder'), result.pop('encoder'))
    assert_equal_graphs(payload.pop('decoder'), result.pop('decoder'))
    assert_equal_dict(payload, result)
def test_constant_exploration_config(self):
    """ConstantExplorationConfig round-trips value and is_continuous."""
    payload = {'value': 0.8, 'is_continuous': False}
    parsed = ConstantExplorationConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_io_config_default(self):
    """Optional IOConfig inputs fall back to their default value in representations."""
    cases = [
        # (type, default value, repr type string)
        (IOTypes.BOOL, True, "bool"),
        (IOTypes.FLOAT, 3.4, "float"),
    ]
    for io_type, default, repr_type in cases:
        payload = {
            "name": "input1",
            "description": "some text",
            "type": io_type,
            "is_optional": True,
            "value": default,
        }
        parsed = IOConfig.from_dict(payload)
        assert_equal_dict(parsed.to_dict(), payload)
        expected = OrderedDict(
            (("name", "input1"), ("type", repr_type), ("value", default))
        )
        # A None lookup resolves to the declared default.
        assert parsed.get_repr_from_value(None) == expected
        assert parsed.get_repr() == expected
def test_simple_file_passes(self):
    """A minimal single-master Polyaxon file parses with the expected defaults."""
    plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/simple_file.yml'))
    spec = plxfile.experiment_spec_at(0)

    # File-level defaults: no matrix, no settings, local run type.
    assert plxfile.version == 1
    assert plxfile.project.name == 'project1'
    assert plxfile.project_path == '/tmp/plx_logs/project1'
    assert plxfile.matrix is None
    assert plxfile.settings is None
    assert plxfile.run_type == RunTypes.LOCAL

    # Spec-level defaults: a single-master runnable cluster.
    assert spec.environment is None
    assert spec.experiment_path == '/tmp/plx_logs/project1/0'
    assert spec.is_runnable
    assert spec.cluster_def == ({TaskType.MASTER: 1}, False)
    assert_equal_dict(
        spec.get_cluster().to_dict(),
        {
            TaskType.MASTER: ['127.0.0.1:10000'],
            TaskType.PS: [],
            TaskType.WORKER: [],
        },
    )

    # Model: a regressor with MSE loss, Adam optimizer, and a 4-layer graph.
    model = spec.model
    assert isinstance(model, RegressorConfig)
    assert isinstance(model.loss, MeanSquaredErrorConfig)
    assert isinstance(model.optimizer, AdamConfig)
    assert isinstance(model.graph, GraphConfig)
    graph = model.graph
    assert len(graph.layers) == 4
    assert graph.input_layers == [['images', 0, 0]]
    # The output layer is wired to whatever layer comes last in the graph.
    tail_layer = graph.layers[-1].name
    assert graph.output_layers == [[tail_layer, 0, 0]]

    assert isinstance(spec.train.data_pipeline, TFRecordImagePipelineConfig)
    assert spec.eval is None
def assert_equal_losses(l1, l2):
    """Compare two loss dicts: tensor-valued layer entries first, then the rest.

    Mutates both dicts by popping the layer keys before the dict comparison.
    """
    for layer_key in ('input_layer', 'output_layer'):
        assert_tensors(l1.pop(layer_key, None), l2.pop(layer_key, None))
    assert_equal_dict(l1, l2)
def test_k8s_resources_config(self):
    """K8SResourcesConfig round-trips numeric requests/limits."""
    payload = {'requests': 0.8, 'limits': 1}
    parsed = K8SResourcesConfig.from_dict(payload)
    assert_equal_dict(payload, parsed.to_dict())
def test_bayes_config(self):
    """A full V1Bayes config (metric, runs, utility function, params) round-trips."""
    payload = {
        "kind": "bayes",
        "metric": V1OptimizationMetric(
            name="loss", optimization=V1Optimization.MINIMIZE
        ).to_dict(),
        "numInitialRuns": 2,
        "numIterations": 19,
        "utilityFunction": {
            "acquisitionFunction": AcquisitionFunctions.UCB,
            "kappa": 1.2,
            "gaussianProcess": {
                "kernel": GaussianProcessesKernels.MATERN,
                "lengthScale": 1.0,
                "nu": 1.9,
                "numRestartsOptimizer": 2,
            },
        },
        "params": {"lr": {"kind": "choice", "value": [[0.1], [0.9]]}},
    }
    parsed = V1Bayes.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_utility_function_config(self):
    """UtilityFunctionConfig needs the tuning knob matching its acquisition function."""
    # An acquisition function alone (UCB without kappa, POI without eps) is invalid.
    for acquisition in (AcquisitionFunctions.UCB, AcquisitionFunctions.POI):
        with self.assertRaises(ValidationError):
            UtilityFunctionConfig.from_dict({"acquisitionFunction": acquisition})

    gaussian_process = {
        "kernel": GaussianProcessesKernels.MATERN,
        "lengthScale": 1.0,
        "nu": 1.9,
        "numRestartsOptimizer": 2,
    }

    # UCB with kappa round-trips.
    payload = {
        "acquisitionFunction": AcquisitionFunctions.UCB,
        "kappa": 1.2,
        "gaussianProcess": dict(gaussian_process),
    }
    parsed = UtilityFunctionConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)

    # EI with eps round-trips.
    payload = {
        "acquisitionFunction": AcquisitionFunctions.EI,
        "eps": 1.2,
        "gaussianProcess": dict(gaussian_process),
    }
    parsed = UtilityFunctionConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def assert_equal_metrics(m1, m2):
    """Compare two metric dicts: tensor-valued layer entries first, then the rest.

    Mutates both dicts by popping the layer keys before the dict comparison.
    """
    for layer_key in ('input_layer', 'output_layer'):
        assert_tensors(m1.pop(layer_key, None), m2.pop(layer_key, None))
    assert_equal_dict(m1, m2)
def test_episode_logging_tensor_hook(self):
    """EpisodeLoggingTensorHookConfig round-trips tensors and every_n_episodes."""
    payload = {
        'tensors': ['conv2d_1', 'relu_1'],
        'every_n_episodes': 2,
    }
    parsed = EpisodeLoggingTensorHookConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_gpu_options_config(self):
    """GPUOptionsConfig round-trips memory-fraction and growth settings."""
    payload = {
        'gpu_memory_fraction': 0.8,
        'allow_growth': False,
        'per_process_gpu_memory_fraction': 0.4,
    }
    parsed = GPUOptionsConfig.from_dict(payload)
    assert_equal_dict(payload, parsed.to_dict())
def test_step_logging_tensor_hook(self):
    """StepLoggingTensorHookConfig round-trips, including an explicit None field."""
    payload = {
        'tensors': ['conv2d_1', 'relu_1'],
        'every_n_iter': 10,
        'every_n_secs': None,
    }
    parsed = StepLoggingTensorHookConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_early_stopping(self):
    """EarlyStoppingMetricConfig round-trips metric, value, optimization, and policy."""
    payload = {
        'metric': 'loss',
        'value': 0.1,
        'optimization': Optimization.MINIMIZE,
        'policy': EarlyStoppingPolicy.ALL,
    }
    parsed = EarlyStoppingMetricConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_io_config_flag(self):
    """A boolean IOConfig marked as a flag round-trips through dicts."""
    payload = {
        'name': 'input1',
        'description': 'some text',
        'type': IOTypes.BOOL,
        'is_flag': True,
    }
    parsed = IOConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)
def test_gaussian_process_config(self):
    """GaussianProcessConfig round-trips kernel and Matern hyperparameters."""
    payload = {
        "kernel": GaussianProcessesKernels.MATERN,
        "lengthScale": 1.0,
        "nu": 1.9,
        "numRestartsOptimizer": 2,
    }
    parsed = GaussianProcessConfig.from_dict(payload)
    assert_equal_dict(parsed.to_dict(), payload)