def test_cartesianmix():
    """CartesianMixer enumerates the full cross-product of two parameter sets."""
    grid = ParameterSet()
    grid.add_parameters(p1=[1, 2], p2=["a", "b"])
    explicit = ExplicitParameterSet()
    explicit.add_parameter_tuple(p3=3, p4=10)
    explicit.add_parameter_tuple(p3=4, p4=11)

    mixer = CartesianMixer(grid, explicit)
    # 2 x 2 grid combinations times 2 explicit tuples.
    assert_equal(len(mixer), 8)

    expected = [
        {"p1": v1, "p2": v2, "p3": v3, "p4": v4}
        for v1 in (1, 2)
        for v2 in ("a", "b")
        for v3, v4 in ((3, 10), (4, 11))
    ]

    for count, (idx, tup) in enumerate(mixer):
        # Indices must be emitted in order, and each tuple must be expected.
        assert_equal(count, idx)
        assert_in(tup, expected)

    assert_true(repr(mixer).startswith("CartesianMixer"))
def test_prioritized_paramset():
    """Prioritized values are enumerated first, ordered by priority score."""
    base = ParameterSet()
    base.add_parameters(p1=[1, 2, 3, 4], p2=["a", "b", "c"])
    prioritized = PrioritizedParamSet(base)
    # Each later call gets a higher-order priority bit.
    for name, value in (("p2", "b"), ("p1", 2), ("p1", 3), ("p2", "c")):
        prioritized.prioritize(name, value)

    # (index, params) pairs in expected order; a tuple's score is the sum of
    # 2^k over its prioritized (name, value) pairs, k being registration order.
    expected = [
        (4, {"p1": 2, "p2": "b"}),    # score 12
        (7, {"p1": 3, "p2": "b"}),    # score 10
        (1, {"p1": 1, "p2": "b"}),    # score 8
        (10, {"p1": 4, "p2": "b"}),   # score 8
        (5, {"p1": 2, "p2": "c"}),    # score 5
        (3, {"p1": 2, "p2": "a"}),    # score 4
        (8, {"p1": 3, "p2": "c"}),    # score 3
        (6, {"p1": 3, "p2": "a"}),    # score 2
        (2, {"p1": 1, "p2": "c"}),    # score 1
        (11, {"p1": 4, "p2": "c"}),   # score 1
        (0, {"p1": 1, "p2": "a"}),    # score 0
        (9, {"p1": 4, "p2": "a"}),    # score 0
    ]
    assert_equal(list(prioritized), expected)
def test_constrainparamset():
    """Constraints filter out parameter combinations failing the predicate."""
    base = ParameterSet()
    base.add_parameters(p1=[1, 2, 3], p2=["a", "b"])
    constrained = ConstrainedParameterSet(base)
    # Keep every tuple with p2 == "a"; for p2 == "b", require an even p1.
    constrained.add_constraints(
        c1=lambda p1, p2: True if p2 == "a" else p1 % 2 == 0)

    # Survivors: (1, a), (2, a), (3, a), (2, b)
    assert_equal(len(constrained), 4)
    allowed = [
        {"p1": 1, "p2": "a"},
        {"p1": 2, "p2": "a"},
        {"p1": 3, "p2": "a"},
        {"p1": 2, "p2": "b"},
    ]
    for _, params in constrained:
        assert_in(params, allowed)
def test_paramset_separator():
    """Separators freeze earlier combinations; later additions extend the set."""
    ps = ParameterSet()
    ps.add_parameters(p1=[1, 2], p2=["a", "b"])
    ps.add_separator(p3="param")
    ps.add_parameters(p1=3)

    # 4 combinations before the separator + 2 new ones with p1=3.
    assert_equal(len(ps), 6)
    for index, params in ps:
        assert_equal(params["p3"], "param")
        if index < 4:
            assert_in(params["p1"], [1, 2])
        else:
            assert_equal(params["p1"], 3)

    ps.add_parameters(p2="c")
    assert_equal(len(ps), 9)
    hits = 0
    for index, params in ps:
        assert_equal(params["p3"], "param")
        if index < 4:
            # Pre-separator combinations are untouched by later additions.
            assert_in(params["p1"], [1, 2])
            assert_in(params["p2"], ["a", "b"])
        if params["p1"] == 3 and params["p2"] == "c":
            hits += 1
    # Exactly one combination pairs the two post-separator values.
    assert_equal(hits, 1)

    # A brand-new parameter cannot be introduced after a separator.
    assert_raises(ValueError, ps.add_parameters, p4=10)
def test_experiment():
    """`start` and `capacity` bound which computations an Experiment yields."""
    params = ParameterSet()
    params.add_parameters(x1=range(3), x2=range(3))
    experiment = Experiment(__EXP_NAME__, params, TestComputation,
                            IntrospectStorage)
    # Full grid: 3 x 3 computations.
    assert_equal(len(list(experiment.yield_computations())), 9)
    # start=3 skips computations 0, 1 and 2.
    assert_equal(len(list(experiment.yield_computations(start=3))), 6)
    # capacity=6 drops computations 6, 7 and 8.
    assert_equal(len(list(experiment.yield_computations(capacity=6))), 6)
def test_paramset_get_indices_with():
    """get_indices_with returns only indices whose parameters match the query."""
    ps = ParameterSet()
    ps.add_parameters(p1=[1, 2], p2=["a", "b"])
    ps.add_separator(p3="param")
    ps.add_parameters(p1=3, p2="c")

    for index in ps.get_indices_with(p1={3}):
        # Indices 0-3 hold the [1, 2] x ["a", "b"] grid, so p1=3 lives above 3.
        assert_less(3, index)
        assert_equal(ps[index]["p1"], 3)

    # No combination has p1=4.
    assert_equal(len(list(ps.get_indices_with(p1={4}))), 0)
def environment_integration(environment):
    """Check that *environment* dispatches the experiment without error.

    Can only test whether the computation was issued correctly; prints the
    environment's repr so a failure identifies which environment was tested.
    """
    print(repr(environment))  # In case of error, prints the environment type
    parameter_set = ParameterSet()
    parameter_set.add_parameters(x1=range(3), x2=range(3))
    experiment = Experiment(__EXP_NAME__, parameter_set, TestComputation,
                            PickleStorage)
    try:
        # Keep the try body minimal: previously assert_equal sat inside the
        # try, so its AssertionError was caught below and re-reported with a
        # misleading "environment raised" message.
        error_code = environment.run(experiment, start=2, capacity=5)
    except Exception:  # was a bare `except:`; let SystemExit/KeyboardInterrupt through
        assert_true(False, "An exception was raised by the environment")
        raise  # defensive; assert_true(False) already raised
    assert_equal(error_code, 0)
def test_debug_run():
    """A debug environment runs computations without persisting anything."""
    exp_name = "TestDebugParserRun"
    monitor = Monitor(exp_name)
    assert_equal(len(monitor), 0)

    environment, _ = DebugParser().parse(["--verbose"])
    params = ParameterSet()
    params.add_parameters(x1=range(3), x2=range(3))
    environment.run(Experiment(exp_name, params, TestComputation))

    # Nothing must have been recorded by the debug run.
    monitor.refresh()
    assert_equal(len(monitor), 0)
def test_paramset_getitem():
    """Indexing a ParameterSet agrees with its iteration order."""
    ps = ParameterSet()
    ps.add_parameters(p1=[1, 2], p2=["a", "b"])
    ps.add_separator(p3="param")
    ps.add_parameters(p1=3, p2="c")
    for index, params in ps:
        assert_equal(params, ps[index])
def test_session():
    """A session accepts computations only while its context is open."""
    params = ParameterSet()
    params.add_parameters(x1=range(3), x2=range(3))
    experiment = Experiment(__EXP_NAME__, params, TestComputation,
                            IntrospectStorage)
    session = InSituEnvironment(fail_fast=True).create_session(experiment)

    # Running before entering the context manager must be rejected.
    assert_false(session.is_open())
    assert_raises(ValueError, partial(session.run, TestComputation()))

    with session:
        for lazy_computation in experiment.yield_computations():
            session.run(lazy_computation)

    # All 9 computations were launched, and the session is closed again.
    assert_equal(session.n_launch, 9)
    assert_false(session.is_open())
    assert_raises(ValueError, partial(session.run, TestComputation()))
def do_auto_refresh(auto_refresh):
    """A computation completed mid-iteration is skipped only with auto_refresh."""
    params = ParameterSet()
    params.add_parameters(x1=range(3), x2=range(3))
    experiment = Experiment("{}_1".format(__EXP_NAME__), params,
                            TestComputation)
    # The full grid holds 9 computations.
    assert_equal(len(experiment), 9)

    yielded = 0
    generator = experiment.yield_computations(auto_refresh=auto_refresh)
    for position, _ in enumerate(generator):
        if position == 0:
            # Mark computation #6 as completed while iterating; with
            # auto_refresh enabled the generator should notice and skip it.
            comp_name = Experiment.name_computation(experiment.exp_name, 6)
            PickleStorage(experiment.exp_name).update_state(
                CompletedState(comp_name))
        yielded += 1

    print("Auto refresh?", auto_refresh, "--", yielded)
    assert_equal(yielded, 8 if auto_refresh else 9)
def in_situ_env(environment):
    """Run a small experiment in *environment* and verify persisted results."""
    print(repr(environment))  # In case of error, prints the environment type
    parameter_set = ParameterSet()
    parameter_set.add_parameters(x1=range(3), x2=range(3))
    experiment = Experiment(__EXP_NAME__, parameter_set, TestComputation,
                            PickleStorage)
    try:
        # Keep the try body minimal: previously assert_equal sat inside the
        # try, so its AssertionError was caught below and re-reported with a
        # misleading "environment raised" message.
        error_code = environment.run(experiment, start=2, capacity=5)
    except Exception:  # was a bare `except:`; let SystemExit/KeyboardInterrupt through
        assert_true(False, "An exception was raised by the environment")
        raise  # defensive; assert_true(False) already raised
    assert_equal(error_code, 0)

    storage = experiment.storage
    parameters_ls, result_ls = storage.load_params_and_results()
    # start=2, capacity=5 -> exactly 5 computations were run and stored.
    assert_equal(len(parameters_ls), 5)
    assert_equal(len(result_ls), 5)
    for parameters, result in zip(parameters_ls, result_ls):
        assert_equal(parameters["x1"] * parameters["x2"], result["mult"])
def test_paramset_yield():
    """Iterating a ParameterSet yields the cartesian product of its domains."""
    ps = ParameterSet()
    # An empty set still yields the single null dictionary.
    assert_equal(len(ps), 1)

    ps.add_parameters(p1=1, p2=[2, 3], p3="param")
    ps.add_parameters(p1=4, p2=5)
    # p1 in {1, 4}, p2 in {2, 3, 5}, p3 fixed -> 6 combinations.
    expected = [
        {"p1": v1, "p2": v2, "p3": "param"}
        for v1 in (1, 4)
        for v2 in (2, 3, 5)
    ]
    assert_equal(len(ps), 6)

    seen = 0
    for _, params in ps:
        assert_in(params, expected)
        seen += 1
    assert_equal(seen, 6)
    assert_equal(len(ps), 6)
# NOTE(review): excerpt begins mid-class — the `__init__` signature of the
# enclosing computation class (presumably MyComputation, used below) is above
# this view. Forward the standard arguments, then keep our extra one.
super().__init__(exp_name, comp_name, context, storage_factory)
self.my_environment_parameter = my_environment_parameter

def run(self, result, x, z, w, y=2, **parameters):
    """Fill *result* with a product and a sum, then sleep a random while."""
    import time
    from random import randint
    # We can access our environment parameter
    print(self.my_environment_parameter)
    result["multiply"] = x * y
    result["sum"] = z + w
    # Simulate a long-running computation (1-10 s).
    time.sleep(randint(1, 10))

if __name__ == "__main__":
    set_stdout_logging()
    parser = CTParser()
    environment, _ = parser.parse()
    param_set = ParameterSet()
    param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6])
    # We use the :meth:`partialize` class method to specialize the
    # computation class with our environment parameter
    my_factory = MyComputation.partialize(my_environment_parameter="Test")
    experiment = Experiment("BasicUsage", param_set, my_factory)
    environment.run(experiment)
# NOTE(review): excerpt begins inside a `run` method whose `def` line is above
# this view; `result`, `x`, `y`, `z`, `w` are its parameters.
import time
from random import randint
result["multiply"] = x * y
result["sum"] = z + w
# Simulate a long-running computation (1-10 s).
time.sleep(randint(1, 10))

if __name__ == "__main__":
    set_stdout_logging()
    parser = CTParser()
    environment, _ = parser.parse()
    param_set = ParameterSet()
    param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6])
    # We decorate our `ParameterSet` to be able to add some priorities
    param_set = PrioritizedParamSet(param_set)
    # We add the priority. The method is expecting the name of the parameter
    # and the domain value to prioritize
    param_set.prioritize('x', 3)
    # It is possible to give the next priority to another parameter:
    # param_set.prioritize('w', 5)
    # Or to give the next priority to another value of the same parameter:
    # param_set.prioritize('x', 2)
    experiment = Experiment('BasicUsage', param_set, MyComputation)
    environment.run(experiment)
# NOTE(review): excerpt begins inside a computation's `run` method (the `def`
# line is above this view). The loop below is a bisection search for a root
# of P on the interval [a, b]: it keeps the sub-interval where P changes sign.
for i in range(100):
    m = (a+b)/2.
    if P(a)*P(m) < 0:
        b = m
    else:
        a = m
    # Notify the progression of the task. `notify_progress` takes as
    # input a float between 0 and 1
    self.notify_progress((i+1)/100.)
    # Wait some time to be able to see the update with
    # `clustertools count`
    time.sleep(1)
result["root"] = m

if __name__ == "__main__":
    set_stdout_logging()
    parser = CTParser()
    environment, _ = parser.parse()
    param_set = ParameterSet()
    param_set.add_parameters(a=1, b=2)
    experiment = Experiment("BasicUsageMonitoring", param_set, MyComputation)
    environment.run(experiment)
""" Inherit from `Computation` and redefine the `run` method as you which """ def run(self, result, x, z, w, y=2, **parameters): import time from random import randint result["multiply"] = x * y result["sum"] = z + w time.sleep(randint(1, 10)) if __name__ == "__main__": set_stdout_logging() parser = CTParser() environment, _ = parser.parse() param_set = ParameterSet() param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6]) # We decorate our `ParameterSet` to be able to prevent some computations param_set = ConstrainedParameterSet(param_set) # We add the constrain param_set.add_constraints(not_x3_and_w6=not_x_eq_3_and_w_eq_6) experiment = Experiment('BasicUsage', param_set, MyComputation) environment.run(experiment)
# NOTE(review): excerpt begins inside a `run` method whose `def` line is above
# this view; `result`, `x`, `y`, `z`, `w` are its parameters.
import time
from random import randint
result["multiply"] = x * y
result["sum"] = z + w
# Simulate a long-running computation (1-10 s).
time.sleep(randint(1, 10))

if __name__ == "__main__":
    set_stdout_logging()
    parser = CTParser()
    # Add custom argument to parser
    parser.add_argument("y", help="The value of the `y` parameter", type=int)
    environment, namespace = parser.parse()
    param_set = ParameterSet()
    # add `y` given in command line to parameter set
    param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6], y=namespace.y)
    # Change the name of the computation according to the value of `y`.
    # Note that this is mandatory. Otherwise, Clustertools cannot distinguish
    # between the two "sub" experiments
    exp_name = "BasicUsage_{}".format(namespace.y)
    print("This is experiment", exp_name)
    experiment = Experiment(exp_name, param_set, MyComputation)
    environment.run(experiment)
# NOTE(review): excerpt is mid-script; `namespace` comes from an argument
# parser above this view. `_get_kwargs` is a private argparse API — confirm
# it is still safe to rely on when upgrading Python.
env_params = dict(namespace._get_kwargs())
data_path = "/scratch/users/rmormont/tissuenet"
# Filesystem layout and execution settings forwarded to the computations.
env = {
    "image_path": os.path.join(data_path, "patches"),
    "metadata_path": os.path.join(data_path, "metadata"),
    "model_path": os.path.join(data_path, "models"),
    "device": namespace.device,
    "n_jobs": namespace.n_jobs
}
os.makedirs(env["image_path"], exist_ok=True)
os.makedirs(env["metadata_path"], exist_ok=True)
os.makedirs(env["model_path"], exist_ok=True)

# Hyper-parameter grid; single-value domains are kept in lists so additional
# values can be added without changing the call shape.
param_set = ParameterSet()
param_set.add_parameters(pretrained=["imagenet", "mtdp"])
param_set.add_parameters(architecture=["densenet121", "resnet34"])
param_set.add_parameters(epochs=60)
param_set.add_parameters(batch_size=[24])
param_set.add_parameters(zoom_level=[2])
param_set.add_parameters(train_size=0.8)
param_set.add_parameters(random_seed=42)
param_set.add_parameters(learning_rate=[0.001])
param_set.add_parameters(aug_elastic_alpha_low=[80])
param_set.add_parameters(aug_elastic_alpha_high=[120])
param_set.add_parameters(aug_elastic_sigma_low=[9.0])
param_set.add_parameters(aug_elastic_sigma_high=[11.0])
param_set.add_parameters(aug_hed_bias_range=[0.0125, 0.025, 0.05, 0.1])
param_set.add_parameters(aug_hed_coef_range=[0.0125, 0.025, 0.05, 0.1])
# NOTE(review): this `run` is a method of a computation class whose `class`
# line is above this view.
def run(self, result, x, z, w, y=2, n=0, **parameters):
    """Fill *result* with a product and a noise-perturbed sum.

    `n` is the standard deviation of the gaussian noise added to the sum.
    """
    import time
    from random import randint, normalvariate
    result["multiply"] = x * y
    # Simulate the effect of the new parameter: in this case we add
    # some gaussian noise to the computation of the sum
    result["sum"] = z + w + normalvariate(0, n)
    # Simulate a long-running computation (1-10 s).
    time.sleep(randint(1, 10))

if __name__ == "__main__":
    set_stdout_logging()
    parser = CTParser()
    environment, _ = parser.parse()
    param_set = ParameterSet()
    param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6])
    # Add the separator, the default value for what was computed previously and
    # the new parameter values
    # -- Separator (compare this to `004_adding_parameter_values.py`)
    param_set.add_separator(n=0)  # Notice we pass the default value
    # -- new parameter values
    param_set.add_parameters(n=[0.01, 0.001])
    experiment = Experiment("BasicUsage", param_set, MyComputation)
    environment.run(experiment)
def test_paramset_list_insertion():
    """add_single_values treats a tuple as one atomic value, not a domain."""
    ps = ParameterSet()
    ps.add_single_values(p1=(1, 2, 3), p2=(1, 2))
    # No cartesian expansion takes place: a single combination exists.
    assert_equal(len(ps), 1)
    for _, params in ps:
        assert_equal(params, {"p1": (1, 2, 3), "p2": (1, 2)})
parser.add_argument("--data_path", "--data_path", dest="data_path") parser.add_argument("--device", dest="device", default="cuda:0") parser.add_argument("--n_jobs", dest="n_jobs", default=1, type=int) _ = Cytomine._add_cytomine_cli_args(parser.parser) return parser if __name__ == "__main__": set_stdout_logging() # Define the parameter set: the domain each variable can take environment, namespace = env_parser().parse() env_params = dict(namespace._get_kwargs()) os.makedirs(namespace.save_path, exist_ok=True) param_set = ParameterSet() param_set.add_parameters(batch_size=[8]) param_set.add_parameters(epochs=[5]) param_set.add_parameters(overlap=[0]) param_set.add_parameters(tile_size=[512, 256]) param_set.add_parameters(lr=[0.001]) param_set.add_parameters(init_fmaps=[8]) param_set.add_parameters(zoom_level=[0, 1, 2]) def make_build_fn(**kwargs): def build_fn(exp_name, comp_name, context="n/a", storage_factory=PickleStorage): return TrainComputation(exp_name, comp_name,
set_stdout_logging() # Define the parameter set: the domain each variable can take environment, namespace = env_parser().parse() env_params = dict(namespace._get_kwargs()) data_path = "/scratch/users/rmormont/tissuenet" env = {"image_path": os.path.join(data_path, "patches"), "metadata_path": os.path.join(data_path, "metadata"), "model_path": os.path.join(data_path, "models"), "device": namespace.device, "n_jobs": namespace.n_jobs } os.makedirs(env["image_path"], exist_ok=True) os.makedirs(env["metadata_path"], exist_ok=True) os.makedirs(env["model_path"], exist_ok=True) param_set = ParameterSet() param_set.add_parameters(epochs=40) param_set.add_parameters(batch_size=[8]) param_set.add_parameters(zoom_level=[0]) param_set.add_parameters(train_size=0.7) param_set.add_parameters(random_seed=42) param_set.add_parameters(learning_rate=[0.001]) # Wrap it together as an experiment experiment = Experiment("tissuenet-e2e-train-maxzoom", param_set, CliComputationFactory(main, **env)) # Finally run the experiment environment.run(experiment)
# NOTE(review): excerpt begins mid-call (the opening `parser.add_argument(`
# lies above this view) and ends mid-definition; the surrounding `def` lines
# are outside this view.
default=-1, type=int)
parser.add_argument("--image_id", dest="image_id", default=-1, type=int)
_ = Cytomine._add_cytomine_cli_args(parser.parser)
return parser

if __name__ == "__main__":
    set_stdout_logging()
    # Define the parameter set: the domain each variable can take
    environment, namespace = env_parser().parse()
    env_params = dict(namespace._get_kwargs())
    os.makedirs(namespace.save_path, exist_ok=True)
    param_set = ParameterSet()
    param_set.add_parameters(image_id=[77150767, 77150761, 77150809])
    param_set.add_parameters(batch_size=[8])
    param_set.add_parameters(tile_overlap=[0])
    param_set.add_parameters(tile_size=256)
    param_set.add_parameters(init_fmaps=8)
    param_set.add_parameters(zoom_level=2)
    # Separator before overriding tile_size and zoom_level for a second batch
    # of computations (earlier combinations keep the values above).
    param_set.add_separator()
    param_set.add_parameters(tile_size=512)
    param_set.add_parameters(zoom_level=0)

    def make_build_fn(**kwargs):
        # Factory producing a computation builder bound to **kwargs.
        def build_fn(exp_name, comp_name, context="n/a",
                     storage_factory=PickleStorage):