def test_experiment():
    """The start/capacity windows of `yield_computations` must be honored."""
    params = ParameterSet()
    params.add_parameters(x1=range(3), x2=range(3))
    experiment = Experiment(__EXP_NAME__, params, TestComputation,
                            IntrospectStorage)
    # Full 3x3 grid: 9 computations in total
    assert_equal(len(list(experiment.yield_computations())), 9)
    # start=3: computations 0, 1 and 2 are skipped
    assert_equal(len(list(experiment.yield_computations(start=3))), 6)
    # capacity=6: computations 6, 7 and 8 are dropped
    assert_equal(len(list(experiment.yield_computations(capacity=6))), 6)
def test_len():
    """`len(experiment)` must agree with iteration and stay stable after it."""
    experiment = Experiment("TestLen")
    experiment.add_params(p1=1, p2=[2, 3], p3="param")
    experiment.add_params(p1=4, p2=5)
    assert_equal(len(experiment), 6)
    # Iterating must yield exactly as many items as len() reports
    count = sum(1 for _ in experiment)
    assert_equal(count, 6)
    # ... and iteration must not change the reported length
    assert_equal(len(experiment), 6)
def test_session():
    """A session only accepts `run` calls while it is open."""
    params = ParameterSet()
    params.add_parameters(x1=range(3), x2=range(3))
    experiment = Experiment(__EXP_NAME__, params, TestComputation,
                            IntrospectStorage)
    env = InSituEnvironment(fail_fast=True)
    session = env.create_session(experiment)
    # Before entering the context manager, running is forbidden
    assert_false(session.is_open())
    assert_raises(ValueError, partial(session.run, TestComputation()))
    with session:
        for lazy_computation in experiment.yield_computations():
            session.run(lazy_computation)
    # All 9 computations must have been launched through the session
    assert_equal(session.n_launch, 9)
    # Once closed, the session rejects further runs
    assert_false(session.is_open())
    assert_raises(ValueError, partial(session.run, TestComputation()))
def test_exp_diff():
    """`experiment_diff` must return exactly the not-yet-computed pairs."""
    experiment = Experiment("TestExpDiff")
    experiment.add_params(p1=1, p2=[2, 3], p3="param")
    experiment.add_params(p1=4, p2=5)
    labels, params = [], []
    for label, param in experiment:
        labels.append(label)
        params.append(param)
    # Pretend the first `i` computations are done and diff against them
    for i in range(len(labels)):
        already_computed = dict(zip(labels[:i], params[:i]))
        remaining = experiment_diff(experiment, already_computed)
        for label, param in remaining:
            assert_in(label, labels[i:])
            assert_in(param, params[i:])
def do_auto_refresh(auto_refresh):
    """With auto_refresh, a computation completed mid-iteration is skipped."""
    params = ParameterSet()
    params.add_parameters(x1=range(3), x2=range(3))
    experiment = Experiment("{}_1".format(__EXP_NAME__), params,
                            TestComputation)
    # 3x3 grid: there should be 9 computations
    assert_equal(len(experiment), 9)
    count = 0
    generator = experiment.yield_computations(auto_refresh=auto_refresh)
    for index, _ in enumerate(generator):
        if index == 0:
            # Mark computation #6 as completed while we are still iterating
            state = CompletedState(
                Experiment.name_computation(experiment.exp_name, 6))
            PickleStorage(experiment.exp_name).update_state(state)
        count += 1
    print("Auto refresh?", auto_refresh, "--", count)
    # With refresh enabled, the completed computation must not be yielded
    assert_equal(count, 8 if auto_refresh else 9)
def environment_integration(environment):
    """Smoke-test that `environment` issues the computations correctly.

    Only the launch is checked (exit code 0); results are not inspected.
    """
    # In case of error, prints the type of environment
    print(repr(environment))
    parameter_set = ParameterSet()
    parameter_set.add_parameters(x1=range(3), x2=range(3))
    experiment = Experiment(__EXP_NAME__, parameter_set, TestComputation,
                            PickleStorage)
    try:
        error_code = environment.run(experiment, start=2, capacity=5)
        assert_equal(error_code, 0)
    # FIX: was a bare `except:`, which also intercepts SystemExit and
    # KeyboardInterrupt; only genuine errors should fail the test.
    except Exception:
        assert_true(False, "An exception was raised by the environment")
        raise
def test_getitem():
    """Experiments must be indexable both by label and by position."""
    experiment = Experiment("TestItem")
    experiment.add_params(p1=1, p2=[3, 2], p3="param")
    experiment.add_params(p1=4, p2=5)
    # Out-of-range accesses raise KeyError
    assert_raises(KeyError, experiment.get_params_for, -1)
    assert_raises(KeyError, experiment.get_params_for, 10e6)
    labels, params = [], []
    for label, param in experiment:
        labels.append(label)
        params.append(param)
    for position, (label, param) in enumerate(zip(labels, params)):
        # Label-based access
        got_label, got_param = experiment[label]
        assert_equal(label, got_label)
        assert_equal(param, got_param)
        # Position-based access must agree with label-based access
        got_label, got_param = experiment[position]
        assert_equal(label, got_label)
        assert_equal(param, got_param)
def test_debug_run():
    """A debug environment must not persist anything in storage."""
    exp_name = "TestDebugParserRun"
    monitor = Monitor(exp_name)
    assert_equal(len(monitor), 0)
    environment, _ = DebugParser().parse(["--verbose"])
    params = ParameterSet()
    params.add_parameters(x1=range(3), x2=range(3))
    environment.run(Experiment(exp_name, params, TestComputation))
    # Nothing should have been recorded by the debug run
    monitor.refresh()
    assert_equal(len(monitor), 0)
def in_situ_env(environment):
    """Run the experiment in-situ and verify the stored results.

    Launches computations 2..6 (start=2, capacity=5) and checks that each
    stored result satisfies ``mult == x1 * x2``.
    """
    # In case of error, prints the type of environment
    print(repr(environment))
    parameter_set = ParameterSet()
    parameter_set.add_parameters(x1=range(3), x2=range(3))
    experiment = Experiment(__EXP_NAME__, parameter_set, TestComputation,
                            PickleStorage)
    try:
        error_code = environment.run(experiment, start=2, capacity=5)
        assert_equal(error_code, 0)
    # FIX: was a bare `except:`, which also intercepts SystemExit and
    # KeyboardInterrupt; only genuine errors should fail the test.
    except Exception:
        assert_true(False, "An exception was raised by the environment")
        raise
    storage = experiment.storage
    parameters_ls, result_ls = storage.load_params_and_results()
    assert_equal(len(parameters_ls), 5)  # 5 computations
    assert_equal(len(result_ls), 5)  # 5 computations
    for parameters, result in zip(parameters_ls, result_ls):
        assert_equal(parameters["x1"] * parameters["x2"], result["mult"])
set_stdout_logging()

# Parse the execution environment and its parameters from the command line
environment, namespace = env_parser().parse()
env_params = dict(namespace._get_kwargs())

data_path = "/scratch/users/rmormont/tissuenet"
env = {
    "image_path": os.path.join(data_path, "patches"),
    "metadata_path": os.path.join(data_path, "metadata"),
    "model_path": os.path.join(data_path, "models"),
    "device": namespace.device,
    "n_jobs": namespace.n_jobs,
}
# Make sure every output directory exists before launching
for key in ("image_path", "metadata_path", "model_path"):
    os.makedirs(env[key], exist_ok=True)

# Define the parameter set: the domain each variable can take
param_set = ParameterSet()
param_set.add_parameters(epochs=40)
param_set.add_parameters(batch_size=[8])
param_set.add_parameters(zoom_level=[0])
param_set.add_parameters(train_size=0.7)
param_set.add_parameters(random_seed=42)
param_set.add_parameters(learning_rate=[0.001])

# Wrap it together as an experiment
experiment = Experiment("tissuenet-e2e-train-maxzoom", param_set,
                        CliComputationFactory(main, **env))

# Finally run the experiment
environment.run(experiment)
# Parameter domain: three WSIs, two tiling configurations separated below.
# NOTE: call order matters here — the separator splits the two configs.
param_set = ParameterSet()
param_set.add_parameters(image_id=[77150767, 77150761, 77150809])
param_set.add_parameters(batch_size=[8])
param_set.add_parameters(tile_overlap=[0])
param_set.add_parameters(tile_size=256)
param_set.add_parameters(init_fmaps=8)
param_set.add_parameters(zoom_level=2)
param_set.add_separator()
param_set.add_parameters(tile_size=512)
param_set.add_parameters(zoom_level=0)


def make_build_fn(**kwargs):
    """Return a computation factory with `kwargs` bound as extra arguments."""
    def build_fn(exp_name, comp_name, context="n/a",
                 storage_factory=PickleStorage):
        return ProcessWSIComputation(exp_name, comp_name, **kwargs,
                                     context=context,
                                     storage_factory=storage_factory)
    return build_fn


# Wrap it together as an experiment
experiment = Experiment("thyroid-unet-inference", param_set,
                        make_build_fn(**env_params))

# Finally run the experiment
environment.run(experiment)
def run(self, result, x, z, w, y=2, n=0, **parameters):
    """Store x*y under "multiply" and a noisy sum under "sum".

    Simulate the effect of the new parameter `n`: gaussian noise with
    standard deviation `n` is added to the computation of the sum.
    Sleeps 1-10 s to simulate actual work.
    """
    import time
    from random import randint, normalvariate
    result["multiply"] = x * y
    # Simulate the effect of the new parameter: in this case we add
    # some gaussian noise to the computation of the sum
    result["sum"] = z + w + normalvariate(0, n)
    time.sleep(randint(1, 10))


if __name__ == "__main__":
    set_stdout_logging()
    parser = CTParser()
    environment, _ = parser.parse()
    param_set = ParameterSet()
    param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6])
    # Add the separator, the default value for what was computed previously
    # and the new parameter values
    # -- Separator (compare this to `004_adding_parameter_values.py`)
    param_set.add_separator(n=0)  # Notice we pass the default value
    # -- new parameter values
    param_set.add_parameters(n=[0.01, 0.001])
    experiment = Experiment("BasicUsage", param_set, MyComputation)
    environment.run(experiment)
# NOTE(review): tail of a parameter definition whose opening call is outside
# this chunk — presumably `param_set.add_parameters(...)`; confirm upstream.
                         model_filename=filename, tile_size=320,
                         train_size=0.8, batch_size=32, random_seed=42)

set_stdout_logging()

# Parse the execution environment and its parameters from the command line
environment, namespace = env_parser().parse()
env_params = dict(namespace._get_kwargs())

data_path = "/scratch/users/rmormont/tissuenet"
env = {
    "image_path": os.path.join(data_path, "wsis"),
    "metadata_path": os.path.join(data_path, "metadata"),
    "model_path": os.path.join(data_path, "models"),
    "device": namespace.device,
    "n_jobs": namespace.n_jobs
}
# Make sure the data folders exist before launching
os.makedirs(env["image_path"], exist_ok=True)
os.makedirs(env["metadata_path"], exist_ok=True)
os.makedirs(env["model_path"], exist_ok=True)
print(environment)

# Wrap it together as an experiment
experiment = Experiment("tissuenet-e2e-eval-3rd", param_set,
                        CliComputationFactory(main, **env))

# Finally run the experiment
environment.run(experiment)
# Hyper-parameter grid for the elastic/HED-augmentation training runs
param_set.add_parameters(epochs=60)
param_set.add_parameters(batch_size=[24])
param_set.add_parameters(zoom_level=[2])
param_set.add_parameters(train_size=0.8)
param_set.add_parameters(random_seed=42)
param_set.add_parameters(learning_rate=[0.001])
param_set.add_parameters(aug_elastic_alpha_low=[80])
param_set.add_parameters(aug_elastic_alpha_high=[120])
param_set.add_parameters(aug_elastic_sigma_low=[9.0])
param_set.add_parameters(aug_elastic_sigma_high=[11.0])
param_set.add_parameters(aug_hed_bias_range=[0.0125, 0.025, 0.05, 0.1])
param_set.add_parameters(aug_hed_coef_range=[0.0125, 0.025, 0.05, 0.1])
param_set.add_separator()
param_set.add_parameters(aug_elastic_alpha_high=[150])
param_set.add_parameters(aug_elastic_sigma_low=[7.0])

constrained = ConstrainedParameterSet(param_set)
constrained.add_constraints(bsize_zoom_arch=cstrnt_batchsize_zoom)
constrained.add_constraints(arch_pretr=cstrnt_pretraining)
constrained.add_constraints(elastic_sigma=partial(
    cstrnt_low_lt_high, param_prefix="aug_elastic_sigma_"))
# FIX: this constraint was registered under the key `elastic_sigma` as well,
# overwriting the sigma constraint above; give the alpha-range constraint its
# own key so that both low<high constraints are enforced.
constrained.add_constraints(elastic_alpha=partial(
    cstrnt_low_lt_high, param_prefix="aug_elastic_alpha_"))

# Wrap it together as an experiment
experiment = Experiment("tissuenet-e2e-train-3rd", constrained,
                        CliComputationFactory(main, **env))

# Finally run the experiment
environment.run(experiment)
# NOTE(review): tail of `__init__` — its `def` line is outside this chunk.
# Forward the standard arguments and keep the extra environment parameter.
        super().__init__(exp_name, comp_name, context, storage_factory)
        self.my_environment_parameter = my_environment_parameter

    def run(self, result, x, z, w, y=2, **parameters):
        """Store x*y under "multiply" and z+w under "sum"; sleeps 1-10 s."""
        import time
        from random import randint
        # We can access our environment parameter
        print(self.my_environment_parameter)
        result["multiply"] = x * y
        result["sum"] = z + w
        time.sleep(randint(1, 10))


if __name__ == "__main__":
    set_stdout_logging()
    parser = CTParser()
    environment, _ = parser.parse()
    param_set = ParameterSet()
    param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6])
    # We use the :meth:`partialize` class method to specialize the
    # computation class with our environment parameter
    my_factory = MyComputation.partialize(my_environment_parameter="Test")
    experiment = Experiment("BasicUsage", param_set, my_factory)
    environment.run(experiment)
# NOTE(review): fragment of a bisection loop — the enclosing method and loop
# header are outside this chunk; `a`, `b`, `m`, `i` come from that context.
        else:
            a = m
        # Update result at each iteration. Saving the iteration number is
        # just there for nicer formatting on the result side
        result["root"] = m
        result["iteration"] = i
        if i % 10 == 0:
            # Saving the result every 10 iterations
            self.save_result()
        # Wait some time to be able to see the update when running
        # `python 009_partial_results.py`
        time.sleep(1)
        i += 1


if __name__ == "__main__":
    set_stdout_logging()
    parser = CTParser()
    environment, _ = parser.parse()
    param_set = ParameterSet()
    param_set.add_parameters(a=1, b=2)
    experiment = Experiment("BasicUsagePartialSave", param_set, MyComputation)
    environment.run(experiment)
# NOTE(review): body of a computation's `run` — the method definition is
# outside this chunk. Bisection search for a root of P on [a, b].
    for i in range(100):
        m = (a+b)/2.
        if P(a)*P(m) < 0:
            b = m
        else:
            a = m
        # Notify the progression of the task. `notify_progress` takes as
        # input a float between 0 and 1
        self.notify_progress((i+1)/100.)
        # Wait some time to be able to see the update with
        # `clustertools count`
        time.sleep(1)
    result["root"] = m


if __name__ == "__main__":
    set_stdout_logging()
    parser = CTParser()
    environment, _ = parser.parse()
    param_set = ParameterSet()
    param_set.add_parameters(a=1, b=2)
    experiment = Experiment("BasicUsageMonitoring", param_set, MyComputation)
    environment.run(experiment)
""" Inherit from `Computation` and redefine the `run` method as you which """ def run(self, result, x, z, w, y=2, **parameters): import time from random import randint result["multiply"] = x * y result["sum"] = z + w time.sleep(randint(1, 10)) if __name__ == "__main__": set_stdout_logging() parser = CTParser() environment, _ = parser.parse() param_set = ParameterSet() param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6]) # We decorate our `ParameterSet` to be able to prevent some computations param_set = ConstrainedParameterSet(param_set) # We add the constrain param_set.add_constraints(not_x3_and_w6=not_x_eq_3_and_w_eq_6) experiment = Experiment('BasicUsage', param_set, MyComputation) environment.run(experiment)
# NOTE(review): body of a computation's `run` — the method definition is
# outside this chunk; `result`, `x`, `y`, `z`, `w` come from its signature.
        import time
        from random import randint
        result["multiply"] = x * y
        result["sum"] = z + w
        time.sleep(randint(1, 10))


if __name__ == "__main__":
    set_stdout_logging()
    parser = CTParser()
    # Add custom argument to parser
    parser.add_argument("y", help="The value of the `y` parameter", type=int)
    environment, namespace = parser.parse()
    param_set = ParameterSet()
    # add `y` given in command line to parameter set
    param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6], y=namespace.y)
    # Change the name of the computation according to the value of `y`
    # Note that this is mandatory. Otherwise, Clustertools cannot distinguish
    # between the two "sub" experiment
    exp_name = "BasicUsage_{}".format(namespace.y)
    print("This is experiment", exp_name)
    experiment = Experiment(exp_name, param_set, MyComputation)
    environment.run(experiment)
# Make sure the output directory exists before launching anything
os.makedirs(namespace.save_path, exist_ok=True)

# Hyper-parameter grid for the U-Net training runs
param_set = ParameterSet()
param_set.add_parameters(batch_size=[8])
param_set.add_parameters(epochs=[5])
param_set.add_parameters(overlap=[0])
param_set.add_parameters(tile_size=[512, 256])
param_set.add_parameters(lr=[0.001])
param_set.add_parameters(init_fmaps=[8])
param_set.add_parameters(zoom_level=[0, 1, 2])


def make_build_fn(**kwargs):
    """Return a computation factory with `kwargs` bound as extra arguments."""
    def build_fn(exp_name, comp_name, context="n/a",
                 storage_factory=PickleStorage):
        return TrainComputation(exp_name, comp_name, **kwargs,
                                context=context,
                                storage_factory=storage_factory)
    return build_fn


# Wrap it together as an experiment
experiment = Experiment("thyroid-unet-training", param_set,
                        make_build_fn(**env_params))

# Finally run the experiment
environment.run(experiment)