def run(self, print_to_console = True, show_figs = None, version = None, test_mode=None, keep_record=None, **experiment_record_kwargs):
    """
    Run the experiment, and return the ExperimentRecord that is generated.

    :param print_to_console: Print to console (as well as logging to file)
    :param show_figs: Show figures (as well as saving to file)
    :param version: String identifying which version of the experiment to run (refer to "versions" argument of __init__)
    :param test_mode: Run in "test_mode".  This sets the global "test_mode" flag when running the
        experiment.  This flag can be used to, for example, shorten a training session to verify
        that the code runs.  Can be:
            True: Run in test mode
            False: Don't run in test mode:
            None: Keep the current state of the global "is_test_mode()" flag.
    :param keep_record: Keep the folder that results are saved into.
            True: Results are saved into a folder
            False: Results folder is deleted at the end.
            None: If "test_mode" is true, then delete results at end, otherwise save them.
    :param experiment_record_kwargs: Passed to the "record_experiment" context.
    :return: The ExperimentRecord object, if keep_record is true, otherwise None
    """
    if test_mode is None:
        test_mode = is_test_mode()
    if keep_record is None:
        # Throwaway test runs are not kept on disk by default.
        keep_record = not test_mode

    old_test_mode = is_test_mode()
    set_test_mode(test_mode)
    try:
        # Bug fix: previously, an exception raised inside the experiment skipped the
        # set_test_mode(old_test_mode) call, permanently leaving the global test-mode
        # flag in the experiment's state.  The finally clause restores it on all paths.
        ARTEMIS_LOGGER.info('{border} {mode} Experiment: {name}{version} {border}'.format(
            border='=' * 10,
            mode="Testing" if test_mode else "Running",
            name=self.name,
            version=(' - ' + version) if version is not None else ''))
        with record_experiment(name=self.name, info=self.info, print_to_console=print_to_console,
                show_figs=show_figs, use_temp_dir=not keep_record, **experiment_record_kwargs) as exp_rec:
            results = self()  # Execute the experiment function itself.
            if self.display_function is not None:
                self.display_function(results)
            exp_rec.set_result(results)
        ARTEMIS_LOGGER.info('{border} Done {mode} Experiment: {name}{version} {border}'.format(
            border='=' * 10,
            mode="Testing" if test_mode else "Running",
            name=self.name,
            version=(' - ' + version) if version is not None else ''))
    finally:
        set_test_mode(old_test_mode)
    return exp_rec
) # Previously, this was not caught, leading you not to notice your typo


def test_catch_kwarg_error():
    # Verify that disk-memoization keys include keyword arguments: a changed kwarg
    # value forces recomputation, while a repeated call is served from the cache.
    # NOTE(review): presumably compute_slow_thing_with_kwargs returns
    # (value, compute_timestamp) -- confirm against its definition.
    clear_memo_files_for_function(compute_slow_thing_with_kwargs)
    t = time.time()
    num, t1 = compute_slow_thing_with_kwargs(1, b=2, c=4)
    assert num == (1 + 2) / 4.
    assert t1 > t  # Cache miss: computed after the test started
    num, t2 = compute_slow_thing_with_kwargs(1, b=2, c=6)
    assert num == (1 + 2) / 6.
    assert t2 > t1  # Different value for kwarg c -> recomputed
    num, t3 = compute_slow_thing_with_kwargs(1, b=2, c=4)
    assert num == (1 + 2) / 4.
    assert t3 == t1  # Same args as first call -> cached result (original timestamp)


if __name__ == '__main__':
    set_test_mode(True)
    test_unnoticed_wrong_arg_bug_is_dead()
    test_catch_kwarg_error()
    test_clear_arror_for_wrong_arg()
    test_clear_error_for_missing_arg()
    test_memoize_to_disk_and_cache()
    test_memoize_to_disk()
    test_complex_args()
def run(self, print_to_console=True, show_figs=None, test_mode=None, keep_record=None, raise_exceptions=True, display_results=True, **experiment_record_kwargs):
    """
    Run the experiment, and return the ExperimentRecord that is generated.

    :param print_to_console: Print to console (as well as logging to file)
    :param show_figs: Show figures (as well as saving to file)
    :param test_mode: Run in "test_mode".  This sets the global "test_mode" flag when running the
        experiment.  This flag can be used to, for example, shorten a training session to verify
        that the code runs.  Can be:
            True: Run in test mode
            False: Don't run in test mode:
            None: Keep the current state of the global "is_test_mode()" flag.
    :param keep_record: Keep the folder that results are saved into.
            True: Results are saved into a folder
            False: Results folder is deleted at the end.
            None: If "test_mode" is true, then delete results at end, otherwise save them.
    :param raise_exceptions: True to raise any exception that occurs when running the experiment.
        False to catch it, print the error, and move on.
    :param display_results: If True and this experiment has a display_function, call it on the results.
    :param experiment_record_kwargs: Passed to the "record_experiment" context.
    :return: The ExperimentRecord object, if keep_record is true, otherwise None
    """
    if test_mode is None:
        test_mode = is_test_mode()
    if keep_record is None:
        # NOTE(review): keep_record_by_default is not defined in this method -- presumably a
        # module-level setting read here; confirm it exists at file scope.
        keep_record = keep_record_by_default if keep_record_by_default is not None else not test_mode

    old_test_mode = is_test_mode()
    set_test_mode(test_mode)
    ARTEMIS_LOGGER.info(
        '{border} {mode} Experiment: {name} {border}'.format(
            border='=' * 10, mode="Testing" if test_mode else "Running",
            name=self.name))
    EIF = ExpInfoFields  # Shorthand alias for the info-field enum
    date = datetime.now()
    with record_experiment(name=self.name, print_to_console=print_to_console,
            show_figs=show_figs, use_temp_dir=not keep_record, date=date,
            **experiment_record_kwargs) as exp_rec:
        start_time = time.time()
        try:
            # Record metadata up front, so a crash mid-run still leaves useful info on disk.
            exp_rec.info.set_field(ExpInfoFields.NAME, self.name)
            exp_rec.info.set_field(ExpInfoFields.ID, exp_rec.get_id())
            exp_rec.info.set_field(ExpInfoFields.DIR, exp_rec.get_dir())
            exp_rec.info.set_field(EIF.ARGS, self.get_args().items())
            root_function = self.get_root_function()
            exp_rec.info.set_field(EIF.FUNCTION, root_function.__name__)
            exp_rec.info.set_field(EIF.TIMESTAMP, str(date))
            module = inspect.getmodule(root_function)
            exp_rec.info.set_field(EIF.MODULE, module.__name__)
            exp_rec.info.set_field(
                EIF.FILE,
                module.__file__ if hasattr(module, '__file__') else '<unknown>')
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.STARTED)
            exp_rec.info.set_field(EIF.USER, getuser())
            # MAC address formatted AA:BB:CC:DD:EE:FF from uuid.getnode()'s 48-bit integer.
            exp_rec.info.set_field(
                EIF.MAC,
                ':'.join(("%012X" % getnode())[i:i + 2] for i in range(0, 12, 2)))
            results = self.function()  # Run the actual experiment
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.FINISHED)
        except KeyboardInterrupt:
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.STOPPED)
            exp_rec.write_error_trace(print_too=False)
            raise
        except Exception:
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.ERROR)
            exp_rec.write_error_trace(print_too=not raise_exceptions)
            if raise_exceptions:
                raise
            else:
                return exp_rec
        finally:
            # Always record runtime and figure locations, even if the run failed or was interrupted.
            exp_rec.info.set_field(EIF.RUNTIME, time.time() - start_time)
            fig_locs = exp_rec.get_figure_locs(include_directory=False)
            exp_rec.info.set_field(EIF.N_FIGS, len(fig_locs))
            exp_rec.info.set_field(EIF.FIGS, fig_locs)
        # NOTE(review): indentation below reconstructed from a collapsed source line.
        # 'results' is only bound on success, so these statements must run after the try
        # block -- reached on the success path, skipped by the raise/return error paths.
        exp_rec.save_result(results)
        for n in self._notes:
            exp_rec.info.add_note(n)
    if display_results and self.display_function is not None:
        self.display_function(results)
    ARTEMIS_LOGGER.info(
        '{border} Done {mode} Experiment: {name} {border}'.format(
            border='=' * 10, mode="Testing" if test_mode else "Running",
            name=self.name))
    set_test_mode(old_test_mode)
    return exp_rec
            # NOTE(review): this chunk begins mid-expression -- it is the tail of a
            # compare_predictors(...) call whose opening lines are outside this view.
            layer_sizes=[dataset.input_size, 500, dataset.n_categories],
            hidden_activation='sig',  # Sigmoidal hidden units
            output_activation='softmax',  # Softmax output unit, since we're doing multinomial classification
            w_init=0.01,
            rng=5),
        cost_function=negative_log_likelihood_dangerous,  # "Dangerous" because it doesn't check to see that output is normalized, but we know it is because it comes from softmax.
        optimizer=SimpleGradientDescent(eta=0.1),
        ).compile(),  # .compile() returns an IPredictor
        },
        offline_predictors={'RF': RandomForestClassifier(n_estimators=40)},
        minibatch_size=minibatch_size,
        test_epochs=sqrtspace(0, n_epochs, n_tests),
        evaluation_function=percent_argmax_correct  # Compares one-hot
        )
    # Results is a LearningCurveData object
    return learning_curve_data


if __name__ == '__main__':
    set_test_mode(False)
    records = compare_example_predictors(
        n_epochs=30,
        minibatch_size=20,
        )
    plot_learning_curves(records)
def run_and_record(function, experiment_id, print_to_console=True, show_figs=None, test_mode=None, keep_record=None, raise_exceptions=True, notes=(), prefix=None, **experiment_record_kwargs):
    """
    Run an experiment function.  Save the console output, return values, and
    any matplotlib figures generated to a new experiment folder in ~/.artemis/experiments

    :param function: A function which takes no args.
    :param experiment_id: The name under which you'd like to save this run of this experiment.
    :param print_to_console: Show the print output in the console (as well as saving it)
    :param show_figs: Show figures (as well as saving them to file)
    :param test_mode: True/False to set/clear the global test-mode flag for the run; None to leave it as-is
    :param keep_record: If falsy, results are written to a temporary directory instead of being kept
    :param raise_exceptions: True to re-raise experiment errors; False to record them and return the record
    :param notes: Iterable of note-strings to attach to the record
    :param prefix: Optional record-directory prefix, passed to record_experiment
    :param experiment_record_kwargs: Passed to the "record_experiment" context.
    :return: The ExperimentRecord object
    """
    if test_mode is None:
        test_mode = is_test_mode()

    old_test_mode = is_test_mode()
    set_test_mode(test_mode)
    ARTEMIS_LOGGER.info('{border} {mode} Experiment: {name} {border}'.format(
        border='=' * 10, mode="Testing" if test_mode else "Running",
        name=experiment_id))
    EIF = ExpInfoFields  # Shorthand alias for the info-field enum
    date = datetime.now()
    # NOTE(review): keep_record=None (the default) behaves like False here
    # (use_temp_dir=not None -> True); unlike Experiment.run there is no
    # "not test_mode" fallback -- confirm this is intended.
    with record_experiment(name=experiment_id, print_to_console=print_to_console,
            show_figs=show_figs, use_temp_dir=not keep_record, date=date,
            prefix=prefix, **experiment_record_kwargs) as exp_rec:
        start_time = time.time()
        try:
            # Record metadata up front, so a crash mid-run still leaves useful info on disk.
            exp_rec.info.set_field(ExpInfoFields.NAME, experiment_id)
            exp_rec.info.set_field(ExpInfoFields.ID, exp_rec.get_id())
            exp_rec.info.set_field(ExpInfoFields.DIR, exp_rec.get_dir())
            root_function, args = infer_function_and_derived_arg_values(
                function)
            exp_rec.info.set_field(EIF.ARGS, list(args.items()))
            # root_function = self.get_root_function()
            exp_rec.info.set_field(EIF.FUNCTION, root_function.__name__)
            exp_rec.info.set_field(EIF.TIMESTAMP, date)
            module = inspect.getmodule(root_function)
            exp_rec.info.set_field(EIF.MODULE, module.__name__)
            exp_rec.info.set_field(
                EIF.FILE,
                module.__file__ if hasattr(module, '__file__') else '<unknown>')
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.STARTED)
            exp_rec.info.set_field(EIF.USER, getuser())
            # MAC address formatted AA:BB:CC:DD:EE:FF from uuid.getnode()'s 48-bit integer.
            exp_rec.info.set_field(EIF.MAC, ':'.join(
                ("%012X" % getnode())[i:i + 2] for i in range(0, 12, 2)))
            exp_rec.info.set_field(EIF.PID, os.getpid())
            if inspect.isgeneratorfunction(root_function):
                # Generator experiments: save every emitted result (later saves overwrite earlier).
                for result in function():
                    exp_rec.save_result(result)
            else:
                result = function()
                exp_rec.save_result(result)
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.FINISHED)
        except KeyboardInterrupt:
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.STOPPED)
            exp_rec.write_error_trace(print_too=False)
            raise
        except Exception:
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.ERROR)
            exp_rec.write_error_trace(print_too=not raise_exceptions)
            if raise_exceptions:
                raise
            else:
                return exp_rec
        finally:
            # Always record runtime, figure info and notes, even on error/interrupt.
            exp_rec.info.set_field(EIF.RUNTIME, time.time() - start_time)
            fig_locs = exp_rec.get_figure_locs(include_directory=False)
            exp_rec.info.set_field(EIF.N_FIGS, len(fig_locs))
            exp_rec.info.set_field(EIF.FIGS, fig_locs)
            for n in notes:
                exp_rec.info.add_note(n)
    ARTEMIS_LOGGER.info(
        '{border} Done {mode} Experiment: {name} {border}'.format(
            border='=' * 10, mode="Testing" if test_mode else "Running",
            name=experiment_id))
    set_test_mode(old_test_mode)
    return exp_rec
def run_and_record(function, experiment_id, print_to_console=True, show_figs=None, test_mode=None, keep_record=None, raise_exceptions=True, notes=(), prefix=None, **experiment_record_kwargs):
    """
    Run an experiment function.  Save the console output, return values, and
    any matplotlib figures generated to a new experiment folder in ~/.artemis/experiments.
    This is a generator: for generator experiments it yields the ExperimentRecord after each
    saved result, and it always yields the record once more when the run completes.

    :param function: A function which takes no args.
    :param experiment_id: The name under which you'd like to save this run of this experiment.
    :param print_to_console: Show the print output in the console (as well as saving it)
    :param show_figs: Show figures (as well as saving them to file)
    :param test_mode: True/False to set/clear the global test-mode flag for the run; None to leave it as-is
    :param keep_record: If falsy, results are written to a temporary directory instead of being kept
    :param raise_exceptions: True to re-raise experiment errors; False to record them and yield the record
    :param notes: Iterable of note-strings to attach to the record
    :param prefix: Optional record-directory prefix, passed to record_experiment
    :param experiment_record_kwargs: Passed to the "record_experiment" context.
    :return: Yields the ExperimentRecord object
    """
    if test_mode is None:
        test_mode = is_test_mode()

    old_test_mode = is_test_mode()
    set_test_mode(test_mode)
    ARTEMIS_LOGGER.info('{border} {mode} Experiment: {name} {border}'.format(
        border='=' * 10, mode="Testing" if test_mode else "Running", name=experiment_id))
    EIF = ExpInfoFields  # Shorthand alias for the info-field enum
    date = datetime.now()
    with record_experiment(name=experiment_id, print_to_console=print_to_console,
            show_figs=show_figs, use_temp_dir=not keep_record, date=date,
            prefix=prefix, **experiment_record_kwargs) as exp_rec:
        start_time = time.time()
        try:
            # Record metadata up front, so a crash mid-run still leaves useful info on disk.
            exp_rec.info.set_field(ExpInfoFields.NAME, experiment_id)
            exp_rec.info.set_field(ExpInfoFields.ID, exp_rec.get_id())
            exp_rec.info.set_field(ExpInfoFields.DIR, exp_rec.get_dir())
            root_function = get_partial_chain(function)[0]
            args, undefined_args = get_defined_and_undefined_args(function)
            # Explicit raise (not `assert`) so this validation survives `python -O`.
            if len(undefined_args) != 0:
                raise AssertionError("Required arguments {} are still undefined!".format(undefined_args))
            try:
                exp_rec.info.set_field(EIF.ARGS, get_serialized_args(args))
            except PicklingError as err:
                # Bug fix: the '{}' placeholder was previously never filled in (the message
                # was logged with literal braces); also fixed the 'piclable' typo.
                ARTEMIS_LOGGER.error('Could not pickle arguments for experiment: {}.  Artemis demands that arguments be picklable.  If they are not, just make a new function.'.format(err))
                raise
            exp_rec.info.set_field(EIF.FUNCTION, root_function.__name__)
            exp_rec.info.set_field(EIF.TIMESTAMP, date)
            module = inspect.getmodule(root_function)
            exp_rec.info.set_field(EIF.MODULE, module.__name__)
            exp_rec.info.set_field(EIF.FILE, module.__file__ if hasattr(module, '__file__') else '<unknown>')
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.STARTED)
            exp_rec.info.set_field(EIF.USER, getuser())
            # MAC address formatted AA:BB:CC:DD:EE:FF from uuid.getnode()'s 48-bit integer.
            exp_rec.info.set_field(EIF.MAC, ':'.join(("%012X" % getnode())[i:i+2] for i in range(0, 12, 2)))
            exp_rec.info.set_field(EIF.PID, os.getpid())
            exp_rec.info.set_field(EIF.ARTEMIS_VERSION, ARTEMIS_VERSION)
            if inspect.isgeneratorfunction(root_function):
                # Generator experiments: save and yield the record after every emitted result.
                for result in function():
                    exp_rec.save_result(result)
                    yield exp_rec
            else:
                result = function()
                exp_rec.save_result(result)
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.FINISHED)
        except KeyboardInterrupt:
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.STOPPED)
            exp_rec.write_error_trace(print_too=False)
            raise
        except Exception:
            exp_rec.info.set_field(EIF.STATUS, ExpStatusOptions.ERROR)
            exp_rec.write_error_trace(print_too=not raise_exceptions)
            if raise_exceptions:
                raise
            else:
                yield exp_rec
                return
        finally:
            # Always record runtime, figure info and notes, even on error/interrupt.
            exp_rec.info.set_field(EIF.RUNTIME, time.time() - start_time)
            fig_locs = exp_rec.get_figure_locs(include_directory=False)
            exp_rec.info.set_field(EIF.N_FIGS, len(fig_locs))
            exp_rec.info.set_field(EIF.FIGS, fig_locs)
            for n in notes:
                exp_rec.info.add_note(n)
    ARTEMIS_LOGGER.info('{border} Done {mode} Experiment: {name} {border}'.format(
        border='=' * 10, mode="Testing" if test_mode else "Running", name=experiment_id))
    set_test_mode(old_test_mode)
    yield exp_rec
)


def run_and_plot(training_scheme):
    """Run a training scheme and plot the learning curves it produces."""
    learning_curves = training_scheme()
    plot_learning_curves(learning_curves)


def get_experiments():
    """Return a dict mapping experiment name -> zero-arg callable that runs and plots it."""
    training_schemes = {
        'adamax-showdown': mnist_adamax_showdown,
        'mlp-normalization': mlp_normalization,
        }
    # sc=scheme binds the loop variable at lambda-definition time, avoiding the
    # late-binding-closure pitfall where every lambda would see the last scheme.
    # NOTE(review): dict.iteritems() is Python 2 only.
    experiments = {name: lambda sc=scheme: run_and_plot(sc) for name, scheme in training_schemes.iteritems()}
    return experiments


if __name__ == '__main__':
    test_mode = False
    experiment = 'adamax-showdown'

    set_test_mode(test_mode)
    run_experiment(experiment, exp_dict=get_experiments(), show_figs=None, print_to_console=True)
                # NOTE(review): this chunk begins mid-function -- the raise/yield below are the
                # tail of a generator experiment (my_generator_exp) defined outside this view;
                # indentation here is reconstructed and should be confirmed against the original.
                raise Exception('Unlucky Number!')
            yield i

    X1 = my_generator_exp.add_variant(n_steps=5)
    X2 = my_generator_exp.add_variant(n_steps=5, poison_4 = True)
    rec1 = X1.run()
    rec2 = X2.run(raise_exceptions = False)
    # Clean run: last yielded value of 5 steps is 4.
    assert rec1.get_result() == 4
    # Poisoned run raises at step 4, so the last saved result is 3.
    assert rec2.get_result() == 3


if __name__ == '__main__':
    set_test_mode(True)
    test_get_latest_identifier()
    test_get_latest()
    test_experiment_with()
    test_start_experiment()
    test_accessing_experiment_dir()
    test_saving_result()
    test_variants()
    test_experiment_api(try_browse=False)
    test_figure_saving(show_them=False)
    test_get_variant_records_and_delete()
    test_experiments_play_well_with_debug()
    test_run_multiple_experiments()
    test_parallel_run_errors()
    test_invalid_arg_detection()
    test_invalid_arg_detection_2()