def get_pipeline_config_options(self): options = [ ConfigOption(name="over_sampling_methods", default=list(self.over_sampling_methods.keys()), type=str, list=True, choices=list(self.over_sampling_methods.keys())), ConfigOption(name="under_sampling_methods", default=list(self.under_sampling_methods.keys()), type=str, list=True, choices=list(self.under_sampling_methods.keys())), ConfigOption(name="target_size_strategies", default=list(self.target_size_strategies.keys()), type=str, list=True, choices=list(self.target_size_strategies.keys())), ] return options
def get_pipeline_config_options(self): options = [ ConfigOption("ensemble_size", default=50, type=int, info="Build a ensemble of well performing autonet configurations. 0 to disable."), ConfigOption("ensemble_only_consider_n_best", default=30, type=int, info="Only consider the n best models for ensemble building."), ConfigOption("ensemble_sorted_initialization_n_best", default=0, type=int, info="Initialize ensemble with n best models.") ] return options
def get_pipeline_config_options(self): return [ ConfigOption("result_logger_dir", default=".", type="directory"), ConfigOption("optimize_metric", default="a", type=str), ]
def get_pipeline_config_options(self): options = [ ConfigOption("refit_config", default=None, type='directory'), ConfigOption("refit_budget", default=None, type=int), ConfigOption("confirmation_gmail_user", default=None, type=str), ] return options
def get_pipeline_config_options(self): options = [ ConfigOption( name="batch_loss_computation_techniques", default=list(self.batch_loss_computation_techniques.keys()), type=str, list=True, choices=list(self.batch_loss_computation_techniques.keys())), ConfigOption("cuda", default=True, type=to_bool, choices=[True, False]), ConfigOption("torch_num_threads", default=1, type=int), ConfigOption( "full_eval_each_epoch", default=False, type=to_bool, choices=[True, False], info= "Whether to evaluate everything every epoch. Results in more useful output" ), ConfigOption( "best_over_epochs", default=False, type=to_bool, choices=[True, False], info="Whether to report the best performance occurred to BOHB") ] for name, technique in self.training_techniques.items(): options += technique.get_pipeline_config_options() return options
def get_pipeline_config_options(self):
    options = [
        ConfigOption('ensemble_size', default=0, type=int),
        ConfigOption('ensemble_only_consider_n_best', default=0, type=int),
        ConfigOption('ensemble_sorted_initialization_n_best', default=0, type=int)
    ]
    return options
def get_pipeline_config_options(self): options = [ ConfigOption("test_split", default=0.0, type=float), ConfigOption("problem_type", default='feature_classification', type=str, choices=['feature_classification', 'feature_multilabel', 'feature_regression', 'image_classification']), ConfigOption("data_manager_verbose", default=False, type=to_bool), ConfigOption("test_instances", default=None, type=str) ] return options
def get_pipeline_config_options(self):
    options = [
        ConfigOption('plot_logs', default=None, type='str', list=True),
        ConfigOption('output_folder', default=None, type='directory'),
        ConfigOption('agglomeration', default='mean', choices=['mean', 'median']),
        ConfigOption('scale_uncertainty', default=1, type=float),
        ConfigOption('font_size', default=12, type=int)
    ]
    return options
def get_pipeline_config_options(self):
    options = [
        ConfigOption('result_dir', default=None, type='directory', required=True),
        ConfigOption('name', default=None, type=str, required=True)
    ]
    return options
def get_pipeline_config_options(self):
    options = [
        ConfigOption('only_finished_runs', default=True, type=to_bool),
        ConfigOption('result_dir', default=None, type='directory', required=True),
    ]
    return options
def get_pipeline_config_options(self): options = [ ConfigOption("autonet_configs", default=None, type='directory', list=True, required=True), ConfigOption("autonet_config_slice", default=None, type=str) ] return options
def get_pipeline_config_options(): options = [ ConfigOption("early_stopping_patience", default=float("inf"), type=float), ConfigOption("early_stopping_reset_parameters", default=False, type=to_bool) ] return options
def get_pipeline_config_options(self): options = [ ConfigOption("use_dataset_metric", default=False, type=to_bool), ConfigOption("use_dataset_max_runtime", default=False, type=to_bool), ConfigOption("working_dir", default=None, type='directory'), ConfigOption("network_interface_name", default=None, type=str) ] return options
def get_pipeline_config_options(self): options = [ ConfigOption("task_id", default=-1, type=int), ConfigOption("run_id", default="0", type=str), ConfigOption("log_level", default="info", type=str, choices=list(self.logger_settings.keys())) ] return options
def get_pipeline_config_options(self): options = [ ConfigOption( "default_dataset_download_dir", default=ConfigFileParser.get_autonet_home(), type='directory', info="Directory default datasets will be downloaded to."), ConfigOption("dataloader_worker", default=1, type=int), ConfigOption("dataloader_cache_size_mb", default=0, type=int) ] return options
def get_pipeline_config_options(self): options = [ ConfigOption("validation_split", default=0.0, type=float, choices=[0, 1], info='In range [0, 1). Part of train dataset used for validation. Ignored in fit if cv_splits > 1 or valid data given.'), ConfigOption("cv_splits", default=1, type=int, info='The number of CV splits.'), ConfigOption("use_stratified_cv_split", default=self.use_stratified_cv_split_default, type=to_bool, choices=[True, False]), ConfigOption("min_budget_for_cv", default=0, type=float, info='Specify minimum budget for cv. If budget is smaller only evaluate a single fold.'), ConfigOption("half_num_cv_splits_below_budget", default=0, type=float, info='Incorporate number of cv splits in budget: Use half the number of specified cv splits below given budget.') ] return options
def get_pipeline_config_options(self): options = [ ConfigOption("instances", default=None, type='directory', required=True), ConfigOption("instance_slice", default=None, type=str), ConfigOption("dataset_root", default=ConfigFileParser.get_autonet_home(), type='directory'), ] return options
def get_pipeline_config_options(self): options = [ ConfigOption("run_id_range", type=str, default=None), ConfigOption("log_level", default="info", type=str, choices=list(self.logger_settings.keys())), ConfigOption("benchmark_name", default=None, type=str, required=True) ] return options
def get_pipeline_config_options(self): options = [ ConfigOption("autonet_configs", default=None, type='directory', list=True, required=True), ConfigOption("autonet_config_root", default=ConfigFileParser.get_autonet_home(), type='directory'), ConfigOption("autonet_config_slice", default=None, type=str) ] return options
def get_pipeline_config_options(self):
    options = [
        ConfigOption(name='categorical_features', default=None, type=to_bool, list=True,
                     info='List of booleans that specifies for each feature whether it is categorical.'),
        ConfigOption(name='dataset_name', default=None, type=str)
    ]
    return options
def get_pipeline_config_options(self): options = [ ConfigOption(name="networks", default=list(self.networks.keys()), type=str, list=True, choices=list(self.networks.keys())), ConfigOption(name="final_activation", default=self.default_final_activation, type=str, choices=list(self.final_activations.keys())) ] return options
def get_pipeline_config_options(self): options = [ ConfigOption(name="initialization_methods", default=list(self.initialization_methods.keys()), type=str, list=True, choices=list(self.initialization_methods.keys())), ConfigOption(name="initializer", default=self.default_initializer, type=str, choices=list(self.initializers.keys())) ] return options
def get_pipeline_config_options(self): options = [ ConfigOption('dataset_order', default=None, type=int, list=True, info="Only used for multiple datasets."), #autonet.refit sets this to false to avoid refit budget issues ConfigOption('increase_number_of_trained_datasets', default=False, type=to_bool, info="Only used for multiple datasets.") ] return options
def get_pipeline_config_options(self): options = [ ConfigOption( name="train_metric", default=self.default_train_metric, type=str, choices=list(self.metrics.keys()), info="This is the meta train metric BOHB will try to optimize." ), ConfigOption(name="additional_metrics", default=[], type=str, list=True, choices=list(self.metrics.keys())) ] return options
def get_pipeline_config_options(self): options = [ ConfigOption("additional_trajectories", default=list(), type="directory", list=True) ] return options
def get_pipeline_config_options(self): options = [ ConfigOption( name="batch_loss_computation_techniques", default=list(self.batch_loss_computation_techniques.keys()), type=str, list=True, choices=list(self.batch_loss_computation_techniques.keys())), ConfigOption("minimize", default=self.default_minimize_value, type=to_bool, choices=[True, False]), ConfigOption("cuda", default=True, type=to_bool, choices=[True, False]), ConfigOption("save_checkpoints", default=False, type=to_bool, choices=[True, False], info="Wether to save state dicts as checkpoints."), ConfigOption("tensorboard_min_log_interval", default=30, type=int), ConfigOption("tensorboard_images_count", default=0, type=int), ConfigOption("evaluate_on_train_data", default=True, type=to_bool), ] for name, technique in self.training_techniques.items(): options += technique.get_pipeline_config_options() for name, technique in self.batch_loss_computation_techniques.items(): options += technique.get_pipeline_config_options() return options
def get_pipeline_config_options(self): options = [ ConfigOption(name="additional_logs", default=[], type=str, list=True, choices=list(self.log_functions.keys())), ] return options
def get_pipeline_config_options(self): options = [ ConfigOption(name="embeddings", default=list(self.embedding_modules.keys()), type=str, list=True, choices=list(self.embedding_modules.keys())), ] return options
def get_pipeline_config_options(self): options = [ ConfigOption(name="optimizer", default=list(self.optimizer.keys()), type=str, list=True, choices=list(self.optimizer.keys())), ] return options
def get_pipeline_config_options(self): options = [ ConfigOption('dataset_order', default=None, type=int, list=True, info="Order in which datasets are considered."), #autonet.refit sets this to false to avoid refit budget issues ConfigOption( 'increase_number_of_trained_datasets', default=True, type=to_bool, info= "Wether to increase the number of considered datasets with each successive halfing iteration." ) ] return options