# Example 1
        self.log("CLEANING MODEL DIR")
        shutil.rmtree(model_dir, ignore_errors=True)


def add_version(name, dataset, flat, used_labels):
    """Register one model version on the global ``Versions`` container ``v``.

    Wires up the rico dataset and a torch dataloader for the given
    ``dataset`` factory class, then forwards the fixed training settings
    (15 epochs, batch size 4, no pretrained model) to ``v.add_version``.

    Note: ``flat`` is accepted but not used anywhere in this body.
    """
    rico_datasets = Datasets("../data/generated/rico.json",
                             train_data_load_function=load_data,
                             test_size=0.1,
                             validation_size=0,
                             used_labels=used_labels)
    dataset_factory = DatasetFactory(dataset, used_labels=used_labels)
    dataloader = DataLoaderCallableWrapper(BaseTorchDataLoader,
                                           datasets=rico_datasets,
                                           pytorch_dataset_factory=dataset_factory,
                                           batch_size=4)
    v.add_version(name,
                  dataloader=dataloader,
                  # "epocs" (sic) is the keyword this API expects.
                  epocs=15,
                  num_classes=len(used_labels),
                  generate_images=False,
                  pretrained_model=None,
                  used_labels=used_labels)


# Global version registry populated by add_version() above.
v = Versions()
# Register a single version, "r4", using the object-detection dataset class
# and the label set loaded from the semantic-colors file.
# NOTE(review): "semnantic" in the path looks misspelled but presumably
# matches the actual file on disk — do not "fix" without checking.
add_version("r4",
            ObjectDetectionModelDataSet,
            flat=False,
            used_labels=load_semantic_classes(
                "../data/generated/semnantic_colors.json"))
# Module-level entry point; existing experiment directories are preserved.
EXPERIMENT = Experiment(v, allow_delete_experiment_dir=False)
# Example 2
    def _get_master_bar_write_fn(self):
        def write_fn(line, end=None):
            if end is not None:
                print(line, end=end)
            else:
                print(end="\r")
                self.log(line)
        return write_fn

    def clean_experiment_dir(self, model_dir):
        """Delete ``model_dir`` and everything under it.

        ``ignore_errors=True`` makes this a best-effort cleanup: a missing
        or partially-removable directory will not raise.
        """
        self.log("CLEANING MODEL DIR")
        shutil.rmtree(model_dir, ignore_errors=True)


# Version container with no shared dataloader; the meaning of the two
# positional 1s is defined by Versions' signature (not visible here).
v = Versions(None, 1, 1)


# Combined data
# The dataloader is deferred behind a lambda, so the Datasets object is only
# constructed when the version actually runs.
v.add_version("complete_combined_data_model",
              dataloader=lambda: DataLoader(Datasets(
                  train_dataset_file_path=TRN_DATA_FILE,
                  test_dataset_file_path=TST_DATA_FILE,
                  train_data_load_function=LoadDatasetUtterance(LoadDatasetUtterance.COMBINED_FUNCTIONS),
                  validation_size=0)),
              # "custom_paramters" (sic) is the keyword this API uses
              # consistently elsewhere — keep the spelling.
              custom_paramters={
                  # Out-of-vocabulary test set, also built lazily.
                  "oov_df": lambda: DataLoader(Datasets(
                      test_dataset_file_path=TST_OOV_DATA_FILE,
                      test_data_load_function=LoadDatasetUtterance(LoadDatasetUtterance.COMBINED_FUNCTIONS))),
              })
                if idx % 3 == 0:
                    metric_container.reset()
            metric_container.log_metrics(['a', '2'])
            metric_container.reset_epoch()
        metric_container.log_metrics()
        self.log("trained: {}".format(self.model.train()))
        self.copy_related_files("experiments/exports")

    def evaluate_loop(self, input_fn):
        """Invoke ``input_fn`` once and return a metric container.

        The container always holds the same constant values for metrics
        'a' and 'b' — this appears to be a stub used to exercise the
        pipeline plumbing rather than a real evaluation.
        """
        self.log("calling input fn")
        input_fn()
        container = MetricContainer(['a', 'b'])
        for metric_name, value in (("a", 10), ("b", 2)):
            getattr(container, metric_name).update(value, 1)
        return container

    def export_model(self):
        """Stub export step: only logs that the export happened."""
        self.log("YAY! Exported!")


# Shared dataloader wrapper used by every version registered below.
dl = DataLoaderCallableWrapper(TestingDataLoader)
v = Versions(dl, 1, 10, learning_rate=0.01)
v.add_version("version1", hyperparameter="a hyperparameter")
v.add_version("version2", custom_paramters={"hyperparameter": None})
v.add_version("version3", custom_paramters={"hyperparameter": None})
v.add_version("version4", custom_paramters={"hyperparameter": None})
# Exercise both filter modes: drop version3 via the blacklist, then keep
# only version1/version2 via the whitelist.
v.filter_versions(blacklist_versions=["version3"])
v.filter_versions(whitelist_versions=["version1", "version2"])
# Added after filtering — presumably survives the whitelist above; verify
# against Versions.filter_versions semantics.
v.add_version("version5", custom_paramters={"hyperparameter": None})
EXPERIMENT = TestingExperiment(versions=v)
                next_func = self.dm.get_next_system_concern()
            except Exception:
                # print(next_func, dialogue, function_map, asked, outed, completed_functions, self.dm.context.ac, sep="\n")
                raise
        # If there are any functions in the function_map not in the completed_functions
        # the dialogue failed
        if len([f for f in function_map.keys() if f not in completed_functions]) != 0:
            # print(2, *self.dm.context.contexts, sep="\n")
            # print("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
            # print(next_func, dialogue, function_map, asked, outed, completed_functions, self.dm.context.ac, sep="\n")
            # raise
            return False
        return True


# Single-version setup; silence the template functions' console output.
v = Versions(None, 1, 1)
templates.print_function_output = False

v.add_version('complete_combined',
              order=1,
              dataloader=DataLoaderCallableWrapper(DataLoader,
                                                   test_file_path=TST_DATA_FILE,
                                                   test_oov_file_path=TST_OOV_DATA_FILE,
                                                   function_filter=None),
              # "custom_paramters" (sic) matches the API keyword used
              # elsewhere in this codebase.
              custom_paramters={
                  # Path suggests ULMFiT checkpoints from the combined-data
                  # model run — confirm against the training outputs.
                  'root_model_path': "outputs/experiment_ckpts/ulmfit-complete_combined_data_model",
                  'model': ModelCombined,
                  # 'root_model_path': "outputs/experiment_ckpts/ulmfit-generated_data_model",
                  'functions': [templates.order_taxi, templates.book_room,
                                templates.book_ticket, templates.book_table]})
# Example 5
from mlpipeline import Versions, MetricContainer
from mlpipeline.base import ExperimentABC, DataLoaderABC


class Experiment(ExperimentABC):
    """Minimal no-op experiment: every pipeline hook is a stub.

    ``evaluate_loop`` returns an empty ``MetricContainer``; every other
    override does nothing.
    """

    def setup_model(self):
        """No model to set up."""

    def pre_execution_hook(self, **kwargs):
        """Nothing to do before execution."""

    def train_loop(self, input_fn, **kwargs):
        """Training is a no-op."""

    def evaluate_loop(self, input_fn, **kwargs):
        """Return an empty metric container as the evaluation result."""
        return MetricContainer()

    def export_model(self, **kwargs):
        """No export step."""

    def _export_model(self, export_dir):
        """No internal export step."""

    def post_execution_hook(self, **kwargs):
        """Nothing to do after execution."""


# Register a single version and expose the module-level experiment object
# that the pipeline runner picks up.
v = Versions()
v.add_version("Run-1")
EXPERIMENT = Experiment(v)