Code Example #1
    def get_metrics_evaluations(self, prediction, ground_truth):
        if self.compiler is None:
            return None

        metrics = tb.L([self.compiler.loss]) + self.compiler.metrics
        loss_dict = dict()
        for a_metric in metrics:
            if hasattr(a_metric, "name"):
                name = a_metric.name  # metric objects (e.g. Metrics subclasses, Keras metrics) expose a name
            elif hasattr(a_metric, "__name__"):
                name = a_metric.__name__  # plain loss/metric functions only have __name__
            else:
                name = "unknown"
            loss_dict[name] = []

            for a_prediction, a_y_test in zip(prediction, ground_truth):
                if hasattr(a_metric, "reset_states"):
                    a_metric.reset_states()  # clear accumulated state of stateful metrics between samples
                loss = a_metric(a_prediction[None], a_y_test[None])  # [None] adds a batch dimension of size 1
                loss_dict[name].append(np.array(loss).item())
        return tb.pd.DataFrame(loss_dict)
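
A minimal usage sketch of the method above; `model`, `predictions` and `y_test` are assumed names for the surrounding wrapper instance and a held-out split, none of which appear in the source:

    metrics_df = model.get_metrics_evaluations(predictions, y_test)  # hypothetical caller objects
    if metrics_df is not None:    # None is returned when compile() was never called
        print(metrics_df.mean())  # per-metric average across all samples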
Code Example #2
    def compile(self, loss=None, optimizer=None, metrics=None, compile_model=True, **kwargs):
        """ Updates compiler attributes. This acts like a setter.

        .. note:: * In the PyTorch case, this method is equivalent to setting attributes of `compiler` directly.
                  * In the TF case it is not, since TF requires an actual recompilation of the model
                    before the changes take effect.

        Remember:

        * Must be run prior to the fit method.
        * Can be run only after the model attribute has been defined.

        """
        pkg = self.hp.pkg
        if self.hp.pkg_name == 'tensorflow':
            if loss is None:
                loss = pkg.keras.losses.MeanSquaredError()
            if optimizer is None:
                optimizer = pkg.keras.optimizers.Adam(self.hp.learning_rate)
            if metrics is None:
                metrics = tb.List()  # [pkg.keras.metrics.MeanSquaredError()]
        elif self.hp.pkg_name == 'torch':
            if loss is None:
                loss = pkg.nn.MSELoss()
            if optimizer is None:
                optimizer = pkg.optim.Adam(self.model.parameters(), lr=self.hp.learning_rate)
            if metrics is None:
                metrics = tb.List()  # [tmp.MeanSquareError()]
        # Create a new compiler object
        self.compiler = tb.Struct(loss=loss, optimizer=optimizer, metrics=tb.L(metrics), **kwargs)

        # In the TF case, also pass the specs to the underlying Keras model (unless compile_model=False).
        if self.hp.pkg_name == "tensorflow" and compile_model:
            self.model.compile(**self.compiler.__dict__)
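
A usage sketch of the call order implied by the docstring; `MyModel`, `hp` and `data` are hypothetical names for a wrapper subclass and its inputs:

    model = MyModel(hp, data)   # hypothetical: the model attribute must exist before compiling
    model.compile()             # falls back to MSE loss + Adam with hp.learning_rate
    model.fit()                 # compile() must run before fit(), as the docstring notes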
Code Example #3
    def fit(self, shuffle_train_test=True, save=True, **kwargs):
        self.performance = tb.L()
        for i in range(self.size):
            print('\n\n', f" Training Model {i} ".center(100, "*"), '\n\n')
            if shuffle_train_test:
                self.data.split_my_data_now(seed=np.random.randint(0, 1000))  # shuffle data (shared among models)
            self.models[i].fit(**kwargs)
            self.performance.append(self.models[i].evaluate(idx=slice(0, -1), viz=False))
            if save:
                self.models[i].save_class()
                self.performance.save_pickle(self.hp_class.save_dir / "performance.pkl")
        print("\n\n", f" Finished fitting the ensemble ".center(100, ">"), "\n")
Code Example #4
    def refresh(self, sch=None):
        # reflect() fails if the database has multiple schemas and no schema is specified.
        self.meta.reflect(bind=self.eng, schema=sch or self.sch)
        self.insp = inspect(subject=self.eng)

        self.schema = tb.L(self.insp.get_schema_names())
        self.schema.append(None)  # None stands for the default schema
        self.tables = self.schema.apply(
            lambda x: self.insp.get_table_names(schema=x))
        # self.tables = [self.meta.tables[tmp] for tmp in self.meta.tables.keys()]
        self.views = self.schema.apply(
            lambda x: self.insp.get_view_names(schema=x))
        self.sch_tab = tb.Struct.from_keys_values(self.schema, self.tables)
        self.sch_vws = tb.Struct.from_keys_values(self.schema, self.views)

        return self
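
A usage sketch, assuming the class above wraps a SQLAlchemy engine in `self.eng` and a MetaData object in `self.meta`; the `Database` constructor shown is hypothetical, as only `refresh` appears in the source:

    db = Database("sqlite:///example.db")  # hypothetical constructor
    db.refresh()                           # re-reads schema names, tables and views from the engine
    print(db.sch_tab)                      # tables grouped per schema (the None key is the default schema)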
Code Example #5
    @classmethod
    def from_class_weights(cls, path, hparam_class=None, data_class=None, device_name=None):
        path = tb.P(path)

        # Hyper-parameters: rebuild via the provided class, otherwise unpickle the saved object.
        if hparam_class is not None: hp_obj = hparam_class.from_saved(path)
        else: hp_obj = (path / HyperParam.subpath + ".HyperParam.pkl").readit()
        if device_name: hp_obj.device_name = device_name

        # Data reader: same strategy, then re-attach the (possibly updated) hyper-parameters.
        if data_class is not None: d_obj = data_class.from_saved(path, hp=hp_obj)
        else: d_obj = (path / DataReader.subpath / "data_reader.DataReader.pkl").readit()
        d_obj.hp = hp_obj

        model_obj = cls(hp_obj, d_obj)
        model_obj.load_weights(path.search('*_save_*')[0])  # first artifact matching the saved-weights pattern
        model_obj.history = (path / "metadata/history.pkl").readit(notfound=tb.L())

        print(f"Class {model_obj.__class__} Loaded Successfully.")
        return model_obj
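
A usage sketch of restoring a model from a saved folder; `MyModel` and the path are hypothetical:

    model = MyModel.from_class_weights("~/saved_models/run_01")  # hypothetical subclass and path
    print(model.history)  # history unpickled from metadata/history.pkl, or an empty list if missing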
Code Example #6
def construct_path(path_list):
    """Join the unique entries of `path_list` into a single ';'-separated PATH string."""
    from functools import reduce
    return reduce(lambda x, y: x + ";" + y,
                  tb.L(tb.pd.unique(path_list)).apply(str))
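
A small usage example of the helper above; the input values are illustrative only:

    # pd.unique preserves first-seen order, so duplicates are dropped before joining with ';'.
    print(construct_path([r"C:\tools", r"C:\bin", r"C:\tools"]))   # -> C:\tools;C:\bin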
Code Example #7
def get_list_of_executables_defined_in_shell():
    # Windows-specific: scan every directory on the Path environment variable for *.exe files.
    return tb.L(tb.os.environ["Path"].split(";")).apply(
        lambda x: tb.P(x).search("*.exe")).flatten().print()