def __init__(self, model, feature_names=None, label_names=None,
             norm_params=None, normalize=(True, False),
             one_hot_categories=None):
    """
    Parameters
    ----------
    model : OBJ
        Initialized model object
    feature_names : list
        Ordered list of feature names.
    label_names : list
        Ordered list of label (output) names.
    norm_params : dict, optional
        Dictionary mapping feature and label names (keys) to
        normalization parameters (mean, stdev), by default None
    normalize : bool | tuple, optional
        Boolean flag(s) as to whether features and labels should be
        normalized. Possible values:
        - True means normalize both
        - False means don't normalize either
        - Tuple of flags (normalize_feature, normalize_label)
        by default (True, False)
    one_hot_categories : dict, optional
        Features to one-hot encode using given categories, if None do
        not run one-hot encoding, by default None
    """
    self._model = model

    if isinstance(feature_names, str):
        feature_names = [feature_names]
    elif isinstance(feature_names, (np.ndarray, pd.Index)):
        feature_names = feature_names.tolist()

    self._feature_names = feature_names

    if isinstance(label_names, str):
        label_names = [label_names]
    elif isinstance(label_names, (np.ndarray, pd.Index)):
        label_names = label_names.tolist()

    self._label_names = label_names

    if norm_params is None:
        norm_params = {}

    self._norm_params = norm_params
    self._normalize = self._parse_normalize(normalize)

    if one_hot_categories is not None:
        PreProcess.check_one_hot_categories(one_hot_categories)

    self._one_hot_categories = one_hot_categories
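# Illustrative sketch of the flag semantics accepted by ``normalize``. The
# class name ``ModelBase`` and the variable ``rf`` (any initialized model
# object) are assumptions for the example, not names defined above:
#
#     >>> model = ModelBase(rf, feature_names=['temp', 'pressure'],
#     ...                   label_names='wind_speed',
#     ...                   normalize=(True, False))  # scale features only
#     >>> ModelBase(rf, normalize=True)   # scale features and labels
#     >>> ModelBase(rf, normalize=False)  # scale neither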
def _check_one_hot_feature_names(self, feature_names):
    """
    Check one_hot_feature_names, update feature_names to remove features
    that were one-hot encoded and add in new one-hot features if needed

    Parameters
    ----------
    feature_names : list
        Input feature names
    """
    one_hot_feature_names = self.make_one_hot_feature_names(
        feature_names, self.one_hot_categories)
    if one_hot_feature_names != self.feature_names:
        check_names = feature_names.copy()
        if self.label_names is not None:
            check_names += self.label_names

        PreProcess.check_one_hot_categories(self.one_hot_categories,
                                            feature_names=check_names)
        self._feature_names = one_hot_feature_names
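# Illustrative sketch of the one-hot name expansion this helper checks.
# The feature and category names are hypothetical, and the assumption here
# is that encoded columns are named after their categories:
#
#     >>> one_hot_categories = {'month': ['jan', 'feb', 'mar']}
#     >>> # make_one_hot_feature_names maps ['temp', 'month'] to
#     >>> # ['temp', 'jan', 'feb', 'mar'], replacing the encoded feature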
@classmethod
def build_trained(cls, features, label, normalize=True,
                  one_hot_categories=None, shuffle=True, save_path=None,
                  compile_kwargs=None, parse_kwargs=None,
                  fit_kwargs=None):
    """
    Build Random Forest Model with given kwargs and then train with
    given features, labels, and kwargs

    Parameters
    ----------
    features : pandas.DataFrame
        Model features
    label : pandas.DataFrame
        Label to train on
    normalize : bool | tuple, optional
        Boolean flag(s) as to whether features and labels should be
        normalized. Possible values:
        - True means normalize both
        - False means don't normalize either
        - Tuple of flags (normalize_feature, normalize_label)
        by default True
    one_hot_categories : dict, optional
        Features to one-hot encode using given categories, if None do
        not run one-hot encoding, by default None
    shuffle : bool
        Flag to randomly subset the validation data and batch selection
        from features and labels.
    save_path : str
        Directory path to save model to. The RandomForest model will be
        saved to the directory while the framework parameters will be
        saved in json.
    compile_kwargs : dict
        kwargs for sklearn.ensemble.RandomForestRegressor
    parse_kwargs : dict
        kwargs for cls.parse_features
    fit_kwargs : dict
        kwargs for sklearn.ensemble.RandomForestRegressor.fit

    Returns
    -------
    model : RandomForestModel
        Initialized and trained RandomForestModel obj
    """
    if compile_kwargs is None:
        compile_kwargs = {}

    _, feature_names = cls._parse_data(features)
    _, label_name = cls._parse_data(label)

    model = cls.compile_model(**compile_kwargs)
    if one_hot_categories is not None:
        check_names = feature_names + label_name
        PreProcess.check_one_hot_categories(one_hot_categories,
                                            feature_names=check_names)
        feature_names = cls.make_one_hot_feature_names(
            feature_names, one_hot_categories)

    model = cls(model, feature_names=feature_names,
                label_names=label_name, normalize=normalize,
                one_hot_categories=one_hot_categories)

    model.train_model(features, label, shuffle=shuffle,
                      parse_kwargs=parse_kwargs, fit_kwargs=fit_kwargs)

    if save_path is not None:
        model.save_model(save_path)

    return model
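# Usage sketch for build_trained. The DataFrame contents, the
# ``n_estimators`` value, and the save path are hypothetical:
#
#     >>> import pandas as pd
#     >>> features = pd.DataFrame({'x1': [0.1, 0.2, 0.3],
#     ...                          'x2': [1.0, 2.0, 3.0]})
#     >>> label = pd.DataFrame({'y': [0.5, 0.7, 0.9]})
#     >>> model = RandomForestModel.build_trained(
#     ...     features, label,
#     ...     compile_kwargs={'n_estimators': 100},
#     ...     save_path='./rf_model')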
@classmethod
def build(cls, p_fun, feature_names, label_names,
          normalize=(True, False), one_hot_categories=None,
          loss_weights=(0.5, 0.5), hidden_layers=None, input_layer=None,
          output_layer=None, layers_obj=None, metric='mae',
          initializer=None, optimizer=None, learning_rate=0.01,
          history=None, kernel_reg_rate=0.0, kernel_reg_power=1,
          bias_reg_rate=0.0, bias_reg_power=1, name=None):
    """
    Build phygnn model from given features, layers and kwargs

    Parameters
    ----------
    p_fun : function
        Physics function to guide the neural network loss function.
        This function must take (phygnn, y_true, y_predicted, p,
        **p_kwargs) as arguments with datatypes
        (PhysicsGuidedNeuralNetwork, tf.Tensor, np.ndarray, np.ndarray).
        The function must return a tf.Tensor object with a single
        numeric loss value (output.ndim == 0).
    feature_names : list
        Ordered list of feature names.
    label_names : list
        Ordered list of label (output) names.
    normalize : bool | tuple, optional
        Boolean flag(s) as to whether features and labels should be
        normalized. Possible values:
        - True means normalize both
        - False means don't normalize either
        - Tuple of flags (normalize_feature, normalize_label)
        by default (True, False)
    one_hot_categories : dict, optional
        Features to one-hot encode using given categories, if None do
        not run one-hot encoding, by default None
    loss_weights : tuple, optional
        Loss weights for the neural network y_true vs y_predicted
        and for the p_fun loss, respectively. For example,
        loss_weights=(0.0, 1.0) would simplify the phygnn loss
        function to just the p_fun output.
    hidden_layers : list, optional
        List of dictionaries of key word arguments for each hidden
        layer in the NN. Dense linear layers can be input with their
        activations or separately for more explicit control over the
        layer ordering. For example, this is a valid input for
        hidden_layers that will yield 8 hidden layers (10 layers
        including input+output):
            [{'units': 64, 'activation': 'relu', 'dropout': 0.01},
             {'units': 64},
             {'batch_normalization': {'axis': -1}},
             {'activation': 'relu'},
             {'dropout': 0.01},
             {'class': 'Flatten'},
             ]
    input_layer : None | bool | dict
        Input layer specification. Can be a dictionary similar to
        hidden_layers specifying a dense / conv / lstm layer. Will
        default to a keras InputLayer with input shape = n_features.
        Can be False if the input layer will be included in the
        hidden_layers input.
    output_layer : None | bool | list | dict
        Output layer specification. Can be a list/dict similar to
        hidden_layers input specifying a dense layer with activation.
        For example, for a classification problem with a single output,
        output_layer should be
        [{'units': 1}, {'activation': 'sigmoid'}]. This defaults to a
        single dense layer with no activation (best for regression
        problems). Can be False if the output layer will be included
        in the hidden_layers input.
    layers_obj : None | phygnn.utilities.tf_layers.Layers
        Optional initialized Layers object to set as the model layers
        including pre-set weights. This option will override the
        hidden_layers, input_layer, and output_layer arguments.
    metric : str, optional
        Loss metric option for the NN loss function (not the physical
        loss function). Must be a valid key in
        phygnn.loss_metrics.METRICS
    initializer : tensorflow.keras.initializers, optional
        Instantiated initializer object. None defaults to GlorotUniform
    optimizer : tensorflow.keras.optimizers | dict | None
        Instantiated tf.keras.optimizers object or a dict optimizer
        config from tf.keras.optimizers.get_config(). None defaults
        to Adam.
    learning_rate : float, optional
        Optimizer learning rate. Not used if optimizer input arg is a
        pre-initialized object or if optimizer input arg is a config
        dict.
    history : None | pd.DataFrame, optional
        Learning history if continuing a training session.
    kernel_reg_rate : float, optional
        Kernel regularization rate. Increasing this value above zero
        will add a structural loss term to the loss function that
        disincentivizes large hidden layer weights and should reduce
        model complexity. Setting this to 0.0 will disable kernel
        regularization.
    kernel_reg_power : int, optional
        Kernel regularization power. kernel_reg_power=1 is L1
        regularization (lasso regression), and kernel_reg_power=2 is
        L2 regularization (ridge regression).
    bias_reg_rate : float, optional
        Bias regularization rate. Increasing this value above zero
        will add a structural loss term to the loss function that
        disincentivizes large hidden layer biases and should reduce
        model complexity. Setting this to 0.0 will disable bias
        regularization.
    bias_reg_power : int, optional
        Bias regularization power. bias_reg_power=1 is L1
        regularization (lasso regression), and bias_reg_power=2 is L2
        regularization (ridge regression).
    name : None | str
        Optional model name for debugging.

    Returns
    -------
    model : PhygnnModel
        Initialized PhygnnModel instance
    """
    if isinstance(label_names, str):
        label_names = [label_names]

    if one_hot_categories is not None:
        check_names = feature_names + label_names
        PreProcess.check_one_hot_categories(one_hot_categories,
                                            feature_names=check_names)
        feature_names = cls.make_one_hot_feature_names(
            feature_names, one_hot_categories)

    model = PhysicsGuidedNeuralNetwork(p_fun,
                                       loss_weights=loss_weights,
                                       n_features=len(feature_names),
                                       n_labels=len(label_names),
                                       hidden_layers=hidden_layers,
                                       input_layer=input_layer,
                                       output_layer=output_layer,
                                       layers_obj=layers_obj,
                                       metric=metric,
                                       initializer=initializer,
                                       optimizer=optimizer,
                                       learning_rate=learning_rate,
                                       history=history,
                                       kernel_reg_rate=kernel_reg_rate,
                                       kernel_reg_power=kernel_reg_power,
                                       bias_reg_rate=bias_reg_rate,
                                       bias_reg_power=bias_reg_power,
                                       feature_names=feature_names,
                                       output_names=label_names,
                                       name=name)

    model = cls(model, feature_names=feature_names,
                label_names=label_names, normalize=normalize,
                one_hot_categories=one_hot_categories)

    return model
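# Usage sketch for build. The ``p_fun`` body and layer sizes are
# hypothetical; a real physics function would encode a domain constraint:
#
#     >>> import tensorflow as tf
#     >>> def p_fun(phygnn, y_true, y_predicted, p):
#     ...     # scalar penalty when predictions fall below the physical
#     ...     # data ``p`` (output.ndim == 0, as required above)
#     ...     return tf.reduce_mean(tf.nn.relu(p - y_predicted))
#     >>> model = PhygnnModel.build(
#     ...     p_fun, ['x1', 'x2'], ['y'],
#     ...     hidden_layers=[{'units': 64, 'activation': 'relu'},
#     ...                    {'units': 64, 'activation': 'relu'}],
#     ...     loss_weights=(0.5, 0.5), name='demo')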
@classmethod
def build(cls, feature_names, label_names, normalize=(True, False),
          one_hot_categories=None, hidden_layers=None,
          learning_rate=0.001, loss="mean_squared_error",
          metrics=('mae', 'mse'), optimizer_class=Adam, **kwargs):
    """
    Build tensorflow sequential model from given features, layers and
    kwargs

    Parameters
    ----------
    feature_names : list
        Ordered list of feature names.
    label_names : list
        Ordered list of label (output) names.
    normalize : bool | tuple, optional
        Boolean flag(s) as to whether features and labels should be
        normalized. Possible values:
        - True means normalize both
        - False means don't normalize either
        - Tuple of flags (normalize_feature, normalize_label)
        by default (True, False)
    one_hot_categories : dict, optional
        Features to one-hot encode using given categories, if None do
        not run one-hot encoding, by default None
    hidden_layers : list, optional
        List of tensorflow layers.Dense kwargs (dictionaries); if None,
        use a single linear layer, by default None
    learning_rate : float, optional
        tensorflow optimizer learning rate, by default 0.001
    loss : str, optional
        Name of objective function, by default "mean_squared_error"
    metrics : list, optional
        List of metrics to be evaluated by the model during training
        and testing, by default ('mae', 'mse')
    optimizer_class : tf.keras.optimizers, optional
        Optional explicit request of optimizer. This should be a class
        that will be instantiated in the TfModel.compile_model()
        method. The default is the Adam optimizer.
    kwargs : dict
        kwargs for tensorflow.keras.Model.compile

    Returns
    -------
    model : TfModel
        Initialized TfModel obj
    """
    if isinstance(label_names, str):
        label_names = [label_names]

    if one_hot_categories is not None:
        check_names = feature_names + label_names
        PreProcess.check_one_hot_categories(one_hot_categories,
                                            feature_names=check_names)
        feature_names = cls.make_one_hot_feature_names(
            feature_names, one_hot_categories)

    model = cls.compile_model(len(feature_names),
                              n_labels=len(label_names),
                              hidden_layers=hidden_layers,
                              learning_rate=learning_rate, loss=loss,
                              metrics=metrics,
                              optimizer_class=optimizer_class,
                              **kwargs)

    model = cls(model, feature_names=feature_names,
                label_names=label_names, normalize=normalize,
                one_hot_categories=one_hot_categories)

    return model
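# Usage sketch for build. The feature/label names and layer sizes are
# hypothetical:
#
#     >>> model = TfModel.build(
#     ...     ['x1', 'x2'], ['y'],
#     ...     hidden_layers=[{'units': 64, 'activation': 'relu'},
#     ...                    {'units': 64, 'activation': 'relu'}],
#     ...     learning_rate=0.001, loss='mean_squared_error',
#     ...     metrics=('mae', 'mse'))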