import pickle
from typing import Any, Dict

import numpy as np
from autokeras import StructuredDataClassifier
from tensorflow import keras


class AutoKerasBaselineModel(Model):  # Model: the project's own base class

    def __init__(
        self,
        name: str,
        model_params: Dict[str, Any],
    ) -> None:
        super().__init__(name, model_params)
        # self._fit_params = model_params.pop('fit_params')
        self._fit_params = {}
        self._model = StructuredDataClassifier(**model_params)

    def _force_fit(self, X: np.ndarray, y: np.ndarray) -> None:
        self._model.fit(X, y, **self._fit_params)

    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        if not isinstance(self._model, StructuredDataClassifier):
            raise RuntimeError(
                'Because the AutoKeras search object cannot be saved, only'
                ' the best Keras model was stored. Calling fit would fit'
                ' that exported pipeline rather than rerun the AutoKeras'
                ' search. If that is the desired behaviour, use'
                ' `_force_fit` instead.')
        self._force_fit(X, y)

    def save(self, path: str) -> None:
        # Export the best model found by the search; it is the only part
        # that can be saved
        best_model = self._model.export_model()
        best_model.save(path)
        # Also store the model wrapper after removing the AutoKeras model,
        # as it can't be pickled
        self._model = None
        wrapper_path = path + '.wrapper'
        with open(wrapper_path, 'wb') as file:
            pickle.dump(self, file)

    @classmethod
    def load(cls, path: str) -> 'AutoKerasBaselineModel':
        # The model wrapper and the underlying Keras model have to be
        # loaded separately, as the AutoKeras model can't be saved
        wrapper_path = path + '.wrapper'
        with open(wrapper_path, 'rb') as file:
            wrapper_class = pickle.load(file)
        wrapper_class._model = keras.models.load_model(path)
        return wrapper_class

    def predict(self, X: np.ndarray) -> np.ndarray:
        if isinstance(self._model, StructuredDataClassifier):
            return self._model.predict(X)
        # Already been exported: round the Keras model's probabilities
        # to class labels
        return np.round(self._model.predict(X))

    def predict_proba(self, X: np.ndarray) -> np.ndarray:
        if isinstance(self._model, StructuredDataClassifier):
            exported_model = self._model.export_model()
            return exported_model.predict(X)
        # Already been exported: the Keras model outputs probabilities
        return self._model.predict(X)
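A minimal usage sketch of the wrapper's save/load round trip. The constructor arguments, the temporary path, and the random data are assumptions for illustration only:

import numpy as np

# Hypothetical round trip: search, save, reload, predict
X = np.random.rand(100, 8).astype('float32')
y = np.random.randint(0, 2, size=100)

baseline = AutoKerasBaselineModel('baseline', {'max_trials': 1})
baseline.fit(X, y)                     # runs the AutoKeras search
baseline.save('/tmp/baseline_model')   # exports best Keras model + pickles wrapper

reloaded = AutoKerasBaselineModel.load('/tmp/baseline_model')
print(reloaded.predict_proba(X)[:3])   # probabilities from the exported model
print(reloaded.predict(X)[:3])         # rounded to class labels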
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from autokeras import StructuredDataClassifier


def auto_ml_classify(url, x_names, y_name, max_trials, test_size, X_new):
    # load dataset
    dataframe = read_csv(url)
    X = dataframe[x_names].values
    y = dataframe[y_name].values
    print(X.shape, y.shape)
    # basic data preparation
    X = X.astype('float32')
    y = LabelEncoder().fit_transform(y)
    # separate into train and test sets
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=1)
    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
    # define the search
    search = StructuredDataClassifier(max_trials=max_trials)
    # perform the search
    search.fit(x=X_train, y=y_train, verbose=0)
    # evaluate the model
    loss, acc = search.evaluate(X_test, y_test, verbose=0)
    print('Accuracy: %.3f' % acc)
    # use the model to make a prediction
    yhat = search.predict(X_new)
    print('Predicted: %.3f' % float(yhat[0]))
    # get the best performing model
    model = search.export_model()
    # summarize the loaded model
    model.summary()
    # save the best performing model so it doesn't have to be retrained next time
    model.save('model_sonar.h5')
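A hypothetical call to the function above; the URL, column names, and sample row are placeholders, not a real dataset:

import numpy as np

X_new = np.asarray([[0.02, 0.37, 0.45]], dtype='float32')  # placeholder sample
auto_ml_classify(
    url='https://example.com/data.csv',  # placeholder: CSV with a header row
    x_names=['feature_a', 'feature_b', 'feature_c'],
    y_name='label',
    max_trials=3,
    test_size=0.33,
    X_new=X_new,
)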
import io

import matplotlib.pyplot as plt
import streamlit as st
import tensorflow as tf
from autokeras import StructuredDataClassifier

# This fragment assumes session_conf, X_train, y_train, X_test,
# max_trials, and epochs were defined earlier in the script
sess = tf.compat.v1.Session(
    graph=tf.compat.v1.get_default_graph(), config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)

# define the search
search = StructuredDataClassifier(max_trials=max_trials)
# perform the search
search.fit(x=X_train, y=y_train, verbose=0, epochs=epochs)
y_pred_train = search.predict(X_train)
y_pred_test = search.predict(X_test)
# get the best performing model
model = search.export_model()

# Capture the model summary as a string
s = io.StringIO()
model.summary(print_fn=lambda x: s.write(x + '\n'))
model_summary = s.getvalue()
s.close()
print("The model summary is:\n\n{}".format(model_summary))

# Render the summary in Streamlit via a text-only matplotlib figure
st.info('**Model summary**')
plt.text(0.1, 0.1, model_summary)
plt.setp(plt.gca(), frame_on=False, xticks=(), yticks=())
plt.grid(False)
st.pyplot()

# Training set
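The fragment uses `session_conf` without defining it. A common reproducibility setup that could precede it (an assumption, not necessarily the original code) is:

import os
import random

import numpy as np
import tensorflow as tf

# Assumed setup: fix the seeds and run single-threaded so results repeat
seed = 1
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
session_conf = tf.compat.v1.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1)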