def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    # assumes run() returns the fitted pipeline
    y_pred = self.run().predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    # self.mlflow_log_param('model', "LinearRegression")
    # self.mlflow_log_param('rmse', rmse)
    return rmse
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    self.mlflow_log_metric('rmse', rmse)
    self.mlflow_log_param('model', 'linear')
    return round(rmse, 2)
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    if self.mlflow:
        self.mlflow_log_metric('rmse', rmse)
    return rmse
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    print(rmse)
    self.mlflow_log_metric("rmse", rmse)
    self.mlflow_log_param("model", self.pipeline.get_params()['linear_model'])
    return rmse
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    print(f'ID: {self.mlflow_experiment_id}')
    self.mlflow_log_param('model',
                          str(self.pipeline.get_params()['model']).strip('()'))
    self.mlflow_log_metric('rmse', rmse)
    return rmse
def compute_rmse(self, X_test, y_test, show=False):
    if self.pipeline is None:
        raise ValueError("Cannot evaluate an empty pipeline")
    y_pred = self.pipeline.predict(X_test)
    if show:
        # requires `import pandas as pd` and `from termcolor import colored`
        res = pd.DataFrame(y_test)
        res["pred"] = y_pred
        print(colored(res.sample(5), "blue"))
    # calls the module-level compute_rmse helper, not this method
    rmse = compute_rmse(y_pred, y_test)
    return round(rmse, 3)
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    rmse_ = compute_rmse(y_pred, y_test)
    print(f"RMSE = {rmse_}")
    # EXPERIMENT_NAME and myname are assumed to be module-level constants
    self.experiment_name = EXPERIMENT_NAME
    self.mlflow_log_metric("rmse", rmse_)
    self.mlflow_log_param("model", self.estimator)
    self.mlflow_log_param("student_name", myname)
    return rmse_
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.best_model.predict(X_test)
    # y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    # for model in ["a", "b"]:  # we could iterate over several models, one MLflow run each
    #     self.mlflow_run()
    #     self.mlflow_log_metric("rmse", rmse)
    #     self.mlflow_log_param("model", self.pipeline.get_params()['linear_model'])
    #     self.mlflow_log_param("truc", model)
    #     for key, value in self.best_params.items():
    #         self.mlflow_log_param(key, value)
    self.mlflow_run()
    self.mlflow_log_metric("rmse", rmse)
    self.mlflow_log_param(
        "model", str(self.best_model.get_params()['linear_model'])[:20])
    for key, value in self.best_params.items():
        self.mlflow_log_param(key, value)
    return rmse
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    self.mlflow_log_param('model', 'KnnRegressor')
    self.mlflow_log_metric('rmse', rmse)
    return rmse
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    return rmse
def evaluate(self, X_test, y_test):
    '''returns the value of the RMSE'''
    y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    return rmse
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    score = compute_rmse(y_pred, y_test)
    self.mlflow_log_metric("test rmse", score)
    return score
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    self.rmse = compute_rmse(y_pred, y_test)
    self.mlflow_log_param('model', self.model)
    self.mlflow_log_metric('rmse', self.rmse)
    return self.rmse
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    # assumes run() returns the fitted pipeline
    model = self.run()
    y_pred = model.predict(X_test)
    return compute_rmse(y_pred, y_test)
def evaluate(self):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    rmse = compute_rmse(self.pipeline.predict(self.X_test), self.y_test)
    print(rmse)
    return rmse
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    # assumes run() returns the fitted pipeline
    pipe_trained = self.run()
    y_pred = pipe_trained.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    return rmse
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    print(f"rmse = {rmse}")
    return rmse
def compute_rmse(self, X, y):
    y_pred = self.pipeline.predict(X)
    rmse = compute_rmse(y_pred, y)
    return round(rmse, 3)
import numpy as np


def test_rmse():
    y_true = np.array((34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24))
    y_pred = np.array((37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23))
    assert round(compute_rmse(y_pred, y_true), 2) == 2.43, "RMSE calculation is not right"
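# All of the evaluate() variants above delegate to a shared compute_rmse helper.
# A minimal sketch of that helper, consistent with its call signature and with the
# expected value (2.43) asserted in test_rmse above; the project's actual
# implementation may differ in details.
import numpy as np


def compute_rmse(y_pred, y_true):
    """Root mean squared error between predictions and ground truth."""
    return np.sqrt(((y_pred - y_true) ** 2).mean())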
def evaluate(self, X_test, y_test):
    if self.pipeline is None:
        raise ValueError("Cannot evaluate an empty pipeline")
    y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    return round(rmse, 3)
def evaluate(self, X_test, y_test):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    rmse = round(compute_rmse(y_pred, y_test), 2)
    self.mlflow_log_metric("rmse", rmse)
    return rmse
def evaluate(self, X_test, y_test, estimator):
    """Evaluates the pipeline on the test set and returns the RMSE."""
    y_pred = self.pipeline.predict(X_test)
    rmse = compute_rmse(y_pred, y_test)
    return rmse
def evaluate(self):
    return compute_rmse(self.pipeline.predict(self.X_test), self.y_test)
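# Several evaluate() variants above also assume the trainer exposes
# mlflow_log_metric, mlflow_log_param, mlflow_run and mlflow_experiment_id.
# A hypothetical minimal mixin providing them, built on the standard
# mlflow.tracking.MlflowClient API; the experiment name is a placeholder,
# not the project's actual value, and the tracking URI is whatever MLflow
# is configured with.
from mlflow.tracking import MlflowClient


class MLFlowLoggerMixin:
    experiment_name = "taxifare_experiment"  # placeholder name

    @property
    def mlflow_client(self):
        # lazily create and cache a single client
        if not hasattr(self, "_mlflow_client"):
            self._mlflow_client = MlflowClient()
        return self._mlflow_client

    @property
    def mlflow_experiment_id(self):
        # reuse the experiment if it exists, otherwise create it
        experiment = self.mlflow_client.get_experiment_by_name(self.experiment_name)
        if experiment is not None:
            return experiment.experiment_id
        return self.mlflow_client.create_experiment(self.experiment_name)

    def mlflow_run(self):
        # start a new run and remember it so metrics/params attach to it
        self._mlflow_run = self.mlflow_client.create_run(self.mlflow_experiment_id)
        return self._mlflow_run

    def mlflow_log_metric(self, key, value):
        if not hasattr(self, "_mlflow_run"):
            self.mlflow_run()
        self.mlflow_client.log_metric(self._mlflow_run.info.run_id, key, value)

    def mlflow_log_param(self, key, value):
        if not hasattr(self, "_mlflow_run"):
            self.mlflow_run()
        self.mlflow_client.log_param(self._mlflow_run.info.run_id, key, value)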