def evaluate(approach_id, metric):
    """Evaluate a stored approach and write the result report to CSV.

    Produces "<approach_id>_evaluation.csv" in the current working
    directory. Aborts with a console message when run outside a driftai
    project or when the approach id is unknown.

    metric: iterable of metric names; each is resolved through the
    str_to_metric_fn mapping (an unknown name raises KeyError).
    """
    # Guard clauses: bail out early with a user-facing message.
    if not _is_running_in_project():
        print("You must use driftai CLI inside an driftai project directory")
        return
    if not Approach.collection().exists(approach_id):
        print("Approach with id {} does not exist.".format(approach_id))
        return

    # Resolve metric names to callables before building the report.
    metric_fns = [str_to_metric_fn[name] for name in metric]
    report = ResultReport(approach=Approach.load(approach_id), metrics=metric_fns)
    report.as_dataframe().to_csv(approach_id + "_evaluation.csv", index=False)
def run(approach_id, resume):
    """Locate an approach class by id, instantiate it, and run it.

    The class is resolved dynamically as
    approaches.<approach_id>.<CamelCase(approach_id)>Approach inside the
    current project. Aborts with a console message when run outside a
    driftai project or when the approach id is unknown.

    resume: forwarded to the approach's run() method.
    """
    # Guard clauses: bail out early with a user-facing message.
    if not _is_running_in_project():
        print("You must use driftai CLI inside an driftai project directory")
        return
    if not Approach.collection().exists(approach_id):
        print("Approach with id {} does not exist.".format(approach_id))
        return

    # Make the project root importable so the approaches package resolves.
    sys.path.append(Project.load().path)

    module_path = 'approaches.' + approach_id
    class_name = to_camel_case(approach_id) + "Approach"
    cls = import_from(module_path, class_name)
    cls().run(resume=resume)