def __init__(self):
    from lale.lib.rasl.concat_features import ConcatFeatures

    self._pipeline_suffix = (
        ConcatFeatures
        >> Map(
            columns={
                "y": it.y_true,  # observed values
                "f": it.y_pred,  # predicted values
                "y2": it.y_true * it.y_true,  # squares
                "e2": (it.y_true - it.y_pred) * (it.y_true - it.y_pred),  # type: ignore
            }
        )
        >> Aggregate(
            columns={
                "n": count(it.y),
                "sum": sum(it.y),
                "sum_sq": sum(it.y2),
                "res_sum_sq": sum(it.e2),  # residual sum of squares
            }
        )
    )
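# The four aggregates above are sufficient statistics for R^2: the total sum
# of squares can be recovered as sum_sq - sum^2 / n, so the score needs no
# second pass over the data. A minimal sketch of that arithmetic, assuming
# the aggregate has been evaluated to plain numbers (the helper name
# `_r2_from_stats` is illustrative, not part of the library):
def _r2_from_stats(n, sum_, sum_sq, res_sum_sq):
    """Compute R^2 from the aggregated statistics.

    SS_tot = sum((y - mean)^2) = sum_sq - sum_^2 / n,
    so R^2 = 1 - SS_res / SS_tot.
    """
    ss_tot = sum_sq - (sum_ * sum_) / n
    return 1 - res_sum_sq / ss_tot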
def __init__(self):
    from lale.lib.rasl.concat_features import ConcatFeatures

    self._pipeline_suffix = (
        ConcatFeatures
        >> Map(
            columns={"match": astype("int", it.y_true == it.y_pred)}  # type: ignore
        )
        >> Aggregate(
            columns={
                "match": sum(it.match),  # number of correct predictions
                "total": count(it.match),  # number of predictions overall
            }
        )
    )
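# From the two aggregates above, accuracy is the fraction of rows where the
# prediction matched the label. Both counts are additive, so per-batch
# results can be summed before the final division. A minimal sketch
# (the helper name is illustrative, not the library's API):
def _accuracy_from_counts(match, total):
    """Accuracy = correct predictions / all predictions."""
    return match / total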
def _lift(X, hyperparams):
    feature_names_in_ = get_columns(X)
    strategy = hyperparams["strategy"]
    if strategy == "constant":
        fill_value = _SimpleImputerImpl._get_fill_value(X, hyperparams)
        agg_data = [[fill_value for col in get_columns(X)]]
        lifted_statistics = pd.DataFrame(agg_data, columns=get_columns(X))
    elif strategy == "mean":
        agg_op_sum = Aggregate(
            columns={c: sum(it[c]) for c in get_columns(X)},
            exclude_value=hyperparams["missing_values"],
        )
        agg_op_count = Aggregate(
            columns={c: count(it[c]) for c in get_columns(X)},
            exclude_value=hyperparams["missing_values"],
        )
        lifted_statistics = {}
        agg_sum = agg_op_sum.transform(X)
        if agg_sum is not None and _is_spark_df(agg_sum):
            agg_sum = agg_sum.toPandas()
        agg_count = agg_op_count.transform(X)
        if agg_count is not None and _is_spark_df(agg_count):
            agg_count = agg_count.toPandas()
        lifted_statistics["sum"] = agg_sum
        lifted_statistics["count"] = agg_count
    else:
        raise ValueError(
            "_lift is only supported for imputation strategies `mean` and `constant`."
        )
    # strategy is included so that _combine can use it
    return (feature_names_in_, lifted_statistics, strategy)
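# For strategy="mean", the lifted statistics form a monoid: per-column sums
# and non-missing counts are both additive across batches, and the fill
# values fall out as sum / count at the end. A sketch of that algebra,
# assuming both arguments are one-row pandas DataFrames as produced above
# (`_combine_mean_stats` is a hypothetical helper, not the library's
# actual _combine):
def _combine_mean_stats(lifted_a, lifted_b):
    """Merge two lifted results for strategy="mean"."""
    combined = {
        "sum": lifted_a["sum"] + lifted_b["sum"],  # elementwise over columns
        "count": lifted_a["count"] + lifted_b["count"],
    }
    fill_values = combined["sum"] / combined["count"]  # per-column means
    return combined, fill_values
# For strategy="constant" the lifted statistics already hold the fill
# values, so combining two batches can simply keep either side.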
def test_with_hyperopt2(self):
    from lale.expressions import (
        count,
        it,
        max,
        mean,
        min,
        string_indexer,
        sum,
        variance,
    )

    wrap_imported_operators()
    scan = Scan(table=it["main"])
    scan_0 = Scan(table=it["customers"])
    join = Join(
        pred=[
            (
                it["main"]["group_customer_id"]
                == it["customers"]["group_customer_id"]
            )
        ]
    )
    map = Map(
        columns={
            "[main](group_customer_id)[customers]|number_children|identity": it[
                "number_children"
            ],
            "[main](group_customer_id)[customers]|name|identity": it["name"],
            "[main](group_customer_id)[customers]|income|identity": it["income"],
            "[main](group_customer_id)[customers]|address|identity": it["address"],
            "[main](group_customer_id)[customers]|age|identity": it["age"],
        },
        remainder="drop",
    )
    pipeline_4 = join >> map
    scan_1 = Scan(table=it["purchase"])
    join_0 = Join(
        pred=[(it["main"]["group_id"] == it["purchase"]["group_id"])],
        join_limit=50.0,
    )
    aggregate = Aggregate(
        columns={
            "[main](group_id)[purchase]|price|variance": variance(it["price"]),
            "[main](group_id)[purchase]|time|sum": sum(it["time"]),
            "[main](group_id)[purchase]|time|mean": mean(it["time"]),
            "[main](group_id)[purchase]|time|min": min(it["time"]),
            "[main](group_id)[purchase]|price|sum": sum(it["price"]),
            "[main](group_id)[purchase]|price|count": count(it["price"]),
            "[main](group_id)[purchase]|price|mean": mean(it["price"]),
            "[main](group_id)[purchase]|price|min": min(it["price"]),
            "[main](group_id)[purchase]|price|max": max(it["price"]),
            "[main](group_id)[purchase]|time|max": max(it["time"]),
            "[main](group_id)[purchase]|time|variance": variance(it["time"]),
        },
        group_by=it["row_id"],
    )
    pipeline_5 = join_0 >> aggregate
    map_0 = Map(
        columns={
            "[main]|group_customer_id|identity": it["group_customer_id"],
            "[main]|transaction_id|identity": it["transaction_id"],
            "[main]|group_id|identity": it["group_id"],
            "[main]|comments|identity": it["comments"],
            "[main]|id|identity": it["id"],
            "prefix_0_id": it["prefix_0_id"],
            "next_purchase": it["next_purchase"],
            "[main]|time|identity": it["time"],
        },
        remainder="drop",
    )
    scan_2 = Scan(table=it["transactions"])
    scan_3 = Scan(table=it["products"])
    join_1 = Join(
        pred=[
            (it["main"]["transaction_id"] == it["transactions"]["transaction_id"]),
            (it["transactions"]["product_id"] == it["products"]["product_id"]),
        ]
    )
    map_1 = Map(
        columns={
            "[main](transaction_id)[transactions](product_id)[products]|price|identity": it[
                "price"
            ],
            "[main](transaction_id)[transactions](product_id)[products]|type|identity": it[
                "type"
            ],
        },
        remainder="drop",
    )
    pipeline_6 = join_1 >> map_1
    join_2 = Join(
        pred=[(it["main"]["transaction_id"] == it["transactions"]["transaction_id"])]
    )
    map_2 = Map(
        columns={
            "[main](transaction_id)[transactions]|description|identity": it[
                "description"
            ],
            "[main](transaction_id)[transactions]|product_id|identity": it[
                "product_id"
            ],
        },
        remainder="drop",
    )
    pipeline_7 = join_2 >> map_2
    map_3 = Map(
        columns=[
            string_indexer(it["[main]|comments|identity"]),
            string_indexer(
                it["[main](transaction_id)[transactions]|description|identity"]
            ),
            string_indexer(
                it[
                    "[main](transaction_id)[transactions](product_id)[products]|type|identity"
                ]
            ),
            string_indexer(it["[main](group_customer_id)[customers]|name|identity"]),
            string_indexer(
                it["[main](group_customer_id)[customers]|address|identity"]
            ),
        ]
    )
    pipeline_8 = ConcatFeatures() >> map_3
    relational = Relational(
        operator=make_pipeline_graph(
            steps=[
                scan,
                scan_0,
                pipeline_4,
                scan_1,
                pipeline_5,
                map_0,
                scan_2,
                scan_3,
                pipeline_6,
                pipeline_7,
                pipeline_8,
            ],
            edges=[
                (scan, pipeline_4),
                (scan, pipeline_5),
                (scan, map_0),
                (scan, pipeline_6),
                (scan, pipeline_7),
                (scan_0, pipeline_4),
                (pipeline_4, pipeline_8),
                (scan_1, pipeline_5),
                (pipeline_5, pipeline_8),
                (map_0, pipeline_8),
                (scan_2, pipeline_6),
                (scan_2, pipeline_7),
                (scan_3, pipeline_6),
                (pipeline_6, pipeline_8),
                (pipeline_7, pipeline_8),
            ],
        )
    )
    pipeline = relational >> (KNeighborsClassifier | LogisticRegression)
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    from lale.lib.lale import Hyperopt

    opt = Hyperopt(estimator=pipeline, max_evals=2)
    opt.fit(X, y)