def test_regressor_mg_train_sg_predict(datatype, keys, data_size, fit_intercept, client):
    """Fit a multi-GPU LinearRegression, then check that the collapsed
    single-GPU model predicts the same values as the distributed model.
    """
    n_rows, n_cols, n_informative = data_size
    X_train, y_train, X_test = make_dataset(datatype, n_rows, n_cols, n_informative)
    # Materialize the test partition locally for the single-GPU model.
    X_test_local = X_test.compute()

    # Distributed (multi-GPU) fit and predict.
    mg_model = LinearRegression(fit_intercept=fit_intercept, client=client)
    mg_model.fit(X_train, y_train)
    expected = mg_model.predict(X_test).compute()

    # Collapse to a local single-GPU model and predict on the same data.
    sg_model = mg_model.get_combined_model()
    actual = sg_model.predict(X_test_local)

    assert_equal(expected.get(), actual.get())
def test_mnmg():
    """End-to-end multi-node multi-GPU fit/predict on a tiny exactly-fittable
    dataset; the prediction must reproduce the targets exactly.

    Fix: the original created a LocalCUDACluster and Client without ever
    closing them, leaking worker processes after the test (and on assertion
    failure). Context managers guarantee shutdown on every exit path.
    """
    with LocalCUDACluster(threads_per_worker=1) as cluster, Client(cluster) as client:
        n_workers = len(client.scheduler_info()['workers'])

        # Create and populate a GPU DataFrame
        df_float = cudf.DataFrame()
        df_float['0'] = [1.0, 2.0, 5.0]
        df_float['1'] = [4.0, 2.0, 1.0]
        df_float['2'] = [4., 2, 1]

        # Two partitions per worker so the fit genuinely exercises the
        # distributed code path.
        ddf_float = dask_cudf.from_cudf(df_float, npartitions=2 * n_workers)
        X = ddf_float[ddf_float.columns.difference(['2'])]
        y = ddf_float['2']

        mod = LinearRegression()
        mod = mod.fit(X, y)

        actual_output = str(mod.predict(X).compute().values)
        expected_output = '[4. 2. 1.]'
        assert actual_output == expected_output