def test_make_classification_hard_vote_score_mix(self):
    """Tests RandomForestClassifier score with hard_vote, sklearn_max,
    distr_depth and max_depth."""
    x, y = make_classification(
        n_samples=3000,
        n_features=10,
        n_classes=3,
        n_informative=4,
        n_redundant=2,
        n_repeated=1,
        n_clusters_per_class=2,
        shuffle=True,
        random_state=0,
    )
    # First half of the samples for training, second half for scoring.
    half = len(x) // 2
    x_train = ds.array(x[:half], (300, 10))
    y_train = ds.array(y[:half][:, np.newaxis], (300, 1))
    x_test = ds.array(x[half:], (300, 10))
    y_test = ds.array(y[half:][:, np.newaxis], (300, 1))

    rf = RandomForestClassifier(random_state=0, sklearn_max=100,
                                distr_depth=2, max_depth=12,
                                hard_vote=True)
    rf.fit(x_train, y_train)

    accuracy = compss_wait_on(rf.score(x_test, y_test))
    self.assertGreater(accuracy, 0.7)
def main():
    """Trains a RandomForestClassifier on 80% of the Iris dataset and
    prints the predictions and accuracy obtained on the remaining 20%."""
    x, y = load_iris(return_X_y=True)

    indices = np.arange(len(x))
    shuffle(indices)

    # Reserve 80% of the samples for training, the rest for testing.
    split = int(0.8 * len(x))
    train_idx = indices[:split]
    test_idx = indices[split:]

    # Train the RF classifier.
    print("- Training Random Forest classifier with %s samples of Iris "
          "dataset." % len(train_idx))
    x_train = ds.array(x[train_idx], (10, 4))
    y_train = ds.array(y[train_idx][:, np.newaxis], (10, 1))
    forest = RandomForestClassifier(10)
    forest.fit(x_train, y_train)

    # Test the trained RF classifier.
    print("- Testing the classifier.", end='')
    x_test = ds.array(x[test_idx], (10, 4))
    y_real = ds.array(y[test_idx][:, np.newaxis], (10, 1))
    y_pred = forest.predict(x_test)

    score = compss_wait_on(forest.score(x_test, y_real))

    # Put results in fancy dataframe and print the accuracy
    df = pd.DataFrame(data=list(zip(y[test_idx], y_pred.collect())),
                      columns=['Label', 'Predicted'])
    print(" Predicted values: \n\n%s" % df)
    print("\n- Classifier accuracy: %s" % score)
def test_make_classification_hard_vote_predict(self):
    """Tests RandomForestClassifier predict with hard_vote."""
    x, y = make_classification(
        n_samples=3000,
        n_features=10,
        n_classes=3,
        n_informative=4,
        n_redundant=2,
        n_repeated=1,
        n_clusters_per_class=2,
        shuffle=True,
        random_state=0,
    )
    # Even-indexed samples for training, odd-indexed for prediction.
    x_train = ds.array(x[::2], (300, 10))
    y_train = ds.array(y[::2][:, np.newaxis], (300, 1))
    x_test = ds.array(x[1::2], (300, 10))
    y_test = y[1::2]

    rf = RandomForestClassifier(random_state=0, sklearn_max=10,
                                hard_vote=True)
    rf.fit(x_train, y_train)

    y_pred = rf.predict(x_test).collect()
    accuracy = np.count_nonzero(y_pred == y_test) / len(y_test)
    self.assertGreater(accuracy, 0.7)
def test_make_classification_sklearn_max_predict_proba(self):
    """Tests RandomForestClassifier predict_proba with sklearn_max."""
    x, y = make_classification(
        n_samples=3000,
        n_features=10,
        n_classes=3,
        n_informative=4,
        n_redundant=2,
        n_repeated=1,
        n_clusters_per_class=2,
        shuffle=True,
        random_state=0,
    )
    # Even-indexed samples for training, odd-indexed for prediction.
    x_train = ds.array(x[::2], (300, 10))
    y_train = ds.array(y[::2][:, np.newaxis], (300, 1))
    x_test = ds.array(x[1::2], (300, 10))
    y_test = y[1::2]

    rf = RandomForestClassifier(random_state=0, sklearn_max=10)
    rf.fit(x_train, y_train)

    # Turn per-class probabilities into hard predictions by taking the
    # most likely class for each sample.
    probabilities = rf.predict_proba(x_test).collect()
    rf.classes = compss_wait_on(rf.classes)
    y_pred = rf.classes[np.argmax(probabilities, axis=1)]

    accuracy = np.count_nonzero(y_pred == y_test) / len(y_test)
    self.assertGreater(accuracy, 0.7)
def test_iris(self):
    """Tests RandomForestClassifier with a minimal example."""
    x, y = datasets.load_iris(return_X_y=True)

    # Train on the even-indexed samples, validate on the odd-indexed ones.
    ds_fit = ds.array(x[::2], block_size=(30, 2))
    fit_y = ds.array(y[::2].reshape(-1, 1), block_size=(30, 1))
    ds_validate = ds.array(x[1::2], block_size=(30, 2))
    validate_y = ds.array(y[1::2].reshape(-1, 1), block_size=(30, 1))

    # A single depth-1 tree (a decision stump) can separate at most two
    # of the three iris classes.
    rf = RandomForestClassifier(n_estimators=1, max_depth=1,
                                random_state=0)
    rf.fit(ds_fit, fit_y)
    accuracy = compss_wait_on(rf.score(ds_validate, validate_y))

    # Accuracy should be <= 2/3 for any seed, often exactly equal.
    self.assertAlmostEqual(accuracy, 2 / 3)
def test_make_classification_score(self):
    """Tests RandomForestClassifier fit and score with default params."""
    x, y = make_classification(
        n_samples=3000,
        n_features=10,
        n_classes=3,
        n_informative=4,
        n_redundant=2,
        n_repeated=1,
        n_clusters_per_class=2,
        shuffle=True,
        random_state=0,
    )
    # First half of the samples for training, second half for scoring.
    half = len(x) // 2
    x_train = ds.array(x[:half], (300, 10))
    y_train = ds.array(y[:half][:, np.newaxis], (300, 1))
    x_test = ds.array(x[half:], (300, 10))
    y_test = ds.array(y[half:][:, np.newaxis], (300, 1))

    rf = RandomForestClassifier(random_state=0)
    rf.fit(x_train, y_train)

    accuracy = compss_wait_on(rf.score(x_test, y_test))
    self.assertGreater(accuracy, 0.7)
def test_make_classification_predict_and_distr_depth(self):
    """Tests RandomForestClassifier fit and predict with a distr_depth."""
    x, y = make_classification(
        n_samples=3000,
        n_features=10,
        n_classes=3,
        n_informative=4,
        n_redundant=2,
        n_repeated=1,
        n_clusters_per_class=2,
        shuffle=True,
        random_state=0,
    )
    # First half of the samples for training, second half for prediction.
    half = len(x) // 2
    x_train = ds.array(x[:half], (300, 10))
    y_train = ds.array(y[:half][:, np.newaxis], (300, 1))
    x_test = ds.array(x[half:], (300, 10))
    y_test = y[half:]

    rf = RandomForestClassifier(distr_depth=2, random_state=0)
    rf.fit(x_train, y_train)

    y_pred = rf.predict(x_test).collect()
    accuracy = np.count_nonzero(y_pred == y_test) / len(y_test)
    self.assertGreater(accuracy, 0.7)
def test_make_classification_fit_predict(self):
    """Tests RandomForestClassifier fit_predict with default params."""
    x, y = make_classification(
        n_samples=3000,
        n_features=10,
        n_classes=3,
        n_informative=4,
        n_redundant=2,
        n_repeated=1,
        n_clusters_per_class=2,
        shuffle=True,
        random_state=0,
    )
    # Train on the first half and predict on the same data: accuracy on
    # the training set itself should comfortably exceed the threshold.
    half = len(x) // 2
    x_train = ds.array(x[:half], (300, 10))
    y_train = ds.array(y[:half][:, np.newaxis], (300, 1))

    rf = RandomForestClassifier(random_state=0)
    y_pred = rf.fit(x_train, y_train).predict(x_train).collect()
    y_train = y_train.collect()

    accuracy = np.count_nonzero(y_pred == y_train) / len(y_train)
    self.assertGreater(accuracy, 0.7)
def main():
    """Command-line benchmark: fits a RandomForestClassifier on a CSV or
    SVMLight dataset and prints a summary list.

    The printed list contains the forest's n_estimators, distr_depth and
    max_depth, the read time (0 unless --detailed_times is given), the fit
    time, and — when a test file is supplied — the test score.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--svmlight", help="read files in SVMLight format",
                        action="store_true")
    parser.add_argument("-dt", "--detailed_times",
                        help="get detailed execution times (read and fit)",
                        action="store_true")
    parser.add_argument("-e", "--estimators", metavar="N_ESTIMATORS",
                        type=int, help="default is 10", default=10)
    parser.add_argument("-b", "--block_size", metavar="BLOCK_SIZE", type=str,
                        help="two comma separated ints that represent the "
                             "size of the blocks in which to divide the input "
                             "data (default is 100,100)",
                        default="100,100")
    parser.add_argument("-md", "--max_depth", metavar="MAX_DEPTH", type=int,
                        help="default is np.inf", required=False)
    parser.add_argument("-dd", "--dist_depth", metavar="DIST_DEPTH", type=int,
                        help="default is auto", required=False)
    parser.add_argument("-f", "--features", metavar="N_FEATURES",
                        help="number of features of the input data "
                             "(only for SVMLight files)",
                        type=int, default=None, required=False)
    parser.add_argument("--dense", help="use dense data structures",
                        action="store_true")
    parser.add_argument("-t", "--test-file", metavar="TEST_FILE_PATH",
                        help="test file path", type=str, required=False)
    parser.add_argument("train_data",
                        help="input file in CSV or SVMLight format", type=str)
    args = parser.parse_args()

    train_data = args.train_data

    s_time = time.time()
    read_time = 0

    sparse = not args.dense

    bsize = args.block_size.split(",")
    block_size = (int(bsize[0]), int(bsize[1]))

    if args.svmlight:
        x, y = ds.load_svmlight_file(train_data, block_size, args.features,
                                     sparse)
    else:
        x = ds.load_txt_file(train_data, block_size)
        # The label is the LAST column of the CSV file, matching the
        # test-file loading below. The original code sliced column
        # shape[1]-2 as y while leaving the label column inside x, which
        # trained on the wrong target and leaked the label as a feature.
        y = x[:, x.shape[1] - 1:x.shape[1]]
        x = x[:, :x.shape[1] - 1]

    if args.detailed_times:
        # Synchronize so the measured read time covers the actual I/O.
        barrier()
        read_time = time.time() - s_time
        s_time = time.time()

    # Fall back to the library defaults when the optional args are absent.
    if args.dist_depth:
        dist_depth = args.dist_depth
    else:
        dist_depth = "auto"

    if args.max_depth:
        max_depth = args.max_depth
    else:
        max_depth = np.inf

    forest = RandomForestClassifier(n_estimators=args.estimators,
                                    max_depth=max_depth,
                                    distr_depth=dist_depth)
    forest.fit(x, y)

    barrier()
    fit_time = time.time() - s_time

    out = [forest.n_estimators, forest.distr_depth, forest.max_depth,
           read_time, fit_time]

    if args.test_file:
        if args.svmlight:
            x_test, y_test = ds.load_svmlight_file(args.test_file, block_size,
                                                   args.features, sparse)
        else:
            x_test = ds.load_txt_file(args.test_file, block_size)
            y_test = x_test[:, x_test.shape[1] - 1:x_test.shape[1]]
            x_test = x_test[:, :x_test.shape[1] - 1]

        out.append(compss_wait_on(forest.score(x_test, y_test)))

    print(out)