def test_ensemble(self):
    X, y = make_blob_data()

    clf = AllStumpsClassifier(X, y)
    self.assertGreaterEqual(clf.score(X, y), 0)
def test_decision_stump(self):
    X, y = make_blob_data()

    stump = DecisionStumpClassifier(X, y, 2)

    # Basic sanity check:
    self.assertEqual(len(stump.predict(X)), len(X))
@classmethod
def setUpClass(cls):
    cls.X, cls.y = make_blob_data()
    cls.clf = QBoostClassifier(cls.X, cls.y, 0.0)
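# A minimal sketch of the unittest scaffolding the test methods above would need
# to run on their own. The import locations (`datasets` and `qboost` modules
# providing make_blob_data, DecisionStumpClassifier, AllStumpsClassifier and
# QBoostClassifier) are assumptions for illustration, not confirmed here.
import unittest

from datasets import make_blob_data                 # assumed helper module
from qboost import (AllStumpsClassifier,            # assumed module layout
                    DecisionStumpClassifier,
                    QBoostClassifier)


class TestStumps(unittest.TestCase):
    """Would hold test_decision_stump and test_ensemble from above."""


class TestQBoost(unittest.TestCase):
    """Would hold the setUpClass fixture from above and tests using cls.clf."""


if __name__ == '__main__':
    unittest.main()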
    'digits', help='handwritten digits data set')
sp_digits.add_argument('--digit1', type=int, default=0, choices=range(10),
                       help='first digit to include (default: %(default)s)')
sp_digits.add_argument('--digit2', type=int, default=1, choices=range(10),
                       help='second digit to include (default: %(default)s)')
sp_digits.add_argument('--plot-digits', action='store_true',
                       help='plot a random sample of each digit')

args = parser.parse_args()

if args.dataset == 'blobs':
    n_samples = args.num_samples
    n_features = args.num_features
    n_informative = args.num_informative

    X, y = make_blob_data(
        n_samples=n_samples, n_features=n_features, n_informative=n_informative)

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.4)

    if args.cross_validation:
        # See Boyda et al. (2017), Eq. (17) regarding normalization
        normalized_lambdas = np.linspace(0.0, 0.5, 10)
        lambdas = normalized_lambdas / n_features
        print('Performing cross-validation using {} values of lambda, this may take several minutes...'.format(len(lambdas)))
        qboost, lam = qboost_lambda_sweep(
            X_train, y_train, lambdas, verbose=args.verbose)
    else:
        qboost = QBoostClassifier(X_train, y_train, args.lam)

    if args.verbose: