Example #1
0
def test_homer_shallowly():
    '''Smoke-test HOMER: fit, inspect the estimator hierarchy, predict.'''
    num_labels = 4
    X, y = make_ml_clf(n_samples=50, n_features=20,
                       n_classes=num_labels,
                       n_labels=2,
                       length=10, allow_unlabeled=False,
                       return_indicator=True,
                       random_state=123456)

    homer = HOMER(base_clf=OneVsRestClassifier(LinearSVC(random_state=0)),
                  k=3,
                  max_iter=20,
                  random_state=123456)
    homer.fit(X, y)

    # One root estimator, then a single flat layer of leaf estimators.
    assert_equal(homer._label_n, num_labels)
    assert_equal(type(homer._estimator_hierarchy[0]),
                 OneVsRestClassifier)
    for child in homer._estimator_hierarchy[1]:
        assert_equal(type(child[0]), OneVsRestClassifier)
        assert_equal(len(child[1]), 0)  # no more children clf

    homer.predict(X)
    # Meta labels: 50 samples, split into k-driven groups of width 2.
    assert_equal(homer._meta_y_hier[0].shape, (50, 2))
    for meta_y in homer._meta_y_hier[1]:
        assert_equal(meta_y.shape, (50, 2))
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
    """Scatter a synthetic 2-feature multilabel sample on *ax*.

    Returns the generating distributions ``(p_c, p_w_c)`` so callers can
    reuse them (e.g. for a shared legend).
    """
    X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
                                   n_classes=n_classes, n_labels=n_labels,
                                   length=length, allow_unlabeled=False,
                                   return_distributions=True,
                                   random_state=RANDOM_SEED)

    # Encode each row's label combination as a bitmask to pick its colour.
    color_idx = (Y * [1, 2, 4]).sum(axis=1)
    ax.scatter(X[:, 0], X[:, 1], color=COLORS.take(color_idx), marker=".")

    # Per-class stars at the expected feature counts; size grows with the
    # class prior p_c.
    ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
               marker="*", linewidth=0.5, edgecolor="black",
               s=20 + 1500 * p_c**2,
               color=COLORS.take([1, 2, 4]))
    ax.set_xlabel("Feature 0 count")
    return p_c, p_w_c
Example #3
0
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
    """Draw a 2-D multilabel dataset on *ax* and return (p_c, p_w_c)."""
    X, Y, p_c, p_w_c = make_ml_clf(
        n_samples=150,
        n_features=2,
        n_classes=n_classes,
        n_labels=n_labels,
        length=length,
        allow_unlabeled=False,
        return_distributions=True,
        random_state=RANDOM_SEED,
    )

    # Map each label combination to a colour via a 3-bit mask.
    bitmask = (Y * [1, 2, 4]).sum(axis=1)
    ax.scatter(X[:, 0], X[:, 1], color=COLORS.take(bitmask), marker='.')

    # Class "centroid" stars, scaled by the squared class prior.
    ax.scatter(
        p_w_c[0] * length,
        p_w_c[1] * length,
        marker='*',
        linewidth=.5,
        edgecolor='black',
        s=20 + 1500 * p_c ** 2,
        color=COLORS.take([1, 2, 4]),
    )
    ax.set_xlabel('Feature 0 count')
    return p_c, p_w_c
from imblearn.metrics import classification_report_imbalanced
from sklearn.datasets import make_multilabel_classification as make_ml_clf
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC
from imblearn.ensemble import EasyEnsemble
from sklearn.cross_validation import train_test_split
from balance_data import cluster_centroids, edit_nearest_neribours, near_miss
from balance_data import adasyn, smote
from balance_data import smoteenn

RANDOM_STATE = 42                        # seed for the train/test split
RANDOM_SEED = np.random.randint(2**10)   # seed for dataset generation
# Generate a dataset

# Multilabel dataset; y is a label-indicator matrix (n_samples x 5).
X, y = make_ml_clf(n_classes=5, n_samples=5000, n_features=20,
                   n_labels=1, allow_unlabeled=True,
                   return_distributions=False,
                   random_state=RANDOM_SEED)
"""
X, y = make_classification(n_classes=5, class_sep=5,\
    weights=[0.1,0.3,0.1,0.05,0.45], n_informative=3, n_redundant=1, flip_y=0,\
    n_features=20, n_clusters_per_class=5, n_samples=1000, random_state=10)
"""

# FIX: RANDOM_STATE was declared above but never used, so every run got a
# different split; seed the split to make results reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=RANDOM_STATE)

# Collapse each indicator row to a single class id (argmax) so the
# class-balance counters below see one label per sample.
y_train = [np.argmax(yt) for yt in y_train]
y_test = [np.argmax(yt) for yt in y_test]

print('Original train dataset shape {}'.format(Counter(y_train)))
print('Original test dataset shape {}'.format(Counter(y_test)))
sample_methods = ['no_sample','easy_ensemble','cluster_centroids','edit_nearest_neribours',\