Example #1
def test_rest(x, y):
    # Note: `indices_support` and `verbose` are module-level flags defined
    # elsewhere in the original script.

    print('Random under-sampling')
    US = UnderSampler(indices_support=indices_support, verbose=verbose)
    usx, usy, idx_tmp = US.fit_transform(x, y)
    print('Indices selected')
    print(idx_tmp)

    print('Tomek links')
    TL = TomekLinks(verbose=verbose)
    tlx, tly = TL.fit_transform(x, y)

    print('Clustering centroids')
    CC = ClusterCentroids(verbose=verbose)
    ccx, ccy = CC.fit_transform(x, y)

    print('NearMiss-1')
    NM1 = NearMiss(version=1, indices_support=indices_support, verbose=verbose)
    nm1x, nm1y, idx_tmp = NM1.fit_transform(x, y)
    print('Indices selected')
    print(idx_tmp)

    print('NearMiss-2')
    NM2 = NearMiss(version=2, indices_support=indices_support, verbose=verbose)
    nm2x, nm2y, idx_tmp = NM2.fit_transform(x, y)
    print('Indices selected')
    print(idx_tmp)

    print('NearMiss-3')
    NM3 = NearMiss(version=3, indices_support=indices_support, verbose=verbose)
    nm3x, nm3y, idx_tmp = NM3.fit_transform(x, y)
    print('Indices selected')
    print(idx_tmp)

    print('Neighbourhood Cleaning Rule')
    NCR = NeighbourhoodCleaningRule(indices_support=indices_support, verbose=verbose)
    ncrx, ncry, idx_tmp = NCR.fit_transform(x, y)
    print('Indices selected')
    print(idx_tmp)

    print('Random over-sampling')
    OS = OverSampler(verbose=verbose)
    ox, oy = OS.fit_transform(x, y)

    print('SMOTE Tomek links')
    STK = SMOTETomek(verbose=verbose)
    stkx, stky = STK.fit_transform(x, y)

    print('SMOTE ENN')
    SENN = SMOTEENN(verbose=verbose)
    sennx, senny = SENN.fit_transform(x, y)

    print('EasyEnsemble')
    EE = EasyEnsemble(verbose=verbose)
    eex, eey = EE.fit_transform(x, y)
Example #2
def nearmiss_undersampling(X, y, version):
    """
    Perform NearMiss undersampling

    Keyword arguments:
    X -- The feature vectors
    y -- The target classes
    version -- The NearMiss variant to use (1, 2 or 3)
    """

    # `verbose` is a module-level flag defined elsewhere in the original script
    if verbose:
        print('\nUndersampling with NearMiss-' + str(version) + ' ...')

    undersampler = NearMiss(verbose=verbose, version=version)
    X_undersampled, y_undersampled = undersampler.fit_transform(X, y)
    return X_undersampled, y_undersampled
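A minimal usage sketch for the helper above, assuming a synthetic imbalanced dataset built with scikit-learn's make_classification and the module-level `verbose` flag that the function reads:

from sklearn.datasets import make_classification
from unbalanced_dataset.under_sampling import NearMiss

verbose = True  # module-level flag assumed by nearmiss_undersampling

# Build a small imbalanced two-class dataset
X, y = make_classification(n_classes=2, weights=[0.1, 0.9],
                           n_samples=1000, random_state=0)

# Undersample the majority class with NearMiss-1
X_res, y_res = nearmiss_undersampling(X, y, version=1)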
Example #4
def test_nm1_fit():
    """Test the fitting method"""

    # Define the parameter for the under-sampling
    ratio = 'auto'

    # Create the object
    nm1 = NearMiss(ratio=ratio, random_state=RND_SEED,
                   version=VERSION_NEARMISS)
    # Fit the data
    nm1.fit(X, Y)

    # Check if the data information have been computed
    assert_equal(nm1.min_c_, 0)
    assert_equal(nm1.maj_c_, 1)
    assert_equal(nm1.stats_c_[0], 500)
    assert_equal(nm1.stats_c_[1], 4500)
Example #5
def test_nm_fit_invalid_ratio():
    """Test either if an error is raised when the balancing ratio to fit is
    smaller than the one of the data"""

    # Create the object
    ratio = 1. / 10000.
    nm = NearMiss(ratio=ratio, random_state=RND_SEED)
    # Fit the data
    assert_raises(RuntimeError, nm.fit, X, Y)
Example #6
def test_nm1_fit_transform_half():
    """Test fit and transform routines with .5 ratio"""

    # Define the parameter for the under-sampling
    ratio = .5

    # Create the object
    nm1 = NearMiss(ratio=ratio, random_state=RND_SEED,
                   version=VERSION_NEARMISS)

    # Fit and transform
    X_resampled, y_resampled = nm1.fit_transform(X, Y)

    currdir = os.path.dirname(os.path.abspath(__file__))
    X_gt = np.load(os.path.join(currdir, 'data', 'nm1_x_05.npy'))
    y_gt = np.load(os.path.join(currdir, 'data', 'nm1_y_05.npy'))
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
Example #7
def test_nm1_fit_transform_auto_indices():
    """Test fit and transform routines with auto ratio and indices support"""

    # Define the parameter for the under-sampling
    ratio = 'auto'

    # Create the object
    nm1 = NearMiss(ratio=ratio, random_state=RND_SEED,
                   version=VERSION_NEARMISS, return_indices=True)

    # Fit and transform
    X_resampled, y_resampled, idx_under = nm1.fit_transform(X, Y)

    currdir = os.path.dirname(os.path.abspath(__file__))
    X_gt = np.load(os.path.join(currdir, 'data', 'nm1_x.npy'))
    y_gt = np.load(os.path.join(currdir, 'data', 'nm1_y.npy'))
    idx_gt = np.load(os.path.join(currdir, 'data', 'nm1_idx.npy'))
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
    assert_array_equal(idx_under, idx_gt)
Example #8
def test_nm1_transform_wt_fit():
    """Test either if an error is raised when transform is called before
    fitting"""

    # Define the parameter for the under-sampling
    ratio = 'auto'

    # Create the object
    nm1 = NearMiss(ratio=ratio, random_state=RND_SEED,
                   version=VERSION_NEARMISS)
    assert_raises(RuntimeError, nm1.transform, X, Y)
Example #9
def test_nearmiss_fit_single_class():
    """Test either if an error when there is a single class"""

    # Define the parameter for the under-sampling
    ratio = 'auto'

    # Create the object
    nm1 = NearMiss(ratio=ratio, random_state=RND_SEED,
                   version=VERSION_NEARMISS)
    # Resample the data
    # Create a wrong y
    y_single_class = np.zeros((X.shape[0], ))
    assert_raises(RuntimeError, nm1.fit, X, y_single_class)
Example #10
def test_rest(x, y, c=0, ratio='auto'):
    # `c` selects the resampling method; `indices_support` and `verbose`
    # are module-level flags defined elsewhere in the original script.
    if c == 0:
        print('Random under-sampling')
        US = UnderSampler(indices_support=indices_support, verbose=verbose, ratio=ratio)
        x, y, idx_tmp = US.fit_transform(x, y)
        print('Indices selected')
        print(idx_tmp)
    elif c == 1:
        print('Tomek links')
        TL = TomekLinks(verbose=verbose, ratio=ratio)
        x, y = TL.fit_transform(x, y)
    elif c == 2:
        print('Clustering centroids')
        CC = ClusterCentroids(verbose=verbose, ratio=ratio)
        x, y = CC.fit_transform(x, y)
    elif c == 3:
        print('NearMiss-1')
        NM1 = NearMiss(version=1, indices_support=indices_support, verbose=verbose, ratio=ratio)
        x, y, idx_tmp = NM1.fit_transform(x, y)
        print('Indices selected')
        print(idx_tmp)
    elif c == 4:
        print('NearMiss-2')
        NM2 = NearMiss(version=2, indices_support=indices_support, verbose=verbose, ratio=ratio)
        x, y, idx_tmp = NM2.fit_transform(x, y)
        print('Indices selected')
        print(idx_tmp)
    elif c == 5:
        print('NearMiss-3')
        NM3 = NearMiss(version=3, indices_support=indices_support, verbose=verbose, ratio=ratio)
        x, y, idx_tmp = NM3.fit_transform(x, y)
        print('Indices selected')
        print(idx_tmp)
    elif c == 6:
        print('Neighbourhood Cleaning Rule')
        NCR = NeighbourhoodCleaningRule(indices_support=indices_support, verbose=verbose, ratio=ratio)
        x, y, idx_tmp = NCR.fit_transform(x, y)
        print('Indices selected')
        print(idx_tmp)
    elif c == 7:
        print('Random over-sampling')
        OS = OverSampler(verbose=verbose, ratio=ratio)
        x, y = OS.fit_transform(x, y)
    elif c == 8:
        print('SMOTE Tomek links')
        STK = SMOTETomek(verbose=verbose, ratio=ratio)
        x, y = STK.fit_transform(x, y)
    elif c == 9:
        print('SMOTE ENN')
        SENN = SMOTEENN(verbose=verbose, ratio=ratio)
        x, y = SENN.fit_transform(x, y)
    else:
        print('EasyEnsemble')
        EE = EasyEnsemble(verbose=verbose, ratio=ratio)
        x, y = EE.fit_transform(x, y)
    return x, y
Example #11
def test_nearmiss_init():
    """Test the initialisation of the object"""

    # Define a ratio
    ratio = 1.
    verbose = True
    nm1 = NearMiss(ratio=ratio, random_state=RND_SEED, verbose=verbose,
                   version=VERSION_NEARMISS)

    assert_equal(nm1.version, VERSION_NEARMISS)
    assert_equal(nm1.size_ngh, 3)
    assert_equal(nm1.ratio_, ratio)
    assert_equal(nm1.rs_, RND_SEED)
    assert_equal(nm1.verbose, verbose)
    assert_equal(nm1.min_c_, None)
    assert_equal(nm1.maj_c_, None)
    assert_equal(nm1.stats_c_, {})
Example #12
def __get_sample_transformed_examples(sample_type, train_x, train_y, ratio):
    sampler = None
    verbose = True
    if sample_type == SMOTE_REG:
        sampler = SMOTE(kind='regular', verbose=verbose, ratio=ratio, k=15)
    elif sample_type == SMOTE_SVM:
        # TODO: Make this configurable?
        svm_args = {'class_weight': 'balanced'}
        sampler = SMOTE(kind='svm',
                        ratio=ratio,
                        verbose=verbose,
                        k=15,
                        **svm_args)
    elif sample_type == SMOTE_BORDERLINE_1:
        sampler = SMOTE(kind='borderline1', ratio=ratio, verbose=verbose)
    elif sample_type == SMOTE_BORDERLINE_2:
        sampler = SMOTE(kind='borderline2', ratio=ratio, verbose=verbose)
    elif sample_type == SMOTE_ENN:
        sampler = SMOTEENN(ratio=ratio, verbose=verbose, k=15)
    elif sample_type == SMOTE_TOMEK:
        sampler = SMOTETomek(ratio=ratio, verbose=verbose, k=15)
    elif sample_type == UNDERSAMPLER:
        sampler = UnderSampler(ratio=ratio,
                               verbose=verbose,
                               replacement=False,
                               random_state=17)
    elif sample_type == ADASYN_SAMPLER:
        sampler = ADASYN(k=15, imb_threshold=0.6, ratio=ratio)
    elif sample_type == TOMEK_LINKS:
        sampler = TomekLinks()
    elif sample_type == CLUSTER_CENTROIDS:
        sampler = ClusterCentroids(ratio=ratio)
    elif sample_type == NEARMISS:
        sampler = NearMiss(ratio=ratio)
    else:
        print "Unrecoqnized sample technique: " + sample_type
        print "Returning original data"
        return train_x, train_y
    return sampler.fit_transform(train_x, train_y)
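A hypothetical call to the dispatcher above. The sample-type constants (SMOTE_REG, UNDERSAMPLER, etc.) are module-level names defined elsewhere in the original module, and the dataset below is a stand-in built with scikit-learn:

from sklearn.datasets import make_classification

# Stand-in imbalanced dataset
train_x, train_y = make_classification(n_classes=2, weights=[0.2, 0.8],
                                       n_samples=2000, random_state=42)

# Resample using the random under-sampler branch of the dispatcher
x_res, y_res = __get_sample_transformed_examples(UNDERSAMPLER, train_x,
                                                 train_y, ratio='auto')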
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.datasets import make_classification
from sklearn.decomposition import PCA

from unbalanced_dataset.under_sampling import NearMiss

sns.set()
# Plotting colours used below (exact values assumed; any palette works)
almost_black = '#262626'
palette = sns.color_palette()

# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
                           n_informative=3, n_redundant=1, flip_y=0,
                           n_features=20, n_clusters_per_class=1,
                           n_samples=5000, random_state=10)

# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform X to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)

# Apply NearMiss-1 under-sampling
nm1 = NearMiss(version=1)
X_resampled, y_resampled = nm1.fit_transform(X, y)
X_res_vis = pca.transform(X_resampled)

# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)

ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
            edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
            edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')

ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
            label="Class #0", alpha=.5, edgecolor=almost_black,
            facecolor=palette[0], linewidth=0.15)
from unbalanced_dataset.under_sampling import NearMiss

# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
                           n_informative=3, n_redundant=1, flip_y=0,
                           n_features=20, n_clusters_per_class=1,
                           n_samples=5000, random_state=10)

# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform X to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)

# Apply NearMiss-2 under-sampling
nm2 = NearMiss(version=2)
X_resampled, y_resampled = nm2.fit_transform(X, y)
X_res_vis = pca.transform(X_resampled)

# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)

ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
            edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
            edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')

ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
            label="Class #0", alpha=.5, edgecolor=almost_black,
            facecolor=palette[0], linewidth=0.15)