Example #1
    def __init__(self, img, mask, bounding_box=None, gmm_components=5):
        self.img = np.asarray(img, dtype=np.float64)
        self.rows, self.cols = img.shape[0], img.shape[1]

        self.mask = mask
        # Everything inside the user-supplied bounding box starts as probable foreground
        if bounding_box is not None:
            x, y, w, h = bounding_box
            self.mask[y:y + h, x:x + w] = DRAW_PR_FG['val']
        self.classify_pixels()

        # Best number of GMM components K suggested in paper
        self.gmm_components = gmm_components
        self.gamma = 50  # Best gamma suggested in paper formula (5)
        self.beta = 0

        self.bgd_gmm = None
        self.fgd_gmm = None
        self.comp_idxs = np.empty((self.rows, self.cols), dtype=np.uint32)

        self.gc_graph = None
        self.gc_graph_capacity = None           # Edge capacities
        self.gc_source = self.cols * self.rows  # "object" terminal S
        self.gc_sink = self.gc_source + 1       # "background" terminal T
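        # Pixels are flattened row-major, so pixel (r, c) is graph node
        # r * self.cols + c; the two terminals take the next two indices.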
        
        # Compute color differences z_m - z_n between neighbouring pixels;
        # by symmetry, four of the eight directions are enough.
        left_diffr = self.img[:, 1:] - self.img[:, :-1]
        upleft_diffr = self.img[1:, 1:] - self.img[:-1, :-1]
        up_diffr = self.img[1:, :] - self.img[:-1, :]
        upright_diffr = self.img[1:, :-1] - self.img[:-1, 1:]

        # Calculate beta, formula (5) in the paper:
        # beta = 1 / (2 * <||z_m - z_n||^2>), averaged over all neighbouring pairs.
        self.beta = np.sum(np.square(left_diffr)) + np.sum(np.square(upleft_diffr)) \
            + np.sum(np.square(up_diffr)) + np.sum(np.square(upright_diffr))
        self.beta = 1 / (2 * self.beta / (4 * self.cols * self.rows - 3 * self.cols - 3 * self.rows + 2))
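        # The denominator counts the neighbour pairs of an R x C image:
        # R*(C-1) horizontal + (R-1)*C vertical + 2*(R-1)*(C-1) diagonal
        # pairs sum to 4*C*R - 3*C - 3*R + 2.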

        # Smoothness term V, formula (11) in the paper: each edge gets weight
        # gamma * dis(m, n)^-1 * exp(-beta * ||z_m - z_n||^2), where dis(m, n)
        # is 1 for axis-aligned neighbours and sqrt(2) for diagonal ones.
        self.dis_W = self.gamma * np.exp(-self.beta * np.sum(np.square(left_diffr), axis=2))
        self.dis_NW = self.gamma / np.sqrt(2) * np.exp(-self.beta * np.sum(np.square(upleft_diffr), axis=2))
        self.dis_N = self.gamma * np.exp(-self.beta * np.sum(np.square(up_diffr), axis=2))
        self.dis_NE = self.gamma / np.sqrt(2) * np.exp(-self.beta * np.sum(np.square(upright_diffr), axis=2))
            
        # Fit one GaussianMixture to the background pixels and one to the foreground pixels
        self.bgd_gmm = GaussianMixture(self.img[self.bgd_indexes])
        self.fgd_gmm = GaussianMixture(self.img[self.fgd_indexes])
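A minimal usage sketch for the constructor above (hedged: the enclosing class is called GrabCut here purely for illustration, and DRAW_PR_FG is assumed to follow OpenCV's GC_PR_FGD convention; neither name is confirmed by the snippet itself):

import numpy as np

DRAW_PR_FG = {'val': 3}  # assumed "probable foreground" value, as in OpenCV's GC_PR_FGD

img = np.random.randint(0, 256, (120, 160, 3), dtype=np.uint8)  # stand-in RGB image
mask = np.zeros((120, 160), dtype=np.uint8)                     # start as all background
gc = GrabCut(img, mask, bounding_box=(30, 20, 80, 60))          # box given as (x, y, w, h)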
Example #2
    def init_GMMs(self):
        # Re-fit one mixture to the background pixels and one to the foreground pixels
        self.bgd_gmm = GaussianMixture(self.img[self.bgd_indexes])
        self.fgd_gmm = GaussianMixture(self.img[self.fgd_indexes])
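Note that this GaussianMixture is the repository's own class: it is fitted directly on the pixel samples selected by fancy indexing, unlike sklearn's, whose constructor takes only hyperparameters. A hedged sketch of how classify_pixels presumably builds those index arrays (the DRAW_* constants other than DRAW_PR_FG are assumptions):

    def classify_pixels(self):
        # Background pixels: definite or probable background; foreground likewise.
        self.bgd_indexes = np.where((self.mask == DRAW_BG['val']) |
                                    (self.mask == DRAW_PR_BG['val']))
        self.fgd_indexes = np.where((self.mask == DRAW_FG['val']) |
                                    (self.mask == DRAW_PR_FG['val']))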
Example #3
from GMM import GaussianMixture
import numpy as np
import matplotlib.pyplot as plt

gmm = GaussianMixture(2, n_components=3, n_init=10)
X = np.load("samples.npz")["data"]

loss = gmm.fit(X)                   # this custom GMM returns its training loss from fit
print(loss)
gamma = gmm.predict_proba(X)        # responsibilities, shape (n_samples, n_components)
labels = np.argmax(gamma, axis=1)   # hard assignment: most responsible component
plt.scatter(X[:, 0], X[:, 1], c=labels, s=30)
plt.axis('equal')
plt.show()
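For comparison, a sketch of the same pipeline with scikit-learn's mixture class (sklearn's constructor takes no dimensionality argument, so the leading 2 above is presumably specific to this custom GMM module):

from sklearn.mixture import GaussianMixture
import numpy as np

X = np.load("samples.npz")["data"]
gmm = GaussianMixture(n_components=3, n_init=10).fit(X)
gamma = gmm.predict_proba(X)   # responsibilities, shape (n_samples, 3)
labels = gmm.predict(X)        # equivalent to np.argmax(gamma, axis=1)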


Example #4
def train_GaussianMixtures(GMM_model_file, color_space='hsv'):
    [train_set, target_set, ids_set] = load_train_data(color_space)
    print(np.shape(train_set))
    n_classes = len(np.unique(target_set))
    print('-- Number of color classes:', n_classes)
    print('---------------- Training GMM models.... ----------------')

    # for c in np.unique(target_set):
    # print 'class ', c
    # print len(target_set[target_set == c])

    data_set = DATA(train_set, target_set, ids_set)

    # print np.shape(data_set.data)
    # print np.shape(data_set.target)
    # print np.shape(data_set.target)
    # print sum(data_set.target)

    # X_train = data_set.data
    # y_train = data_set.target

    # Break up the dataset into non-overlapping training and testing sets
    # with a 25-fold stratified split; only the first fold is used, so the
    # test set is ~4% of the data.
    skf = StratifiedKFold(n_splits=25)
    # Only take the first fold.
    train_index, test_index = next(skf.split(data_set.data, data_set.target))
    X_train = data_set.data[train_index]
    y_train = data_set.target[train_index]
    X_test = data_set.data[test_index]
    y_test = data_set.target[test_index]

    n_classes = len(np.unique(y_train))

    # Extract X_train, X_test... for each class
    c1_X_train = X_train[y_train == 1]
    c1_X_test = X_test[y_test == 1]
    c1_y_train = y_train[y_train == 1]
    c1_y_test = y_test[y_test == 1]

    c2_X_train = X_train[y_train == 2]
    c2_X_test = X_test[y_test == 2]
    c2_y_train = y_train[y_train == 2]
    c2_y_test = y_test[y_test == 2]

    c3_X_train = X_train[y_train == 3]
    c3_X_test = X_test[y_test == 3]
    c3_y_train = y_train[y_train == 3]
    c3_y_test = y_test[y_test == 3]

    c4_X_train = X_train[y_train == 4]
    c4_X_test = X_test[y_test == 4]
    c4_y_train = y_train[y_train == 4]
    c4_y_test = y_test[y_test == 4]

    c5_X_train = X_train[y_train == 5]
    c5_X_test = X_test[y_test == 5]
    c5_y_train = y_train[y_train == 5]
    c5_y_test = y_test[y_test == 5]

    c6_X_train = X_train[y_train == 6]
    c6_X_test = X_test[y_test == 6]
    c6_y_train = y_train[y_train == 6]
    c6_y_test = y_test[y_test == 6]

    c7_X_train = X_train[y_train == 7]
    c7_X_test = X_test[y_test == 7]
    c7_y_train = y_train[y_train == 7]
    c7_y_test = y_test[y_test == 7]

    # print  'len c2 train:',len(c2_X_train)
    # print 'len c2 test:', len(c2_y_test)

    # Number of components for each class
    c1_comps = 2
    c2_comps = 2
    c3_comps = 2
    c4_comps = 2
    c5_comps = 2
    c6_comps = 3
    c7_comps = 2

    c1_classifier = GaussianMixture(n_components=c1_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c1_kmeans = KMeans(n_clusters=c1_comps, random_state=0).fit(c1_X_train)

    c2_classifier = GaussianMixture(n_components=c2_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c2_kmeans = KMeans(n_clusters=c2_comps, random_state=0).fit(c2_X_train)

    c3_classifier = GaussianMixture(n_components=c3_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c3_kmeans = KMeans(n_clusters=c3_comps, random_state=0).fit(c3_X_train)

    c4_classifier = GaussianMixture(n_components=c4_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c4_kmeans = KMeans(n_clusters=c4_comps, random_state=0).fit(c4_X_train)

    c5_classifier = GaussianMixture(n_components=c5_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c5_kmeans = KMeans(n_clusters=c5_comps, random_state=0).fit(c5_X_train)

    c6_classifier = GaussianMixture(n_components=c6_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c6_kmeans = KMeans(n_clusters=c6_comps, random_state=0).fit(c6_X_train)

    c7_classifier = GaussianMixture(n_components=c7_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c7_kmeans = KMeans(n_clusters=c7_comps, random_state=0).fit(c7_X_train)

    # Since we have class labels for the training data, we can initialize the
    # GaussianMixture means in a supervised manner. fit() re-initialises means_,
    # so the k-means centres must be passed as means_init instead.

    c1_classifier.means_init = np.array([
        c1_X_train[c1_kmeans.labels_ == i].mean(axis=0)
        for i in range(c1_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c1_classifier.fit(c1_X_train)

    # Display the GaussianMixture of c1
    # ax = fig.add_subplot(231, projection='3d')
    # ax.scatter(c1_X_train[c1_y_train_pred==0][:, 0], c1_X_train[c1_y_train_pred==0][:, 1], c1_X_train[c1_y_train_pred==0][:, 2], color='r',label='red')
    # ax.scatter(c1_X_train[c1_y_train_pred==1][:, 0], c1_X_train[c1_y_train_pred==1][:, 1], c1_X_train[c1_y_train_pred==1][:, 2], color='b',label='red')
    # plt.show()

    c2_classifier.means_init = np.array([
        c2_X_train[c2_kmeans.labels_ == i].mean(axis=0)
        for i in range(c2_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c2_classifier.fit(c2_X_train)

    c3_classifier.means_init = np.array([
        c3_X_train[c3_kmeans.labels_ == i].mean(axis=0)
        for i in range(c3_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c3_classifier.fit(c3_X_train)

    c4_classifier.means_init = np.array([
        c4_X_train[c4_kmeans.labels_ == i].mean(axis=0)
        for i in range(c4_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c4_classifier.fit(c4_X_train)

    c5_classifier.means_init = np.array([
        c5_X_train[c5_kmeans.labels_ == i].mean(axis=0)
        for i in range(c5_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c5_classifier.fit(c5_X_train)

    c6_classifier.means_init = np.array([
        c6_X_train[c6_kmeans.labels_ == i].mean(axis=0)
        for i in range(c6_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c6_classifier.fit(c6_X_train)

    c7_classifier.means_init = np.array([
        c7_X_train[c7_kmeans.labels_ == i].mean(axis=0)
        for i in range(c7_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c7_classifier.fit(c7_X_train)

    # If test_mode = 'on', check accuracy of test set
    # if test_mode == 'on':
    # 	c1_X_train = c1_X_test
    # 	c1_y_train = c1_y_test

    c1_X_train_score_on_c1 = np.exp(c1_classifier.score_samples(c1_X_train))
    c1_X_train_score_on_c2 = np.exp(c2_classifier.score_samples(c1_X_train))
    c1_X_train_score_on_c3 = np.exp(c3_classifier.score_samples(c1_X_train))
    c1_X_train_score_on_c4 = np.exp(c4_classifier.score_samples(c1_X_train))
    # Note: yellow (c5) is error-prone here, since red samples are easily scored as yellow.
    c1_X_train_score_on_c5 = np.exp(c5_classifier.score_samples(c1_X_train))
    c1_X_train_score_on_c6 = np.exp(c6_classifier.score_samples(c1_X_train))
    c1_X_train_score_on_c7 = np.exp(c7_classifier.score_samples(c1_X_train))

    # print np.shape(np.array(c1_X_train_score_on_c1[0]))
    # Min of likelihood of c1_score_on_c1 is 8.69e-12
    print(c1_X_train_score_on_c1.min(), c1_X_train_score_on_c1.max())
    print(c1_X_train_score_on_c2.min(), c1_X_train_score_on_c2.max())
    print(c1_X_train_score_on_c3.min(), c1_X_train_score_on_c3.max())
    print(c1_X_train_score_on_c4.min(), c1_X_train_score_on_c4.max())
    print(c1_X_train_score_on_c5.min(), c1_X_train_score_on_c5.max())
    print(c1_X_train_score_on_c6.min(), c1_X_train_score_on_c6.max())
    print(c1_X_train_score_on_c7.min(), c1_X_train_score_on_c7.max())
    # Prepend a row of zeros so the argmax row index matches the class labels [1, 2, ..., 7]
    c1_zeros_row = np.zeros(np.shape(c1_X_train_score_on_c1))

    # c1_y_train_score_matrix = np.array([c1_zeros_row,c1_X_train_score_on_c1,c1_X_train_score_on_c2,c1_X_train_score_on_c3,c1_X_train_score_on_c4,c1_X_train_score_on_c5,c1_X_train_score_on_c6])
    c1_y_train_score_matrix = np.array([
        c1_zeros_row, c1_X_train_score_on_c1, c1_X_train_score_on_c2,
        c1_X_train_score_on_c3, c1_X_train_score_on_c4, c1_X_train_score_on_c5,
        c1_X_train_score_on_c6, c1_X_train_score_on_c7
    ])
    c1_y_train_pred = np.argmax(c1_y_train_score_matrix, axis=0)
    # print c1_y_train_pred, type(c1_y_train_pred), np.shape(c1_y_train_pred)
    # print np.unique(c1_y_train_pred)

    # Train accuracy = 94.81%
    # Test accuracy = 91%

    c1_train_accuracy = np.mean(c1_y_train_pred == c1_y_train) * 100
    print('Training Accuracy: ', c1_train_accuracy)
    # Fraction of c1 samples predicted as each class 1..7
    print([np.mean(c1_y_train_pred == k) for k in range(1, 8)])

    GMM_models = [
        c1_classifier, c2_classifier, c3_classifier, c4_classifier,
        c5_classifier, c6_classifier, c7_classifier
    ]

    # Save the trained GMM_models
    try:
        with open(GMM_model_file, 'wb') as f:
            pickle.dump(GMM_models, f, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Could not save GMM models to the GMM_model file', GMM_model_file, e)
        raise
    print('--------------- Completed Training GMM models! ----------------')
    return GMM_models
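A hedged sketch of consuming the saved models at prediction time (the maximum-likelihood rule mirrors the score matrix built above; the file name is a placeholder):

import pickle
import numpy as np

with open('GMM_models.pkl', 'rb') as f:
    GMM_models = pickle.load(f)

def predict_color_class(X):
    # Row k holds each sample's likelihood under the class-(k+1) model,
    # so adding 1 to the argmax restores the 1-based class labels.
    scores = np.array([np.exp(m.score_samples(X)) for m in GMM_models])
    return np.argmax(scores, axis=0) + 1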
Example #5
def tune_GaussianMixtures(color_space='hsv', test_mode='off'):
    [train_set, target_set, ids_set] = load_train_data(color_space)
    print(np.shape(train_set))
    n_classes = len(np.unique(target_set))
    print('number of classes:', n_classes)

    for c in np.unique(target_set):
        print('class ', c)
        print(len(target_set[target_set == c]))

    # exit()
    # # get red color only
    # red_train = train_set[target_set == 1] #[0:1000]
    # red_target = target_set[target_set == 1]#[0:1000]
    # red_ids = ids_set[target_set == 1] # [0:1000]
    # data_set = DATA(red_train,red_target,red_ids)

    data_set = DATA(train_set, target_set, ids_set)

    print(np.shape(data_set.data))
    print(np.shape(data_set.target))
    print(sum(data_set.target))

    # exit()
    # Break up the dataset into non-overlapping training and testing sets
    # with a 25-fold stratified split; only the first fold is used, so the
    # test set is ~4% of the data.
    skf = StratifiedKFold(n_splits=25)
    # Only take the first fold.
    train_index, test_index = next(skf.split(data_set.data, data_set.target))

    X_train = data_set.data[train_index]
    y_train = data_set.target[train_index]
    X_test = data_set.data[test_index]
    y_test = data_set.target[test_index]

    n_classes = len(np.unique(y_train))

    # Extract X_train, X_test... for each class
    c1_X_train = X_train[y_train == 1]
    c1_X_test = X_test[y_test == 1]
    c1_y_train = y_train[y_train == 1]
    c1_y_test = y_test[y_test == 1]

    c2_X_train = X_train[y_train == 2]
    c2_X_test = X_test[y_test == 2]
    c2_y_train = y_train[y_train == 2]
    c2_y_test = y_test[y_test == 2]

    c3_X_train = X_train[y_train == 3]
    c3_X_test = X_test[y_test == 3]
    c3_y_train = y_train[y_train == 3]
    c3_y_test = y_test[y_test == 3]

    c4_X_train = X_train[y_train == 4]
    c4_X_test = X_test[y_test == 4]
    c4_y_train = y_train[y_train == 4]
    c4_y_test = y_test[y_test == 4]

    c5_X_train = X_train[y_train == 5]
    c5_X_test = X_test[y_test == 5]
    c5_y_train = y_train[y_train == 5]
    c5_y_test = y_test[y_test == 5]

    c6_X_train = X_train[y_train == 6]
    c6_X_test = X_test[y_test == 6]
    c6_y_train = y_train[y_train == 6]
    c6_y_test = y_test[y_test == 6]

    c7_X_train = X_train[y_train == 7]
    c7_X_test = X_test[y_test == 7]
    c7_y_train = y_train[y_train == 7]
    c7_y_test = y_test[y_test == 7]

    print('len c2 train:', len(c2_X_train))
    print('len c2 test:', len(c2_y_test))

    # Number of components for each class
    c1_comps = 2
    c2_comps = 2
    c3_comps = 2
    c4_comps = 2
    c5_comps = 2
    c6_comps = 3
    c7_comps = 2

    # c2_classifiers = dict((covar_type, GaussianMixture(n_components=c2_comps,
    #                     covariance_type=covar_type, init_params='kmeans', max_iter=20))
    #                    for covar_type in ['spherical', 'diag', 'tied','full'])
    # Apply k-means to find the component means for each class
    c1_classifier = GaussianMixture(n_components=c1_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c1_kmeans = KMeans(n_clusters=c1_comps, random_state=0).fit(c1_X_train)

    c2_classifier = GaussianMixture(n_components=c2_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c2_kmeans = KMeans(n_clusters=c2_comps, random_state=0).fit(c2_X_train)

    c3_classifier = GaussianMixture(n_components=c3_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c3_kmeans = KMeans(n_clusters=c3_comps, random_state=0).fit(c3_X_train)

    c4_classifier = GaussianMixture(n_components=c4_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c4_kmeans = KMeans(n_clusters=c4_comps, random_state=0).fit(c4_X_train)

    c5_classifier = GaussianMixture(n_components=c5_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c5_kmeans = KMeans(n_clusters=c5_comps, random_state=0).fit(c5_X_train)

    c6_classifier = GaussianMixture(n_components=c6_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c6_kmeans = KMeans(n_clusters=c6_comps, random_state=0).fit(c6_X_train)

    c7_classifier = GaussianMixture(n_components=c7_comps,
                                    covariance_type='diag',
                                    max_iter=20)
    c7_kmeans = KMeans(n_clusters=c7_comps, random_state=0).fit(c7_X_train)

    # Since we have class labels for the training data, we can initialize the
    # GaussianMixture means in a supervised manner. fit() re-initialises means_,
    # so the k-means centres must be passed as means_init instead.

    c1_classifier.means_init = np.array([
        c1_X_train[c1_kmeans.labels_ == i].mean(axis=0)
        for i in range(c1_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c1_classifier.fit(c1_X_train)

    # Display the GaussianMixture of c1
    # ax = fig.add_subplot(231, projection='3d')
    # ax.scatter(c1_X_train[c1_y_train_pred==0][:, 0], c1_X_train[c1_y_train_pred==0][:, 1], c1_X_train[c1_y_train_pred==0][:, 2], color='r',label='red')
    # ax.scatter(c1_X_train[c1_y_train_pred==1][:, 0], c1_X_train[c1_y_train_pred==1][:, 1], c1_X_train[c1_y_train_pred==1][:, 2], color='b',label='red')
    # plt.show()

    c2_classifier.means_init = np.array([
        c2_X_train[c2_kmeans.labels_ == i].mean(axis=0)
        for i in range(c2_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c2_classifier.fit(c2_X_train)

    c3_classifier.means_init = np.array([
        c3_X_train[c3_kmeans.labels_ == i].mean(axis=0)
        for i in range(c3_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c3_classifier.fit(c3_X_train)

    c4_classifier.means_init = np.array([
        c4_X_train[c4_kmeans.labels_ == i].mean(axis=0)
        for i in range(c4_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c4_classifier.fit(c4_X_train)

    c5_classifier.means_init = np.array([
        c5_X_train[c5_kmeans.labels_ == i].mean(axis=0)
        for i in range(c5_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c5_classifier.fit(c5_X_train)

    c6_classifier.means_init = np.array([
        c6_X_train[c6_kmeans.labels_ == i].mean(axis=0)
        for i in range(c6_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c6_classifier.fit(c6_X_train)

    c7_classifier.means_init = np.array([
        c7_X_train[c7_kmeans.labels_ == i].mean(axis=0)
        for i in range(c7_comps)
    ])
    # Train the other parameters using the EM algorithm.
    c7_classifier.fit(c7_X_train)

    # If test_mode = 'on', check accuracy of test set
    if test_mode == 'on':
        c1_X_train = c1_X_test
        c1_y_train = c1_y_test

    c1_X_train_score_on_c1 = np.exp(c1_classifier.score_samples(c1_X_train))
    c1_X_train_score_on_c2 = np.exp(c2_classifier.score_samples(c1_X_train))
    c1_X_train_score_on_c3 = np.exp(c3_classifier.score_samples(c1_X_train))
    c1_X_train_score_on_c4 = np.exp(c4_classifier.score_samples(c1_X_train))
    # Note: yellow (c5) is error-prone here, since red samples are easily scored as yellow.
    c1_X_train_score_on_c5 = np.exp(c5_classifier.score_samples(c1_X_train))
    c1_X_train_score_on_c6 = np.exp(c6_classifier.score_samples(c1_X_train))
    c1_X_train_score_on_c7 = np.exp(c7_classifier.score_samples(c1_X_train))

    # print np.shape(np.array(c1_X_train_score_on_c1[0]))
    # Min of likelihood of c1_score_on_c1 is 8.69e-12
    print(c1_X_train_score_on_c1.min(), c1_X_train_score_on_c1.max())
    print(c1_X_train_score_on_c2.min(), c1_X_train_score_on_c2.max())
    print(c1_X_train_score_on_c3.min(), c1_X_train_score_on_c3.max())
    print(c1_X_train_score_on_c4.min(), c1_X_train_score_on_c4.max())
    print(c1_X_train_score_on_c5.min(), c1_X_train_score_on_c5.max())
    print(c1_X_train_score_on_c6.min(), c1_X_train_score_on_c6.max())
    print(c1_X_train_score_on_c7.min(), c1_X_train_score_on_c7.max())
    # Prepend a row of zeros so the argmax row index matches the class labels [1, 2, ..., 7]
    c1_zeros_row = np.zeros(np.shape(c1_X_train_score_on_c1))

    # c1_y_train_score_matrix = np.array([c1_zeros_row,c1_X_train_score_on_c1,c1_X_train_score_on_c2,c1_X_train_score_on_c3,c1_X_train_score_on_c4,c1_X_train_score_on_c5,c1_X_train_score_on_c6])
    c1_y_train_score_matrix = np.array([
        c1_zeros_row, c1_X_train_score_on_c1, c1_X_train_score_on_c2,
        c1_X_train_score_on_c3, c1_X_train_score_on_c4, c1_X_train_score_on_c5,
        c1_X_train_score_on_c6, c1_X_train_score_on_c7
    ])
    c1_y_train_pred = np.argmax(c1_y_train_score_matrix, axis=0)
    # print c1_y_train_pred, type(c1_y_train_pred), np.shape(c1_y_train_pred)
    # print np.unique(c1_y_train_pred)

    # Train accuracy = 94.81%
    # Test accuracy = 91%

    c1_train_accuracy = np.mean(c1_y_train_pred == c1_y_train) * 100
    print('Training Accuracy: ', c1_train_accuracy)
    # Fraction of c1 samples predicted as each class 1..7
    print([np.mean(c1_y_train_pred == k) for k in range(1, 8)])

    return [
        c1_classifier, c2_classifier, c3_classifier, c4_classifier,
        c5_classifier, c6_classifier, c7_classifier
    ]
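The commented-out dictionary near the top of this example hints at the intended tuning loop over covariance types. A minimal sketch under that assumption, reusing the example's c1_* variables and scoring each fit by mean held-out log-likelihood (one plausible criterion, not necessarily the author's):

for covar_type in ['spherical', 'diag', 'tied', 'full']:
    gmm = GaussianMixture(n_components=c1_comps,
                          covariance_type=covar_type,
                          max_iter=20,
                          means_init=c1_kmeans.cluster_centers_)
    gmm.fit(c1_X_train)
    print(covar_type, gmm.score(c1_X_test))  # mean log-likelihood on held-out data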