Example no. 1
0
    def _train(self):
        """Fit a GaussianNB classifier behind drop/scale/expand/reduce/select
        preprocessing and store the fitted pipeline's predict callable."""
        features = self._train_features
        labels = self._train_outputs

        steps = [
            ('drop', transformers.ColumnDropper(
                columns=(7, 8, 11, 12, 13, 14))),
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=True)),
            # degree=1 keeps the features as-is (no interactions, no bias).
            ('expand', preprocessing.PolynomialFeatures(
                degree=1, interaction_only=False, include_bias=False)),
            ('reduce', decomposition.FastICA(fun='cube', random_state=1742)),
            ('select', feature_selection.SelectKBest(
                k=7, score_func=feature_selection.mutual_info_classif)),
            ('estim', naive_bayes.GaussianNB()),
        ]

        pipe = pipeline.Pipeline(steps)
        pipe.fit(features, labels)
        self._model = pipe.predict
    def _train(self):
        """Fit a NuSVC (RBF kernel) behind drop/scale/select preprocessing."""
        features = self._train_features
        labels = self._train_outputs

        classifier = svm.NuSVC(
            nu=0.0525,
            kernel='rbf',
            gamma='auto',
            shrinking=True,
            class_weight=None,
            random_state=1742,
        )

        pipe = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(
                columns=(0, 3, 5, 14, 26, 35, 40, 65, 72, 95, 99, 104, 124))),
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=True)),
            ('select', feature_selection.SelectPercentile(
                percentile=85,
                score_func=feature_selection.mutual_info_classif)),
            ('estim', classifier),
        ])

        pipe.fit(features, labels)
        self._model = pipe.predict
    def _train(self):
        """Fit a LabelPropagation semi-supervised model and keep both its
        transduced labels and the pipeline's predict callable."""
        features = self._train_features
        labels = self._train_outputs

        propagator = semi_supervised.LabelPropagation(
            kernel='rbf',
            alpha=0.65,
            n_neighbors=4,
            n_jobs=-1,
        )

        pipe = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(
                columns=(0, 3, 5, 14, 26, 35, 40, 65, 72, 95, 99, 104, 124))),
            # Centering only; scaling to unit variance is disabled here.
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=False)),
            ('select', feature_selection.SelectPercentile(
                percentile=54,
                score_func=feature_selection.mutual_info_classif)),
            ('estim', propagator),
        ])

        pipe.fit(features, labels)
        # Labels assigned to the (possibly unlabeled) training points.
        self._transduction = pipe.named_steps['estim'].transduction_
        self._model = pipe.predict
    def _train(self):
        """Fit a feature-selection pipeline ending in a locally-linear
        embedding and store the pipeline's transform callable.

        Fixes relative to the previous revision:

        * Both selection steps were named ``'select'``; duplicate step names
          make ``pipeline.Pipeline`` raise ``ValueError`` at construction —
          the steps are now named uniquely.
        * ``manifold.locally_linear_embedding`` (the *function*, which runs
          immediately and returns an ``(embedding, error)`` tuple) was used
          as a pipeline step; the estimator class
          ``manifold.LocallyLinearEmbedding`` is used instead.
        * The final step is a transformer, so ``pipe.predict`` does not
          exist; the stored model callable is ``pipe.transform``.
        """
        x = self._train_features
        # y is still required: the SelectPercentile / SelectKBest steps are
        # supervised scorers even though the embedding itself is unsupervised.
        y = self._train_outputs

        pipe = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(
                columns=(0, 3, 5, 14, 26, 35, 40, 65, 72, 95, 99, 104, 124)
            )),
            ('scale', preprocessing.StandardScaler(
                with_mean=True,
                with_std=True
            )),
            ('percentile', feature_selection.SelectPercentile(
                percentile=59,
                score_func=feature_selection.mutual_info_classif
            )),
            ('kbest', feature_selection.SelectKBest(
                k=101,
                score_func=feature_selection.f_classif
            )),
            ('embed', manifold.LocallyLinearEmbedding(
                n_neighbors=6,
                n_components=101,
                eigen_solver='auto',
                method='standard'
            )),
        ])

        pipe.fit(x, y)
        self._model = pipe.transform
    def _train(self):
        """Fit a distance-weighted k-nearest-neighbours classifier behind
        drop/scale/select preprocessing."""
        features = self._train_features
        labels = self._train_outputs

        knn = neighbors.KNeighborsClassifier(
            n_neighbors=16,
            weights='distance',
            metric='euclidean',
            n_jobs=-1,
        )

        pipe = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(
                columns=(0, 3, 5, 14, 26, 35, 40, 65, 72, 95, 99, 104, 124))),
            # Centering only; unit-variance scaling is disabled here.
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=False)),
            ('select', feature_selection.SelectPercentile(
                percentile=73,
                score_func=feature_selection.f_classif)),
            ('estim', knn),
        ])

        pipe.fit(features, labels)
        self._model = pipe.predict
Example no. 6
0
    def _train(self):
        """Fit a decision tree on scaled, polynomially-expanded features."""
        features = self._train_features
        labels = self._train_outputs

        steps = [
            ('drop', transformers.ColumnDropper(
                columns=(6, 7, 8, 11, 12, 13, 14))),
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=True)),
            # Full degree-2 expansion (squares and cross terms, no bias).
            ('expand', preprocessing.PolynomialFeatures(
                degree=2, interaction_only=False, include_bias=False)),
            ('select', feature_selection.SelectKBest(
                k=25, score_func=feature_selection.mutual_info_classif)),
            ('estim', tree.DecisionTreeClassifier()),
        ]

        pipe = pipeline.Pipeline(steps)
        pipe.fit(features, labels)
        self._model = pipe.predict
    def _train(self):
        """Fit GaussianNB on ICA-reduced, percentile-selected features."""
        features = self._train_features
        labels = self._train_outputs

        ica = decomposition.FastICA(
            n_components=40,
            fun='exp',
            random_state=1742,
        )

        pipe = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(
                columns=(0, 3, 5, 14, 26, 35, 40, 65, 72, 95, 99, 104, 124))),
            # Centering only; unit-variance scaling is disabled here.
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=False)),
            ('reduce', ica),
            ('select', feature_selection.SelectPercentile(
                percentile=57,
                score_func=feature_selection.mutual_info_classif)),
            ('estim', naive_bayes.GaussianNB()),
        ])

        pipe.fit(features, labels)
        self._model = pipe.predict
Example no. 8
0
    def _train(self):
        """Fit a regularized QDA classifier on centered, degree-2-expanded,
        percentile-selected features."""
        features = self._train_features
        labels = self._train_outputs

        qda = discriminant_analysis.QuadraticDiscriminantAnalysis(
            reg_param=0.0043)

        steps = [
            ('drop', transformers.ColumnDropper(
                columns=(6, 7, 8, 11, 12, 13, 14))),
            # Centering only — unit-variance scaling deliberately off
            # ("this is not a typo" in the original).
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=False)),
            ('expand', preprocessing.PolynomialFeatures(
                degree=2, interaction_only=False, include_bias=False)),
            ('select', feature_selection.SelectPercentile(
                percentile=98, score_func=feature_selection.f_classif)),
            ('estim', qda),
        ]

        pipe = pipeline.Pipeline(steps)
        pipe.fit(features, labels)
        self._model = pipe.predict
Example no. 9
0
    def _train(self):
        """Fit a NearestCentroid classifier on interaction-expanded features.

        Columns 7, 8 and 11-14 are dropped because they duplicate or derive
        from other columns (per the original notes: x14 == x10, x8 == x3,
        x9 == x6^2 - C).
        """
        features = self._train_features
        labels = self._train_outputs

        steps = [
            ('drop', transformers.ColumnDropper(
                columns=(7, 8, 11, 12, 13, 14))),
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=True)),
            # Interaction terms only — no squared features, no bias column.
            ('expand', preprocessing.PolynomialFeatures(
                degree=2, interaction_only=True, include_bias=False)),
            ('select', feature_selection.SelectKBest(
                k=26, score_func=feature_selection.mutual_info_classif)),
            ('estim', neighbors.NearestCentroid(metric='euclidean',
                                                shrink_threshold=None)),
        ]

        pipe = pipeline.Pipeline(steps)
        pipe.fit(features, labels)
        self._model = pipe.predict
Example no. 10
0
    def _train(self):
        """Grid-search scaler/expansion settings for a QDA pipeline and
        store the best estimator's predict callable."""
        features = self._train_features
        labels = self._train_outputs

        pipe = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(
                columns=(6, 7, 8, 11, 12, 13, 14))),
            ('scale', preprocessing.StandardScaler()),
            ('expand', preprocessing.PolynomialFeatures()),
            ('estim', discriminant_analysis.QuadraticDiscriminantAnalysis()),
        ])

        # Hyperparameter space: scaler centering and the shape of the
        # polynomial expansion.
        param_grid = [{
            'scale__with_mean': [True, False],
            'scale__with_std': [True],
            'expand__include_bias': [False, True],
            'expand__interaction_only': [False, True],
            'expand__degree': [1, 2]
        }]

        grid = model_selection.GridSearchCV(
            pipe,
            cv=10,
            n_jobs=1,
            param_grid=param_grid,
            verbose=1,
            scoring=metrics.make_scorer(metrics.accuracy_score),
        )
        grid.fit(features, labels)

        print('Optimal Hyperparametres:')
        print('=======================')
        for step in grid.best_estimator_.steps:
            print(step)
        print("CV Score:", grid.best_score_)

        self._model = grid.predict
    def _train(self):
        """Grid-search percentile selection and QDA regularization, then
        store the best estimator's predict callable.

        Fix relative to the previous revision: the transduction check read
        ``pipe.named_steps['estim']``, but ``GridSearchCV`` fits *clones* of
        the pipeline, so ``pipe`` itself is never fitted and the attribute
        could never be found there. The fitted best estimator is inspected
        instead.
        """
        x = self._train_features
        y = self._train_outputs

        pipe = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(
                columns=(0, 3, 5, 14, 26, 35, 40, 65, 72, 95, 99, 104, 124)
            )),
            ('scale', preprocessing.StandardScaler()),
            ('select', feature_selection.SelectPercentile()),
            ('estim', discriminant_analysis.QuadraticDiscriminantAnalysis()),
        ])

        param_grid = [{
            'scale__with_mean': [True],
            'scale__with_std': [True],

            'select__percentile': [i for i in range(40, 51)],
            'select__score_func': [
                feature_selection.f_classif,
                feature_selection.mutual_info_classif
            ],

            # Sweep reg_param around 0.1 in 0.025 steps.
            'estim__reg_param': [0.1 + 0.025 * i for i in range(-1, 2)]
        }]

        grid = model_selection.GridSearchCV(
            pipe, cv=9, n_jobs=16, param_grid=param_grid, verbose=1,
            scoring=metrics.make_scorer(metrics.accuracy_score),
        )
        grid.fit(x, y)

        print('Optimal Hyperparametres:')
        print('=======================')
        for step in grid.best_estimator_.steps:
            print(step)
        print("CV Score:", grid.best_score_)

        # Inspect the *fitted* best pipeline, not the unfitted template.
        estimator = grid.best_estimator_.named_steps['estim']
        if hasattr(estimator, 'transduction_'):
            self._transduction = estimator.transduction_
        self._model = grid.predict
Example no. 12
0
    def _train(self):
        """Fit a class-balanced NuSVC (RBF kernel) on scaled features."""
        features = self._train_features
        labels = self._train_outputs

        classifier = svm.NuSVC(
            nu=0.19,
            kernel='rbf',
            gamma='auto',
            shrinking=True,
            class_weight='balanced',
            random_state=1742,
        )

        pipe = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(
                columns=(7, 8, 11, 12, 13, 14))),
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=True)),
            ('estim', classifier),
        ])

        pipe.fit(features, labels)
        self._model = pipe.predict
    def _train(self):
        """Train a Keras MLP (softmax over 10 classes) behind a
        drop/scale/select preprocessing pipeline.

        Fixes relative to the previous revision:

        * ``Pipeline.fit`` only forwards fit parameters that are prefixed
          with the step name (``estim__epochs=...``); the unprefixed keyword
          arguments previously used are rejected by scikit-learn.
        * A scikit-learn ``Pipeline`` exposes ``predict`` but not the
          Keras-only ``predict_classes``, so the stored model wraps
          ``pipe.predict`` and recovers hard labels via ``argmax``.

        NOTE(review): ``validation_data`` is fed to Keras untransformed —
        the pipeline's drop/scale/select steps are not applied to
        ``x_val`` — so validation metrics are computed on a different
        feature space than training; confirm whether this is intended.
        """
        x = self._train_features
        y = self._train_outputs

        # Hold out a stratified 10% validation split.
        x, x_val, y, y_val = model_selection.train_test_split(
            x, y,
            train_size=0.90,
            stratify=y,
            random_state=2345
        )

        # One-hot encode labels for the categorical cross-entropy loss.
        y = ku.to_categorical(y, num_classes=10)
        y_val = ku.to_categorical(y_val, num_classes=10)

        model = Sequential()
        model.add(Dense(700, input_dim=128, init="he_uniform", activation="relu"))
        model.add(Dense(350, init="he_uniform", activation="relu"))
        model.add(Dense(10))
        model.add(Activation("softmax"))

        sgd = SGD(lr=0.1, momentum=0.9, decay=0.0, nesterov=False)
        model.compile(loss='categorical_crossentropy', optimizer=sgd,
                      metrics=['accuracy'])
        history = History()

        pipe = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(columns=(0, 3, 5, 14, 26, 35, 40, 65, 72, 95, 99, 104, 124))),
            ('scale', preprocessing.StandardScaler(
                with_mean=True,
                with_std=True
            )),
            ('select', feature_selection.SelectPercentile(
                percentile=59,
                score_func=feature_selection.mutual_info_classif
            )),
            ('estim', model),
        ])

        # Fit parameters destined for a pipeline step must carry the
        # "<step>__" prefix, otherwise Pipeline.fit rejects them.
        pipe.fit(
            x, y,
            estim__epochs=50,
            estim__batch_size=128,
            estim__callbacks=[history],
            estim__verbose=2,
            estim__validation_data=(x_val, y_val),
            estim__shuffle=True,
        )

        # Pipeline has no `predict_classes`; take the argmax of the softmax
        # probabilities returned by `predict` to get hard class labels.
        self._model = lambda data: pipe.predict(data).argmax(axis=1)
    def _train(self):
        """Fit a linear discriminant classifier behind drop/scale/select
        preprocessing."""
        features = self._train_features
        labels = self._train_outputs

        steps = [
            ('drop', transformers.ColumnDropper(
                columns=(0, 3, 5, 14, 26, 35, 40, 65, 72, 95, 99, 104, 124))),
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=True)),
            ('select', feature_selection.SelectPercentile(
                percentile=80,
                score_func=feature_selection.f_classif)),
            ('estim', discriminant_analysis.LinearDiscriminantAnalysis()),
        ]

        pipe = pipeline.Pipeline(steps)
        pipe.fit(features, labels)
        self._model = pipe.predict
    def _train(self):
        """Fit a NearestCentroid classifier on the 101 best F-scored
        features after dropping and scaling."""
        features = self._train_features
        labels = self._train_outputs

        centroid = neighbors.NearestCentroid(
            metric='euclidean',
            shrink_threshold=None,
        )

        pipe = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(
                columns=(0, 3, 5, 14, 26, 35, 40, 65, 72, 95, 99, 104, 124))),
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=True)),
            ('select', feature_selection.SelectKBest(
                k=101,
                score_func=feature_selection.f_classif)),
            ('estim', centroid),
        ])

        pipe.fit(features, labels)
        self._model = pipe.predict
Example no. 16
0
    def _train(self):
        """Fit a Chebyshev-metric, distance-weighted KNN on the 8 best
        F-scored features."""
        features = self._train_features
        labels = self._train_outputs

        knn = neighbors.KNeighborsClassifier(
            n_neighbors=16,
            weights='distance',
            metric='chebyshev',
        )

        steps = [
            ('drop', transformers.ColumnDropper(
                columns=(7, 8, 11, 12, 13, 14))),
            ('scale', preprocessing.StandardScaler(with_mean=True,
                                                   with_std=True)),
            # degree=1 keeps features unchanged (no interactions, no bias).
            ('expand', preprocessing.PolynomialFeatures(
                degree=1, interaction_only=False, include_bias=False)),
            ('select', feature_selection.SelectKBest(
                k=8, score_func=feature_selection.f_classif)),
            ('estim', knn),
        ]

        pipe = pipeline.Pipeline(steps)
        pipe.fit(features, labels)
        self._model = pipe.predict
Example no. 17
0
    def _train(self):
        """Grid-search a soft-voting ensemble (KNN + QDA + dummy baseline),
        each member wrapped in its own preprocessing sub-pipeline, and store
        the best estimator's predict callable.

        The ``param_grid`` keys address steps by their exact names
        (``estim__knn__estim__...`` etc.), so step names must not change.
        """
        x = self._train_features
        y = self._train_outputs

        pipe = pipeline.Pipeline([
            ('drop',
             transformers.ColumnDropper(columns=(6, 7, 8, 11, 12, 13, 14))),
            (
                'estim',
                ensemble.VotingClassifier(estimators=[
                    # Member 1: KNN on standardized features (degree-1
                    # expansion leaves the features unchanged).
                    (
                        'knn',
                        pipeline.Pipeline([
                            ('scale',
                             preprocessing.StandardScaler(with_mean=True,
                                                          with_std=True)),
                            ('expand',
                             preprocessing.PolynomialFeatures(
                                 degree=1,
                                 interaction_only=False,
                                 include_bias=False)),
                            #('select', feature_selection.SelectPercentile(score_func=feature_selection.f_classif)),
                            ('estim', neighbors.KNeighborsClassifier())
                        ])),
                    # Member 2: QDA on centered (not scaled), degree-2
                    # expanded, percentile-selected features.
                    ('qda',
                     pipeline.Pipeline(
                         [('scale',
                           preprocessing.StandardScaler(
                               with_mean=True, with_std=False)),
                          ('expand',
                           preprocessing.PolynomialFeatures(
                               degree=2,
                               interaction_only=False,
                               include_bias=False)),
                          ('select',
                           feature_selection.SelectPercentile(
                               score_func=feature_selection.f_classif)),
                          ('estim',
                           discriminant_analysis.QuadraticDiscriminantAnalysis(
                           ))])),
                    # Member 3: majority-class dummy baseline.
                    ('dummy',
                     pipeline.Pipeline([
                         ('estim', dummy.DummyClassifier()),
                     ])),
                ]))
        ])

        # Single-value lists pin previously tuned hyperparameters; the
        # commented-out entries record the ranges that were explored.
        param_grid = [{
            #'estim__knn__select__percentile': [i for i in range(5, 8)],
            'estim__knn__estim__n_neighbors': [i for i in range(5, 6)],
            'estim__knn__estim__weights': ['distance'],
            #'estim__knn__estim__metric': ['manhattan', 'euclidean', 'chebyshev'],
            'estim__knn__estim__metric': ['euclidean'],
            'estim__qda__select__percentile': [i for i in range(94, 95)],
            #'estim__qda__estim__reg_param': [0.052 + 0.001 * i for i in range(-5, 6)],
            'estim__qda__estim__reg_param': [0.052],
            'estim__dummy__estim__strategy': ['most_frequent'],
            'estim__dummy__estim__random_state': [1742],
            'estim__voting': ['soft'],
            'estim__weights': [[8, 8, 5]]
            #'estim__weights': list(itertools.product(
            #    [7.2 + 0.05 * i for i in range(-5, 6)],
            #    [7.2 + 0.05 * i for i in range(-5, 6)],
            #    [4.5 + 0.05 * i for i in range(-5, 6)]
            #))
        }]

        grid = model_selection.GridSearchCV(
            pipe,
            cv=20,
            n_jobs=4,
            param_grid=param_grid,
            verbose=1,
            scoring=metrics.make_scorer(metrics.accuracy_score),
        )
        grid.fit(x, y)

        # Report the winning configuration, descending into the voting
        # ensemble to print each member's fitted sub-pipeline.
        print('Optimal Hyperparametres:')
        print('=======================')
        for name, step in grid.best_estimator_.steps:
            if name == 'estim':
                for (name2, _), estim2 in zip(step.estimators,
                                              step.estimators_):
                    print('  ', name2)
                    for name3, step3 in estim2.steps:
                        print('    ', step3)
                print('Weights:', step.voting, step.weights)
            else:
                print(step)
        print("CV Score:", grid.best_score_)

        self._model = grid.predict
Example no. 18
0
    def _train(self):
        """Train a two-stage hierarchical classifier.

        Stage 1 separates class 2 from classes {0, 1} (which are merged into
        a single label); stage 2 separates class 0 from class 1 using only
        the samples whose true label is 0 or 1. Prediction is delegated to
        ``self._hierarchical_model``, which is defined elsewhere and is
        expected to combine the two fitted classifiers.
        """
        # Train to distinguish class 2 first!
        x = self._train_features
        y = self._train_outputs

        # Labels for stage 1: collapse class 1 into class 0, so the task
        # becomes the binary problem {0 or 1} vs 2.
        ywith02 = y.copy()
        ywith02[ywith02 == 1] = 0

        # Samples/labels for stage 2: only the rows whose label is 0 or 1.
        ywith01 = y[y < 2]
        xwith01 = x[y < 2]

        # First classifier: take out class 2.
        self._classifier1 = pipeline.Pipeline([
            ('drop',
             transformers.ColumnDropper(columns=(6, 7, 8, 11, 12, 13, 14))),
            (
                'scale',
                preprocessing.StandardScaler(
                    with_mean=True,
                    with_std=True  # this is not a typo!
                )),
            ('expand',
             preprocessing.PolynomialFeatures(degree=2,
                                              interaction_only=False,
                                              include_bias=False)),
            ('select',
             feature_selection.SelectKBest(
                 k=25, score_func=feature_selection.mutual_info_classif)),
            ('estim',
             discriminant_analysis.QuadraticDiscriminantAnalysis(
                 reg_param=0.0043))
        ])

        # Fit on all samples, with y=1 relabeled as y=0.
        self._classifier1.fit(x, ywith02)

        # Second classifier: 0 vs 1 on the reduced sample set.
        # NOTE(review): this stage drops columns (8, 9, 12, 13, 14, 15),
        # unlike stage 1's (6, 7, 8, 11, 12, 13, 14) — confirm intentional.
        self._classifier2 = pipeline.Pipeline([
            ('drop', transformers.ColumnDropper(columns=(8, 9, 12, 13, 14,
                                                         15))),
            (
                'scale',
                preprocessing.StandardScaler(
                    with_mean=True,
                    with_std=True  # this is not a typo!
                )),
            ('expand',
             preprocessing.PolynomialFeatures(degree=2,
                                              interaction_only=False,
                                              include_bias=False)),
            ('select',
             feature_selection.SelectKBest(
                 k=25, score_func=feature_selection.mutual_info_classif)),
            ('estim',
             discriminant_analysis.QuadraticDiscriminantAnalysis(
                 reg_param=0.0043))
        ])

        self._classifier2.fit(xwith01, ywith01)

        # Combined predictor (defined outside this method).
        self._model = self._hierarchical_model