Example 1
 def run(self):
     X_train, X_test, y_train, y_test = data_utils.load_train_test_data(
         self.data_fname)
     train_features, test_features = self.vectorizer.feature_extraction(
         X_train, X_test)
     gbc = GradientBoostingClassifier(n_estimators=self.n_estimators)
     gbc.fit(train_features, y_train)
     print(gbc.score(test_features, y_test))
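
Every example on this page calls two helpers that are not shown here: data_utils.load_train_test_data, which returns a train/test split, and self.vectorizer.feature_extraction, which turns the raw text into feature matrices for scikit-learn. Their real implementations live elsewhere in the project; the following is only a minimal sketch of what that interface might look like, assuming a CSV file with 'text' and 'label' columns and a TF-IDF vectorizer.

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer


def load_train_test_data(data_fname, test_size=0.2):
    # Assumed format: a CSV with 'text' and 'label' columns.
    df = pd.read_csv(data_fname)
    return train_test_split(df['text'], df['label'], test_size=test_size)


class Vectorizer:
    def __init__(self):
        self.tfidf = TfidfVectorizer()

    def feature_extraction(self, X_train, X_test):
        # Fit on the training split only, then transform both splits,
        # so no information leaks from the test set.
        train_features = self.tfidf.fit_transform(X_train)
        test_features = self.tfidf.transform(X_test)
        return train_features, test_features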
Example 2
 def run(self):
     X_train, X_test, y_train, y_test = data_utils.load_train_test_data(
         self.data_fname)
     train_features, test_features = self.vectorizer.feature_extraction(
         X_train, X_test)
     mnb = MultinomialNB()
     mnb.fit(train_features, y_train)
     print(mnb.score(test_features, y_test))
Example 3
 def run(self):
     X_train, X_test, y_train, y_test = data_utils.load_train_test_data(
         self.data_fname)
     train_features, test_features = self.vectorizer.feature_extraction(
         X_train, X_test)
     lr = LogisticRegression()
     lr.fit(train_features, y_train)
     print(lr.score(test_features, y_test))
Example 4
 def run(self):
     X_train, X_test, y_train, y_test = data_utils.load_train_test_data(
         self.data_fname)
     train_features, test_features = self.vectorizer.feature_extraction(
         X_train, X_test)
     svc = SVC()
     svc.fit(train_features, y_train)
     print(svc.score(test_features, y_test))
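
Note that SVC() with its defaults uses an RBF kernel. For the high-dimensional sparse matrices that text vectorizers produce, a linear kernel is the more common choice and trains much faster; scikit-learn provides it as a dedicated estimator:

from sklearn.svm import LinearSVC

svc = LinearSVC()  # linear kernel, well suited to sparse text features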
Example 5
 def run(self):
     X_train, X_test, y_train, y_test = data_utils.load_train_test_data(
         self.data_fname)
     train_features, test_features = self.vectorizer.feature_extraction(
         X_train, X_test)
     perceptron = Perceptron()
     perceptron.fit(train_features, y_train)
     print(perceptron.score(test_features, y_test))
Example 6
 def run(self):
     X_train, X_test, y_train, y_test = data_utils.load_train_test_data(
         self.data_fname)
     train_features, test_features = self.vectorizer.feature_extraction(
         X_train, X_test)
     rf = RandomForestClassifier(n_estimators=self.n_estimators,
                                 criterion=self.criterion)
     rf.fit(train_features, y_train)
     print(rf.score(test_features, y_test))
Example 7
 def run(self):
     X_train, X_test, y_train, y_test = data_utils.load_train_test_data(
         self.data_fname)
     train_features, test_features = self.vectorizer.feature_extraction(
         X_train, X_test)
     abc = AdaBoostClassifier(base_estimator=self.base_estimator,
                              n_estimators=self.n_estimators)
     abc.fit(train_features, y_train)
     print(abc.score(test_features, y_test))
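
One caveat for newer scikit-learn versions: the base_estimator argument used here was deprecated in scikit-learn 1.2 and removed in 1.4, where it is called estimator instead. For example, with a decision stump as the weak learner:

from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier

# scikit-learn >= 1.2 spelling of the same construction
abc = AdaBoostClassifier(estimator=DecisionTreeClassifier(max_depth=1),
                         n_estimators=50)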
Example 8
    def run(self):
        X_train, X_test, y_train, y_test = data_utils.load_train_test_data(
            self.data_fname,
            is_raw=False,
            max_seq_len=self.para['max_seq_len'])
        embedding_matrix = self.vectorizer.get_embedding_matrix()
        max_acc, step, stop_num = 0.0, -1, 0  # best accuracy, best epoch, patience counter

        # Model Training
        model = DAN(embedding_matrix, self.para['hidden_size'])
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=self.para['learning_rate'],
                                     weight_decay=self.para['l2_reg'])
        for i in range(self.para['epoch_num']):
            train_cost, train_acc = 0.0, 0
            for j in range(int(len(X_train) / self.para['batch_size'])):
                data_X = X_train[j * self.para['batch_size']:(j + 1) *
                                 self.para['batch_size']]
                data_y = y_train[j * self.para['batch_size']:(j + 1) *
                                 self.para['batch_size']]
                loss, correct_num = model(data_X, data_y)
                train_cost += loss.item() * self.para['batch_size']
                train_acc += correct_num.item()
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            # Evaluate on the test split without tracking gradients
            test_cost, test_acc = 0.0, 0
            with torch.no_grad():
                for j in range(int(len(X_test) / self.para['batch_size'])):
                    data_X = X_test[j * self.para['batch_size']:(j + 1) *
                                    self.para['batch_size']]
                    data_y = y_test[j * self.para['batch_size']:(j + 1) *
                                    self.para['batch_size']]
                    loss, correct_num = model(data_X, data_y)
                    test_cost += loss.item() * self.para['batch_size']
                    test_acc += correct_num.item()
            print(
                'Epoch %d; Training Loss: %.3f; Training Acc: %.3f. Testing Loss: %.3f; Testing Acc: %.3f'
                % (i, train_cost / X_train.shape[0],
                   train_acc / X_train.shape[0], test_cost / X_test.shape[0],
                   test_acc / X_test.shape[0]))

            if test_acc / X_test.shape[0] > max_acc:
                max_acc = test_acc / X_test.shape[0]
                step = i
                stop_num = 0  # reset patience when accuracy improves
            else:
                stop_num += 1

            if stop_num == self.para['stop_num']:  # early stopping
                break

        print('Best Performance: %.3f at Epoch %d' % (max_acc, step))
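
The DAN class (a Deep Averaging Network) is defined elsewhere in the project. The loop above only pins down its interface: the constructor takes an embedding matrix and a hidden size, and calling the model on a batch returns a loss tensor together with the number of correct predictions. A minimal sketch that satisfies this contract, assuming padded word-index sequences and two label classes, could look like this:

import torch
import torch.nn as nn


class DAN(nn.Module):
    # Deep Averaging Network: average the word embeddings of a sequence,
    # then classify the average with a small feed-forward network.
    def __init__(self, embedding_matrix, hidden_size, num_classes=2):
        super().__init__()
        _, embed_dim = embedding_matrix.shape
        self.embedding = nn.Embedding.from_pretrained(
            torch.as_tensor(embedding_matrix, dtype=torch.float))
        self.mlp = nn.Sequential(nn.Linear(embed_dim, hidden_size),
                                 nn.ReLU(),
                                 nn.Linear(hidden_size, num_classes))
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, data_X, data_y):
        x = torch.as_tensor(data_X, dtype=torch.long)
        y = torch.as_tensor(data_y, dtype=torch.long)
        averaged = self.embedding(x).mean(dim=1)  # average over the sequence
        logits = self.mlp(averaged)
        loss = self.loss_fn(logits, y)
        correct_num = (logits.argmax(dim=1) == y).sum()
        return loss, correct_num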
Example 9
 def run(self):
     X_train, X_test, y_train, y_test = data_utils.load_train_test_data(self.data_fname)
     train_features, test_features = self.vectorizer.feature_extraction(X_train, X_test)
     neigh = KNeighborsClassifier(n_neighbors=self.n_neighbors)
     neigh.fit(train_features, y_train)
     print(neigh.score(test_features, y_test))
Example 10
 def run(self):
     X_train, X_test, y_train, y_test = data_utils.load_train_test_data(self.data_fname)
     train_features, test_features = self.vectorizer.feature_extraction(X_train, X_test)
     dt = DecisionTreeClassifier(criterion=self.criterion)
     dt.fit(train_features, y_train)
     print(dt.score(test_features, y_test))
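
Finally, each snippet on this page is the run method of a runner class in the original project; the class definitions themselves (with the data_fname, vectorizer, and hyperparameter attributes) are not shown. Purely as an illustration, a hypothetical wrapper for the decision tree example above, built on the sketched helpers from the top of this page, might be assembled and invoked like this:

from sklearn.tree import DecisionTreeClassifier


class DecisionTreeRunner:
    # Hypothetical wrapper; attribute names follow the snippets above.
    def __init__(self, data_fname, vectorizer, criterion='gini'):
        self.data_fname = data_fname
        self.vectorizer = vectorizer
        self.criterion = criterion

    def run(self):
        # Same body as Example 10, using the sketched helper functions.
        X_train, X_test, y_train, y_test = load_train_test_data(self.data_fname)
        train_features, test_features = self.vectorizer.feature_extraction(X_train, X_test)
        dt = DecisionTreeClassifier(criterion=self.criterion)
        dt.fit(train_features, y_train)
        print(dt.score(test_features, y_test))


runner = DecisionTreeRunner('reviews.csv', Vectorizer())  # placeholder file name
runner.run()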