Code Example #1
# imports assumed by this snippet; error_metric appears to be sklearn's
# precision_recall_fscore_support (it is unpacked into four values below)
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as error_metric

# `data` is assumed to be a DataFrame, loaded earlier, with an 'Activity' target column
# Encode the target variable as integer class labels
le = LabelEncoder()
le.fit(data['Activity'])
data['Activity'] = le.transform(data['Activity'])

X = data.drop('Activity', axis=1)
y = data['Activity']

# split the dataset into train and test
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=40)

# Baseline model
classifier = SVC()
clf = classifier.fit(X_train, y_train)
y_pred = clf.predict(X_test)

# error_metric returns precision, recall, f-score and support;
# the second value is recall, not accuracy
precision, recall, f_score, _ = error_metric(y_test,
                                             y_pred,
                                             average='weighted')
print(precision)
print(recall)
print(f_score)

model1_score = accuracy_score(y_test, y_pred)
print(model1_score)
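# Not part of the original snippet: classification_report prints the same
# metrics broken down per activity class, which is useful for a multi-class
# target like this one.
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred, target_names=[str(c) for c in le.classes_]))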

# --------------
# importing libraries
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC

# Feature selection using Linear SVC (the l1 penalty drives the weights of
# uninformative features to zero)
lsvc = LinearSVC(C=0.01, penalty="l1", dual=False)
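# Not part of the original snippet (it is cut off above): a minimal sketch of
# how the feature selection typically continues with SelectFromModel, keeping
# only the features given non-zero weights by the l1-penalised LinearSVC.
# (classifier_2 / model2_score are illustrative names; new_train_features /
# new_test_features match the names used in Code Example #4 below.)
lsvc.fit(X_train, y_train)
model = SelectFromModel(lsvc, prefit=True)
new_train_features = model.transform(X_train)
new_test_features = model.transform(X_test)

# Retrain the baseline SVC on the reduced feature set and score it
classifier_2 = SVC()
clf_2 = classifier_2.fit(new_train_features, y_train)
y_pred_2 = clf_2.predict(new_test_features)
model2_score = accuracy_score(y_test, y_pred_2)
print(model2_score)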
Code Example #2
File: code.py  Project: PayalAg4/ga-learner-dsmp-repo
# imports assumed by this snippet; error_metric appears to be sklearn's
# precision_recall_fscore_support
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as error_metric

# split the dataset into train and test
X = data.drop(['Activity'], axis=1)
y = data['Activity']

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.3,
                                                    random_state=40)

# Baseline model
classifier = SVC()
clf = classifier.fit(X_train, y_train)
y_pred = clf.predict(X_test)

# compute the weighted metrics once and unpack them, rather than calling
# error_metric three times
precision, recall, f_score, _ = error_metric(y_test, y_pred, average='weighted')

model1_score = accuracy_score(y_test, y_pred)

print("Accuracy SVC : ", model1_score)
print("Precision SVC : ", precision)
print("Recall SVC : ", recall)
print("F1 Score SVC : ", f_score)

# --------------
# importing libraries
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import f1_score
Code Example #3
# imports assumed by this snippet; error_metric appears to be sklearn's
# precision_recall_fscore_support
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import precision_recall_fscore_support as error_metric

# Encoding the target variable
le = LabelEncoder()
data['Activity'] = le.fit_transform(data['Activity'])

# split the dataset into train and test
X = data.drop('Activity', axis=1)
y = data['Activity']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=40)

# Baseline model
classifier = SVC()
clf = classifier.fit(X_train, y_train)
y_pred = clf.predict(X_test)

precision, recall, f_score, support = error_metric(y_test, y_pred, average='weighted')

# mean accuracy on the held-out test set
model1_score = classifier.score(X_test, y_test)

print('precision', precision)
print('recall', recall)
print('f1_score', f_score)
print('score', model1_score)
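# Not part of the original snippet: because the target was label-encoded,
# le.inverse_transform can map the integer predictions back to the original
# activity names for inspection.
print(le.inverse_transform(y_pred[:5]))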
Code Example #4
# imports assumed by this snippet; error_metric appears to be sklearn's
# precision_recall_fscore_support
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as error_metric

# Grid search over SVC hyperparameters (the snippet is truncated before this
# call; wrapping an SVC() estimator here is an assumption)
selector = GridSearchCV(SVC(),
                        param_grid={
                            'kernel': ['linear', 'rbf'],
                            'C': [100, 20, 1, 0.1]
                        },
                        scoring='accuracy')

# new_train_features / new_test_features are the reduced feature matrices
# from the earlier feature-selection step (not shown in this excerpt)
selector.fit(new_train_features, y_train)
print(selector.best_params_)
print(selector.cv_results_)

# Inspect the cross-validation results used to select the best hyperparameters
means = selector.cv_results_['mean_test_score']
stds = selector.cv_results_['std_test_score']
parameters = selector.cv_results_['params']
print(means)
print(stds)
print(parameters)
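# Not part of the original snippet: a compact way to read the same results,
# pairing each parameter combination with its mean and std CV accuracy.
for mean, std, params in zip(means, stds, parameters):
    print("%.3f (+/-%.3f) for %r" % (mean, std, params))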

classifier_3 = SVC(kernel='rbf', C=100)

# Model building after Hyperparameter tuning
clf_3 = classifier_3.fit(new_train_features, y_train)
y_pred_final = clf_3.predict(new_test_features)
model3_score = accuracy_score(y_test, y_pred_final)
# evaluate the tuned model on its own predictions (y_pred_final, not the
# baseline y_pred)
precision, recall, f_score, _ = error_metric(y_test,
                                             y_pred_final,
                                             average='weighted')
print(precision)
print(recall)
print(f_score)
print(model3_score)
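# Not part of the original snippet: GridSearchCV refits the best parameter
# combination on the whole training set by default (refit=True), so the tuned
# model can also be taken straight from the search object.
# (best_clf / model3_score_alt are illustrative names.)
best_clf = selector.best_estimator_
model3_score_alt = accuracy_score(y_test, best_clf.predict(new_test_features))
print(model3_score_alt)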