def basic():
    """Train the neural net on SMOTE-resampled data and log results to CSV."""
    train, test = preprocessing.prepare_data(True)
    with open('nn_resultsfeaturedrop_nosmote.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        # add header
        split_train, split_labels = preprocessing.split_labels(train)
        split_train, split_labels = preprocessing.apply_smote(
            split_train, split_labels)
        nn_predict(split_train, split_labels, test, writer, {})
def basic():
    """Drop the Amount column, then train the neural net without and with SMOTE."""
    train, test = preprocessing.prepare_data(True)
    train = train.drop('Amount', axis=1)
    test = test.drop('Amount', axis=1)
    with open('nn_results.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        # without SMOTE
        split_train, split_labels = preprocessing.split_labels(train)
        nn_predict(split_train, split_labels, test, writer)
        # with SMOTE
        split_train, split_labels = preprocessing.apply_smote(
            split_train, split_labels)
        nn_predict(split_train, split_labels, test, writer)
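# Both basic() variants lean on a shared `preprocessing` module whose interface is
# only implied above. The helpers below are a minimal sketch of what it might look
# like, assuming a pandas DataFrame whose last column is the class label and SMOTE
# from imbalanced-learn; the file name and the function bodies are illustrative
# assumptions, not the project's actual implementation.
import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn import model_selection


def split_labels(frame):
    # Assumed behaviour: the last column is the class label, the rest are features.
    return frame.iloc[:, :-1], frame.iloc[:, -1]


def apply_smote(features, labels):
    # Oversample the minority class; SMOTE returns new, resampled arrays.
    return SMOTE(random_state=0).fit_resample(features, labels)


def prepare_data(flag):
    # Hypothetical loader; the boolean the callers pass is ignored in this sketch.
    frame = pd.read_csv('creditcard.csv')
    return model_selection.train_test_split(frame, test_size=0.2, random_state=0)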
def lr_predict(train, test, preprocessing_type):
    """Fit and evaluate a logistic regression classifier."""
    # separate class label (last column)
    train, labels = preprocessing.split_labels(train)
    if preprocessing_type == 'smote':
        train, labels = preprocessing.apply_smote(train, labels)

    classifier = linear_model.LogisticRegression()
    validation.cross_validate(classifier, train, labels)
    classifier.fit(train, labels)

    # test
    test, test_labels = preprocessing.split_labels(test)
    validation.test(classifier, test, test_labels)
def svm_predict(train, test, preprocessing_type):
    """Fit and evaluate an RBF-kernel support vector machine."""
    # separate class label (last column)
    train, labels = preprocessing.split_labels(train)
    if preprocessing_type == 'smote':
        train, labels = preprocessing.apply_smote(train, labels)

    # Classifier
    # Class weight parameter: weights the positive class more strongly than the negative class.
    # class_weight={1: 2.61, 0: 0.383}
    classifier = svm.SVC(kernel='rbf')
    validation.cross_validate(classifier, train, labels)
    classifier.fit(train, labels)

    # test
    test, test_labels = preprocessing.split_labels(test)
    validation.test(classifier, test, test_labels)
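# If the skew compensation mentioned in the comment is wanted, the weights can be
# passed straight to the constructor. A one-line variant using the values from the
# comment above; whether those exact weights are appropriate depends on the class
# ratio in the data.
classifier = svm.SVC(kernel='rbf', class_weight={1: 2.61, 0: 0.383})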
def rf_predict(train, test, preprocessing_type, results_file):
    """Fit a random forest, evaluate it, and append the metrics to results_file."""
    # separate class label
    train, labels = preprocessing.split_labels(train)
    if preprocessing_type == 'smote':
        train, labels = preprocessing.apply_smote(train, labels)

    classifier = ensemble.RandomForestClassifier(
        class_weight={0: 0.75, 1: 1.5},
        min_samples_split=40,
        n_estimators=15)
    classifier.fit(train, labels)
    vresult = validation.cross_validate(classifier, train, labels)

    # test
    test, test_labels = preprocessing.split_labels(test)
    tresult = validation.test(classifier, test, test_labels)

    # save results: configuration, then validation metrics, then test metrics
    metric_names = ('roc_auc', 'precision', 'recall', 'f1', 'fp', 'fn')
    results = ["low_skew (0=0.75, 1=1.5)", 40, 15]  # class_weight label, min_samples_split, n_estimators
    results.extend(vresult[m] for m in metric_names)
    results.extend(tresult[m] for m in metric_names)
    results_file.writerow(results)
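# rf_predict indexes the return values of validation.cross_validate and
# validation.test by metric name, so both are assumed to return dicts keyed by
# 'roc_auc', 'precision', 'recall', 'f1', 'fp' and 'fn'. The sketch below shows one
# plausible shape for that module, built on scikit-learn; the project's real
# implementation may differ.
from sklearn import metrics, model_selection


def _summarise(labels, predictions):
    # Collect the metrics rf_predict expects, keyed by name. Using hard
    # predictions for ROC AUC is a simplification; probability scores would
    # give a finer-grained estimate.
    tn, fp, fn, tp = metrics.confusion_matrix(labels, predictions).ravel()
    return {
        'roc_auc': metrics.roc_auc_score(labels, predictions),
        'precision': metrics.precision_score(labels, predictions),
        'recall': metrics.recall_score(labels, predictions),
        'f1': metrics.f1_score(labels, predictions),
        'fp': fp,
        'fn': fn,
    }


def cross_validate(classifier, train, labels):
    # Score out-of-fold predictions so the estimate is not optimistic.
    predictions = model_selection.cross_val_predict(classifier, train, labels, cv=5)
    return _summarise(labels, predictions)


def test(classifier, test_features, test_labels):
    return _summarise(test_labels, classifier.predict(test_features))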
# split features (columns 0-29) from the class label (column 30)
spliced_data = data[:, :30]
spliced_target = data[:, 30]
print(spliced_data.shape, spliced_target.shape)
# print(spliced_data)
# print(spliced_target)

xTrain, xTest, yTrain, yTest = model_selection.train_test_split(
    spliced_data, spliced_target, test_size=0.2, random_state=0)

# applying smote to the training split only; the resampled arrays are
# returned, not modified in place, so the result must be reassigned
xTrain, yTrain = preprocessing.apply_smote(xTrain, yTrain)
print(xTrain.shape, yTrain.shape)

print("Data loaded...")
print("Training data")
print(xTrain)
print(yTrain)
print("Verification data")
print(xTest)
print(yTest)
print("length of x " + str(len(xTrain)))
print("length of y " + str(len(yTrain)))
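# The snippet above assumes a NumPy array `data` with 30 feature columns followed by
# the class label in column 30. One plausible way it could have been built, assuming
# the raw data lives in a CSV with a header row (the file name is a placeholder):
import pandas as pd

data = pd.read_csv('data.csv').to_numpy()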