def predict(test_data, test_labels):
    # Run the trained CNN estimator over the test set in a single pass, without shuffling.
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": test_data}, num_epochs=1, shuffle=False)
    result = cnn_classifier.predict(predict_input_fn)
    #print("result:", list(result))
    # Collect the predicted class of every test example, then score against the true labels.
    predict_labels = []
    for l in list(result):
        #print(l['classes'])
        predict_labels.append(l['classes'])
    helper.evaluation(predict_labels, test_labels)
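# `help.evaluation` / `helper.evaluation` is defined in a helper module that is not shown in
# this section. As a rough sketch of what it is assumed to do (compare predicted labels
# against ground truth and print summary metrics), it might look like the following; the
# function name and metric choices are illustrative assumptions, not the repo's code:
def evaluation_sketch(predictions, labels):
    import numpy as np
    from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
    # Flatten both inputs so lists, column vectors, and 1-D arrays are all handled the same way.
    predictions = np.asarray(predictions).reshape(-1)
    labels = np.asarray(labels).reshape(-1)
    print("accuracy:", accuracy_score(labels, predictions))
    print("confusion matrix:\n", confusion_matrix(labels, predictions))
    print(classification_report(labels, predictions))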
def decisionTree():
    #x, y = help.generateData()
    #train_x, test_x, train_y, test_y = train_test_split(x, y, test_size = 0.33)
    train_x, test_x, train_y, test_y = help.generateTrainTest(preload=False, win_size=8)
    ## use a decision tree to classify the dataset
    classifier = tree.DecisionTreeClassifier(criterion='gini')
    classifier.fit(train_x, train_y)
    # persist the fitted model, then reload it to check the round trip before predicting
    joblib.dump(classifier, 'decisionTree.pkl')
    classifier = joblib.load('decisionTree.pkl')
    predictions = classifier.predict(test_x)
    help.evaluation(predictions, test_y)
def DNN():
    # get data
    # x, y = help.generateData()
    # train_x, test_x, train_y, test_y = train_test_split(x, y, test_size = 0.33, random_state = 42)
    train_x, test_x, train_y, test_y = help.generateTrainTest(preload=False, win_size=8)
    train_y = train_y.astype(int)
    test_y = test_y.astype(int)
    feature_columns = [tf.contrib.layers.real_valued_column("", dimension=1440)]
    # two 100-unit hidden layers
    classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                                hidden_units=[100, 100],
                                                n_classes=4)
    classifier.fit(x=train_x, y=train_y, steps=2000, batch_size=100)
    predictions = list(classifier.predict(test_x, as_iterable=True))
    help.evaluation(predictions, test_y)
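# Note: tf.contrib.learn.DNNClassifier above is deprecated in later TensorFlow 1.x releases.
# A rough equivalent with the tf.estimator API might look like the sketch below, assuming the
# same help.generateTrainTest() split and 1440-dimensional feature vectors (this is an
# illustrative sketch, not the repo's code):
def DNN_estimator_sketch():
    train_x, test_x, train_y, test_y = help.generateTrainTest(preload=False, win_size=8)
    # cast features to float32 and labels to int, which is what the estimator expects
    train_x = train_x.astype('float32')
    test_x = test_x.astype('float32')
    train_y = train_y.astype(int)
    test_y = test_y.astype(int)
    feature_columns = [tf.feature_column.numeric_column("x", shape=[1440])]
    classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=[100, 100],
                                            n_classes=4)
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_x}, y=train_y, batch_size=100, num_epochs=None, shuffle=True)
    classifier.train(input_fn=train_input_fn, steps=2000)
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_x}, num_epochs=1, shuffle=False)
    predictions = [p['class_ids'][0] for p in classifier.predict(input_fn=test_input_fn)]
    help.evaluation(predictions, test_y)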
def LogisticClassifier():
    #x, y = help.generateData()
    #train_x, test_x, train_y, test_y = train_test_split(x, y, test_size = 0.33, random_state = 42)
    train_x, test_x, train_y, test_y = help.generateTrainTest(preload=False, win_size=0.6)
    ## use logistic regression to classify the dataset
    classifier = LogisticRegression(class_weight='balanced', multi_class='multinomial', solver='lbfgs')
    classifier.fit(train_x, train_y)
    # persist the fitted model, then reload it before predicting
    joblib.dump(classifier, 'logistic.pkl')
    classifier = joblib.load('logistic.pkl')
    predictions = classifier.predict(test_x)
    help.evaluation(predictions, test_y)
def SVM():
    #x, y = help.generateData()
    #train_x, test_x, train_y, test_y = train_test_split(x, y, test_size = 0.33, random_state = 42)
    train_x, test_x, train_y, test_y = help.generateTrainTest(preload=False, win_size=8)
    ## use SVM to classify the dataset
    # poly is the best kernel for this case; 'ovr' means one-vs-rest
    classifier = svm.SVC(C=0.8, kernel='poly', gamma=20, decision_function_shape='ovr')
    classifier.fit(train_x, train_y)
    # persist the fitted model, then reload it before predicting
    joblib.dump(classifier, 'svm.pkl')
    classifier = joblib.load('svm.pkl')
    predictions = classifier.predict(test_x)
    help.evaluation(predictions, test_y)
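# The C / kernel / gamma values above look hand-tuned; a small grid search over the same
# training split could back up the "poly is the best kernel" observation. The parameter grid
# below is an illustrative assumption, not values from the repo:
def SVM_grid_search_sketch():
    from sklearn.model_selection import GridSearchCV
    train_x, test_x, train_y, test_y = help.generateTrainTest(preload=False, win_size=8)
    param_grid = {
        'C': [0.1, 0.8, 1.0, 10.0],
        'kernel': ['poly', 'rbf'],
        'gamma': [1, 10, 20],
    }
    # 3-fold cross-validated search over the candidate hyperparameters
    search = GridSearchCV(svm.SVC(decision_function_shape='ovr'), param_grid, cv=3)
    search.fit(train_x, train_y)
    print("best params:", search.best_params_)
    print("best cross-validation score:", search.best_score_)
    help.evaluation(search.best_estimator_.predict(test_x), test_y)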
step = 0
if sys.argv[1] == "-train":
    while step < max_steps:
        # feed one training example per step, cycling through the training set
        sess.run(
            [train_op],
            feed_dict={
                x: train_x[step % train_x.shape[0]],
                y: train_y[step % train_y.shape[0]],
            })
        # every 20 steps, evaluate on the test set and save the current model
        if step % 20 == 0:
            print(sess.run(accuracy, feed_dict={
                x: test_x,
                y: test_y,
            }))
            saver.save(sess, './model/rnn/my.ckpt', global_step=tf.train.get_global_step())
        step += 1
elif sys.argv[1] == "-test":
    help.evaluation(
        sess.run(outputlabel, feed_dict={
            x: test_x,
            y: test_y,
        }),
        test_y.reshape(-1))
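# Usage of this driver (the script name below is illustrative; substitute the actual RNN script):
#   python rnn.py -train   # train, printing test accuracy and saving a checkpoint every 20 steps
#   python rnn.py -test    # run the model on the test set and score it with help.evaluation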