# SVM classifier on lexicon features; sys.argv[1] is passed to read_lexicon as an integer parameter.
import sys

from sklearn import svm
from sklearn.metrics import accuracy_score


def main():
    # read_lexicon and binary_stars are project helpers defined elsewhere.
    data_train, data_test, stars_train, stars_test = read_lexicon(int(sys.argv[1]))
    print("Training size: %d" % len(data_train))
    if "-b" in sys.argv:
        # Optional -b flag: collapse the 1-5 star labels into two classes.
        stars_train, stars_test = binary_stars(stars_train, stars_test)
    clf = svm.SVC()
    clf.fit(data_train, stars_train)
    predictions = clf.predict(data_test)
    print("Accuracy: %f\n" % accuracy_score(stars_test, predictions))
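binary_stars itself is not shown in this section; the following is a minimal sketch of what it presumably does, assuming it collapses the 1-5 star labels into two classes with a hypothetical 4-star cutoff:

def binary_stars(stars_train, stars_test, threshold=4):
    # Hypothetical reconstruction of the project helper; the real cutoff may differ.
    def to_binary(stars):
        return [1 if s >= threshold else 0 for s in stars]
    return to_binary(stars_train), to_binary(stars_test)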
# k-nearest-neighbours classifier on the latitude/longitude attributes.
import sys

from sklearn import neighbors
from sklearn.metrics import accuracy_score


def main():
    # get_datasets and binary_stars are project helpers defined elsewhere.
    data_train, data_test, stars_train, stars_test = get_datasets(
        attrs=["latitude", "longitude"])
    print("Training size: %d" % len(data_train))
    knn = neighbors.KNeighborsClassifier(n_neighbors=int(sys.argv[1]))
    if "-b" in sys.argv:
        stars_train, stars_test = binary_stars(stars_train, stars_test)
    knn.fit(data_train, stars_train)
    predictions = knn.predict(data_test)
    print("Accuracy: %f\n" % accuracy_score(stars_test, predictions))
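The neighbour count and the optional -b flag both come from the command line; a hypothetical invocation (the script name is assumed) is "python knn_location.py 9 -b", which fits a 9-nearest-neighbour classifier on latitude/longitude and scores it against binarized star labels.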
# Logistic regression on the full attribute set returned by get_datasets.
import sys

from sklearn import linear_model
from sklearn.metrics import accuracy_score


def main():
    data_train, data_test, stars_train, stars_test = get_datasets()
    print("Training size: %d" % len(data_train))
    if "-b" in sys.argv:
        stars_train, stars_test = binary_stars(stars_train, stars_test)
    logreg = linear_model.LogisticRegression()
    logreg.fit(data_train, stars_train)
    predictions = logreg.predict(data_test)
    print("Accuracy: %f\n" % accuracy_score(stars_test, predictions))
    print("Coefficients for each class and each attribute:\n")
    print(logreg.coef_)
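logreg.coef_ has one row per class and one column per attribute; a small self-contained sketch on synthetic data (the feature names are hypothetical) shows how to read it:

import numpy as np
from sklearn import linear_model

X = np.random.rand(100, 3)                        # synthetic data, 3 features
y = np.random.randint(1, 6, size=100)             # star labels 1-5
logreg = linear_model.LogisticRegression().fit(X, y)
feature_names = ["latitude", "longitude", "review_count"]   # hypothetical names
for star_class, row in zip(logreg.classes_, logreg.coef_):
    # One coefficient per attribute for each star class.
    print(star_class, dict(zip(feature_names, np.round(row, 3))))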
# SVM classifier with an optional comma-separated list of business types on the
# command line and an optional -b flag for binary labels.
import sys

from sklearn import svm
from sklearn.metrics import accuracy_score


def main():
    types = None
    args = list(sys.argv)           # work on a copy so stripping the flag leaves sys.argv intact
    binary = "-b" in args
    if binary:
        args.remove("-b")
    if len(args) > 1:
        types = args[1].split(',')  # comma-separated list of types to filter on
    data_train, data_test, stars_train, stars_test = get_datasets(types=types)
    print("Training size: %d" % len(data_train))
    if binary:
        stars_train, stars_test = binary_stars(stars_train, stars_test)
    clf = svm.SVC()
    clf.fit(data_train, stars_train)
    predictions = clf.predict(data_test)
    print("Accuracy: %f\n" % accuracy_score(stars_test, predictions))
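get_datasets is a project helper that is not shown here; the sketch below is a hypothetical reconstruction of its shape (the records.json path, field names, default attributes, and 80/20 split are all assumptions), built on sklearn's train_test_split so the four return values line up with the unpacking above:

import json

from sklearn.model_selection import train_test_split


def get_datasets(attrs=None, types=None, test_size=0.2):
    # Hypothetical reconstruction: read one JSON record per line, keep the requested
    # attribute columns, and return train/test splits of features and star labels.
    # The real default attribute set presumably yields the 15 features used by the
    # neural-network script (NFEATURES = 15).
    attrs = attrs or ["latitude", "longitude"]
    data, stars = [], []
    with open("records.json") as f:              # assumed data file, one JSON object per line
        for line in f:
            record = json.loads(line)
            if types is not None and record.get("type") not in types:
                continue
            data.append([record[a] for a in attrs])
            stars.append(record["stars"])
    return train_test_split(data, stars, test_size=test_size)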
# Evaluate a previously trained and saved TensorFlow model (TF 1.x API).
import numpy as np
import tensorflow as tf

BINARY_MODE = True
n_classes = 5
NFEATURES = 15


def neural_net(data_test, stars_test):
    # Restore the saved graph and weights, then score the model on the test set.
    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph(META_PATH)
        new_saver.restore(sess, MODEL_PATH)
        graph = tf.get_default_graph()
        x = graph.get_tensor_by_name("x:0")
        y = graph.get_tensor_by_name("y:0")
        op_to_restore = graph.get_tensor_by_name("output_op:0")
        correct = tf.equal(tf.argmax(op_to_restore, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Test size:', len(stars_test))
        print('Accuracy:', accuracy.eval({x: data_test, y: stars_test}))


def convert_stars_obj(stars):
    # One-hot encode each star rating: rating z becomes a vector with a 1 at index z - 1.
    return [[1 if (x + 1) == z else 0 for x in range(n_classes)] for z in stars]


data, stars = get_test_routes()
if BINARY_MODE:
    stars, _ = binary_stars(stars, stars)   # only the first return value is needed here
    n_classes = 2
data = np.array(data, dtype=np.float32)
stars = np.array(stars, dtype=np.float32)
stars = convert_stars_obj(stars)
neural_net(data, stars)
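The restore-by-name calls above only work if the training script created the placeholders and output with explicit name= arguments and saved a checkpoint. A minimal, self-contained illustration of that save side (TF 1.x, with a hypothetical /tmp/model path and trivial weights):

import tensorflow as tf

x = tf.placeholder('float', [None, 15], name="x")
y = tf.placeholder('float', [None, 5], name="y")
w = tf.Variable(tf.zeros([15, 5]))
output_op = tf.identity(tf.matmul(x, w), name="output_op")   # tensor later fetched by name

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # Writes the checkpoint files plus /tmp/model.meta, which import_meta_graph reads back.
    saver.save(sess, "/tmp/model")

Under this assumption, META_PATH would point at the .meta file and MODEL_PATH at the checkpoint prefix passed to saver.save.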
# Tail of train_neural_network(): evaluate the trained network inside the session.
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Min loss:', min_loss)
print('Training size:', len(stars_train))
print('Test size:', len(stars_test))
print('Accuracy:', accuracy.eval({x: data_test, y: stars_test}))


def convert_stars_obj(stars):
    # One-hot encode each star rating: rating z becomes a vector with a 1 at index z - 1.
    return [[1 if (x + 1) == z else 0 for x in range(n_classes)] for z in stars]


data_train, data_test, stars_train, stars_test = get_datasets()
if BINARY_MODE:
    stars_train, stars_test = binary_stars(stars_train, stars_test)
    n_classes = 2

y = tf.placeholder('float', [None, n_classes], name="y")   # labels of the data
x = tf.placeholder('float', [None, NFEATURES], name="x")   # input data

data_train = np.array(data_train, dtype=np.float32)
data_test = np.array(data_test, dtype=np.float32)
stars_train = np.array(stars_train, dtype=np.float32)
stars_test = np.array(stars_test, dtype=np.float32)
stars_test = convert_stars_obj(stars_test)
stars_train = convert_stars_obj(stars_train)

if batch_size > len(data_train):
    batch_size = len(data_train)

train_neural_network()
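For reference, convert_stars_obj one-hot encodes ratings against the current n_classes: with n_classes = 5, convert_stars_obj([5, 2]) returns [[0, 0, 0, 0, 1], [0, 1, 0, 0, 0]]. Note that the encoding assumes labels run from 1 to n_classes; a label of 0 would map to the all-zero vector.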