def run_BOW_baseline(args):
    print(args)
    train, dev, test = load_datasets(args)
    # sorted() gives a deterministic label -> index mapping across runs
    classnames = sorted(set(tweet.label for tweet in train))
    train_y = [classnames.index(tweet.label) for tweet in train]
    dev_y = [classnames.index(tweet.label) for tweet in dev]
    test_y = [classnames.index(tweet.label) for tweet in test]

    # fx = FeatureExtractor(["BOW"], stopwords=args.stopwords)
    fx = FeatureExtractor(["hand-coded"], stopwords=args.stopwords)
    fx.build_vocab(train)

    # list comprehensions (not bare map) so np.asarray sees a sequence on Python 3
    train_x = np.asarray([fx.process(tweet) for tweet in train])
    print("sample fv shape: ", train_x[0].shape)
    dev_x = np.asarray([fx.process(tweet) for tweet in dev])
    test_x = np.asarray([fx.process(tweet) for tweet in test])

    nclasses = len(classnames)
    ntrain = train_x.shape[0]
    nbatches = 100
    batch_size = ntrain // nbatches  # integer division: batch size must be an int

    train_data = (train_x, train_y)
    dev_data = (dev_x, dev_y)
    test_data = (test_x, test_y)
    neural_net.logistic_regression_optimization_sgd(train_data, dev_data, test_data,
                                                    nclasses, batch_size=batch_size)

    print("train set performance:")
    train_ypred = neural_net.predict(train_x, train_y)
    print(evaluate.ConfusionMatrix(train_y, train_ypred, classnames))

    print("validation set performance:")
    dev_ypred = neural_net.predict(dev_x, dev_y)
    print(evaluate.ConfusionMatrix(dev_y, dev_ypred, classnames))

    print("test set performance:")
    test_ypred = neural_net.predict(test_x, test_y)
    print(evaluate.ConfusionMatrix(test_y, test_ypred, classnames))
def xor():
    X = np.array([[0, 0],
                  [1, 0],
                  [0, 1],
                  [1, 1]])
    y = np.array([[0], [1], [1], [0]])

    np.random.seed(1)  # fixed seed so the demo is reproducible
    model = train_network(X, y, hidden_layers=[2], learn_rate=10,
                          batch_size=len(X))

    print('prediction after training:\n', predict(model, X))
    for l, w in enumerate(model):
        print('weights for layer %s:' % l)
        print(w)
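# Demo entry point. With a single 2-unit hidden layer the network can
# represent XOR, so the printed predictions should approach
# [[0], [1], [1], [0]] (a sketch; exact values depend on train_network's
# implementation):
if __name__ == '__main__':
    xor()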
def train(trainSize):
    """
    Train the model, save the trained params to "last_saved_params.npy",
    and print the train/test accuracy.

    Parameters
    ----------
    trainSize : int
        Number of samples taken from the data set for training.
    """
    print("Train and test shapes:")
    print(X_train.shape)
    print(X_test.shape)

    X_train_norm = np.array([normalize(img) for img in X_train[0:trainSize]])
    # flatten each (n x n) image into a column vector of n**2 pixels
    X_train_flat = X_train_norm.reshape(
        (X_train_norm.shape[0], X_train_norm.shape[1] ** 2)).T
    Y_train_one_hot = to_categorical(Y_train[0:trainSize]).T

    # Hardcode the model params here
    layers_dims = [X_train_flat.shape[0], 56, 28, 10]
    activations = ["relu", "relu", "sigmoid"]

    trained_params = train_model(X_train_flat, Y_train_one_hot, layers_dims,
                                 activations, learning_rate=0.06,
                                 num_iterations=2000, print_cost=True)
    np.save('last_saved_params.npy', trained_params)

    trainPredict = predict(X_train_flat, trained_params)
    testPredict = predict(X_test_flat, trained_params)  # X_test_flat is a module-level global
    trainAcc = countAccuracy(trainPredict, Y_train[0:trainSize])
    testAcc = countAccuracy(testPredict, Y_test)
    print("Accuracy on training set: ", str(trainAcc * 100), "%")
    print("Accuracy on test set: ", str(testAcc * 100), "%")
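# Usage sketch: train on the first 1000 samples. This assumes the
# module-level dataset globals (X_train, X_test, X_test_flat, Y_train,
# Y_test) are loaded elsewhere in this module:
#
#     train(1000)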
def run_word2vec_baseline(args):
    print(args)
    print('subtask id: %s' % args.subtask_id)
    train, dev, test = load_datasets(args)
    # sorted() gives a deterministic label -> index mapping across runs
    classnames = sorted(set(tweet.label for tweet in train))
    train_y = [classnames.index(tweet.label) for tweet in train]
    dev_y = [classnames.index(tweet.label) for tweet in dev]
    test_y = [classnames.index(tweet.label) for tweet in test]

    fx = FeatureExtractor(["word2vec"], word2vec_model=args.word2vec_model)
    fx.build_vocab(train)

    # list comprehensions (not bare map) so np.asarray sees a sequence on Python 3
    train_x = np.asarray([fx.process(tweet) for tweet in train])
    print("sample fv shape: ", train_x[0].shape)
    dev_x = np.asarray([fx.process(tweet) for tweet in dev])
    test_x = np.asarray([fx.process(tweet) for tweet in test])

    nclasses = len(classnames)
    ntrain = train_x.shape[0]
    nbatches = 100
    batch_size = ntrain // nbatches  # integer division: batch size must be an int

    train_data = (train_x, train_y)
    dev_data = (dev_x, dev_y)
    test_data = (test_x, test_y)
    neural_net.logistic_regression_optimization_sgd(train_data, dev_data, test_data,
                                                    nclasses, batch_size=batch_size)

    print("train set performance:")
    train_ypred = neural_net.predict(train_x, train_y)
    print(evaluate.ConfusionMatrix(train_y, train_ypred, classnames))

    print("validation set performance:")
    dev_ypred = neural_net.predict(dev_x, dev_y)
    print(evaluate.ConfusionMatrix(dev_y, dev_ypred, classnames))

    print("test set performance:")
    test_ypred = neural_net.predict(test_x, test_y)
    print(evaluate.ConfusionMatrix(test_y, test_ypred, classnames))
def use_pretrained(ind, params):
    """
    Show a model's prediction for one sample from the test dataset.

    Parameters
    ----------
    ind : int
        Index of the sample in the test dataset.
    params : tuple
        Params of a trained model.
    """
    plt.imshow(X_test[ind], cmap='gray')
    print("Correct: ", Y_test[ind])
    # select column `ind` of the flattened test set as a single-sample batch
    p = predict(np.array([X_test_flat.T[ind]]).T, params)
    print("NN output: ")
    print(p)
    print(np.argmax(p))
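# Usage sketch: reload the params saved by train() and inspect test sample 0.
# np.load needs allow_pickle=True here because np.save stores the params
# tuple as an object array:
#
#     params = np.load('last_saved_params.npy', allow_pickle=True)
#     use_pretrained(0, params)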
def analyze_portfolio():
    # (presumably registered as a Flask view; the @app.route decorator is not shown here)
    uid = get_id_from_session(request.headers.get('session'))
    if not uid:
        return jsonify({"message": "Bad session :("}), 400

    p = Parser(
        list(transactions.find({'_id': uid}, {'transaction': 1, '_id': 0}))[0],
        list(holdings.find({'_id': uid}, {'holding': 1, '_id': 0}))[0],
        25)
    pv, direction = predict(p.get_neural_net_attrs(), train())

    r = Recommender(direction.tolist())
    recommendation = r.get_recommendation()
    return jsonify({
        'fitness': pv[0],
        'recommendation': recommendation[0],
        'link': recommendation[1]
    })
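# Client-side sketch (hypothetical: the URL path and dev-server address are
# assumptions, since the route binding for this view is not shown here):
#
#     import requests
#     resp = requests.get('http://localhost:5000/analyze_portfolio',
#                         headers={'session': session_token})
#     print(resp.json())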
for doc in cursor:
    print(doc)
    # assemble a single-row feature vector in the order the model expects
    data_point = np.array([[
        float(doc['etpd']), float(doc['locpop']), float(doc['weektraffic']),
        float(doc['weekdaytraffic']), float(doc['etsize']),
        float(doc['serviceroutes']), float(doc['atminprox']),
        float(doc['ownatm']), float(doc['nearatm']), float(doc['lease']),
        float(doc['mpot']), float(doc['comm']), float(doc['crimerate']),
        float(doc['servicecost'])
    ]])

    pred = neural_net.predict(data_point)
    print(str(uniqueId))
    print(pred)
    table.update_one({'uid': str(uniqueId)},
                     {'$set': {'prediction': str(pred[0][0])}})
def move_toward_tag(front_camera_filename, back_camera_filename):
    global last_motorInstruction
    global last_heading
    global last_power
    last = 0
    d = datetime.now()
    move_time = d
    while True:
        # re-send the last motor instruction once per 100-microsecond tick
        m = (datetime.now() - d).microseconds
        if last != m - m % 100:
            last = m - m % 100
            displayTTYSend(last_motorInstruction)
        if (datetime.now() - move_time).total_seconds() > 0:
            detections = detect_apriltags(front_camera_filename,
                                          back_camera_filename)
            # Find an apriltag, move toward it.
            if len(detections['front']) == 0 and len(detections['back']) == 0:
                # no tag visible: stop and reset state so any new tag triggers a move
                if last_motorInstruction not in ["AA0", "aa0"]:
                    last_motorInstruction = "AA0"
                    last_heading = 10000
                    last_power = 10000
                    weapon_arm.goToRange(up=1)
                    displayTTYSend(last_motorInstruction)
                continue
            # sendWeaponInstruction('1')
            if len(detections['front']) > 0:
                side = 'front'
                active_detection = detections['front'][0]
            else:
                side = 'back'
                active_detection = detections['back'][0]
            distance = active_detection[2]
            heading = active_detection[0]
            power = distance * 10
            power = int(min(power, 20))  # clamp drive power to the motor range
            if side == 'back':
                power = -power
            up = abs(power) / 20
            weapon_arm.goToRange(up=up,
                                 left=0.95 if side == "front" else 0.0,
                                 amplitude=up,
                                 t=(datetime.now() - d).total_seconds())
            heading_char = degreesToMotorDirections(heading)
            left_adjustment, right_adjustment = (motorDirectionsToPower(letter)
                                                 for letter in heading_char)
            if side == 'back':
                left_adjustment, right_adjustment = (-left_adjustment,
                                                     -right_adjustment)
            leftPower = int(min(max(power + left_adjustment, -20), 20))
            rightPower = int(min(max(power + right_adjustment, -20), 20))
            # print(leftPower, rightPower)
            if abs(power) < 10:
                move_time = datetime.now() + timedelta(seconds=0.5)
            elif 10 <= abs(power) <= 20:
                move_time = datetime.now() + timedelta(seconds=1)
            # parentheses make Python's and/or precedence explicit (behavior unchanged)
            if (((datetime.now() - move_time).total_seconds() < 0
                 and abs(heading - last_heading) > 1)
                    or abs(power - last_power) > 1):
                last_heading = heading
                last_power = power
                # last_motorInstruction = powerToMotorDirections(leftPower) + powerToMotorDirections(rightPower)
                last_motorInstruction = neural_net.predict(
                    heading * np.pi / 180,
                    5 * np.abs(distance) / 20,
                    np.sign(power))
                displayTTYSend(last_motorInstruction + "1")
import numpy as np
import pandas as pd
from scipy.io import loadmat
from sklearn.model_selection import train_test_split

import neural_net

# load the handwritten-digit data and split off a held-out test set
data = loadmat('digits.mat')
X = data['X']
y = data['y']
X_train, X_test, y_train, y_test = train_test_split(X, y)

theta1, theta2 = neural_net.fit_model(X_train, y_train)
y_pred = neural_net.predict(theta1, theta2, X_test, y_test)
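# Quick sanity check, a minimal sketch: it assumes neural_net.predict returns
# one predicted label per test row, using the same label encoding as y.
accuracy = np.mean(np.asarray(y_pred).ravel() == y_test.ravel())
print('test accuracy: %.2f%%' % (100 * accuracy))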