def predict_games():
    """
        Defines functionality for '/predict' endpoint. 
        If GET request is received, gets season averages for each game played at todays date, processes the data through model,
        and returns list of predictions.
        If POST request is received, gets season averages for each game played at given date, processes the data through model, 
        and returns list of predictions.

        :return: Returns JSON response containing list of predictions.

        - Brandan Quinn
        2/4/19 3:42pm
    """
    predictions_to_return = {}
    print('Received {} request from API.'.format(request.method))

    if request.method == 'GET':
        utils.predict(utils.get_todays_date())
        predictions_to_return = get_predictions(persistent_model)
    elif request.method == 'POST':
        date_string = request.get_json().get('date')
        print('Date received: ', date_string)
        utils.predict(date_string)
        predictions_to_return = get_predictions(persistent_model)
        predictions_to_return = utils.assess_accuracy(date_string,
                                                      predictions_to_return)

    return jsonify(predictions=predictions_to_return)
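# Usage sketch (not part of the original endpoint code): one way a client
# could call '/predict'. The host/port and the 'YYYY-MM-DD' date format are
# assumptions; adjust them to match the actual deployment.
import requests

resp = requests.post('http://localhost:5000/predict',
                     json={'date': '2019-02-04'})
print(resp.json()['predictions'])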
Example #2
def train(n_vocab, labels, embedding, embed, int_to_vocab):
    loss_op, train_op = get_loss_and_training_op(n_vocab, labels, embed)
    valid_words = sample_eval_data()
    with tf.Session() as sess:
        saver = tf.train.Saver()
        all_losses = []
        batch_loss = []
        sess.run(tf.global_variables_initializer())
        start = time.time()
        for i in range(FLAGS.total_iterations):
            loss, _ = sess.run([loss_op, train_op])
            all_losses.append(loss)
            batch_loss.append(loss)
            if i % FLAGS.log_every == 0:
                end = time.time()
                print(
                    'Iteration {}/{} '.format(i, FLAGS.total_iterations),
                    'Average Loss: {:.4f}'.format(np.mean(batch_loss)),
                    '{:.4f} sec/{} iterations'.format((end - start),
                                                      FLAGS.log_every))
                batch_loss = []
                start = time.time()

            if i % FLAGS.evaluate_every == 0:
                saver.save(sess, 'checkpoint/model-{}.ckpt'.format(i))
                pred_op = get_predictions(valid_words, embedding)
                predictions = sess.run(pred_op)
                # Nearest-neighbour words as a qualitative progress check.
                words = get_top_10_words(predictions, int_to_vocab)
                print(words)
        saver.save(sess, 'checkpoint/model.ckpt')
        np.savez('checkpoint/all_losses.npz', all_losses)
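# The helpers sample_eval_data and get_top_10_words are not shown in this
# listing. Below is a minimal sketch (an assumption, not the original code)
# of get_top_10_words, treating `predictions` as a [n_valid, n_vocab]
# similarity matrix between validation words and the vocabulary.
import numpy as np

def get_top_10_words(predictions, int_to_vocab):
    words = []
    for row in predictions:
        top_idx = np.argsort(row)[::-1][:10]  # highest similarity first
        words.append([int_to_vocab[i] for i in top_idx])
    return words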
Example #3
def predict(valid_words, embedding, int_to_vocab):
    pred_op = get_predictions(valid_words, embedding)
    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, 'checkpoint/model.ckpt')
        predictions = sess.run(pred_op)
        words = get_top_10_words(predictions, int_to_vocab)
    return words
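# Usage sketch: querying nearest neighbours for the sampled validation words
# after training. `embedding` and `int_to_vocab` are assumed to come from the
# same vocabulary/graph-building step used by train() above.
valid_words = sample_eval_data()
for neighbours in predict(valid_words, embedding, int_to_vocab):
    print(' '.join(neighbours))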
Example #4
def evaluate(test_horizon: int,
             wandb_proj: Optional[str] = None) -> None:
    if wandb_proj is not None:
        import wandb
        github_sha = os.getenv('WANDB_SHA')
        wandb.init(project=wandb_proj)
        wandb.config.github_sha = github_sha
    data = generate_data()
    train_data = data.head(-test_horizon)
    valid_data = data.tail(test_horizon)
    predictions = get_predictions(train_data, test_horizon)
    mse = mean_squared_error(valid_data.y, predictions)
    if wandb_proj is not None:
        wandb.log({'mse': mse})
    print(f'MSE: {mse:.2f}')
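# Usage sketch: evaluate on the last 30 points; pass a W&B project name (an
# assumption, any project you own) to also log the MSE to Weights & Biases.
evaluate(test_horizon=30)
# evaluate(test_horizon=30, wandb_proj='forecast-eval')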
def update_output(n_clicks, value):
    if value is None:
        return
    sentence = TextBlob(value)
    iso_code = sentence.detect_language()
    language = pycountry.languages.get(alpha_2=iso_code)
    if iso_code == "en":
        language_sentence = html.P("Your sentence is in English")
        tweet_class = get_predictions(value)
        return html.P([
            html.P('Your sentence is classified as {}.'.format(tweet_class),
                   style={'textAlign': 'center', 'font-size': 20}),
            html.Br(),
            'The sentence to classify was "{}".'.format(value),
            html.Br(),
            language_sentence,
            html.Br(),
            'You tried to classify {} sentence(s) since you started using this app.'.format(n_clicks),
        ])
    language_sentence = html.P("Your sentence is in " + language.name +
                               ", please write a sentence in English")
    return html.P([
        html.P(language_sentence,
               style={'textAlign': 'center', 'font-size': 20,
                      'color': colors['text']}),
        html.Br(),
        'The sentence to classify was "{}".'.format(value),
        html.Br(),
        'You tried to classify {} sentence(s) since you started using this app.'.format(n_clicks),
    ])
def predict_matchup():
    """
        Defines functionality for '/matchup' endpoint.
        If request is received, gets season averages for the teams sent in the body of the request.
        Processes this data through the model and returns prediction for matchup to web application.

        :return: Returns JSON response containing prediction.

        - Brandan Quinn
        5/1/19 5:30pm
    """

    t1 = request.get_json().get('t1')
    t2 = request.get_json().get('t2')
    utils.predict_matchup(t1, t2)
    predictions_to_return = get_predictions(persistent_model)

    return jsonify(predictions=predictions_to_return)
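# Usage sketch: POSTing two team identifiers to '/matchup'. The host/port and
# the team codes are assumptions.
import requests

resp = requests.post('http://localhost:5000/matchup',
                     json={'t1': 'BOS', 't2': 'GSW'})
print(resp.json()['predictions'])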
def predict():
    """Classifies JPEG image passed in as POST data
    
    Assuming a JPEG file is passed in (as raw bytes), this function saves the 
    image to a the local temp directory, passes in the image to the TensorFlow
    model, and returns the top-5 guesses and path to the saved image to be 
    rendered to the client.

    NOTE: This function is NOT SAFE. Strictly for demonstration purposes. Does
    not do any safe-checking of the data being saved locally. Only use locally.
    """
    results = []
    filename = None
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        flash('No selected file')
        return redirect(request.url)
    if file:
        for f in os.listdir(app.config['UPLOAD_FOLDER']):
            os.remove(os.path.join(app.config['UPLOAD_FOLDER'], f))
        filename = os.path.join(app.config['UPLOAD_FOLDER'],
                                '{}.jpg'.format(random.randint(0, 999999999)))
        file.save(filename)
        file.seek(0)
        data = file.read()
        feed_dict = {model.get_input(sess): data}
        prediction = sess.run(model.get_predictions(sess), feed_dict)
        top_k = prediction.argsort()[0][-5:][::-1]
        descriptions = get_descriptions()
        for idx in top_k:
            description = descriptions[idx]
            score = prediction[0][idx]
            print('{} (score = {})'.format(description, score))
            results.append((description, score))
    return render_template('predict.html', results=results, filename=filename)
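# Usage sketch: uploading a JPEG to the endpoint above. The URL is an
# assumption; the multipart field must be named 'file' to match
# request.files['file'].
import requests

with open('cat.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/predict',
                         files={'file': ('cat.jpg', f, 'image/jpeg')})
print(resp.status_code)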
Example #8
def process_image(path, save=False):
    start_time = time.process_time()

    # cropped = get_board.get_board(path, show=False)
    # squares, board_img = get_board.get_squares(cropped, show=False)  #
    # print("Anzahl gefundene Squares ", len(squares))

    squares, board_img, corners = get_slid.get_board_slid(path)

    if save:
        save_path = utility.fill_dir_with_squares(path, squares)
        print("Saved to '%s'" % save_path)
        # tensor_list, square_list = utility.load_square_lists_from_dir(save_path)

    tensor_list = utility.load_tensor_list_from_squares(
        squares, img_size, preprocess_input)

    elapsed_time = time.process_time() - start_time
    print("Processing took ", elapsed_time, "seconds...")

    # Get predictions
    start_time = time.process_time()
    reloaded_model = model.load_compiled_model(model_path)
    predictions = model.get_predictions(reloaded_model, tensor_list)

    elapsed_time = time.process_time() - start_time
    print("Model took ", elapsed_time, "seconds...")

    # Evaluate Predictions
    start_time = time.process_time()
    fen = get_fen_from_predictions(predictions,
                                   squares,
                                   num_of_classes=num_of_classes)
    elapsed_time = time.process_time() - start_time
    print("Get Fen took ", elapsed_time, "seconds...")
    return fen
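# Usage sketch: img_size, model_path, preprocess_input and num_of_classes are
# assumed to be module-level globals, as in the surrounding project.
fen = process_image('images/board.jpg', save=True)
print('Detected position:', fen)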
if launch_train:
    # Launch training on GPU
    print('Launching training on GPU')
    losses = Model.launch_train(
        config,
        model,
        path_model,
        path_data + '/train/',
        path_data + '/dev/',
        nb_epoch=config['nb_epoch'],
        device=config['device'],
        type_sentence_embedding=config['type_sentence_embedding'],
        restart_at_epoch=restart_at_epoch)
    np.save(path_work + 'losses.npy', np.asarray(losses))

# Re-define the model on CPU
config['device'] = 'cpu'
model = Model.HierarchicalBiLSTM_on_sentence_embedding(config)
model = model.to(config['device'])

# Use the best saved model to predict on the test set
subset = 'test'
print('Predict on test')
# glob() order is filesystem-dependent, so sort before taking the last match
best_model_path = sorted(glob.glob(path_model + 'model_best_*.pth.tar'))[-1]
model.load_state_dict(torch.load(best_model_path))
Model.get_predictions(config, model, subset, path_data, path_work, save=True)

#Model.get_predictions(X_train, Y_train, embed, model_trained, is_eval=True, config)
#model.get_predictions(X_dev, Y_dev, idx_set_words, embed, model_trained, is_eval=True, config)
#model.get_predictions(X_test, Y_test, idx_set_words, embed, model_trained, is_eval=True, config)
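# Follow-up sketch (assumption): inspect the training losses saved above.
import numpy as np
import matplotlib.pyplot as plt

losses = np.load(path_work + 'losses.npy')
plt.plot(losses)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()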