# Beispiel #1  (scrape artifact: example separator from the source page, not code)
# 0
import json
from argparse import ArgumentParser
from http import HTTPStatus

import wandb

from text_classification import config, data, predict, utils

# NOTE(review): `FastAPI` and `st` (streamlit) are referenced below but their
# imports are missing from this scrape; restore `from fastapi import FastAPI`
# and `import streamlit as st` (third-party) for the file to run.


# FastAPI application object for the text-classification service.
# NOTE(review): `FastAPI` is used here but never imported in this file —
# presumably `from fastapi import FastAPI` was lost in the scrape; confirm.
app = FastAPI(
    title="text-classification",
    description="",
    version="1.0.0",
)


# Get best run
# Select the W&B run with the lowest `test_loss` from the given project.
best_run = utils.get_best_run(project="mahjouri-saamahn/mwml-httynn-app_v2",
                              metric="test_loss", objective="minimize")

# Load best run (if needed)
# Presumably downloads/caches the run's artifacts locally — confirm in utils.
best_run_dir = utils.load_run(run=best_run)

# Get run components for prediction
# NOTE(review): this variant unpacks 4 components; other snippets in this file
# expect 5 (incl. conv_outputs_model) — these look like different app versions.
args, model, X_tokenizer, y_tokenizer = predict.get_run_components(
    run_dir=best_run_dir)



# NOTE(review): truncated snippet — the response dict is never closed and the
# function body is cut off mid-definition (scrape artifact); not valid Python
# as-is. Kept byte-identical; comments only.
@utils.construct_response
@app.get("/")
async def _index():
    # Root/health-check endpoint; starts building an HTTP 200 payload.
    # NOTE(review): `HTTPStatus` is used but never imported in this file.
    response = {
        'message': HTTPStatus.OK.phrase,
def normalize(x):
    """Linearly rescale the values of *x* into the range [0, 1].

    NOTE(review): assumes *x* supports elementwise arithmetic (e.g. a NumPy
    array) and that ``max(x) != min(x)``; a constant input divides by zero.
    """
    lo = min(x)
    hi = max(x)
    return (x - lo) / (hi - lo)


# Title
# Streamlit page header plus a row of lesson/GitHub/Made-With-ML links.
# NOTE(review): `st` (streamlit) is never imported in this file — scrape loss.
# NOTE(review): "┬╖" in the markdown below looks like a mojibake'd "·" middle
# dot; it is a runtime string, so left untouched here — confirm and fix.
st.title("Creating an End-to-End ML Application")
st.write(
    """[<img src="https://github.com/madewithml/images/blob/master/images/yt.png?raw=true" style="width:1.2rem;"> Watch Lesson](https://www.youtube.com/madewithml?sub_confirmation=1) ┬╖ [<img src="https://github.com/madewithml/images/blob/master/images/github_logo.png?raw=true" style="width:1.1rem;"> GitHub](https://github.com/madewithml/e2e-ml-app-tensorflow) ┬╖ [<img src="https://avatars0.githubusercontent.com/u/60439358?s=200&v=4" style="width:1.2rem;"> Made With ML](https://madewithml.com)""",
    unsafe_allow_html=True)
st.write("Video lesson coming soon...")

# Get best run
project = 'GokuMohandas/e2e-ml-app-tensorflow'
# Select the W&B run with the lowest `test_loss` in the project.
best_run = utils.get_best_run(project=project,
                              metric="test_loss",
                              objective="minimize")

# Load best run (if needed)
best_run_dir = utils.load_run(run=best_run)

# Get run components for prediction
args, model, conv_outputs_model, X_tokenizer, y_tokenizer = predict.get_run_components(
    run_dir=best_run_dir)

# Pages
# Sidebar navigation between the prediction UI and model details.
page = st.sidebar.selectbox("Choose a page", ['Prediction', 'Model details'])
if page == 'Prediction':

    # NOTE(review): snippet ends here — the rest of this branch (and the
    # 'Model details' branch) is cut off by the scrape. "ЁЯЪА" below is a
    # mojibake'd emoji in a runtime string; left untouched.
    st.header("ЁЯЪА Try it out!")
# Beispiel #3  (scrape artifact: example separator from the source page, not code)
# 0

# NOTE(review): garbled snippet — the `predict(...)` call below is cut off
# mid-argument-list and resumes with keyword arguments and closing brackets
# (`conv_outputs=...`, `filter_sizes=...)})`) that belong to a different
# function, and the top-level `return results` is invalid outside a function.
# Not valid Python as-is; code kept byte-identical, comments only.
if __name__ == '__main__':
    # Arguments
    parser = ArgumentParser()
    parser.add_argument('--text',
                        type=str,
                        required=True,
                        help="text to predict")

    args = parser.parse_args()
    inputs = [{'text': args.text}]

    # Get best run
    best_run = utils.get_best_run(
        project="mahjouri-saamahn/mwml-app-tensorflow",
        metric="test_loss",
        objective="minimize")

    # Load best run (if needed)
    best_run_dir = utils.load_run(run=best_run)

    # Get run components for prediction
    args, model, conv_outputs_model, X_tokenizer, y_tokenizer = get_run_components(
        run_dir=best_run_dir)

    # Predict
    results = predict(inputs=inputs,
                      args=args,
                      model=model,
                      conv_outputs_model=conv_outputs_model,
                      X_tokenizer=X_tokenizer,
                                           conv_outputs=conv_outputs,
                                           filter_sizes=args.filter_sizes)})

    return results


if __name__ == '__main__':
    # Command-line entry point: classify one text sample end-to-end.
    cli = ArgumentParser()
    cli.add_argument(
        '--text',
        type=str,
        required=True,
        help="text to predict",
    )
    cli_args = cli.parse_args()
    samples = [{'text': cli_args.text}]

    # Locate the W&B run with the lowest test loss in the project.
    top_run = utils.get_best_run(
        project="GokuMohandas/e2e-ml-app-tensorflow",
        metric="test_loss",
        objective="minimize",
    )

    # Make the run's artifacts available locally (if not already cached).
    top_run_dir = utils.load_run(run=top_run)

    # Restore everything needed for inference from the run directory.
    run_args, model, conv_outputs_model, X_tokenizer, y_tokenizer = \
        get_run_components(run_dir=top_run_dir)

    # Run inference and log the predictions as pretty-printed JSON.
    results = predict(
        inputs=samples,
        args=run_args,
        model=model,
        conv_outputs_model=conv_outputs_model,
        X_tokenizer=X_tokenizer,
        y_tokenizer=y_tokenizer,
    )
    config.logger.info(json.dumps(results, indent=4, sort_keys=False))
# Beispiel #5  (scrape artifact: example separator from the source page, not code)
# 0
    results.append(performance)

    return results


if __name__ == '__main__':
    # Command-line entry point: predict the class of a single text input.
    cli = ArgumentParser()
    cli.add_argument(
        '--text', type=str, required=True, help="text to predict")
    cli_args = cli.parse_args()
    samples = [{'text': cli_args.text}]

    # Fetch the W&B run that minimizes test loss.
    top_run = utils.get_best_run(
        project="mahjouri-saamahn/mwml-tutorial-app",
        metric="test_loss",
        objective="minimize",
    )

    # Make the run's artifacts available locally (if not already cached).
    top_run_dir = utils.load_run(run=top_run)

    # Rebuild the model and vocabulary mapping from the run directory.
    model, word_map = get_run_components(run_dir=top_run_dir)

    # Run inference and log the results as pretty-printed JSON.
    results = predict(inputs=samples, args=cli_args,
                      model=model, word_map=word_map)
    config.logger.info(json.dumps(results, indent=4, sort_keys=False))