Example #1
def predict_json():
    content = request.json

    # The request body must carry a 'sample' key with the input features.
    try:
        params = content['sample']
    except KeyError:
        return abort(
            400,
            "Key 'sample' was not found in the request",
        )

    # Accept either a comma-separated string or a list/tuple of values.
    if isinstance(params, str):
        params = process_str_params(params)
    elif isinstance(params, (tuple, list)):
        params = process_collection_params(params)
    else:
        return abort(
            400,
            "Invalid data for prediction. Expected 13 numerical objects",
        )

    # Train the model on first use if no serialized model exists yet.
    model_path = "./model/trained_model.pkl"
    if not os.path.exists(model_path):
        train_model()

    # Load the pickled model with a context manager so the file is closed.
    with open(model_path, 'rb') as model_file:
        model = pickle.load(model_file)

    prediction = model.predict(params)
    return jsonify({'class': str(prediction[0])})
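process_str_params and process_collection_params are not shown in this example. A minimal sketch of what they might look like, assuming the model expects a single row of 13 numeric features (the helper bodies below are guesses modeled on Example #2, not the original project's code):

import numpy as np


def process_str_params(params):
    # Hypothetical helper: turn "1,2,3,..." into the 2D row model.predict expects.
    values = [float(num) for num in params.split(',')]
    return np.array(values).reshape(1, -1)


def process_collection_params(params):
    # Hypothetical helper: coerce a list/tuple of values into the same shape.
    values = [float(num) for num in params]
    return np.array(values).reshape(1, -1)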
Example #2
def heart(params):
    # Parse the comma-separated feature string into a list of floats.
    params = params.split(',')
    params = [float(num) for num in params]

    # Train the model on first use if no serialized model exists yet.
    model_path = "./model/trained_model.pkl"
    if not os.path.exists(model_path):
        train_model()

    # Load the pickled model with a context manager so the file is closed.
    with open(model_path, 'rb') as model_file:
        model = pickle.load(model_file)

    # The model expects a single row, hence the (1, -1) reshape.
    params = np.array(params).reshape(1, -1)
    prediction = model.predict(params)

    return str(prediction)
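A quick usage sketch for the function above; the 13 values are placeholders in the expected shape (one string of 13 comma-separated numbers, per Example #1's error message), not real data:

# Hypothetical call with 13 comma-separated numeric features.
sample = "63,1,3,145,233,1,0,150,0,2.3,0,0,1"
print(heart(sample))  # prints the predicted class, e.g. "[1]"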
Example #3
def multi_nativebayes_train(model_data):
    # Train a multinomial Naive Bayes model: compute each class's log-prior
    # and the log-likelihood of every vocabulary token given the class.

    # Initialize every (class, token) log-likelihood to 0.
    class_eachtoken_likelihood = {}
    vocabulary = model_data.get_vocabulary()
    for class_label in model_data.get_class_labels():
        class_eachtoken_likelihood[class_label] = {}
        for voc in vocabulary:
            class_eachtoken_likelihood[class_label][voc] = 0

    logprior = {}
    vocabularyCount = model_data.get_vocabularyCount()
    class_eachtoken_count = model_data.get_class_eachtoken_count()
    total_class_token = model_data.get_total_class_token()

    for class_label in model_data.get_class_labels():
        # Log-prior: this class's share of all tokens.
        logprior[class_label] = math.log(total_class_token[class_label] /
                                         vocabularyCount)

        # Log-likelihood of each word given the class; words never seen in
        # the class keep the 0 placeholder set above.
        for word in vocabulary:
            if class_eachtoken_count[class_label][word] == 0:
                class_eachtoken_likelihood[class_label][word] = 0
            else:
                class_eachtoken_likelihood[class_label][word] = math.log(
                    class_eachtoken_count[class_label][word] /
                    total_class_token[class_label])

    train_model_data = train_model(logprior, class_eachtoken_likelihood,
                                   vocabulary, model_data.get_class_labels())
    return train_model_data
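The snippet above only covers training. A minimal sketch of the matching prediction step, assuming the standard Naive Bayes decision rule (argmax over log-prior plus summed token log-likelihoods); the function name and argument layout are assumptions, not code from this project:

import math


def multi_nativebayes_predict(tokens, logprior, class_eachtoken_likelihood,
                              vocabulary, class_labels):
    # Score each class as log-prior + sum of log-likelihoods of known tokens,
    # then return the best-scoring class label.
    best_label, best_score = None, -math.inf
    for class_label in class_labels:
        score = logprior[class_label]
        for token in tokens:
            if token in vocabulary:
                score += class_eachtoken_likelihood[class_label][token]
        if score > best_score:
            best_label, best_score = class_label, score
    return best_label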
Example #4
def main():

    # ================================ Setup ================================

    configure_logger()

    config = get_config()

    # ====================== Load & prepare input data ======================

    logging.info("Processing base data...")

    base_data_df = process_data(base_data_dir=config.base_data_dir,
                                input_data_file=config.input_data_file)

    # ============================= Train model =============================

    logging.info("Training model and evaluating...")

    model = train_model(base_data_df, config.population_tests_dir, grid_search)

    # ============================ Export results ===========================

    logging.info("Saving model...")

    save_model(model, config.out_file)
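configure_logger, get_config, process_data, train_model, and save_model all live elsewhere in the original project. As one example, a minimal sketch of what configure_logger might do with the standard library (an assumption, consistent with the logging.info calls above):

import logging


def configure_logger():
    # Hypothetical setup: emit INFO-level messages with timestamps.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s",
    )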
Example #5
def main():
    final_model = train_model()
    predictions_df = predict(final_model)
    print(predictions_df)

    # Collect the distinct drivers whose predictions raised an alert.
    driver_alerts = set(
        predictions_df[predictions_df['alert']]['driver_id'])
    for driver in driver_alerts:
        send_sms(driver)


#if __name__ == '__main__':
#    main()
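send_sms is not defined in this example. A hypothetical sketch using Twilio's Python client; the environment variables and the lookup_phone_number helper are placeholders, and the original project may use a different provider entirely:

import os

from twilio.rest import Client


def send_sms(driver_id):
    # Hypothetical implementation: text the driver about the raised alert.
    client = Client(os.environ["TWILIO_ACCOUNT_SID"],
                    os.environ["TWILIO_AUTH_TOKEN"])
    client.messages.create(
        to=lookup_phone_number(driver_id),  # hypothetical helper
        from_=os.environ["TWILIO_FROM_NUMBER"],
        body="An alert was raised for driver {}.".format(driver_id),
    )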
Example #6
    # read datasets ---------------------------
    imdb, roidb, valroidb, output_dir = read_datasets(args)

    # Remove roidb entries that have no usable RoIs.
    roidb = filter_roidb(roidb)
    valroidb = filter_roidb(valroidb)

    # Configure the GPU: allow soft device placement and grow memory usage
    # on demand instead of reserving the whole device up front.
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True

    # -------------------build model-----------------
    # load networks ---------------------------
    model_net = load_network(args.net)
    # Path of the pre-trained weights for this backbone.
    pre_net_file_path = load_net_weights(args.net)

    model_train, model_vol = build_model(imdb, model_net, pre_net_file_path)
    # -------------------build model-----------------

    # --------------------training-------------------
    train_model(imdb,
                roidb,
                model_train,
                valroidb,
                model_vol,
                output_dir,
                max_iters=args.max_iters)

    # --------------------training-------------------
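filter_roidb is not shown here. In Faster R-CNN training code it typically drops images that can supply neither foreground nor background RoI samples; a minimal sketch under that assumption (the max_overlaps key and the threshold defaults are typical values, not confirmed from this project):

import numpy as np


def filter_roidb(roidb, fg_thresh=0.5, bg_thresh_hi=0.5, bg_thresh_lo=0.1):
    # Keep an entry only if at least one RoI qualifies as foreground or
    # background, judged by its max overlap with ground-truth boxes.
    def is_valid(entry):
        overlaps = entry['max_overlaps']
        has_fg = np.any(overlaps >= fg_thresh)
        has_bg = np.any((overlaps < bg_thresh_hi) & (overlaps >= bg_thresh_lo))
        return has_fg or has_bg

    filtered = [entry for entry in roidb if is_valid(entry)]
    print('filter_roidb: kept {} of {} entries'.format(len(filtered), len(roidb)))
    return filtered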
Example #7
# Crawl the lyrics only if no raw dataset has been collected yet.
if not os.path.exists('data/raw_data.csv'):
    process = CrawlerProcess(get_project_settings())
    process.crawl(LyricsSpider)
    process.start()  # blocks until the crawl finishes
else:
    print(
        '\033[33m' +
        "WARNING: There is already data saved in the directory. If you want "
        "to collect new data, delete the raw_data.csv file in the data "
        "folder." + '\033[0;0m')

print('----------------------------')
print('     TRAINING THE MODEL     ')
print('----------------------------')
train_df, test_df = prepare_data()
train_model(API_KEY, train_df)

print('----------------------------')
print('     TESTING THE MODEL      ')
print('----------------------------')
test_model(API_KEY, test_df)

print('----------------------------')
print('      PIPELINE FINISHED     ')
print('----------------------------')

# Keep prompting until the user answers Y or N.
while True:
    answer = input('Do you want to launch the application? [Y/N] ').lower().strip()
    if answer == 'y':
        os.system('cmd /k "streamlit run main.py"')
    elif answer == 'n':
        break