# Example 1
def train_script_2(n_train_recordings=2):
    """Train a grid-searched RandomForest on voice commands with a held-out
    validation split, persist the model and reader, and plot a confusion matrix.

    The first *n_train_recordings* entries of the reader's training material
    are used for fitting; the remainder form the validation set.

    Args:
        n_train_recordings: number of leading training signals/texts used for
            fitting (default 2, preserving the original hard-coded split).
    """
    dbreader = DbReader(PATH, split_size=ONE_PERSON_SPLIT)

    # Partition the available material into fit and validation subsets.
    training_commands = getting_commands_from_signals(
        dbreader.training_signals[:n_train_recordings],
        dbreader.training_text[:n_train_recordings])
    valid_commands = getting_commands_from_signals(
        dbreader.training_signals[n_train_recordings:],
        dbreader.training_text[n_train_recordings:])

    training_mfcc_data = simple_mfcc(training_commands)
    valid_mfcc_data = simple_mfcc(valid_commands)

    # The 'command' column is the label; all remaining columns are features.
    y_train = training_mfcc_data['command']
    X_train = training_mfcc_data.drop(columns=['command'])

    y_valid = valid_mfcc_data['command']
    X_valid = valid_mfcc_data.drop(columns=['command'])

    rf_model = RandomForestModel()

    # Hyper-parameter search; keep the best estimator as the internal model.
    model_to_fit = rf_model.gridsearchCV()
    model_to_fit.fit(X_train, y_train)
    rf_model.set_internal_model(model_to_fit.best_estimator_)
    print(model_to_fit.best_estimator_)
    rf_model.save_model()

    # Persist the reader so prediction scripts can reuse the same split.
    joblib.dump(dbreader, "dbreader.mdl")

    # Evaluate on the held-out recordings.
    predictions = rf_model.predict(X_valid)
    plot_confusion_matrix(y_valid, predictions)
def train_script():
    """Fit a grid-searched RandomForest on all training commands and save it.

    NOTE(review): a second ``train_script`` defined later in this file
    shadows this one at import time — confirm which is intended.
    """
    dbreader = DbReader(PATH, split_size=ONE_PERSON_SPLIT)
    commands = getting_commands_from_signals(dbreader.training_signals,
                                             dbreader.training_text)
    mfcc_data = simple_mfcc(commands)

    # Labels live in the 'command' column; everything else is a feature.
    labels = mfcc_data['command']
    features = mfcc_data.drop(columns=['command'])

    rf_model = RandomForestModel()

    # Run the hyper-parameter search and adopt the winning estimator.
    searcher = rf_model.gridsearchCV()
    searcher.fit(features, labels)
    best = searcher.best_estimator_
    rf_model.set_internal_model(best)
    print(best)
    rf_model.save_model()

    # Keep the reader alongside the model for later prediction runs.
    joblib.dump(dbreader, "dbreader.mdl")
def train_script():
    """Train a thyroid-disease RandomForest with feature selection and save it.

    NOTE(review): this redefines ``train_script`` from earlier in the file;
    the earlier definition is shadowed at import time — confirm intent.
    """
    db_reader = DbReader()
    hyper_dataset = db_reader.load_csv("../allhyper.data")
    hypo_dataset = db_reader.load_csv("../allhypo.data")

    X, y = create_dataset_for_training(hyper_dataset, hypo_dataset)
    X = preprocess_the_data(X)

    rf_model = RandomForestModel()
    # Keep only the features the selector deems informative.
    filtered_features = feature_selection(X, y, rf_model.internal_model)

    # Persist the chosen feature names for reuse at prediction time.
    with open('selected_best_features.data', 'wb') as filehandle:
        pickle.dump(filtered_features, filehandle)

    # Grid-search over the filtered feature subset only.
    searcher = rf_model.gridsearchCV()
    searcher.fit(X[filtered_features], y)

    print(searcher.best_score_)
    print(searcher.best_params_)
    print(filtered_features)

    rf_model.set_internal_model(searcher.best_estimator_)
    rf_model.save_model()