Example #1
def analyze_signal(data_path):

    # calibrate the ZCR threshold
    Frame.zcr_treshold = calibrate_param(CALIBRATE_PATH)

    frames_processed = []
    y_pred = []

    # load data
    X, y, mapping = load_data(data_path)

    for (samples, c) in zip(X, y):
        print("processing")
        f = Frame(samples, c)
        f.classify_zcr()
        frames_processed.append(f)
        y_pred.append(f.prediction)

    cm = confusion_matrix(y, y_pred)
    print(accuracy_score(y, y_pred))
    print(cm)


    plot_res(y, y_pred)
    signal, noise, fec, msc, over, nds = analyze_objective_params(y, y_pred)
    print("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
        signal, noise, fec, msc, over, nds))

    return frames_processed
def classify_by_feature(X_train, X_test, y_train, y_test):

    # scale the features

    sc = StandardScaler()
    X_train = sc.fit_transform(X_train)
    X_test = sc.transform(X_test)

    # classifier and fitting
    classifier = LogisticRegression(random_state=0)
    classifier.fit(X_train, y_train)

    # predictions
    y_pred = classifier.predict(X_test)

    # confusion matrix

    cm = confusion_matrix(y_test, y_pred)
    print(cm)
    accuracy = accuracy_score(y_test, y_pred)
    print(accuracy)

    plot_res(y_test, y_pred)

    signal, noise, fec, msc, over, nds = analyze_objective_params(
        y_test, y_pred)
    print("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
        signal, noise, fec, msc, over, nds))

    return classifier, sc
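# A minimal usage sketch, not part of the original file: it exercises
# classify_by_feature on dummy two-class data, since the project's own
# feature-extraction step is not shown in this excerpt (plot_res and
# analyze_objective_params are still assumed to be importable project helpers).
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split

    # dummy per-frame features standing in for the real ones
    X, y = make_classification(n_samples=500, n_features=4, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=0)

    clf, scaler = classify_by_feature(X_train, X_test, y_train, y_test)

    # new frames would then be classified with the fitted scaler and model:
    # clf.predict(scaler.transform(new_frame_features))
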
Example #3
    # add a trailing channel axis, so each example has shape (..., 1)
    # as the saved Keras model expects
    X_train = X_train[..., np.newaxis]
    X_validation = X_validation[..., np.newaxis]
    X_test = X_test[..., np.newaxis]

    return X_train, X_validation, X_test, y_train, y_validation, y_test



if __name__ == "__main__":
    # load dataset
    X_train, X_validation, X_test, y_train, y_validation, y_test = prepare_dataset(
        DATA_TRAIN, DATA_EVAL, DATA_TEST)
    
    model = keras.models.load_model(PATH_TO_MODEL)
    y_pred_val = model.predict(X_test)
    
    # threshold the predicted probabilities at 0.5 (model.predict returns an (N, 1) array)
    y_pred_val = [0 if x < 0.5 else 1 for x in y_pred_val.flatten()]

    cm = confusion_matrix(y_test, y_pred_val)
    print(cm)

    signal, noise, fec, msc, over, nds = analyze_objective_params(
        y_test, y_pred_val)


    print("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
        signal, noise, fec, msc, over, nds))

    plot_res(y_test, y_pred_val)
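    # Optional summary, not in the original: standard sklearn metrics for the
    # thresholded predictions; the label names assume 0 = no speech and
    # 1 = speech, as in the plotting code of the other examples.
    from sklearn.metrics import accuracy_score, classification_report
    print("accuracy:", accuracy_score(y_test, y_pred_val))
    print(classification_report(y_test, y_pred_val,
                                target_names=["no speech", "speech"]))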
    
Example #4
    fig.savefig('plots_final/svm.png')
    plt.show()


if __name__ == "__main__":

    # analyze validation data
    y_val, y_pred_val = analyze_signal_samples(
        DATA_TRAIN_SAMPLES, DATA_TEST_SAMPLES)

    # analyze test data
    y_test, y_pred = analyze_signal_samples(
        DATA_TRAIN_SAMPLES, DATA_STREAM)

    # objective params for test data
    signal, noise, fec, msc, over, nds = analyze_objective_params(
        y_test, y_pred)

    # objective params for validation data
    signal_val, noise_val, fec_val, msc_val, over_val, nds_val = analyze_objective_params(
        y_val, y_pred_val)

    print("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
        signal, noise, fec, msc, over, nds))
    with open('svm/objective_stream.txt', 'w') as file:
        file.write("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
            signal, noise, fec, msc, over, nds))

    
    print("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
        signal_val, noise_val, fec_val, msc_val, over_val, nds_val))
    with open('svm/objective_val.txt', 'w') as file:
        file.write("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
            signal_val, noise_val, fec_val, msc_val, over_val, nds_val))
Example #5
    ax.set_xlabel('frame number')
    ax.set_ylabel('speech / no speech')
    ax.plot(y_test, color='b', label='ground truth')
    ax.plot(y_pred, color='r', linestyle='--', label='algorithm decision')
    ax.set_yticks([0, 1])
    ax.set_yticklabels(['no speech', 'speech'])
    ax.legend(loc="lower right")
    fig.savefig('plots_final/rfc.png')
    plt.show()


if __name__ == "__main__":

    # analyze signal
    y_prediction, y_fact = analyze_signal_samples(DATA_TRAIN_SAMPLES,
                                                  DATA_TEST_SAMPLES)

    # calculate objective params
    signal, noise, fec, msc, over, nds = analyze_objective_params(
        y_fact, y_prediction)

    print("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
        signal, noise, fec, msc, over, nds))
    with open('random_forest/objective.txt', 'w') as file:
        file.write(
            "signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
                signal, noise, fec, msc, over, nds))

    # plot vad's decisions
    plot_res(y_fact, y_prediction)
Example #6
def analyze_signal(data_path):

    # predictions collected for the sklearn confusion matrix computed below
    y_pred = []

    # initial adaptation rate and silence-energy estimate
    p = 0.1
    e_silence = 0

    e_r_old, unvoiced_buffer = calibrate_param(CALIBRATE_PATH)
    var_old = np.var(unvoiced_buffer)

    # set the initial full-band energy threshold
    Frame.full_band_treshold = e_r_old

    X, y, mapping = load_data(data_path)

    # array with classified frames
    frames_processed = []

    # unvoiced_buffer is already populated by calibrate_param above,
    # so it is not re-initialized here
    vector_threshold = []

    # index into the circular buffer of silence-frame energies
    buffer_index = N - 1

    # frame-by-frame processing
    for (samples, c) in zip(X, y):
        f = Frame(samples, c)
        f.classify_aled_frame(f.samples)
        vector_threshold.append([Frame.full_band_treshold] * FRAME_SIZE)

        # save frame
        frames_processed.append(f)

        # save the prediction value
        y_pred.append(f.prediction)

        # speech frames leave the threshold unchanged
        if f.prediction == 1:
            continue

        # wrap the index back to 0 when it reaches the end of the buffer
        if buffer_index == N:
            buffer_index = 0

        unvoiced_buffer[buffer_index] = f.full_band_energy

        var_new = np.var(unvoiced_buffer)
        e_silence = f.full_band_energy
        p = p_actualize(var_new, var_old)
        Frame.full_band_treshold = calc_new_threshold(e_r_old, e_silence, p)
        var_old = var_new
        buffer_index += 1

    cm = confusion_matrix(y, y_pred)
    print(accuracy_score(y, y_pred))

    plot_res(y, y_pred)

    signal, noise, fec, msc, over, nds = analyze_objective_params(y, y_pred)
    print("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
        signal, noise, fec, msc, over, nds))

    return frames_processed, vector_threshold, cm
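# p_actualize and calc_new_threshold are project helpers whose definitions are
# not shown in this excerpt. The hypothetical *_sketch functions below only
# illustrate one common ALED-style formulation (a variance-driven adaptation
# rate and a convex-combination threshold update); the project's actual
# breakpoints and update rule may differ.
def p_actualize_sketch(var_new, var_old):
    # the adaptation rate grows when the silence-energy variance grows
    ratio = var_new / var_old if var_old else 1.0
    if ratio >= 1.25:
        return 0.25
    if ratio >= 1.10:
        return 0.20
    if ratio >= 1.00:
        return 0.15
    return 0.10


def calc_new_threshold_sketch(e_r_old, e_silence, p):
    # blend the previous threshold with the latest silence energy
    return (1.0 - p) * e_r_old + p * e_silence
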
Example #7
    print(f'Train Accuracy: {rf_Grid.score(X_train, y_train):.3f}')
    print(f'Test Accuracy: {rf_Grid.score(X_test, y_test):.3f}')
    print(f'Best params: {rf_Grid.best_params_}')
    

    best_grid = RandomForestClassifier(
        n_estimators=rf_Grid.best_params_['n_estimators'],
        max_features=rf_Grid.best_params_['max_features'],
        max_depth=rf_Grid.best_params_['max_depth'],
        min_samples_split=rf_Grid.best_params_['min_samples_split'],
        min_samples_leaf=rf_Grid.best_params_['min_samples_leaf'],
        bootstrap=rf_Grid.best_params_['bootstrap'])

    print("confussion matrix")
    y_pred = best_grid.predict(X_test, y_test)
    cm = confusion_matrix(y_test, y_pred)

    print("calculate test stream")
    y_pred_stream = best_grid.predict(X_stream)
    cm = confusion_matrix(y_stream, y_pred_stream)
    with open('random_forest/conf_test.txt', 'w') as file:
        file.write("True Neg {}, False Pos {}, False neg {}, True Pos {}".format(cm[0][0], cm[0][1], cm[1][0], cm[1][1]))

    signal, noise, fec, msc, over, nds = analyze_objective_params(
        y_stream, y_pred_stream)

    print("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
        signal, noise, fec, msc, over, nds))
    with open('random_forest/objective.txt', 'w') as file:
        file.write("signal {}, noise {}, fec {}, msc {}, over {}, nds {}".format(
            signal, noise, fec, msc, over, nds))