Example 1
def plot_comparaison_filter2():
    """
    plot a figure comparing the performance using different correction filters
    (Raw, Unilateral Limiter, Bilateral Limiter, Time Averaging, Log Spectrum Averaging)
    :return: figure
    """
    dir = "log/NY/peps mini"
    pattern = r'(internal|access|lock)\\\d{1,2}.csv$'
    pattern_valid = r'(3|6|9|12).csv$'
    df_report = pd.DataFrame()
    filters = [
        'Raw', 'ULimiter', 'BLimiter', 'TA:n=5', 'TA:n=10', 'LSA:n=5',
        'LSA:n=10'
    ]
    for index, name in enumerate(filters):
        print('Filter: ' + name)
        utils.construct_set(dir, pattern, pattern_valid, filter=index)
        X_train, X_valid, y_train, y_valid = utils.load_train_valid()
        cm, report_temp, classes = utils.train(X_train,
                                               X_valid,
                                               y_train,
                                               y_valid,
                                               method='RF',
                                               param={
                                                   "max_features": 2,
                                                   "n_estimators": 100
                                               })
        df_report = df_report.append(report_temp, ignore_index=True)
    df_report.index = filters
    df_report.plot(kind='bar', rot=0, ylim=(0.8, 0.95))
    df_report.to_csv('data/comparaison_filter.csv', sep=';')
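The `TA` and `LSA` entries in `filters` refer to time averaging and log-spectrum averaging of the RSSI traces before feature extraction. The project's actual filters live in `utils`; the sketch below only illustrates what such smoothing could look like, assuming each trace is a 1-D array of RSSI values in dB (the names `time_average` and `log_spectrum_average` are invented here):

import numpy as np


def time_average(rssi, n=5):
    """Moving average over n consecutive RSSI samples (TA:n=5, TA:n=10)."""
    kernel = np.ones(n) / n
    # mode='same' keeps the filtered trace aligned with the input length
    return np.convolve(np.asarray(rssi, dtype=float), kernel, mode='same')


def log_spectrum_average(rssi, n=5):
    """Average in the linear (mW) domain, then convert back to dB (LSA:n=5, LSA:n=10)."""
    linear = 10.0 ** (np.asarray(rssi, dtype=float) / 10.0)
    smoothed = np.convolve(linear, np.ones(n) / n, mode='same')
    return 10.0 * np.log10(smoothed)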
Example 2
def plot_distribution_prob(fig_name):
    """
    plot a figure showing the distribution of the max probability
    :param fig_name: saved figure name
    :return: figure
    """
    dir = "log/peps mini"
    pattern = r'(internal|access|lock)\\\d{1,2}.csv$'
    pattern_valid = r'(3|6|9|12).csv$'
    utils.construct_set(dir, pattern, pattern_valid, filter=1)
    X_train, X_valid, y_train, y_valid = utils.load_train_valid()
    utils.train(X_train,
                X_valid,
                y_train,
                y_valid,
                method='RF',
                param={
                    "max_features": 2,
                    "n_estimators": 100
                },
                save_prob=True)
    utils.plot_max_probablity_distribution('RF')
    plt.title(fig_name)
    if not os.path.exists(dir_fig):
        os.makedirs(dir_fig)
    plt.savefig(dir_fig + '/' + fig_name + '.png')
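`utils.plot_max_probablity_distribution('RF')` is expected to read back the probabilities stored by `save_prob=True`. A minimal sketch of the plotting step, assuming the probabilities are available as an `(n_samples, n_classes)` array (the helper name is illustrative and file handling is omitted):

import matplotlib.pyplot as plt
import numpy as np


def plot_max_probability_distribution(proba, bins=20):
    """Histogram of the highest class probability assigned to each sample."""
    max_prob = np.max(proba, axis=1)  # confidence of the predicted class
    plt.hist(max_prob, bins=bins, range=(0.0, 1.0), edgecolor='black')
    plt.xlabel('max predicted probability')
    plt.ylabel('number of samples')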
Example 3
def plot_report(fig_name, plot_cm=False):
    """
    plot the comparison result using different ML methods
    :param fig_name: saved figure name
    :param plot_cm: whether to plot confusion matrix result
    :return: figure
    """
    dir = "log/peps mini"
    pattern = r'(internal|access|lock)\\\d{1,2}.csv$'
    pattern_valid = r'(3|6|9|12).csv$'
    utils.construct_set(dir, pattern, pattern_valid)
    X_train, X_valid, y_train, y_valid = utils.load_train_valid()
    methods = ["Logistic", "LDA", "QDA", "KNN", "SVM", "RF", "GBM", "MLP"]
    params = [
        None, None, None, {
            "n_neighbors": 10
        }, {
            "C": 0.25,
            "gamma": 0.5
        }, {
            "max_features": 2,
            "n_estimators": 100
        }, {
            "n_estimators": 400,
            "max_depth": 3
        }, {
            "hidden_layer_sizes": (16, 8)
        }
    ]
    df_report = pd.DataFrame()
    for method, param in zip(methods, params):
        cm, report_temp, classes = utils.train(X_train,
                                               X_valid,
                                               y_train,
                                               y_valid,
                                               method=method,
                                               param=param)
        df_report = df_report.append(report_temp, ignore_index=True)
        if plot_cm:
            plt.figure()
            utils.plot_confusion_matrix(cm, classes, normalize=True)
            plt.title(method)
            if not os.path.exists(dir_fig + '/methods/'):
                os.makedirs(dir_fig + '/methods/')
            plt.savefig(dir_fig + '/methods/' + method + '.png')
    df_report.set_index('method', inplace=True)
    df_report.plot(kind='bar', rot=0, figsize=(16, 6), ylim=(0.6, 1))
    plt.title(fig_name)
    if not os.path.exists(dir_fig):
        os.makedirs(dir_fig)
    plt.savefig(dir_fig + '/' + fig_name + '.png')
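`utils.train` is the central helper in these examples: it fits the estimator selected by `method`, evaluates it on the validation split and returns a confusion matrix, a one-row report and the class labels. A scikit-learn sketch of that contract follows; the estimator mapping and the report columns are assumptions, not the project's actual implementation:

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, precision_score, recall_score
from sklearn.neighbors import KNeighborsClassifier


def train(X_train, X_valid, y_train, y_valid, method='RF', param=None):
    """Fit the chosen estimator and return (cm, report_row, classes)."""
    estimators = {
        # shortened mapping; the real utils.train also covers the other methods
        'RF': RandomForestClassifier,
        'KNN': KNeighborsClassifier,
    }
    model = estimators[method](**(param or {}))
    model.fit(X_train, y_train)
    y_pred = model.predict(X_valid)
    cm = confusion_matrix(y_valid, y_pred)
    report_row = {
        'method': method,
        'precision': precision_score(y_valid, y_pred, average='macro'),
        'recall': recall_score(y_valid, y_pred, average='macro'),
    }
    return cm, report_row, model.classes_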
Example 4
def deploy_zone_prediction():
    """
    generate java model for zone prediction
    :return: training result and java model
    """
    dir = "log/peps normal"
    pattern = r'(left|right|front|back|start|trunk|lock)\\\d{1,2}.csv$'
    pattern_valid = r'(3|6|9|12).csv$'
    utils.construct_set(dir, pattern, pattern_valid, filter=1)
    utils.save_to_csv()
    id = 'EightNormal'
    dir_path = 'model/'
    rf = utils.train_rf(model_id=id, ntrees=25, weight_lock=1)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    rf.download_pojo(path=dir_path, get_genmodel_jar=False)
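`utils.train_rf` wraps an H2O distributed random forest so the fitted model can be exported as a Java POJO with `download_pojo`. The sketch below shows one plausible shape for that wrapper; the training CSV path, the `label` and `weight` column names, and the reading of `weight_lock` as a per-row class weight are all assumptions:

import h2o
from h2o.estimators import H2ORandomForestEstimator


def train_rf(model_id, ntrees=25, weight_lock=1, train_csv='data/train.csv'):
    """Fit an H2O random forest that can later be exported as a POJO."""
    h2o.init()
    frame = h2o.import_file(train_csv)
    frame['label'] = frame['label'].asfactor()
    # Up- or down-weight the 'lock' class through a per-row weight column
    frame['weight'] = (frame['label'] == 'lock').ifelse(weight_lock, 1)
    features = [c for c in frame.columns if c not in ('label', 'weight')]
    rf = H2ORandomForestEstimator(model_id=model_id, ntrees=ntrees)
    rf.train(x=features, y='label', training_frame=frame,
             weights_column='weight')
    return rf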
Example 5
def plot_TSNE(fig_name):
    """
    plot a figure for 2D t-SNE dimension reduction
    :param fig_name: saved figure name
    :return: figure
    """
    dir = "log/peps mini"
    pattern = r'(internal|access|lock)\\\d{1,2}.csv$'
    pattern_valid = r'(3|6|9|12).csv$'
    utils.construct_set(dir, pattern, pattern_valid)
    X, y = utils.load_all()
    utils.plot_TSNE(X, y)
    plt.title(fig_name)
    if not os.path.exists(dir_fig):
        os.makedirs(dir_fig)
    plt.savefig(dir_fig + '/' + fig_name + '.png')
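`utils.plot_TSNE` presumably embeds the feature matrix into two dimensions with t-SNE and colours the points by class. A minimal scikit-learn sketch (the perplexity and marker size are arbitrary choices, not values taken from the project):

import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE


def plot_tsne(X, y, perplexity=30, random_state=0):
    """Scatter plot of a 2-D t-SNE embedding, one colour per class."""
    embedding = TSNE(n_components=2, perplexity=perplexity,
                     random_state=random_state).fit_transform(X)
    for label in np.unique(y):
        mask = np.asarray(y) == label
        plt.scatter(embedding[mask, 0], embedding[mask, 1], s=10, label=label)
    plt.legend()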
Example 6
def compare_binary_multi():
    """
    plot a figure comparing binary classification (access, lock) with multiclass classification (left, right, front, back, lock)
    :return: figure
    """
    dir = "log\\NY\\peps"
    pattern = r'(front|left|right|back|lock)\\\d.csv$'
    pattern_valid = r'3.csv$'
    utils.construct_set(dir, pattern, pattern_valid, filter=1)
    cm_list = list()
    labels_list = list()
    # multi classification
    X_train, X_valid, y_train, y_valid = utils.load_train_valid()
    cm, report_temp, classes = utils.train(X_train,
                                           X_valid,
                                           y_train,
                                           y_valid,
                                           method='RF',
                                           param={
                                               "max_features": 2,
                                               "n_estimators": 100
                                           })
    cm_list.append(cm)
    labels_list.append(classes)
    # binary classification
    y_train = np.array(y_train)
    y_train[y_train != 'lock'] = 'access'
    y_valid = np.array(y_valid)
    y_valid[y_valid != 'lock'] = 'access'
    cm, report_temp, classes = utils.train(X_train,
                                           X_valid,
                                           y_train,
                                           y_valid,
                                           method='RF',
                                           param={
                                               "max_features": 2,
                                               "n_estimators": 100
                                           })
    cm_list.append(cm)
    labels_list.append(classes)
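The snippet stops after collecting the two confusion matrices and label sets. One way to finish the comparison, sketched here with plain matplotlib under the assumption that a side-by-side figure is wanted (the project may instead reuse `utils.plot_confusion_matrix`):

import matplotlib.pyplot as plt


def plot_cm_pair(cm_list, labels_list, titles=('multiclass', 'binary')):
    """Show row-normalized confusion matrices next to each other."""
    fig, axes = plt.subplots(1, len(cm_list), figsize=(12, 5))
    for ax, cm, labels, title in zip(axes, cm_list, labels_list, titles):
        norm = cm / cm.sum(axis=1, keepdims=True)
        ax.imshow(norm, cmap='Blues', vmin=0, vmax=1)
        ax.set_xticks(range(len(labels)))
        ax.set_yticks(range(len(labels)))
        ax.set_xticklabels(labels, rotation=45)
        ax.set_yticklabels(labels)
        ax.set_title(title)
    fig.tight_layout()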
Example 7
def plot_comparaison_filter(fig_name):
    """
    plot a figure illustrating improved performance using RSSI unilateral correction
    :param fig_name: saved figure name
    :return: figure
    """
    dir = "log/peps mini"
    pattern = r'(internal|access|lock)\\\d{1,2}.csv$'
    pattern_valid = r'(3|6|9|12).csv$'
    df_report = pd.DataFrame()
    utils.construct_set(dir, pattern, pattern_valid, filter=0)
    X_train, X_valid, y_train, y_valid = utils.load_train_valid()
    cm, report_temp, classes = utils.train(X_train,
                                           X_valid,
                                           y_train,
                                           y_valid,
                                           method='RF',
                                           param={
                                               "max_features": 2,
                                               "n_estimators": 100
                                           })
    df_report = df_report.append(report_temp, ignore_index=True)
    utils.construct_set(dir, pattern, pattern_valid, filter=1)
    X_train, X_valid, y_train, y_valid = utils.load_train_valid()
    cm, report_temp, classes = utils.train(X_train,
                                           X_valid,
                                           y_train,
                                           y_valid,
                                           method='RF',
                                           param={
                                               "max_features": 2,
                                               "n_estimators": 100
                                           })
    df_report = df_report.append(report_temp, ignore_index=True)
    df_report.index = ['Original', 'Corrected']
    df_report.plot(kind='bar', rot=0, ylim=(0.6, 1))
    plt.title(fig_name)
    if not os.path.exists(dir_fig):
        os.makedirs(dir_fig)
    plt.savefig(dir_fig + '/' + fig_name + '.png')
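`filter=1` selects the unilateral RSSI correction that the 'Corrected' bars are built from. Its exact definition is inside `utils`; the sketch below is only one plausible reading, assuming the limiter clips sudden downward RSSI steps (e.g. body-shadowing fades) while letting increases through unchanged, with an invented threshold:

import numpy as np


def unilateral_limiter(rssi, max_drop=5.0):
    """Limit how fast the RSSI may fall between consecutive samples."""
    out = np.asarray(rssi, dtype=float).copy()
    for i in range(1, len(out)):
        # downward steps are limited to max_drop dB; upward steps pass through
        if out[i] < out[i - 1] - max_drop:
            out[i] = out[i - 1] - max_drop
    return out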
Example 8
import utils

if __name__ == '__main__':

    dir = "log/peps normal"
    pattern = r'(left|right|front|back|start|trunk|lock)\\\d{1,2}.csv$'
    pattern_valid = r'(3|6|9|12).csv$'
    utils.construct_set(dir, pattern, pattern_valid, filter=1)
    utils.save_to_csv()

    X, y = utils.load_all()
    X_train, X_valid, y_train, y_valid = utils.load_train_valid()

    # compare train result
    methods = ["Logistic", "LDA", "QDA", "KNN", "SVM", "RF", "GBM", "MLP"]
    params = [
        None, None, None, {
            "n_neighbors": 10
        }, {
            "C": 0.25,
            "gamma": 0.5
        }, {
            "max_features": 2,
            "n_estimators": 100
        }, {
            "n_estimators": 400,
            "max_depth": 3
        }, {
            "hidden_layer_sizes": (16, 8)
        }
    ]
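The snippet ends right after defining `methods` and `params`; the comparison itself would typically zip the two lists and collect one report row per method, just as `plot_report` does above. A short continuation sketch, assuming pandas is imported as `pd` at module level, using `pd.concat` (the current replacement for the deprecated `DataFrame.append`) and treating `report_temp` as a one-row mapping that includes a 'method' key:

    df_report = pd.DataFrame()
    for method, param in zip(methods, params):
        cm, report_temp, classes = utils.train(X_train, X_valid,
                                               y_train, y_valid,
                                               method=method, param=param)
        df_report = pd.concat([df_report, pd.DataFrame([report_temp])],
                              ignore_index=True)
    df_report.set_index('method', inplace=True)
    df_report.plot(kind='bar', rot=0, ylim=(0.6, 1))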