Code example #1
import numpy as np
import matplotlib.pyplot as plt
# u_metrics / u_plot are the project's metric and plotting helper modules and
# are assumed to be importable alongside this snippet.


def run(chosen_model, hp=None):
    # `hp` must contain a 'fingers' list selecting which fingers' sensor
    # channels to use; all remaining keys are forwarded to chosen_model.train.
    hp = dict(hp or {})
    fingers = hp.pop('fingers')
    # Each finger index f maps to the 6 consecutive sensor channels 6*f..6*f+5.
    columns = [c for f in fingers for c in range(6 * f, 6 * (f + 1))]

    # x_traing / x_testg / y_traing / y_testg and class_count are expected to
    # be defined earlier, e.g. as globals in the surrounding notebook.
    x_train = np.array(x_traing[:, :, columns])
    x_test = np.array(x_testg[:, :, columns])

    x_train = chosen_model.feature_extraction(x_train)
    x_test = chosen_model.feature_extraction(x_test)

    m = chosen_model.train(x_train, y_traing, class_count, **hp)
    fig, ax = plt.subplots(1,
                           1,
                           figsize=(1.3 * class_count, 1.3 * class_count))
    # Actual prediction results
    y_hat = chosen_model.test(m, x_test)
    cm = u_metrics.create_confusion_matrix(class_count, y_hat, y_testg)
    u_plot.plot_confusion_matrix(cm, ax=ax, cbar=False)

    class_names = [
        'Null', 'Hand sw. left', 'Hand sw. right', 'Pinch in', 'Pinch out',
        'Thumb d. tap', 'Grab', 'Ungrab', 'Page flip', 'Peace', 'Metal'
    ]
    ax.set_xticklabels(class_names, rotation=90)
    ax.invert_yaxis()
    ax.set_yticklabels(class_names, rotation=0)
    fig.show()
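For context, here is a minimal, self-contained way the function could be driven. Everything below is a hypothetical sketch: MajorityClassModel and the random arrays merely satisfy the interface and globals that run expects, and the project's u_metrics / u_plot helpers still need to be importable for the plotting calls inside run to work.

# Hypothetical usage sketch (not part of the original project).
class MajorityClassModel:
    def feature_extraction(self, x):
        # Flatten (samples, timesteps, channels) into (samples, features).
        return x.reshape(len(x), -1)

    def train(self, x, y, class_count, **hp):
        # The "model" is simply the most frequent training label.
        return np.bincount(y, minlength=class_count).argmax()

    def test(self, model, x):
        return np.full(len(x), model)


# Dummy globals in the shapes run() expects: (samples, timesteps, channels).
class_count = 11
x_traing = np.random.rand(40, 50, 30)
x_testg = np.random.rand(10, 50, 30)
y_traing = np.random.randint(0, class_count, 40)
y_testg = np.random.randint(0, class_count, 10)

run(MajorityClassModel(), hp={'fingers': [0, 1, 2]})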
Code example #2
                    # Excerpt: this block is the inner body of a hyperparameter
                    # grid search; the enclosing loops over lstm_layers, hidden,
                    # dropout and lr, and the start of the model's
                    # hyperparameter dict, are cut off above.
                        'dropout': dropout,
                    })
                    model.apply(init_weights)

                    trainer = Trainer(
                        gpus=1,
                        min_epochs=12,
                        max_epochs=100,
                        progress_bar_refresh_rate=0,
                    )
                    trainer.fit(model)

                    # Slice model.tds to get all inputs and labels at once;
                    # indexing a TensorDataset with [:] returns the full
                    # underlying tensors.
                    xs = model.tds[:][0]
                    ys = model.tds[:][1]
                    y_hat = common_test(model, xs.cpu().numpy())

                    fig, ax = plt.subplots(1, 1)
                    cm_train = u_metrics.create_confusion_matrix(
                        classes, y_hat, ys)
                    u_plot.plot_confusion_matrix(cm_train, title='All', ax=ax)
                    plt.show()
                    print('Dist: ',
                          np.histogram(ys.cpu().numpy(), bins=classes)[0])

                    acc_train, recall_train, f1_train = u_metrics.get_metrics(
                        y_hat,
                        ys.cpu().numpy())
                    print(
                        f'layers: {lstm_layers:6} hidden: {hidden:6} dout: {dropout:6} LR: {lr:6} Acc: {acc_train:6.3f}, Rec: {recall_train:6.3f}, F1: {f1_train:6.3f}'
                    )
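The model.tds[:][0] / model.tds[:][1] indexing above pulls every input and label out of the dataset in one step. Assuming tds is a torch.utils.data.TensorDataset (the snippet does not show its construction), the behaviour is the one sketched below.

import torch
from torch.utils.data import TensorDataset

x = torch.randn(8, 50, 6)        # 8 samples, 50 timesteps, 6 channels
y = torch.randint(0, 3, (8,))    # 8 integer class labels
tds = TensorDataset(x, y)

xs = tds[:][0]   # tds[:] returns the tuple (x[:], y[:]); [0] is all inputs
ys = tds[:][1]   # ... and [1] is all labels
assert torch.equal(xs, x) and torch.equal(ys, y)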
Code example #3
File: evaluate.py  Project: Zajozor/waveglove
def create_results(model,
                   test_f,
                   x_train,
                   x_test,
                   y_train,
                   y_test,
                   class_count,
                   dataset,
                   model_name,
                   show=False,
                   plot_y_dist=False):
    """Re-test `model` on the training data, evaluate it on the test data,
    plot both confusion matrices and log accuracy, recall and F1."""
    logger = get_logger()

    fig, (ax1, ax2) = plt.subplots(1,
                                   2,
                                   figsize=(1.6 * class_count,
                                            0.8 * class_count))

    # First re-test on the training data
    y_hat_train = test_f(model, x_train)
    cm_train = u_metrics.create_confusion_matrix(class_count, y_hat_train,
                                                 y_train)
    u_plot.plot_confusion_matrix(cm_train, title='Training', ax=ax1)

    acc_train, recall_train, f1_train = u_metrics.get_metrics(
        y_hat_train, y_train)
    add_log(
        model_name,
        f'TAcc: {acc_train:.3f} | TRec: {recall_train:.3f} | TF1: {f1_train:.3f} || ',
        newline=False)

    # Actual prediction results
    y_hat = test_f(model, x_test)
    cm = u_metrics.create_confusion_matrix(class_count, y_hat, y_test)
    u_plot.plot_confusion_matrix(cm, title='Test', ax=ax2)

    acc, recall, f1 = u_metrics.get_metrics(y_hat, y_test)
    add_log(model_name, f'Acc: {acc:.3f} | Rec: {recall:.3f} | F1: {f1:.3f}')

    fig.suptitle(f'Model: {model_name}, Dataset: {dataset.value}', fontsize=16)
    fig.subplots_adjust(top=.9)
    logger.experiment.add_figure('confusion_matrix', fig)
    if show:
        fig.show()

    if plot_y_dist:
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 7))
        u_plot.plot_class_histogram(y_train,
                                    ax=ax1,
                                    title='Class distribution on train')
        u_plot.plot_class_histogram(y_test,
                                    ax=ax2,
                                    title='Class distribution on test')
        fig.suptitle(f'Model: {model_name}, dataset: {dataset.value}',
                     fontsize=16)
        fig.subplots_adjust(top=.9)
        logger.experiment.add_figure('y_dist', fig)
        if show:
            fig.show()

    logger.save()
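A call to create_results might look like the sketch below. The Dataset enum, dummy_test_f and the random arrays are purely illustrative stand-ins (the real ones live elsewhere in the waveglove project), and the project's get_logger, add_log, u_metrics and u_plot helpers must be importable for the function body to run.

from enum import Enum

import numpy as np


class Dataset(Enum):
    # Placeholder for the project's dataset enum; only .value is used above.
    WAVEGLOVE_MULTI = 'waveglove_multi'


def dummy_test_f(model, x):
    # Stand-in test function: predict class 0 for every window.
    return np.zeros(len(x), dtype=int)


create_results(
    model=None,                  # dummy_test_f ignores the model object
    test_f=dummy_test_f,
    x_train=np.random.rand(40, 50, 6),
    x_test=np.random.rand(10, 50, 6),
    y_train=np.random.randint(0, 5, 40),
    y_test=np.random.randint(0, 5, 10),
    class_count=5,
    dataset=Dataset.WAVEGLOVE_MULTI,
    model_name='baseline',
    show=True,
    plot_y_dist=True,
)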