Example no. 1
0
def evaluate_model(metrics, categorical, model, y_test, X_test):
    """Predict on X_test and append classification metrics for this run.

    Args:
        metrics: running list of per-run metric lists; mutated in place.
        categorical: True when labels are one-hot encoded (multi-class),
            False for a binary 0/1 label vector.
        model: fitted Keras-style model exposing predict(X, verbose=...).
        y_test: ground-truth labels (one-hot if categorical, else 0/1).
        X_test: feature matrix to predict on.

    Returns:
        (metrics, y_pred): the updated metrics list and the raw predictions.
    """
    y_pred = model.predict(X_test, verbose=1)
    if categorical:
        # One-hot encode the per-row maximum so it matches y_test's coding.
        # NOTE(review): a tie on the row max marks more than one column —
        # confirm upstream outputs (e.g. softmax) make ties negligible.
        y_pred_coded = (y_pred == y_pred.max(axis=1)[:, None]).astype(int)
        # Weighted averaging accounts for class imbalance across classes.
        metric = [
            ['f1score', f1_score(y_test, y_pred_coded, average='weighted')],
            ['precision',
             precision_score(y_test, y_pred_coded, average='weighted')],
            ['recall', recall_score(y_test, y_pred_coded, average='weighted')],
            ['accuracy', accuracy_score(y_test, y_pred_coded)],
        ]
    else:
        # Threshold probabilities at 0.5 and flatten to a 1-D label vector.
        y_pred_coded = np.where(y_pred > 0.5, 1, 0).flatten()
        metric = [
            ['f1score', f1_score(y_test, y_pred_coded)],
            ['precision', precision_score(y_test, y_pred_coded)],
            ['recall', recall_score(y_test, y_pred_coded)],
            ['accuracy', accuracy_score(y_test, y_pred_coded)],
        ]
    print(metric)
    metrics.append(metric)

    return metrics, y_pred
Example no. 2
0
def evaluate_model(metrics, model, y_test, X_test):
    """Score binary predictions of *model* on the test split.

    Appends a list of [name, value] pairs (f1, precision, recall,
    accuracy) to *metrics* (mutated in place) and returns it together
    with the raw model predictions.
    """
    y_pred = model.predict(X_test, verbose=1)
    # Binarize probabilities at the 0.5 threshold and collapse to 1-D.
    labels = np.where(y_pred > 0.5, 1, 0).flatten()
    scorers = [
        ('f1score', f1_score),
        ('precision', precision_score),
        ('recall', recall_score),
        ('accuracy', accuracy_score),
    ]
    metric = [[name, scorer(y_test, labels)] for name, scorer in scorers]
    metrics.append(metric)
    return metrics, y_pred
Example no. 3
0
def ExtraTrees(X_train, X_test, y_train, y_test):
    """Train an ExtraTrees classifier and print f1/accuracy on the test split.

    Args:
        X_train, y_train: training features and labels.
        X_test, y_test: held-out features and labels used for evaluation.

    Returns:
        The fitted ExtraTreesClassifier.
    """
    model = ExtraTreesClassifier(random_state=0, n_estimators=150,
                                 bootstrap=True, oob_score=True,
                                 warm_start=True)
    model.fit(X_train, y_train)
    # Use the model to make predictions with the test data.
    y_pred = model.predict(X_test)
    # sklearn metrics take (y_true, y_pred); the original call had the
    # arguments swapped, which changes the (asymmetric) f1 score.
    metrics = [
        ['f1score', f1_score(y_test, y_pred)],
        ['accuracy', accuracy_score(y_test, y_pred)],
    ]
    print(metrics)
    return model
Example no. 4
0
def ExtraTrees(X_train, X_test, y_train, y_test):
    """Warm-start a persisted ExtraTrees model and print test metrics.

    Loads the checkpointed estimator from disk, continues fitting on
    (X_train, y_train), prints f1 and accuracy on the test split, and
    returns the fitted model.

    Args:
        X_train, y_train: training features and labels.
        X_test, y_test: held-out features and labels used for evaluation.

    Returns:
        The fitted estimator loaded from the joblib checkpoint.
    """
    # Resume from the saved round-4 estimator instead of training fresh.
    model = load('Models/exttree_inst3_round_4.joblib')
    model.fit(X_train, y_train)
    # Use the model to make predictions with the test data.
    y_pred = model.predict(X_test)
    # sklearn metrics take (y_true, y_pred); the original call had the
    # arguments swapped, which changes the (asymmetric) f1 score.
    metrics = [
        ['f1score', f1_score(y_test, y_pred)],
        ['accuracy', accuracy_score(y_test, y_pred)],
    ]
    print(metrics)
    return model
Example no. 5
0
def get_metrics():
    """Return the list of rotation-error metric callables from ``met``.

    Only the rotation metrics are active; the translation losses and
    errors are currently disabled.
    """
    return [
        met.rot_angle_error,
        met.tilt_error,
        met.pan_error,
        met.roll_error,
    ]