コード例 #1
0
def performance_dump(PARAMS,
                     PtdLabels,
                     GroundTruths,
                     labels,
                     info='',
                     fName_suffix=''):
    '''
    Compute classification performance from predicted vs. ground-truth
    labels, print a summary, and persist the results via
    misc.print_results.

    Parameters
    ----------
    PARAMS : dict
        Experiment configuration; must contain 'Model' and a 'featName'
        dict keyed by model name.
    PtdLabels : array-like
        Predicted labels.
    GroundTruths : array-like
        Ground-truth labels.
    labels : sequence
        Set of class labels. Length 2 selects ['neg', 'pos'] class
        names; any other length selects ['mu', 'sp', 'spmu'], and the
        third class is reported only when len(labels) == 3.
    info : str, optional
        Extra annotation line inserted into the result dict when
        non-empty.
    fName_suffix : str, optional
        Suffix forwarded to misc.print_results for the output file name.
    '''
    ConfMat, precision, recall, fscore = misc.getPerformance(
        PtdLabels, GroundTruths, labels)
    # Overall accuracy = trace of the confusion matrix over its sum.
    accuracy = np.round(np.sum(np.diag(ConfMat)) / np.sum(ConfMat), 4)
    print('Total data performance: ', fscore)
    print(ConfMat)

    if len(labels) == 2:
        classnames = ['neg', 'pos']
    else:
        classnames = ['mu', 'sp', 'spmu']

    # misc.print_results expects an ordered dict with stringified
    # integer keys; ln tracks the next key to assign.
    res_dict = {}
    res_dict['0'] = 'feature_name:' + PARAMS['featName'][PARAMS['Model']]
    res_dict['1'] = 'model:' + PARAMS['Model']
    ln = 2
    if info != '':
        res_dict[str(ln)] = info
        ln += 1
    res_dict[str(ln)] = 'loss:--'
    ln += 1
    res_dict[str(ln)] = 'accuracy:' + str(accuracy)
    ln += 1
    # Per-class Prec/Rec/F1. Classes 0 and 1 are always reported; the
    # third class only for 3-class problems (matches the original
    # hard-coded sequence of assignments).
    n_report = 3 if len(labels) == 3 else 2
    for i in range(n_report):
        res_dict[str(ln)] = 'Prec_' + classnames[i] + ':' + str(precision[i])
        ln += 1
        res_dict[str(ln)] = 'Rec_' + classnames[i] + ':' + str(recall[i])
        ln += 1
        res_dict[str(ln)] = 'F1_' + classnames[i] + ':' + str(fscore[i])
        ln += 1
    res_dict[str(ln)] = 'F1_avg:' + str(np.round(np.mean(fscore), 4))
    misc.print_results(PARAMS, fName_suffix, res_dict)
コード例 #2
0
                        feature_indexes_dict_keys[classifier_num] + '_iter' +
                        str(numIter))

                kwargs = {
                    '0': 'epochs:' + str(Train_Params['epochs']),
                    '1': 'batch_size:' + str(Train_Params['batch_size']),
                    '2': 'learning_rate:' + str(Train_Params['learning_rate']),
                    '3':
                    'training_time:' + str(Train_Params['trainingTimeTaken']),
                    '4': 'loss:' + str(Test_Params['loss']),
                    '5': 'performance:' + str(Test_Params['performance']),
                    '6': 'F_score_mu:' + str(Test_Params['fscore'][0]),
                    '7': 'F_score_sp:' + str(Test_Params['fscore'][1]),
                    '8': 'F_score_avg:' + str(Test_Params['fscore'][2]),
                }
                misc.print_results(PARAMS, **kwargs)
                print('Performance: ', Test_Params['performance'],
                      Test_Params['ConfMat'])
                print('Avg. F1-score: ', Test_Params['fscore'][-1])
                print('Training time taken: ',
                      Train_Params['trainingTimeTaken'],
                      Test_Params['testingTimeTaken'])

            elif PARAMS['clFunc'] == 'SVM-Ensemble':
                ''' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ '''
                PARAMS['modelName'] = PARAMS[
                    'opDir'] + '/SVM_' + feature_indexes_dict_keys[
                        classifier_num] + '_iter' + str(
                            PARAMS['iter']) + '.pkl'
                Train_Params, Test_Params = SVM.grid_search_svm(
                    PARAMS, data_dict_Classifier_Part)
コード例 #3
0
                i = 0
                if  (PARAMS['clFunc']=='DNN-Ensemble') or (PARAMS['clFunc']=='CNN-Ensemble'):
                    for key in Ensemble_Test_Params['individual_performances'].keys():
                        kwargs[str(i+1)] = key + ':' + str(Ensemble_Test_Params['individual_performances'][key]['fscore'][2])
                        i += 1
                elif  (PARAMS['clFunc']=='SVM-Ensemble') or (PARAMS['clFunc']=='NB-Ensemble'):
                    for key in All_Test_Params.keys():
                        kwargs[str(i+1)] = key + ':' + str(All_Test_Params[key]['fscore'][2])
                        i += 1
                    
                kwargs['8'] = 'Accuracy:'+str(Ensemble_Test_Params['accuracy_Ensemble'])
                kwargs['9'] = 'F_score_mu:'+str(Ensemble_Test_Params['fscore_Ensemble'][0])
                kwargs['10'] = 'F_score_sp:'+str(Ensemble_Test_Params['fscore_Ensemble'][1])
                kwargs['11'] = 'F_score_avg:'+str(Ensemble_Test_Params['fscore_Ensemble'][2])
                
                misc.print_results(PARAMS, '', **kwargs)
            else:
                if  PARAMS['clFunc']=='DNN-Ensemble':
                    Ensemble_Test_Params = DNN.test_dnn_ensemble_noise(PARAMS, Ensemble_Train_Params)
                elif PARAMS['clFunc']=='CNN-Ensemble':
                    Ensemble_Test_Params = CNN.test_cnn_ensemble_noise(PARAMS, Ensemble_Train_Params)

                kwargs = {}
                kwargs['0'] = 'feature_type:'+feature_type
                i = 1
                for dB in PARAMS['noise_dB_range']:
                    kwargs[str(i)] = str(dB)+'dB_ACC:'+str(Ensemble_Test_Params['accuracy_Ensemble'][dB])
                    kwargs[str(i+1)] = str(dB)+'dB_AVG_F1:'+str(Ensemble_Test_Params['fscore_Ensemble'][dB][2])
                    i += 2
                misc.print_results(PARAMS, 'noise_exp', **kwargs)
        
コード例 #4
0
            print('Test_Params: ', Test_Params.keys())
            print(Test_Params['precision_annot'], Test_Params['recall_annot'],
                  Test_Params['fscore_annot'])

            res_dict = {}
            res_dict['0'] = 'SMR:Annot'
            res_dict['1'] = Test_Params['metric_names'][0] + ':' + str(
                Test_Params['metrics'][0])
            res_dict['2'] = Test_Params['metric_names'][1] + ':' + str(
                Test_Params['metrics'][1])
            res_dict['3'] = 'Prec_mu:' + str(Test_Params['precision_annot'][0])
            res_dict['4'] = 'Rec_mu:' + str(Test_Params['recall_annot'][0])
            res_dict['5'] = 'F1_mu:' + str(Test_Params['fscore_annot'][0])
            res_dict['6'] = 'Prec_sp:' + str(Test_Params['precision_annot'][1])
            res_dict['7'] = 'Rec_sp:' + str(Test_Params['recall_annot'][1])
            res_dict['8'] = 'F1_sp:' + str(Test_Params['fscore_annot'][1])
            if len(PARAMS['classes']) == 3:
                res_dict['9'] = 'Prec_spmu:' + str(
                    Test_Params['precision_annot'][2])
                res_dict['10'] = 'Rec_spmu:' + str(
                    Test_Params['recall_annot'][2])
                res_dict['11'] = 'F1_spmu:' + str(
                    Test_Params['fscore_annot'][2])
            misc.print_results(PARAMS, '', res_dict)

            Train_Params = None
            Test_Params = None

            if PARAMS['use_GPU']:
                reset_TF_session()