Example #1
def computeGem2(gtf, pef, debug=1):
    # Assumes the enclosing module imports datatool.seddata, general.utils,
    # result_analyse.kfold_analyse as an, metric, and the plotting helper vs.
    # Load the ground-truth events and the predicted events.
    groundtruth_dataset = datatool.seddata.SED(gtf, 'gt', None)
    groundtruth_dataset.load()

    pred = datatool.seddata.SED(pef, '', groundtruth_dataset)
    pred.load()

    # Wrap both event lists in the structure expected by mergeEvals.
    evalres = {0: {'test': general.utils.Data('SED')}}
    evalres[0]['test'].real_events = groundtruth_dataset.activity_events
    evalres[0]['test'].pred_events = pred.activity_events

    # Merge the evaluation results using the GEM metric.
    res = an.mergeEvals(groundtruth_dataset, evalres, metric.Metrics.GEM())
    
    import pandas as pd
    from IPython.display import display, HTML


    # Collect the f1 row for every activity (and the overall 'avg') into one table.
    compact = pd.DataFrame(columns=['avg'])
    actres = {}
    for k in res:
        if k == 0:
            continue
        if k == 'avg':
            a = 'avg'
            actres[k] = {e: res['avg'][e] for e in res['avg']}
        else:
            a = groundtruth_dataset.activities[k]
            actres[k] = {e: res[k]['avg'][e] for e in res[k]['avg']}

        print('act=', a, '==============================')
        compact.loc[a] = None
        if len(actres[k]) == 0:
            print('No Eval')
        else:
            df2 = pd.DataFrame(actres[k]).round(2)
            for c in df2.columns:
                if c not in compact.columns:
                    compact[c] = None
                # Indexing with both labels avoids chained-assignment issues.
                compact.loc[a, c] = df2.loc['f1', c]
            if debug:
                display(HTML(df2.to_html()))
    
    # The row-wise mean of the collected f1 scores becomes the 'avg' column.
    compact['avg'] = compact.mean(axis=1)
    if debug:
        display(HTML(compact.to_html()))
        vs.plotJoinMetric({'eval': res}, [k for k in res],
                          groundtruth_dataset.activities_map)
    return compact
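# Hypothetical usage sketch (file paths are placeholders, not taken from the source):
# the function returns the compact per-activity f1 table and, with debug=1, also
# renders the intermediate tables and the joint metric plot.
compact = computeGem2('ground_truth_events.csv', 'predicted_events.csv', debug=0)
print(compact)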
Example #2
        def compare(files=widgets.SelectMultiple(options=result_analyse.
                                                 resultloader.get_runs_summary(
                                                     dataset),
                                                 description='Files',
                                                 layout=Layout(
                                                     width='100%',
                                                     height='180px')),
                    metric=metrics,
                    titles="title1,title2"):

            run_info = {}
            dataset = {}
            evalres = {}
            res = {}
            titles = titles.split(',')
            if len(titles) != len(files):
                print('Titles do not match the selected files; using file names instead')
                titles = files
            print(files)
            for i, file in enumerate(files):
                print(i, file)
                t = titles[i]
                run_info[t], dataset[t], evalres[t] = utils.loadState(file)
                # Discard the raw sensor events before merging.
                dataset[t].sensor_events = None
                res[t] = an.mergeEvals(dataset[t], evalres[t], metric)
            res = {t: res[t] for t in sorted(res.keys())}
            import pandas as pd
            from IPython.display import display, HTML

            actres = {}
            for k in dataset[t].activities_map:
                if (k == 0): continue
                actres[k] = {(m, e): res[m][k]['avg'][e]
                             for m in res for e in res[m][k]['avg']}
                print('act=', k, '==============================')
                #                 print(actres[k])
                if (len(actres[k]) == 0):
                    print('No Eval')
                else:
                    df2 = pd.DataFrame(actres[k]).round(2)
                    display(HTML(df2.to_html()))
            vs.plotJoinMetric(res, [k for k in res[t]],
                              dataset[t].activities_map)
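        # Assumed wiring (not shown in this fragment): because `compare` uses widget
        # objects and plain strings as parameter defaults, it is presumably registered
        # with ipywidgets' interact machinery, roughly like this.
        from ipywidgets import interact_manual
        interact_manual(compare)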
Example #3
            if term in x:
                result[terms[term]] = float(x[len(term) + 3:])

    return result
    
def eval_my_metric(real, pred, debug=0):
    result = {}
    result['Tatbul(a=0)'] = call(real, pred, 0, beta=1, alpha=0, gamma='one', delta='flat')
    result['Existence(a=1)'] = call(real, pred, 0, beta=1, alpha=1, gamma='one', delta='udf_delta')
    result['Cardinality(γ=reci)'] = call(real, pred, 0, beta=1, alpha=0, gamma='reciprocal', delta='udf_delta')
    result['Positional(δ=flat)'] = call(real, pred, 0, beta=1, alpha=0, gamma='one', delta='flat')

    return result
    # print(a)
    # # os.system(f'/workspace/TSAD-Evaluator/src/evaluate -v -tn {real} {pred} 1 0 one flat flat')
    # print(f'/workspace/TSAD-Evaluator/src/evaluate -v -tn {real} {pred} 1 0 one flat flat');
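# Hypothetical usage (the event lists below are placeholders): eval_my_metric simply
# forwards `real` and `pred` to `call` with the (alpha, gamma, delta) settings named
# in each key and returns a dict of scores.
# scores = eval_my_metric(real_events, pred_events)
# for name, value in scores.items():
#     print(name, value)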

if __name__ == "__main__":
    import result_analyse.resultloader
    import result_analyse.kfold_analyse as an
    # import metric.MyMetric as mymetric
    import metric.TatbulMetric as mymetric
    import general.utils as utils
    
    run_info, dataset, evalres = utils.loadState('201104_19-48-50-Home1')
    # Clear the per-fold Sdata and the raw sensor events before merging.
    for i in evalres:
        evalres[i]['test'].Sdata = None
    dataset.sensor_events = None
    res = an.mergeEvals(dataset, evalres, mymetric)
    print(res)
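    # Added illustration (not part of the original script): render the merged result
    # per activity as pandas tables, mirroring the other examples.
    import pandas as pd
    for k in res:
        if k == 0 or k == 'avg':
            continue
        print('act=', k, '==============================')
        print(pd.DataFrame(res[k]['avg']).round(2))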
Example #4
            evalres = {}
            res = {}
            titles = titles.split(',')
            if len(titles) != len(files):
                print('Titles do not match the selected files; using file names instead')
                titles = files
            print(files)
            for i, file in enumerate(files):
                print(i, file)
                t = titles[i]
                run_info[t], dataset[t], evalres[t] = utils.loadState(file)
                # Discard the raw sensor events before merging.
                dataset[t].sensor_events = None
                res[t] = an.mergeEvals(dataset[t], evalres[t], metric)
            res = {t: res[t] for t in sorted(res.keys())}
            import pandas as pd

            actres = {}
            for k in dataset[t].activities_map:
                if k == 0:
                    continue
                actres[k] = {m: res[m][k]['avg'] for m in res}
                print('act=', k, '==============================')
                print(actres[k])
                if len(actres[k]) == 0:
                    print('No Eval')
                else:
                    df2 = pd.DataFrame([actres[k]])
                    print(df2)
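            # Follow-up sketch (not in the original fragment): collect the f1 row of every
            # run/activity pair into a single comparison table, similar to Example #1; the
            # 'f1' row label is assumed to exist, as in that example.
            f1_table = pd.DataFrame({
                (m, k): pd.DataFrame(res[m][k]['avg']).loc['f1']
                for m in res for k in dataset[m].activities_map
                if k != 0 and len(res[m][k]['avg']) > 0
            }).round(2)
            print(f1_table)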