Example #1
def load_run_table(file):
    # Try the cached run table first; fall back to rebuilding it from the raw run state.
    runtable = utils.loadState(file, 'runtable1', raiseException=False)
    if runtable is None:
        res = utils.loadState(file)
        if len(res) != 3:
            logger.warning('File %s cannot be imported' % file)
            return None
        run_info, datasetdscr, evalres = res
        runtable = getRunTable(run_info, datasetdscr, evalres)
        utils.saveState(runtable, file, 'runtable1')
    return runtable
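
A minimal usage sketch for load_run_table; the run file name is borrowed from Example #7 and is only illustrative:

runtable = load_run_table('201104_19-48-50-Home1')  # illustrative run id taken from Example #7
if runtable is not None:
    print(runtable)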
Example #2
def load_run_info(file):
    # Prefer the cached 'info' entry; otherwise rebuild it from the raw run state.
    runinfo = utils.loadState(file, 'info', raiseException=False)
    if runinfo is None:
        try:
            res = utils.loadState(file)
            if len(res) != 3:
                logger.warning('File %s cannot be imported' % file)
                return None
            run_info, datasetdscr, evalres = res
            runinfo = getRunInfo(run_info, datasetdscr, evalres)
            utils.saveState([runinfo], file, 'info')
            return runinfo
        except Exception:
            logger.warning('File %s cannot be imported' % file)
            return None
    return runinfo[0]
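
A minimal usage sketch for load_run_info, assuming the fold structure accessed in Example #6 (res['folds'][0]['test']['quality']); the file name is again only illustrative:

info = load_run_info('201104_19-48-50-Home1')  # illustrative run id
if info is not None:
    print(info['folds'][0]['test']['quality']['f1'])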
Example #3
        def compare(files=widgets.SelectMultiple(options=result_analyse.
                                                 resultloader.get_runs_summary(
                                                     dataset),
                                                 description='Files',
                                                 layout=Layout(
                                                     width='100%',
                                                     height='180px')),
                    metric=metrics,
                    titles="title1,title2"):

            run_info = {}
            dataset = {}
            evalres = {}
            res = {}
            titles = titles.split(',')
            if len(titles) != len(files):
                print('Titles do not match the files; using file names instead')
                titles = files
            print(files)
            for i, file in enumerate(files):
                print(i, file)
                t = titles[i]
                run_info[t], dataset[t], evalres[t] = utils.loadState(file)
                #             print(evalres[t])
                #                 for i in evalres[t]:
                #                     evalres[t][i]['test'].Sdata=None

                dataset[t].sensor_events = None
                res[t] = an.mergeEvals(dataset[t], evalres[t], metric)
            res = {t: res[t] for t in sorted(res.keys())}
            import pandas as pd
            from IPython.display import display, HTML

            actres = {}
            for k in dataset[t].activities_map:
                if (k == 0): continue
                actres[k] = {(m, e): res[m][k]['avg'][e]
                             for m in res for e in res[m][k]['avg']}
                print('act=', k, '==============================')
                #                 print(actres[k])
                if (len(actres[k]) == 0):
                    print('No Eval')
                else:
                    df2 = pd.DataFrame(actres[k]).round(2)
                    display(HTML(df2.to_html()))
            vs.plotJoinMetric(res, [k for k in res[t]],
                              dataset[t].activities_map)
Example #4
def get(key, valf):
    # Memoise `valf()` on disk: return the cached value for `key` if present,
    # otherwise compute it, cache it, and record the original key for debugging.
    hkey = hashkey(key)
    try:
        val = utils.loadState(cachefolder, hkey)
        logger.debug(f'cached file found {key} {hkey}')
    except Exception as e:
        if not os.path.exists(f'save_data/{hkey}'):
            logger.debug(f'cached file not found {key} {hkey}')
        else:
            logger.error(f'error in cached {e}', exc_info=True)

        val = valf()
        utils.saveState(val, cachefolder, hkey)
        # The `with` block closes the file; no explicit close() is needed.
        with open(f'save_data/{cachefolder}/{hkey}.txt', 'w') as f:
            print(key, file=f)

    return val
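
A minimal usage sketch for the cache helper, assuming cachefolder and hashkey are configured as in the snippet; the key and the computation are hypothetical:

total = get('sum-of-first-million', lambda: sum(range(10**6)))  # hypothetical key and workload
print(total)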
Example #5
def get_runs_summary2(dataset=''):
    # List saved runs (newest first) whose file name contains `dataset`
    # and build a display name from the first fold's f1 score.
    files = os.listdir('save_data/')
    files.sort(key=lambda f: os.path.getmtime('save_data/' + f), reverse=True)
    result = []
    for item in files:
        if dataset not in item:
            continue
        try:
            res = utils.loadState(item, 'info')
            f1 = res['folds'][0]['quality']['f1']
            disp_name = item + ":f1=" + str(f1) + "==" + res['folds'][0]['shortrunname'] + "====" + str(res['folds'])
            result.append((disp_name, item))
        except Exception:
            logger.warning('File %s cannot be imported' % item)
    return result
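
A minimal usage sketch; 'Home1' is a dataset-name substring taken from the run file names seen elsewhere in these examples:

for disp_name, item in get_runs_summary2('Home1'):
    print(disp_name)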
Example #6
def loader(file):
    # Build a (display_name, file) pair for a saved run; `dataset` is taken from the
    # enclosing scope and acts as a substring filter on the file name.
    if dataset not in file:
        return None

    try:
        res = utils.load_run_info(file)
        f1 = res['folds'][0]['test']['quality']['f1']
        disp_name = file + ":f1=" + str(f1) + "==" + res['folds'][0]['test']['shortrunname'] + "====" + str(res['folds'])
        return (disp_name, file)
    except Exception:
        # Fall back to the raw run state for older files.
        try:
            res = utils.loadState(file)
            if len(res) != 3:
                raise ValueError('unexpected state format')
            run_info, datasetdscr, evalres = res
            disp_name = 'dataset:%s date:%s %s' % (run_info['dataset'], run_info['run_date'], evalres[0]['test'].shortrunname)
            return (disp_name, file)
        except Exception as e:
            logger.warning('File %s cannot be imported' % file)
            import sys
            import traceback
            print(e, file=sys.stderr)
            traceback.print_exc()
            return None
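
A hypothetical driver for loader, assuming dataset is defined in the enclosing scope (as the snippet expects) and os is imported:

entries = [loader(f) for f in os.listdir('save_data/')]  # hypothetical driver loop
entries = [e for e in entries if e is not None]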
Example #7
            if(term in x):
                result[terms[term]]=float(x[len(term)+3:])
       
    return result
    
def eval_my_metric(real, pred, debug=0):
    # Evaluate real vs. predicted events under several Tatbul-style range-metric settings.
    result = {}
    result['Tatbul(a=0)'] = call(real, pred, 0, beta=1, alpha=0, gamma='one', delta='flat')
    result['Existence(a=1)'] = call(real, pred, 0, beta=1, alpha=1, gamma='one', delta='udf_delta')
    result['Cardinality(γ=reci)'] = call(real, pred, 0, beta=1, alpha=0, gamma='reciprocal', delta='udf_delta')
    result['Positional(δ=flat)'] = call(real, pred, 0, beta=1, alpha=0, gamma='one', delta='flat')

    return result
    # print(a)
    # # os.system(f'/workspace/TSAD-Evaluator/src/evaluate -v -tn {real} {pred} 1 0 one flat flat')
    # print(f'/workspace/TSAD-Evaluator/src/evaluate -v -tn {real} {pred} 1 0 one flat flat');

if __name__ == "__main__":
    import result_analyse.resultloader
    import result_analyse.kfold_analyse as an
    # import metric.MyMetric as mymetric
    import metric.TatbulMetric as mymetric
    import general.utils as utils
    
    run_info,dataset,evalres=utils.loadState('201104_19-48-50-Home1')
    #             print(evalres[t])
    for i in evalres:
        evalres[i]['test'].Sdata=None
    dataset.sensor_events=None
    res=an.mergeEvals(dataset,evalres,mymetric)
    print(res)
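
A minimal follow-up sketch that reuses evalres from the __main__ block above; it assumes fold 0 exists and that each fold's 'test' result exposes real_events/pred_events as in Example #12:

scores = eval_my_metric(evalres[0]['test'].real_events, evalres[0]['test'].pred_events)
for name, value in scores.items():
    print(name, value)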
Example #8
        events = pd.DataFrame(events)
        if len(events) > 0:
            events = events.sort_values(['StartTime'])
        # Renumber rows after sorting without keeping the old index as a column.
        events = events.reset_index(drop=True)
        return events


if __name__ == '__main__':
    import numpy as np
    import result_analyse.visualisation as vs
    import metric.CMbasedMetric as CMbasedMetric
    import general.utils as utils
    # gt = vs.convert2event(np.array([(65,75), (157,187)]))
    # a  = vs.convert2event(np.array([(66,73), (78,126)]))
    r, p = utils.loadState('ali')
    a = utils.loadState('200506_17-08-41-Home1')

    # r=a[2][0].real_events
    # p=a[2][0].pred_events
    vs.plotJoinAct(a[1], r, p)
    times = []
    act_data = np.zeros((len(p), 12))
    for i in range(len(p)):
        times.append({'begin': p.iloc[i]['StartTime'], 'end': p.iloc[i]['EndTime']})
        act_data[i, p.iloc[i]['Activity']] = 1

    com = EmptyCombiner()
    p2 = com.combine2(times, act_data)
    vs.plotJoinAct(a[1], p, p2)
    vs.plotJoinAct(a[1], r, p2)
Example #9
            files=['200515_13-42-24-VanKasteren']
            titles='a,b,c'
            metric=metric.Metrics.Tatbul()
            run_info={}
            dataset={}
            evalres={}
            res={}
            titles=titles.split(',')
            if len(titles) != len(files):
                print('Titles do not match the files; using file names instead')
                titles = files
            print(files)
            for i, file in enumerate(files):
                print(i,file)
                t=titles[i]
                run_info[t],dataset[t],evalres[t]=utils.loadState(file)
                # print(evalres[t])
                # for i in evalres[t]:
                #     evalres[t][i]['test'].Sdata=None

                dataset[t].sensor_events=None
                res[t]=an.mergeEvals(dataset[t],evalres[t],metric)
            res={t:res[t] for t in sorted(res.keys())}
            import pandas as pd
            
            actres={}
            for k in dataset[t].activities_map:
                if(k==0):continue
                actres[k]={m:res[m][k]['avg'] for m in res}    
                print('act=',k,'==============================')
                print(actres[k])
Example #10
import result_analyse.visualisation as vs
import logging
import numpy as np
from intervaltree.intervaltree import IntervalTree
from general.utils import Data 
from general import utils
logger = logging.getLogger(__file__)
from collections import defaultdict
from metric.CMbasedMetric import CMbasedMetric
from metric.event_confusion_matrix import event_confusion_matrix

savedata=utils.loadState('sepg1c')
intree=savedata.intree

segments = defaultdict(dict)
for item in intree.items():
    # Key each segment by packing its begin/end timestamps into a single integer.
    key = item.begin.value << 64 | item.end.value
    segments[key]['begin'] = item.begin
    segments[key]['end'] = item.end
    segments[key][item.data.gindx] = item.data
    # segments[str(item.begin)+'-'+str(item.end)][item.data.gindx]=item.data

print('finished')
acts=savedata.gacts[-1] #savedata.acts
f=np.zeros((len(segments),len(savedata.gacts)*len(acts)))
label=np.zeros(len(segments))
times=[]
iseg=0
for timeseg in segments:
    seg=segments[timeseg]
    # b=timeseg>>64
Example #11
    tree.merge_equals(data_reducer=data_reducer)

    return tree


def cmTreePlot(tree):
    # Split the combined tree into separate real (R) and predicted (P) interval trees for plotting.
    ptree = IntervalTree()
    rtree = IntervalTree()
    for item in tree:
        if item.data.R is not None:
            rtree[item.begin:item.end] = item
        if item.data.P is not None:
            ptree[item.begin:item.end] = item
    from result_analyse.visualisation import plotJoinTree
    plotJoinTree(rtree, ptree)


if __name__ == '__main__':
    import result_analyse.visualisation as vs
    from metric.CMbasedMetric import CMbasedMetric  # import the function, not the module
    # gt = vs.convert2event(np.array([(65,75), (157,187)]))
    # a  = vs.convert2event(np.array([(66,73), (78,126)]))
    import general.utils as utils
    r, p = utils.loadState('ali')
    cm = event_confusion_matrix(r, p, range(11))

    print(cm)
    print(CMbasedMetric(cm, average='macro'))
    print(CMbasedMetric(cm))
Example #12
    def result_selector(file=result_analyse.resultloader.get_runs()):
        if file is None: return
        print('Analysing ', file)
        run_info, dataset, evalres = utils.loadState(file)
        stime = dataset.activity_events.iloc[0].StartTime
        #etime=stime+np.timedelta64(1,'D')
        etime = dataset.activity_events.iloc[-1].EndTime

        for i in range(len(evalres)):
            quality = evalres[i]['test'].quality
            print('Evaluation quality fold=%d is %s' % (i, quality))
        print(len(dataset.sensor_events))

        #     vs.plot_CM(dataset,evalres)

        @interact
        def viewFold(fold=range(len(evalres))):
            @interact_manual
            def view(
                    start_date=widgets.DatePicker(value=pd.to_datetime(stime)),
                    end_date=widgets.DatePicker(value=pd.to_datetime(etime)),
                    debug=widgets.Checkbox(value=False)):
                duration = (pd.to_datetime(start_date),
                            pd.to_datetime(end_date))
                duration2 = (pd.to_datetime(start_date),
                             pd.to_datetime(start_date) +
                             pd.DateOffset(days=7))
                real_events = vs.filterTime(evalres[fold]['test'].real_events,
                                            duration)
                pred_events = vs.filterTime(evalres[fold]['test'].pred_events,
                                            duration)
                #vs.plotJoinAct(dataset,real_events,pred_events)
                acts = [p for p in dataset.activities_map]
                labels = [dataset.activities_map[p] for p in acts]
                print(acts)
                print(labels)
                vs.plotJoinAct2(real_events,
                                pred_events,
                                acts,
                                labels,
                                duration=duration2)
                #vs.plot_per_act(dataset,{'test':evalres})

                from matplotlib import pyplot as plt
                plt.rc_context(rc={'figure.max_open_warning': 0})
                import result_analyse.SpiderChart
                result_analyse.SpiderChart.radar_factory(5, frame='polygon')
                acount = len(dataset.activities_map)
                a_fig, a_ax = plt.subplots(
                    acount - 1,
                    1,
                    figsize=(10, acount * .25),
                )
                #             a_fig.tight_layout(pad=3.0)
                col = 4
                row = int(np.ceil((acount - 1.0) / float(col)))
                m_fig, m_ax = plt.subplots(row,
                                           col,
                                           figsize=(col * 3, row * 3),
                                           subplot_kw=dict(projection='radar'))
                if not isinstance(a_ax, np.ndarray):
                    print('dddd', a_ax)
                    print(type(a_ax))
                    a_ax = np.array([a_ax])
                else:
                    m_ax = m_ax.flatten()
                for i in range(acount - 1, len(m_ax)):
                    m_ax[i].set_visible(False)

                for i in range(1, len(dataset.activities_map)):
                    #                 real_events2,pred_events2=vs.remove_gaps(real_events,pred_events,i)
                    #real_events2,pred_events2=vs.remove_gaps(real_events,pred_events,i,max_events=10)
                    real_events2, pred_events2 = real_events, pred_events
                    vs.plotJoinAct(dataset,
                                   real_events,
                                   pred_events,
                                   onlyAct=i,
                                   ax=a_ax[i - 1])
                    try:
                        #                     vs.plotJoinAct(dataset,real_events2,pred_events2,onlyAct=i,ax=a_ax[i-1])

                        vs.plotMyMetric2(dataset,
                                         real_events2,
                                         pred_events2,
                                         onlyAct=i,
                                         ax=m_ax[i - 1],
                                         debug=debug,
                                         calcne=0)

                    except Exception as e:
                        import sys
                        import traceback
                        print(e, file=sys.stderr)
                        traceback.print_exc()

                    #    print('error in ',i)
                    #vs.plotWardMetric(dataset,real_events,pred_events,onlyAct=i)
    #             vs.plotJoinMyMetric(dataset,real_events2,pred_events2,calcne=0)
    # a_fig.show()
                m_fig.tight_layout(pad=0, h_pad=-20.0, w_pad=3.0)
Example #13
    def result_selector(file=result_analyse.resultloader.get_runs()):
        if file is None: return
        print('Analysing ', file)
        run_info, dataset, evalres = utils.loadState(file)
        stime = dataset.activity_events.iloc[0].StartTime
        #etime=stime+np.timedelta64(1,'D')
        etime = dataset.activity_events.iloc[-1].EndTime

        for i in range(len(evalres)):
            quality = evalres[i]['test'].quality
            print('Evaluation quality fold=%d is %s' % (i, quality))
        print(len(dataset.sensor_events))

        #     vs.plot_CM(dataset,evalres)

        @interact
        def viewFold(fold=range(len(evalres))):
            @interact_manual
            def view(
                    start_date=widgets.DatePicker(value=pd.to_datetime(stime)),
                    end_date=widgets.DatePicker(value=pd.to_datetime(etime)),
                    debug=widgets.Checkbox(value=False)):
                duration = (pd.to_datetime(start_date),
                            pd.to_datetime(end_date))
                duration2 = (pd.to_datetime(start_date),
                             pd.to_datetime(start_date) +
                             pd.DateOffset(days=7))
                real_events = vs.filterTime(evalres[fold]['test'].real_events,
                                            duration)
                pred_events = vs.filterTime(evalres[fold]['test'].pred_events,
                                            duration)
                #vs.plotJoinAct(dataset,real_events,pred_events)
                acts = [p for p in dataset.activities_map]
                labels = [dataset.activities_map[p] for p in acts]
                print(acts)
                print(labels)
                vs.plotJoinAct2(real_events,
                                pred_events,
                                acts,
                                labels,
                                duration=duration2)
                #vs.plot_per_act(dataset,{'test':evalres})

                from matplotlib import pyplot as plt
                plt.rc_context(rc={'figure.max_open_warning': 0})
                acount = len(dataset.activities_map)

                for i in range(1, len(dataset.activities_map)):
                    #                 real_events2,pred_events2=vs.remove_gaps(real_events,pred_events,i)
                    #real_events2,pred_events2=vs.remove_gaps(real_events,pred_events,i,max_events=10)
                    real_events2, pred_events2 = real_events, pred_events
                    # vs.plotJoinAct(dataset,real_events,pred_events,onlyAct=i,ax=a_ax[i-1])
                    try:
                        #                     vs.plotJoinAct(dataset,real_events2,pred_events2,onlyAct=i,ax=a_ax[i-1])
                        vs.plotWardMetric(dataset,
                                          real_events,
                                          pred_events,
                                          onlyAct=i)

                    except Exception as e:
                        import sys
                        import traceback
                        print(e, file=sys.stderr)
                        traceback.print_exc()
Example #14
import numpy as np
from activity_fetcher.CookActivityFetcher import CookActivityFetcher


def calc_cm_per_s_event(dataset, evalres):
    activities = dataset.activities
    summycm = np.zeros((len(activities), len(activities)))

    for i in range(len(evalres)):
        # print(evalres[i]['test'].__dict__)
        sdata = evalres[i]['test'].Sdata
        cook = CookActivityFetcher()
        cook.precompute(sdata)
        c = 0
        for k in range(0, len(sdata.s_event_list)):
            real = cook.getActivity2(sdata.s_event_list, [k])
            while (c < len(sdata.set_window)
                   and k >= max(sdata.set_window[c])):
                c = c + 1
            if (c >= len(sdata.set_window)):
                break
            pred = sdata.label[c]
            summycm[real][pred] = summycm[real][pred] + 1
        # evalres[i]['test'].Sdata.s_event_list[evalres[i]['test'].Sdata.set_window[400][-1]][1]

    return summycm


if __name__ == '__main__':
    import general.utils as utils
    [run_info, datasetdscr, evalres] = utils.loadState('200515_13-22-21-Home2')
    calc_cm_per_s_event(datasetdscr, evalres)
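
A minimal follow-up sketch that row-normalizes the summed confusion matrix for inspection; datasetdscr and evalres come from the __main__ block above:

cm = calc_cm_per_s_event(datasetdscr, evalres)
row_sums = cm.sum(axis=1, keepdims=True)
print(np.divide(cm, row_sums, out=np.zeros_like(cm), where=row_sums > 0))  # rows with no events stay zero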
Example #15
def display_result(file):
    [run_info, datasetdscr, evalres] = utils.loadState(file)
    for i in range(len(evalres)):
        evaldata = evalres[i]['test']
        quality = evaldata.quality
        logger.debug('Evaluation quality fold=%d is f1=%.2f acc=%.2f precision=%.2f recall=%.2f' %
                     (i, quality.f1, quality.accuracy, quality.precision, quality.recall))
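
A minimal usage sketch; the run file name is borrowed from Example #14 and may not exist locally:

display_result('200515_13-22-21-Home2')  # illustrative run id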