Example #1
def mmr_eval_angle(yTest,yPred):

  ## angle based error: the accuracy field reports the root mean square
  ## of the angles between the corresponding rows of yTest and yPred

  cEvaluation=mmr_base_classes.cls_evaluation()

  xdot=np.sum(yTest*yPred,axis=1)
  xnormt=np.sqrt(np.sum(yTest**2,axis=1))
  xnormt=xnormt+(xnormt==0)
  xnormp=np.sqrt(np.sum(yPred**2,axis=1))
  xnormp=xnormp+(xnormp==0)
  xangle=xdot/(xnormt*xnormp)
  iacc=np.where(xangle>1.0)
  xangle[iacc]=1.0
  iacc=np.where(xangle<-1.0)
  xangle[iacc]=-1.0
  xangle=np.arccos(xangle)
  accuracy=np.sqrt(np.mean(xangle**2))

  cEvaluation.accuracy=accuracy
  cEvaluation.precision=0
  cEvaluation.recall=0
  cEvaluation.f1=0

  cEvaluation.confusion=np.zeros((2,2))
  cEvaluation.confusion[0,0]=0
  cEvaluation.confusion[0,1]=0
  cEvaluation.confusion[1,0]=0
  cEvaluation.confusion[1,1]=0
  
  return cEvaluation
Example #2
def mmr_eval_angle(yTest, yPred):

    ## angle based error: the accuracy field reports the root mean square
    ## of the angles between the corresponding rows of yTest and yPred

    cEvaluation = mmr_base_classes.cls_evaluation()

    xdot = np.sum(yTest * yPred, axis=1)
    xnormt = np.sqrt(np.sum(yTest**2, axis=1))
    xnormt = xnormt + (xnormt == 0)
    xnormp = np.sqrt(np.sum(yPred**2, axis=1))
    xnormp = xnormp + (xnormp == 0)
    xangle = xdot / (xnormt * xnormp)
    iacc = np.where(xangle > 1.0)
    xangle[iacc] = 1.0
    iacc = np.where(xangle < -1.0)
    xangle[iacc] = -1.0
    xangle = np.arccos(xangle)
    accuracy = np.sqrt(np.mean(xangle**2))

    cEvaluation.accuracy = accuracy
    cEvaluation.precision = 0
    cEvaluation.recall = 0
    cEvaluation.f1 = 0

    cEvaluation.confusion = np.zeros((2, 2))
    cEvaluation.confusion[0, 0] = 0
    cEvaluation.confusion[0, 1] = 0
    cEvaluation.confusion[1, 0] = 0
    cEvaluation.confusion[1, 1] = 0

    return cEvaluation
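
Both listings of mmr_eval_angle differ only in indentation. For each row they take the angle between the test and the predicted vector (guarding against zero-norm rows and clipping the cosine into [-1, 1] before arccos) and store the root mean square of those angles in the accuracy field. A minimal, self-contained sketch of the same metric, leaving out the mmr_base_classes.cls_evaluation container and using toy arrays purely for illustration:

import numpy as np

def angle_rmse(yTest, yPred):
    ## root mean square of the angles between corresponding rows
    xdot = np.sum(yTest * yPred, axis=1)
    xnormt = np.sqrt(np.sum(yTest**2, axis=1))
    xnormt = xnormt + (xnormt == 0)            ## avoid division by zero
    xnormp = np.sqrt(np.sum(yPred**2, axis=1))
    xnormp = xnormp + (xnormp == 0)
    xcos = np.clip(xdot / (xnormt * xnormp), -1.0, 1.0)   ## numerical safety
    return np.sqrt(np.mean(np.arccos(xcos)**2))

yTest = np.array([[1.0, 0.0], [0.0, 1.0]])
yPred = np.array([[1.0, 1.0], [0.0, 2.0]])
print(angle_rmse(yTest, yPred))    ## ~0.555 rad: row angles of 45 and 0 degrees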
Example #3
def mmr_eval_real(yTest,yPred):

  ## (m,n)=yTest.shape

  cEvaluation=mmr_base_classes.cls_evaluation()

  ## RMSE
  cEvaluation.accuracy=np.sqrt(np.mean(np.sum((yTest-yPred)**2,axis=1)))  

  cEvaluation.precision=0
  cEvaluation.recall=0
  cEvaluation.f1=0

  cEvaluation.confusion=np.zeros((2,2))
  return cEvaluation
Example #4
def mmr_eval_real(yTest, yPred):

    ## (m,n)=yTest.shape

    cEvaluation = mmr_base_classes.cls_evaluation()

    ## RMSE
    cEvaluation.accuracy = np.sqrt(np.mean(np.sum((yTest - yPred)**2, axis=1)))

    cEvaluation.precision = 0
    cEvaluation.recall = 0
    cEvaluation.f1 = 0

    cEvaluation.confusion = np.zeros((2, 2))
    return cEvaluation
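
The accuracy field of mmr_eval_real is the root mean square, over rows, of the Euclidean distance between test and predicted vectors. A hand-checkable toy example (the arrays are illustrative only, not from the library):

import numpy as np

yTest = np.array([[1.0, 2.0], [3.0, 4.0]])
yPred = np.array([[1.0, 1.0], [2.0, 4.0]])
## squared row distances are 1 and 1, so the RMSE is sqrt((1+1)/2) = 1.0
rmse = np.sqrt(np.mean(np.sum((yTest - yPred)**2, axis=1)))
print(rmse)    ## 1.0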
Example #5
def mmr_eval_binvector(yTest,yPred):

  accuracy=0.0
  precision=0.0
  recall=0.0

  (m,n)=yTest.shape

  cEvaluation=mmr_base_classes.cls_evaluation()
  cEvaluation.classconfusion=np.zeros((n,n))

  ## class confusion
  for i in range(m):
    for j in range(n):
      for k in range(n):
        if yTest[i,j]>0 and yPred[i,k]>0:
          cEvaluation.classconfusion[k,j]+=1

  xtp=(yTest>0)*(yPred>0)
  xfp=(yTest<=0)*(yPred>0)
  xfn=(yTest>0)*(yPred<=0)
  xtn=(yTest<=0)*(yPred<=0)
  
  xctp=np.sum(xtp,axis=0)
  xcfp=np.sum(xfp,axis=0)
  xcfn=np.sum(xfn,axis=0)
  ## xctn=np.sum(xtn,axis=0)


  cEvaluation.cprecision=np.zeros(n)
  cEvaluation.crecall=np.zeros(n)
  cEvaluation.cf1=np.zeros(n)
  for i in range(n):
    if xctp[i]+xcfp[i]>0:
      cEvaluation.cprecision[i]=xctp[i]/(xctp[i]+xcfp[i])
    else:
      cEvaluation.cprecision[i]=0

    if xctp[i]+xcfn[i]>0:
      cEvaluation.crecall[i]=xctp[i]/(xctp[i]+xcfn[i])
    else:
      cEvaluation.crecall[i]=0

    if cEvaluation.cprecision[i]+cEvaluation.crecall[i]>0:
      cEvaluation.cf1[i]=2*cEvaluation.cprecision[i]*cEvaluation.crecall[i] \
                       /(cEvaluation.cprecision[i]+cEvaluation.crecall[i])
    else:
      cEvaluation.cf1[i]=0

  tp=np.sum(xtp)
  fp=np.sum(xfp)
  fn=np.sum(xfn)
  tn=np.sum(xtn)

  for i in range(m):
    if np.sum(yTest[i]*yPred[i])==n:
      accuracy+=1.0

  if tp+fp>0:
    precision=tp/(tp+fp)
  else:
    precision=0
    
  if tp+fn>0:
    recall=tp/(tp+fn)
  else:
    recall=0
  
  if precision+recall>0:
    f1=2*precision*recall/(precision+recall)
  else:
    f1=0

  cEvaluation.accuracy=accuracy/m
  cEvaluation.precision=precision
  cEvaluation.recall=recall
  cEvaluation.f1=f1

  cEvaluation.confusion=np.zeros((2,2))
  cEvaluation.confusion[0,0]=tp
  cEvaluation.confusion[0,1]=fp
  cEvaluation.confusion[1,0]=fn
  cEvaluation.confusion[1,1]=tn

  return cEvaluation
Example #6
def mmr_eval_binvector(yTest, yPred):

    accuracy = 0.0
    precision = 0.0
    recall = 0.0

    (m, n) = yTest.shape

    cEvaluation = mmr_base_classes.cls_evaluation()
    cEvaluation.classconfusion = np.zeros((n, n))

    ## class confusion
    for i in range(m):
        for j in range(n):
            for k in range(n):
                if yTest[i, j] > 0 and yPred[i, k] > 0:
                    cEvaluation.classconfusion[k, j] += 1

    xtp = (yTest > 0) * (yPred > 0)
    xfp = (yTest <= 0) * (yPred > 0)
    xfn = (yTest > 0) * (yPred <= 0)
    xtn = (yTest <= 0) * (yPred <= 0)

    xctp = np.sum(xtp, axis=0)
    xcfp = np.sum(xfp, axis=0)
    xcfn = np.sum(xfn, axis=0)
    ## xctn=np.sum(xtn,axis=0)

    cEvaluation.cprecision = np.zeros(n)
    cEvaluation.crecall = np.zeros(n)
    cEvaluation.cf1 = np.zeros(n)
    for i in range(n):
        if xctp[i] + xcfp[i] > 0:
            cEvaluation.cprecision[i] = xctp[i] / (xctp[i] + xcfp[i])
        else:
            cEvaluation.cprecision[i] = 0

        if xctp[i] + xcfn[i] > 0:
            cEvaluation.crecall[i] = xctp[i] / (xctp[i] + xcfn[i])
        else:
            cEvaluation.crecall[i] = 0

        if cEvaluation.cprecision[i] + cEvaluation.crecall[i] > 0:
            cEvaluation.cf1[i]=2*cEvaluation.cprecision[i]*cEvaluation.crecall[i] \
                             /(cEvaluation.cprecision[i]+cEvaluation.crecall[i])
        else:
            cEvaluation.cf1[i] = 0

    tp = np.sum(xtp)
    fp = np.sum(xfp)
    fn = np.sum(xfn)
    tn = np.sum(xtn)

    for i in range(m):
        if np.sum(yTest[i] * yPred[i]) == n:
            accuracy += 1.0

    if tp + fp > 0:
        precision = tp / (tp + fp)
    else:
        precision = 0

    if tp + fn > 0:
        recall = tp / (tp + fn)
    else:
        recall = 0

    if precision + recall > 0:
        f1 = 2 * precision * recall / (precision + recall)
    else:
        f1 = 0

    cEvaluation.accuracy = accuracy / m
    cEvaluation.precision = precision
    cEvaluation.recall = recall
    cEvaluation.f1 = f1

    cEvaluation.confusion = np.zeros((2, 2))
    cEvaluation.confusion[0, 0] = tp
    cEvaluation.confusion[0, 1] = fp
    cEvaluation.confusion[1, 0] = fn
    cEvaluation.confusion[1, 1] = tn

    return cEvaluation
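
mmr_eval_binvector treats yTest and yPred as multilabel vectors in {-1,+1} coding. It fills an n-by-n class co-occurrence matrix, per-class precision, recall and F1, micro-averaged precision, recall and F1 over all (row, class) cells, a 2-by-2 tp/fp/fn/tn confusion matrix, and an exact-match accuracy that credits a row only if every label agrees. A self-contained sketch of the global quantities on toy data (the arrays below are made up for illustration and are not part of the library):

import numpy as np

## toy multilabel data in {-1,+1} coding
yTest = np.array([[ 1, -1,  1],
                  [-1,  1, -1],
                  [ 1,  1, -1]])
yPred = np.array([[ 1, -1, -1],
                  [-1,  1, -1],
                  [ 1,  1, -1]])

tp = np.sum((yTest > 0) & (yPred > 0))
fp = np.sum((yTest <= 0) & (yPred > 0))
fn = np.sum((yTest > 0) & (yPred <= 0))

precision = tp / (tp + fp) if tp + fp > 0 else 0.0
recall    = tp / (tp + fn) if tp + fn > 0 else 0.0
f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0

## a row is an exact match only if every +-1 label agrees, i.e. the
## elementwise product sums to the number of labels
accuracy = np.mean(np.sum(yTest * yPred, axis=1) == yTest.shape[1])

print(precision, recall, f1, accuracy)    ## 1.0 0.8 0.888... 0.666...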
Example #7
def mvm_eval(ieval_type,nrow,xdatacls,ZrowT):
  """
  Compute the gloabal error measures of the predition
  
  Input:
  ieval_type        error measures =0 0/1 loss,=1 RMSE, =2 MAE error
  nrow         number of rows
  datacls           class of features kernels
  ZrowT        predicted values, list indexed by row index,
                    and each list element contains the prediction
                    of all column elements belonging to that row
  Output:
  deval             accuracy in the corresponding error measure
  """
  if xdatacls.testontrain==0:
    xranges_tes=xdatacls.xranges_rel_test
    xdata_tes=xdatacls.xdata_tes
  else:
    xranges_tes=xdatacls.xranges_rel
    xdata_tes=xdatacls.xdata_tra
    
  txdim=xdata_tes[2].shape
  if len(txdim)==1:
    nxdim=1
  else:
    nxdim=txdim[1]

  cEval=mmr_base_classes.cls_evaluation()
  icandidate_w=0
  icandidate_b=0
  
  if ieval_type==0:  ## 0/1 loss
    Y0=xdatacls.Y0
    ncategory=len(Y0)

    nall=0
    nright=0
    tp=0    ## true positive
    tn=0    ## true negative
    fp=0    ## false positive
    fn=0    ## false negative
    xworst=10**3
    xbest=-xworst

    xconfusion=np.zeros((ncategory+1,ncategory+1))
    for irow in range(nrow):
      if xranges_tes[irow,1]>0:
        istart_tes=xranges_tes[irow,0]
        nlength_tes=xranges_tes[irow,1]
        xobserved=xdata_tes[2][istart_tes:istart_tes+nlength_tes]
        xpredicted=ZrowT[irow][0]+0.0
        for i in range(nlength_tes):
          if xdatacls.category==0 or xdatacls.category==3:    ## rank
            ipredicted=Y0[np.abs(Y0-xpredicted[i]).argmin()]
          else:
            ipredicted=xpredicted[i]
          xconfusion[xobserved[i],ipredicted]+=1
          if xdatacls.ibinary==0:
            if ipredicted==xobserved[i]:
              nright+=1
          else:  ## Y0=[-1,+1]
            if ipredicted==1:
              if xobserved[i]==1:
                tp+=1
              else:
                fp+=1
            else:
              if xobserved[i]==1:
                fn+=1
              else:
                tn+=1
          
          xconfidence=ZrowT[irow][2][i]
          if xconfidence<xworst:
            xworst=xconfidence
            icandidate_w=istart_tes+i
          if xconfidence>xbest:
            xbest=xconfidence
            icandidate_b=istart_tes+i
            
        nall+=nlength_tes

    if nall==0:
      nall=1
    deval=float(nright)/nall
    if xdatacls.ibinary==0:
      cEval.accuracy=deval
    else:
      cEval.accuracy=float(tp+tn)/nall
    cEval.xconfusion=xconfusion
    cEval.deval=cEval.accuracy
    
    if tp+fp>0:
      cEval.precision=float(tp)/(tp+fp)
    else:
      cEval.precision=0.0
    if tp+fn>0:
      cEval.recall=float(tp)/(tp+fn)
    else:
      cEval.recall=0.0

    if cEval.recall+cEval.precision>0:
      cEval.f1=2*cEval.precision*cEval.recall/(cEval.recall+cEval.precision)
    else:
      cEval.f1=0.0
      
    
  elif ieval_type==1:     # RMSE root mean square error
    nall=0
    nright=0
    xworst=10**3
    xbest=-xworst
    for irow in range(nrow):
      if xranges_tes[irow,1]>0:
        istart_tes=xranges_tes[irow,0]
        nlength_tes=xranges_tes[irow,1]
        nright+=np.sum((ZrowT[irow][0] \
                        -xdata_tes[2][istart_tes:istart_tes+nlength_tes])**2)
        for i in range(nlength_tes):
          if nxdim==1:
            xconfidence=ZrowT[irow][2][i]**2  ## raw prediction
          else:
            xconfidence=np.mean(ZrowT[irow][2][i])**2  ## raw prediction
            
          if xconfidence<xworst:
            xworst=xconfidence
            icandidate_w=istart_tes+i
          if xconfidence>xbest:
            xbest=xconfidence
            icandidate_b=istart_tes+i
        nall+=nlength_tes*nxdim
        
    if nall==0:
      nall=1
    deval=np.sqrt(float(nright)/nall)
    cEval.rmse=deval
    cEval.deval=deval

  elif ieval_type==2:   # MAE mean absolute error
    nall=0
    nright=0
    xworst=10**3
    xbest=-xworst
    lpredict=[]
    for irow in range(nrow):
      if xranges_tes[irow,1]>0:
        istart_tes=xranges_tes[irow,0]
        nlength_tes=xranges_tes[irow,1]
        ## nright+=np.sum(np.abs(np.exp(ZrowT[irow][0]) \
        ##            -np.exp(xdata_tes[2][istart_tes:istart_tes+nlength_tes])))
        nright+=np.sum(np.abs(ZrowT[irow][0] \
                   -xdata_tes[2][istart_tes:istart_tes+nlength_tes]))
        for i in range(nlength_tes):
          if nxdim==1:
            xconfidence=ZrowT[irow][2][i]**2  ## raw prediction
          else:
            xconfidence=np.mean(ZrowT[irow][2][i])**2  ## raw prediction
          if xconfidence<xworst:
            xworst=xconfidence
            icandidate_w=istart_tes+i
          if xconfidence>xbest:
            xbest=xconfidence
            icandidate_b=istart_tes+i
        nall+=nlength_tes*nxdim
        
    if nall==0:
      nall=1
    deval=float(nright)/nall
    cEval.mae=deval
    cEval.deval=deval
    cEval.accuracy=deval

  elif ieval_type==3:   # median absolute error
    nall=0
    nright=0
    xworst=10**3
    xbest=-xworst
    lpredict=[]
    for irow in range(nrow):
      if xranges_tes[irow,1]>0:
        istart_tes=xranges_tes[irow,0]
        nlength_tes=xranges_tes[irow,1]
        lpredict.extend(np.abs(ZrowT[irow][0] \
                       -xdata_tes[2][istart_tes:istart_tes+nlength_tes]))
        for i in range(nlength_tes):
          xconfidence=np.abs(ZrowT[irow][2][i]) ## raw prediction
          if xconfidence<xworst:
            xworst=xconfidence
            icandidate_w=istart_tes+i
          if xconfidence>xbest:
            xbest=xconfidence
            icandidate_b=istart_tes+i
        nall+=nlength_tes
        
    if nall==0:
      nall=1
    deval=np.median(np.array(lpredict))
    cEval.mae=deval
    cEval.deval=deval
    cEval.accuracy=deval
    # cEval.xpredict=np.array(lpredict)

  elif ieval_type==10:  ## labels in {0,1,2,3}^n
    nall=0
    nright=0
    tp=0    ## true positive
    tn=0    ## true negative
    fp=0    ## false positive
    fn=0    ## false negative
    xworst=10**3
    xbest=-xworst

    ndim=xdatacls.YKernel.ndim
    valrange=xdatacls.YKernel.valrange
    nval=max(valrange)+1
    tdim=[nval]*ndim
    xconfusion=np.zeros((ndim,nval,nval))
    for irow in range(nrow):
      if xranges_tes[irow,1]>0:
        istart_tes=xranges_tes[irow,0]
        nlength_tes=xranges_tes[irow,1]
        xobserved=xdata_tes[2][istart_tes:istart_tes+nlength_tes]
        xpredicted=ZrowT[irow][0].astype(int)
        ixobserved=np.unravel_index(xobserved,tdim)
        ixpredicted=np.unravel_index(xpredicted,tdim)
        for i in range(nlength_tes):
          for j in range(ndim):
            xconfusion[j,ixobserved[j][i],ixpredicted[j][i]]+=1
          ## !!!!!! should be changed 
          xconfidence=ZrowT[irow][2][i]
          if xconfidence<xworst:
            xworst=xconfidence
            icandidate_w=istart_tes+i
          if xconfidence>xbest:
            xbest=xconfidence
            icandidate_b=istart_tes+i
            
        nall+=nlength_tes

    cEval.accuracy=0
    cEval.xconfusion3=xconfusion

    ndim=xconfusion.shape[0]
    (accuracy_full,accuracy_no0)=confusion_toys(xconfusion)    
    cEval.accuracy=accuracy_no0[ndim]
    cEval.accuracy_full=accuracy_full
    cEval.accuracy_no0=accuracy_no0
    
    cEval.deval=cEval.accuracy  
    if tp+fp>0:
      cEval.precision=float(tp)/(tp+fp)
    else:
      cEval.precision=0.0
    if tp+fn>0:
      cEval.recall=float(tp)/(tp+fn)
    else:
      cEval.recall=0.0

    if cEval.recall+cEval.precision>0:
      cEval.f1=2*cEval.precision*cEval.recall/(cEval.recall+cEval.precision)
    else:
      cEval.f1=0.0

  ## sign comparison of the residues of test and prediction
  elif ieval_type==11:  ## 0/1 loss
    Y0=xdatacls.Y0
    ncategory=len(Y0)

    nall=0
    nright=0
    tp=0    ## true positive
    tn=0    ## true negative
    fp=0    ## false positive
    fn=0    ## false negative
    xworst=10**3
    xbest=-xworst

    xconfusion=np.zeros((ncategory+1,ncategory+1))
    for irow in range(nrow):
      if xranges_tes[irow,1]>0:
        istart_tes=xranges_tes[irow,0]
        nlength_tes=xranges_tes[irow,1]
        xobserved=xdata_tes[2][istart_tes:istart_tes+nlength_tes]
        ## xpredicted=ZrowT[irow][0]+0.0
        xobserved=np.sign(xobserved-ZrowT[irow][0]+ZrowT[irow][1])
        xpredicted=np.sign(ZrowT[irow][1]+0.0)
        
        for i in range(nlength_tes):
          if xdatacls.category==0 or xdatacls.category==3:    ## rank
            ipredicted=Y0[np.abs(Y0-xpredicted[i]).argmin()]
          else:
            ipredicted=xpredicted[i]
          ## the observed and predicted values are -1,0,+1; adding 1 turns
          ## them into valid row/column indices of the confusion matrix
          xconfusion[int(xobserved[i])+1,int(ipredicted)+1]+=1
          if xdatacls.ibinary==0:
            if ipredicted==xobserved[i]:
              nright+=1
          else:  ## Y0=[-1,+1]
            if ipredicted==1:
              if xobserved[i]==1:
                tp+=1
              else:
                fp+=1
            else:
              if xobserved[i]==1:
                fn+=1
              else:
                tn+=1
          
          xconfidence=ZrowT[irow][2][i]
          if xconfidence<xworst:
            xworst=xconfidence
            icandidate_w=istart_tes+i
          if xconfidence>xbest:
            xbest=xconfidence
            icandidate_b=istart_tes+i
            
        nall+=nlength_tes

    if nall==0:
      nall=1
    deval=float(nright)/nall
    if xdatacls.ibinary==0:
      cEval.accuracy=deval
    else:
      cEval.accuracy=float(tp+tn)/nall
    cEval.xconfusion=xconfusion
    cEval.deval=cEval.accuracy
    
    if tp+fp>0:
      cEval.precision=float(tp)/(tp+fp)
    else:
      cEval.precision=0.0
    if tp+fn>0:
      cEval.recall=float(tp)/(tp+fn)
    else:
      cEval.recall=0.0

    if cEval.recall+cEval.precision>0:
      cEval.f1=2*cEval.precision*cEval.recall/(cEval.recall+cEval.precision)
    else:
      cEval.f1=0.0
      
    
      
  return cEval,icandidate_w,icandidate_b
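
mvm_eval relies on the xdatacls container and on the per-row prediction lists in ZrowT, so it is hard to run in isolation. One step worth isolating is how the 0/1-loss branches turn a real-valued prediction into a class label: every prediction is snapped to the nearest admissible value in Y0. A small illustration with an assumed toy label set (Y0 and the predictions below are made up for the example):

import numpy as np

Y0 = np.array([1, 2, 3, 4, 5])               ## assumed admissible labels
xpredicted = np.array([1.2, 3.6, 4.9, 2.5])  ## assumed raw predictions

## snap each prediction to the nearest element of Y0, mirroring
## ipredicted=Y0[np.abs(Y0-xpredicted[i]).argmin()] in the code above
snapped = np.array([Y0[np.abs(Y0 - p).argmin()] for p in xpredicted])
print(snapped)    ## [1 4 5 2]; ties such as 2.5 go to the first nearest value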