Example no. 1
  def mmr_validation(self,xdatacls):
    """

    Input:
    xdatacls      data class

    Output:
    best_param    the best kernel parameters found by cross-validation
                  on the training split
    """

    if self.validation_rkernel in xdatacls.dkernels:
      kernbest=xdatacls.dkernels[self.validation_rkernel].kernel_params 
    else:
      kernbest=xdatacls.XKernel[0].kernel_params

    if self.ivalid==1:
      best_param=self.mmr_validation_body(xdatacls)
    else:
      best_param=mmr_base_classes.cls_empty_class()
      best_param.c=xdatacls.penalty.c
      best_param.d=xdatacls.penalty.d
      best_param.par1=kernbest.ipar1
      best_param.par2=kernbest.ipar2

    xdatacls.penalty.c=best_param.c
    xdatacls.penalty.d=best_param.d
    kernbest.ipar1=best_param.par1
    kernbest.ipar2=best_param.par2

    self.best_param=best_param

    return best_param
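For context, mmr_base_classes.cls_empty_class is used throughout these examples as a bare attribute container. A minimal stand-in and the calling pattern it supports (an assumption for illustration; the real mmr_base_classes module is not shown here):

class cls_empty_class:
    """Plain attribute container used to pass named parameters around."""
    pass

best_param = cls_empty_class()
best_param.c, best_param.d = 1.0, 0.0        # penalty parameters
best_param.par1, best_param.par2 = 0.1, 0.0  # kernel parameters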
Example no. 2
    def mvm_validation(self, xdatacls):
        """

    Input:
    xdatacls      data class

    Output:
    best_param    the best kernel parameters found by cross-validation
                  on the training split
    """

        if self.validation_rkernel in xdatacls.dkernels:
            kernbest = xdatacls.dkernels[self.validation_rkernel].kernel_params
        else:
            kernbest = xdatacls.XKernel[0].kernel_params

        if self.ivalid == 1:
            best_param = self.mvm_validation_body(xdatacls)
        else:
            best_param = cls_empty_class()
            best_param.c = xdatacls.penalty.c
            best_param.d = xdatacls.penalty.d
            best_param.par1 = kernbest.ipar1
            best_param.par2 = kernbest.ipar2

        xdatacls.penalty.c = best_param.c
        xdatacls.penalty.d = best_param.d
        kernbest.ipar1 = best_param.par1
        kernbest.ipar2 = best_param.par2

        self.best_param = best_param

        return best_param
Example no. 3
def mvm_largest_category(xdatacls):
  """
  Find the largest category in each training row to serve as the default prediction.
  Input:
  xdatacls      data class  

  Output:
  xdatacls.largest_class is filled in place: row_max_category holds the
                                  largest category label of each row;
                                  col_max_category and max_category are
                                  set as well, depending on xdatacls.category
  """
  xdata=xdatacls.xdata_tra
  xranges=xdatacls.xranges_rel
  mdata=xdata[0].shape[0]
  nrow=xdatacls.nrow
  ncol=xdatacls.ncol
  
  xdatacls.largest_class=mmr_base_classes.cls_empty_class()

  row_max_category=np.zeros(nrow)
  col_max_category=np.zeros(ncol)

  if xdatacls.category==1:
    nyrange0=xdatacls.categorymax
    for irow in range(nrow):
      (istart,nlength)=xranges[irow,:]
      xcat=np.zeros(nyrange0)
      for i in range(nlength):
        icat=xdata[2][istart+i]
        xcat[icat]+=1
      row_max_category[irow]=xcat.argmax()  

    xcat=np.zeros(nyrange0)
    for i in range(mdata):
      icat=xdata[2][i]
      xcat[icat]+=1

    xdatacls.largest_class.row_max_category=row_max_category
    xdatacls.largest_class.max_category=xcat/np.sum(xcat)
    
  elif xdatacls.category==2:
    ndim=xdatacls.YKernel.ndim
    valrange=xdatacls.YKernel.valrange
    nval=max(valrange)+1
    tdim=[nval]*ndim

    xtotalmax=np.zeros((ndim,nval))
    xmaxrow=np.zeros((nrow,ndim,nval))
    xmaxcol=np.zeros((ncol,ndim,nval))
    
    for irow in range(nrow):
      (istart,nlength)=xranges[irow,:]
      icat=xdata[2][istart:istart+nlength]
      xcat=np.array(np.unravel_index(icat,tdim)).T
      for i in range(nlength):
        for j in range(ndim):
          if xcat[i,j]!=0:
            xmaxrow[irow,j,xcat[i,j]]+=1
            ## index by the entry's column id, not by its position in the row
            xmaxcol[xdata[1][istart+i],j,xcat[i,j]]+=1
            xtotalmax[j,xcat[i,j]]+=1

    for irow in range(nrow):  
      row_max_category[irow]=np.ravel_multi_index(xmaxrow[irow].argmax(1),tdim)
    xdatacls.largest_class.row_max_category=row_max_category
    for icol in range(ncol):  
      col_max_category[icol]=np.ravel_multi_index(xmaxcol[icol].argmax(1),tdim)
    xdatacls.largest_class.col_max_category=col_max_category
    
    xdatacls.largest_class.max_category= \
             np.ravel_multi_index(xtotalmax.argmax(1),tdim)

  return
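In the category==2 branch each label is a flat index into an ndim-dimensional grid of per-dimension values, and np.unravel_index / np.ravel_multi_index convert between the two views. A self-contained round trip with toy sizes (ndim=2, nval=3 are assumptions for illustration):

import numpy as np

ndim, nval = 2, 3
tdim = [nval]*ndim                    # shape of the label grid

flat = np.array([5, 7])               # flat category codes
percoord = np.array(np.unravel_index(flat, tdim)).T
print(percoord)                       # [[1 2] [2 1]]: per-dimension labels

# re-encode per-dimension labels as single flat codes
print(np.ravel_multi_index(percoord.T, tdim))   # [5 7]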
Example no. 4
def mvm_glm(xdatacls):
  """
  Computes row- and column-wise averages for the additive or the multiplicative model and centers the training data accordingly.

  Input:
  xdatacls      data class
  """
  
  ## print('GLM')

  xdata=xdatacls.xdata_tra
  mdata=xdata[0].shape[0]
  nrow=xdatacls.nrow
  ncol=xdatacls.ncol

  tydim=xdata[2].shape
  if len(tydim)==1:
    nydim=1
  else:
    nydim=tydim[1]

  ## kmode=1: multiplicative model, kmode=0: additive model
  xdatacls.kmode=0

  ## note: col_sum/col_num are indexed by row id (each row's sum over its
  ## observed columns); row_sum/row_num are indexed by column id
  col_sum=np.zeros((nrow,nydim))
  row_sum=np.zeros((ncol,nydim))
  col_num=np.zeros(nrow)
  row_num=np.zeros(ncol)
  
  if xdatacls.kmode==1:
    for idata in range(mdata):
      vdata=np.log(xdata[2][idata])
      col_sum[xdata[0][idata]]+=vdata
      row_sum[xdata[1][idata]]+=vdata
      col_num[xdata[0][idata]]+=1
      row_num[xdata[1][idata]]+=1
  else:
    for idata in range(mdata):
      vdata=xdata[2][idata]
      col_sum[xdata[0][idata]]+=vdata
      row_sum[xdata[1][idata]]+=vdata
      col_num[xdata[0][idata]]+=1
      row_num[xdata[1][idata]]+=1
      
  col_num=col_num+(col_num==0)   ## avoid division by zero for empty rows
  row_num=row_num+(row_num==0)   ## ... and empty columns

  total_sum=np.sum(col_sum)
  total_num=np.sum(col_num)
  
  total_mean=total_sum/total_num
  
  col_mean=col_sum/np.outer(col_num,np.ones(nydim))
  row_mean=row_sum/np.outer(row_num,np.ones(nydim))
  if nydim==1:
    col_mean=np.squeeze(col_mean)
    row_mean=np.squeeze(row_mean)
  
  if xdatacls.kmode==1:
    col_mean=np.exp(col_mean)
    row_mean=np.exp(row_mean)
    total_mean=np.exp(total_mean)
  
  if xdatacls.kmode==1:
    for idata in range(mdata):
      irow=xdata[0][idata]
      icol=xdata[1][idata]
      xdata[2][idata]*=total_mean/(col_mean[irow]*row_mean[icol])
  else:
    for idata in range(mdata):
      irow=xdata[0][idata]
      icol=xdata[1][idata]
      xdata[2][idata]+=-col_mean[irow]-row_mean[icol]+total_mean 
  
  xdatacls.glm_model=mmr_base_classes.cls_empty_class()
  xdatacls.glm_model.col_mean=col_mean
  xdatacls.glm_model.row_mean=row_mean
  xdatacls.glm_model.total_mean=total_mean

  return
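The additive branch of mvm_glm is the classic two-way centering: subtract the row and column means and add back the grand mean. A dense toy sketch of the same arithmetic (the sparse triplet layout of xdata_tra is set aside here; naming follows the function above, where col_mean is indexed by row and row_mean by column):

import numpy as np

X = np.array([[1., 2.],
              [3., 4.]])
col_mean = X.mean(axis=1, keepdims=True)   # mean over the columns of each row
row_mean = X.mean(axis=0, keepdims=True)   # mean over the rows of each column
total_mean = X.mean()

R = X - col_mean - row_mean + total_mean   # centered residuals
print(R)                                   # all zeros: this X is purely additive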
Example no. 5
  def mmr_validation_body(self,cMMR):

    np.set_printoptions(precision=4)

    ## mtrain=cMMR.mtrain

    best_param=mmr_base_classes.cls_empty_class()
    best_param.c=0.0
    best_param.d=0.0
    best_param.par1=0.0
    best_param.par2=0.0
    xparam=mmr_base_classes.cls_empty_class()

    cMMRVal=mmr_mmr_cls.cls_mmr(cMMR.ninputview)
    cMMRVal.XKernel=[None]*cMMR.ninputview
    cMMR.copy(cMMRVal,cMMR.itrain)
    ## params.validation.rkernel=cMMRVal.XKernel[0].title

    if self.validation_rkernel in cMMRVal.dkernels:
      rkernel=cMMRVal.dkernels[self.validation_rkernel]
    else:
      rkernel=cMMRVal.XKernel[0]

    kernel_type=rkernel.kernel_params.kernel_type
    kinput=rkernel.crossval

    if kernel_type==0:
      ip1min=0.0
      ip1max=0.0
      ip2min=0.0
      ip2max=0.0
      ip1step=1.0
      ip2step=1.0
    elif kernel_type in (1,11,12,2,51):
      ip1min=kinput.par1min
      ip1max=kinput.par1max
      ip2min=kinput.par2min
      ip2max=kinput.par2max
      ip1step=kinput.par1step
      ip2step=kinput.par2step
    elif kernel_type in (3,31,32,41,53):
      if kinput.nrange>1:
        if kinput.par1max>kinput.par1min:
          dpar= np.power(kinput.par1max/kinput.par1min,1/(kinput.nrange-1))
          ip1max=kinput.nrange
        else:
          dpar=1.0
          ip1max=1.0
      else:
        ip1max=1.0
        dpar=1.0
      ip1min=1.0
      ip2min=kinput.par2min
      ip2max=kinput.par2max
      ip1step=1.0
      ip2step=kinput.par2step
    else: 
      ip1min=1.0
      ip1max=1.0
      ip2min=1.0
      ip2max=1.0
      ip1step=1.0
      ip2step=1.0

  ## Validation 

  ## number of validation folds
    if self.vnfold<2:
      self.vnfold=2
    vnfold=self.vnfold ## number of validation folds
    ## random fold label in {0,...,vnfold-1} for every training item
    vxsel=np.floor(np.random.random(cMMRVal.mdata)*vnfold)
    vxsel=vxsel-(vxsel==vnfold)
  ## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    alternating_test=0
    if alternating_test==1:
      vxsel=np.zeros(cMMRVal.mdata)
      for i in range(0,cMMRVal.mdata,2):
        vxsel[i]=1
  ## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    vpredtr=np.zeros(vnfold)   ## accuracy on the validation training part
    vpred=np.zeros(vnfold)     ## accuracy on the validation test part
    print('C,D,par1,par2,training accuracy,validation test accuracy')

    ## scanning the parameter space
    if cMMR.ieval_type in (0,10):
      xxmax=-np.inf
    else:
      xxmax=np.inf

    penalty=cMMRVal.penalty.crossval
    crange=np.arange(penalty.par1min,penalty.par1max+penalty.par1step/2, \
                     penalty.par1step)
    drange=np.arange(penalty.par2min,penalty.par2max+penalty.par2step/2, \
                     penalty.par2step)

    p1range=np.arange(ip1min,ip1max+ip1step/2,ip1step)
    p2range=np.arange(ip2min,ip2max+ip2step/2,ip2step)

    for iC in crange:
      for iD in drange:
        for ip1 in p1range:
          for ip2 in p2range:
            if kernel_type in (3,31,32,41,53): 
              dpar1=kinput.par1min*dpar**(ip1-1)
              dpar2=ip2
            else:
              dpar1=ip1
              dpar2=ip2

            cMMRVal.penalty.c=iC
            cMMRVal.penalty.d=iD
            rkernel.kernel_params.ipar1=dpar1
            rkernel.kernel_params.ipar2=dpar2

            for vifold in range(vnfold):

              cMMRVal.split_train_test(vxsel,vifold)
              cMMRVal.compute_kernels()
              cMMRVal.Y0=cMMRVal.YKernel.get_Y0(cMMRVal.itrain)

              cOptDual=cMMRVal.mmr_train()
  ## validation training         
              cPredictValTrain=cMMRVal.mmr_test(cOptDual,itraindata=0)
  ## counts the proportion of items predicted correctly
  ## ##############################################
              if cMMRVal.itestmode==2:
                ypred=inverse_knn(cMMRVal.YKernel.get_Y0(cMMRVal.itrain), \
                                  cPredictValTrain)
              else:
                ypred=cPredictValTrain.zPred
              cEvaluationValTrain= \
                  mmr_eval_binvector(cMMRVal.YKernel.get_train(cMMRVal.itrain), \
                                     ypred)    
              vpredtr[vifold]=cEvaluationValTrain.f1

  ## ##############################################
  ## validation test
              cPredictValTest=cMMRVal.mmr_test(cOptDual,itraindata=1)

  ## counts the proportion of items predicted correctly
  ## ##############################################
              if cMMRVal.itestmode==2:
                ypred=inverse_knn(cMMRVal.YKernel.get_Y0(cMMRVal.itrain), \
                                  cPredictValTest)
              else:
                ypred=cPredictValTest.zPred
              cEvaluationValTest= \
                  mmr_eval_binvector(cMMRVal.YKernel.get_test(cMMRVal.itest), \
                                     ypred)

              vpred[vifold]=cEvaluationValTest.f1

  ## ##############################################
            np.set_printoptions(precision=4)
            print('%9.5g'%iC,'%9.5g'%iD,'%9.5g'%dpar1,'%9.5g'%dpar2, \
                  '%9.5g'%(np.mean(vpredtr)),'%9.5g'%(np.mean(vpred)))
  ##          print(array((iC,iD,dpar1,dpar2,mean(vpredtr),mean(vpred))))
  ##          print(iC,iD,dpar1,dpar2,mean(vpredtr),mean(vpred))
  ## searching for the best configuration in validation
            mvpred=np.mean(vpred)

            if cMMR.ieval_type in (0,10):
              if mvpred>xxmax:
                xxmax=mvpred
                xparam.c=iC
                xparam.d=iD
                xparam.par1=dpar1
                xparam.par2=dpar2
                print('The best:',xxmax)
            else:
              if mvpred<xxmax:
                xxmax=mvpred
                xparam.c=iC
                xparam.d=iD
                xparam.par1=dpar1
                xparam.par2=dpar2
                print('The best:',xxmax)

            sys.stdout.flush()  

    self.validationScore = xxmax
    best_param=xparam

    return best_param
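The + par1step/2 (and analogous) offsets in the crange, drange, p1range and p2range definitions make the nominal maximum survive np.arange's half-open interval and floating-point rounding. A quick demonstration with made-up bounds:

import numpy as np

par1min, par1max, par1step = 0.5, 2.0, 0.5
print(np.arange(par1min, par1max, par1step))                # [0.5 1.  1.5]
print(np.arange(par1min, par1max + par1step/2, par1step))   # [0.5 1.  1.5 2. ]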
Example no. 6
def roar_main(workmode):

  params=mmr_setparams.cls_params()
  params.setvalidation()
  params.setsolver()
  params.setgeneral()
  params.setoutput()
  params.setinput()

## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  xdatacls=mvm_mvm_cls.cls_mvm()

  roar_prepare.roar_prepare(xdatacls)

  nfold=xdatacls.nfold
  if xdatacls.itestmode in (0,3):
    nfold0=1        ## active learning
  else:
    nfold0=nfold    ## n-fold cross validation
  nrepeat=xdatacls.nrepeat

# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  scombine=''

  if xdatacls.itestmode==0:
    if xdatacls.ibootstrap==0:
      fname='xresultte_rand'+scombine+'.csv'
    elif xdatacls.ibootstrap==1:  
      fname='xresultte_active'+scombine+'.csv'
    elif xdatacls.ibootstrap==2:  
      fname='xresultte_greedy'+scombine+'.csv'
    elif xdatacls.ibootstrap==3:  
      fname='xresultte_act_rand'+scombine+'.csv'
  else:
    fname='xresultte_ncross'+scombine+'.csv'

  ## xdatacls.YKernel.ymax=ctables.ncategory
  # it will be recomputed in mvm_ranges
  xdatacls.YKernel.ymin=0
  xdatacls.YKernel.yrange=100 # it will be recomputed in classcol_ranges
  xdatacls.YKernel.ystep=1  

  # load the databases
  # data file
  ndata=xdatacls.ndata
  
##  set_printoptions(precision=4)
  npar=1   ## number of parameter settings selected for random subsampling
  
  nparam=4    # C,D,par1,par2
  nreport=4   ## accuracy, precision, recall, f1

  if xdatacls.itestmode==0:
    nrepeat0=ndata-1   ## active learning
  else:
    nrepeat0=nrepeat

  if xdatacls.itestmode==0:
    ## initialize the active learning seeds
    ## pzero=0.001
    ## xselector=1*(np.random.rand(ndata)<pzero)

    nzero=100  ## !!!!!!!! initial training size
    xselector=np.zeros(ndata)
    nprime=4999   ## large prime stride spreads the seed indices over the data
    ip=0
    for i in range(nzero):
      ip+=nprime
      if ip>=ndata:   ## >= so that ip==ndata cannot index out of bounds
        ip=ip%ndata
      xselector[ip]=1  

    ndatainit=int(np.sum(xselector))
    mtest=ndata-ndatainit
    xdatacls.itest=np.where(xselector==0)[0]
    icandidate_w=-1
    icandidate_b=-1
    ## nrepeat0=ndata-ndatainit-10
    nrepeat0=min(100000,ndata-ndatainit-1000)  ## !!!!!! test size
    ## nrepeat0=1
  else:   ## n-fold cross validation
    nrepeat0=nrepeat
    
  xresulttr=np.zeros((nrepeat0,nfold0))
  xresultte=np.zeros((nrepeat0,nfold0,nreport))
  xbest_param=np.zeros((nrepeat0,nfold0,nparam))

  # ############################################################

  # number of iterations in the optimization
  params.solver.niter=100
  print('niter:',params.solver.niter)

  for ipar in range(npar):

    nval=len(xdatacls.YKernel.valrange)
    xconfusion3=np.zeros((nrepeat0,nfold0,xdatacls.YKernel.ndim,nval,nval))

    ireport=0
    ## for irepeat in range(int(float(ndata)/3)):
    for irepeat in range(nrepeat0):

      if xdatacls.itestmode==0:
        if xdatacls.ibootstrap==0:
          if icandidate_w>=0:
            icandidate_w=np.random.randint(mtest,size=1)
            icandidate_w=xdatacls.itest[icandidate_w]
            xselector[icandidate_w]=1
            ## xselector[icandidate_b]=0     ## delete the best 
        elif xdatacls.ibootstrap==1:  ## worst confidence
          if icandidate_w>=0:
            xselector[icandidate_w]=1
            ## xselector[icandidate_b]=0     ## delete the best 
        elif xdatacls.ibootstrap==2:  ## best confidence
          if icandidate_b>=0:
            xselector[icandidate_b]=1
        elif xdatacls.ibootstrap==3:  ## worst+random
          if icandidate_w>=0:
            pselect=np.random.rand()
            if pselect<0.5:
              icandidate_w=np.random.randint(mtest)
              icandidate_w=xdatacls.itest[icandidate_w]
            xselector[icandidate_w]=1
            ## xselector[icandidate_b]=0     ## delete the best
      elif xdatacls.itestmode==1:   ## n-fold cross-validation
        ## !!! Emre !!!
        xselector=np.floor(np.random.random(ndata)*nfold0)
        xselector=xselector-(xselector==nfold0)

      ## if xdatacls.itestmode==1:  ## n-fold crossvalidation
      ##   xselector=np.random.randint(nfold0, size=ndata)
      ## elif xdatacls.itestmode==2:  ## random subset
      ##   xselector=1*(np.random.rand(ndata)<float(plist[ipar])/100)
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
## for test only
      elif xdatacls.itestmode==-1:
        for i in range(ndata):
          xselector[i]=i%nfold0
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!          
##        xselector_row=np.floor(nfold0*np.random.rand(nrow))

      for ifold in range(nfold0):

        xdatacls.split_train_test(xselector,ifold)
        mtest=len(xdatacls.itest)
        if mtest<=0:
          print('Warning: empty test fold')
          break

        print('mtest:',mtest,'mtrain:',len(xdatacls.itrain))

        xdatacls.mvm_datasplit()        

    # sparse matrices of ranks - row_average - col_average + total_average
        xdatacls.xranges_rel=mvm_ranges(xdatacls.xdata_tra,xdatacls.nrow, \
                                     params)
        xdatacls.xranges_rel_test=mvm_ranges(xdatacls.xdata_tes, \
                                          xdatacls.nrow,params)
        ## mvm_loadmatrix(xdatacls,isubset_tra,params)
        if xdatacls.category==0:
          mvm_glm(xdatacls,params)
          mvm_ygrid(xdatacls,params)
        elif xdatacls.category in (1,2):
          mvm_largest_category(xdatacls)

    # validation to choose the best parameters
        print('Validation')
        xdatacls.set_validation()
        params.validation.rkernel=xdatacls.XKernel[0].title
        if params.validation.rkernel in xdatacls.dkernels:
          kernbest=xdatacls.dkernels[params.validation.rkernel].kernel_params
        else:
          kernbest=xdatacls.XKernel[0].kernel_params
        
        if params.validation.ivalid==1:
          best_param=mvm_validation(xdatacls,params)
        else:
          best_param=cls_empty_class()
          best_param.c=xdatacls.penalty.c
          best_param.d=xdatacls.penalty.d
          best_param.par1=kernbest.ipar1
          best_param.par2=kernbest.ipar2

        xdatacls.penalty.c=best_param.c
        xdatacls.penalty.d=best_param.d
        kernbest.ipar1=best_param.par1
        kernbest.ipar2=best_param.par2

        print('Parameters:',xdatacls.penalty.c,xdatacls.penalty.d, \
              kernbest.ipar1,kernbest.ipar2)
        
        print('Best parameters found by validation')
        xbest_param[irepeat,ifold,0]=best_param.c
        xbest_param[irepeat,ifold,1]=best_param.d
        xbest_param[irepeat,ifold,2]=best_param.par1
        xbest_param[irepeat,ifold,3]=best_param.par2

    # training with the best parameters
        print('training')

        time0=time.time()
        cOptDual= xdatacls.mvm_train(params)
        print('Training time:',time.time()-time0)
        
    # cls transfers the dual variables to the test procedure
    # compute test 

    # check the train accuracy
        print('test on training')

    # $$$ # counts the proportion the ones predicted correctly    
    # $$$ # ######################################
    # $$$     deval=col_eval(xdatacls.ieval_type,nrow,isubset_tra, \
    # $$$                      xranges_tra,Zrow)
    # $$$     xresulttr(irepeat,ifold)=deval
    # ######################################     
    # check the test accuracy
        print('test on test')
        time0=time.time()
        cPredict=xdatacls.mvm_test(cOptDual.alpha,params)
        print('Test time:',time.time()-time0)

    # counts the proportion of items predicted correctly
    # ####################################
        time0=time.time()
        (cEval,icandidate_w,icandidate_b)=mvm_eval(xdatacls.ieval_type, \
                                          xdatacls.nrow,xdatacls,cPredict.Zrow)
        print('Evaluation time:',time.time()-time0)

        if xdatacls.ieval_type==0:
          xresultte[irepeat,ifold,0]=cEval.accuracy
          ## prediction of effective categories
          ## part_accuracy=float(np.sum(np.diag(cEval.xconfusion)[1:]))/ \
          ##           np.sum(cEval.xconfusion[1:,1:])
          ## xresultte[irepeat,ifold,1]=part_accuracy
          xresultte[irepeat,ifold,1]=cEval.precision
          xresultte[irepeat,ifold,2]=cEval.recall
          xresultte[irepeat,ifold,3]=cEval.f1
        elif xdatacls.ieval_type==10:
          xresultte[irepeat,ifold,0]=cEval.accuracy
          xconfusion3[irepeat,ifold]=cEval.xconfusion3
        else:
          xresultte[irepeat,ifold,0]=cEval.deval
        icandidate_w=xdatacls.itest[icandidate_w]
        icandidate_b=xdatacls.itest[icandidate_b]
        ireport+=1

        ## print(cEval.xconfusion)
        if xdatacls.ieval_type!=10:
          for xconfrow in cEval.xconfusion:
            for ditem in xconfrow:
              print('%7.0f'%ditem,end='')
            print()
          print()
        else:
          for xtable in cEval.xconfusion3:
            xsum=np.sum(xtable)
            if xsum==0:
              xsum=1
            xtable=100*xtable/xsum
            for xconfrow in xtable:
              for ditem in xconfrow:
                print('%8.4f'%ditem,end='')
              print()
            print()
          print()
        
    # ####################################    
        print('*** ipar, repetition, fold ***')
        print(ipar,irepeat,ifold)

        if xdatacls.itestmode==1: ## n-fold cross-validation
          print('Result in one fold and one repetition')
          ## print('Accuracy on train')
          ## print(xresulttr[irepeat,ifold])
          print('Accuracy on test')
          if xdatacls.ieval_type==0:
            print(xresultte[irepeat,ifold])
          else:
            print(xresultte[irepeat,ifold,0])

      print('Result in one repetition')
      print('Mean and std of the accuracy on test')
      print(np.mean(xresultte[irepeat,:,0]),
            np.std(xresultte[irepeat,:,0]))
        
      sys.stdout.flush()
        
      if xdatacls.itestmode==0: ## active learning
        np.savetxt(fname,xresultte[:ireport,0,:],delimiter=',',fmt='%6.4f')
      else:
        if xdatacls.ieval_type==0:
          np.savetxt(fname,xresultte[:ireport,:,:],delimiter=',',fmt='%6.4f')
        else:
          np.savetxt(fname,xresultte[:ireport,:,0],delimiter=',',fmt='%6.4f')

    print('***** Overall result ****')
    print('Mean and std of the accuracy on test + error')
    print(np.mean(xresultte[:,:,0]),
          np.std(xresultte[:,:,0]))

#     if xdatacls.ieval_type==10:
#       confusion_latex(xconfusion3,lfiles)      
      
    print('Average best parameters')
    ##  sfield=dir(best_param)
    xlabels=('c','d','par1','par2')
    for i in range(nparam):
    ##    print(sfield[i])
      print(xlabels[i],': ',np.mean(xbest_param[:,:,i]), \
              '(',np.std(xbest_param[:,:,i]),')')
  
  ## np.savetxt(fname,xresultte[:ireport,0,:],delimiter=',',fmt='%6.4f')
  print('Bye')    
  
  return
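roar_main encodes a split as one fold label per sample and hands it to split_train_test together with the index of the held-out fold. A minimal sketch of that convention (an assumption inferred from the usage above; split_train_test itself is not shown):

import numpy as np

ndata, nfold0 = 10, 3
xselector = np.floor(np.random.random(ndata)*nfold0)   # fold label per sample
xselector = xselector - (xselector == nfold0)          # clamp the top label

ifold = 0
itest = np.where(xselector == ifold)[0]    # fold ifold is held out for testing
itrain = np.where(xselector != ifold)[0]   # the remaining folds are training data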
Example no. 7
def roar_main(workmode):

    params = mmr_setparams.cls_params()
    params.setvalidation()
    params.setsolver()
    params.setgeneral()
    params.setoutput()
    params.setinput()

    ## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    xdatacls = mvm_mvm_cls.cls_mvm()

    roar_prepare.roar_prepare(xdatacls)

    nfold = xdatacls.nfold
    if xdatacls.itestmode in (0, 3):
        nfold0 = 1  ## active learning
    else:
        nfold0 = nfold  ## n-fold cross validation
    nrepeat = xdatacls.nrepeat

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    scombine = ''

    if xdatacls.itestmode == 0:
        if xdatacls.ibootstrap == 0:
            fname = 'xresultte_rand' + scombine + '.csv'
        elif xdatacls.ibootstrap == 1:
            fname = 'xresultte_active' + scombine + '.csv'
        elif xdatacls.ibootstrap == 2:
            fname = 'xresultte_greedy' + scombine + '.csv'
        elif xdatacls.ibootstrap == 3:
            fname = 'xresultte_act_rand' + scombine + '.csv'
    else:
        fname = 'xresultte_ncross' + scombine + '.csv'

    ## xdatacls.YKernel.ymax=ctables.ncategory
    # it will be recomputed in mvm_ranges
    xdatacls.YKernel.ymin = 0
    xdatacls.YKernel.yrange = 100  # it will be recomputed in classcol_ranges
    xdatacls.YKernel.ystep = 1

    # load the databases
    # data file
    ndata = xdatacls.ndata

    ##  set_printoptions(precision=4)
    npar = 1  ## number of parameter settings selected for random subsampling

    nparam = 4  # C,D,par1,par2
    nreport = 4  ## accuracy, precision, recall, f1

    if xdatacls.itestmode == 0:
        nrepeat0 = ndata - 1  ## active learning
    else:
        nrepeat0 = nrepeat

    if xdatacls.itestmode == 0:
        ## initialize the active learning seeds
        ## pzero=0.001
        ## xselector=1*(np.random.rand(ndata)<pzero)

        nzero = 100  ## !!!!!!!! initial training size
        xselector = np.zeros(ndata)
        nprime = 4999  ## large prime stride spreads the seed indices over the data
        ip = 0
        for i in range(nzero):
            ip += nprime
            if ip >= ndata:  ## >= so that ip==ndata cannot index out of bounds
                ip = ip % ndata
            xselector[ip] = 1

        ndatainit = int(np.sum(xselector))
        mtest = ndata - ndatainit
        xdatacls.itest = np.where(xselector == 0)[0]
        icandidate_w = -1
        icandidate_b = -1
        ## nrepeat0=ndata-ndatainit-10
        nrepeat0 = min(100000, ndata - ndatainit - 1000)  ## !!!!!! test size
        ## nrepeat0=1
    else:  ## n-fold cross validation
        nrepeat0 = nrepeat

    xresulttr = np.zeros((nrepeat0, nfold0))
    xresultte = np.zeros((nrepeat0, nfold0, nreport))
    xbest_param = np.zeros((nrepeat0, nfold0, nparam))

    # ############################################################

    # number of iterations in the optimization
    params.solver.niter = 100
    print('niter:', params.solver.niter)

    for ipar in range(npar):

        nval = len(xdatacls.YKernel.valrange)
        xconfusion3 = np.zeros(
            (nrepeat0, nfold0, xdatacls.YKernel.ndim, nval, nval))

        ireport = 0
        ## for irepeat in range(int(float(ndata)/3)):
        for irepeat in range(nrepeat0):

            if xdatacls.itestmode == 0:
                if xdatacls.ibootstrap == 0:
                    if icandidate_w >= 0:
                        icandidate_w = np.random.randint(mtest, size=1)
                        icandidate_w = xdatacls.itest[icandidate_w]
                        xselector[icandidate_w] = 1
                        ## xselector[icandidate_b]=0     ## delete the best
                elif xdatacls.ibootstrap == 1:  ## worst confidence
                    if icandidate_w >= 0:
                        xselector[icandidate_w] = 1
                        ## xselector[icandidate_b]=0     ## delete the best
                elif xdatacls.ibootstrap == 2:  ## best confidence
                    if icandidate_b >= 0:
                        xselector[icandidate_b] = 1
                elif xdatacls.ibootstrap == 3:  ## worst+random
                    if icandidate_w >= 0:
                        pselect = np.random.rand()
                        if pselect < 0.5:
                            icandidate_w = np.random.randint(mtest)
                            icandidate_w = xdatacls.itest[icandidate_w]
                        xselector[icandidate_w] = 1
                        ## xselector[icandidate_b]=0     ## delete the best
            elif xdatacls.itestmode == 1:  ## n-fold cross-validation
                ## !!! Emre !!!
                xselector = np.floor(np.random.random(ndata) * nfold0)
                xselector = xselector - (xselector == nfold0)

            ## if xdatacls.itestmode==1:  ## n-fold crossvalidation
            ##   xselector=np.random.randint(nfold0, size=ndata)
            ## elif xdatacls.itestmode==2:  ## random subset
            ##   xselector=1*(np.random.rand(ndata)<float(plist[ipar])/100)
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
## for test only
            elif xdatacls.itestmode == -1:
                for i in range(ndata):
                    xselector[i] = i % nfold0
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
##        xselector_row=np.floor(nfold0*np.random.rand(nrow))

            for ifold in range(nfold0):

                xdatacls.split_train_test(xselector, ifold)
                mtest = len(xdatacls.itest)
                if mtest <= 0:
                    print('Warning: empty test fold')
                    break

                print('mtest:', mtest, 'mtrain:', len(xdatacls.itrain))

                xdatacls.mvm_datasplit()

                # sparse matrices of ranks - row_average - col_average + total_average
                xdatacls.xranges_rel=mvm_ranges(xdatacls.xdata_tra,xdatacls.nrow, \
                                             params)
                xdatacls.xranges_rel_test=mvm_ranges(xdatacls.xdata_tes, \
                                                  xdatacls.nrow,params)
                ## mvm_loadmatrix(xdatacls,isubset_tra,params)
                if xdatacls.category == 0:
                    mvm_glm(xdatacls, params)
                    mvm_ygrid(xdatacls, params)
                elif xdatacls.category in (1, 2):
                    mvm_largest_category(xdatacls)

        # validation to choose the best parameters
                print('Validation')
                xdatacls.set_validation()
                params.validation.rkernel = xdatacls.XKernel[0].title
                if params.validation.rkernel in xdatacls.dkernels:
                    kernbest = xdatacls.dkernels[
                        params.validation.rkernel].kernel_params
                else:
                    kernbest = xdatacls.XKernel[0].kernel_params

                if params.validation.ivalid == 1:
                    best_param = mvm_validation(xdatacls, params)
                else:
                    best_param = cls_empty_class()
                    best_param.c = xdatacls.penalty.c
                    best_param.d = xdatacls.penalty.d
                    best_param.par1 = kernbest.ipar1
                    best_param.par2 = kernbest.ipar2

                xdatacls.penalty.c = best_param.c
                xdatacls.penalty.d = best_param.d
                kernbest.ipar1 = best_param.par1
                kernbest.ipar2 = best_param.par2

                print('Parameters:',xdatacls.penalty.c,xdatacls.penalty.d, \
                      kernbest.ipar1,kernbest.ipar2)

                print('Best parameters found by validation')
                xbest_param[irepeat, ifold, 0] = best_param.c
                xbest_param[irepeat, ifold, 1] = best_param.d
                xbest_param[irepeat, ifold, 2] = best_param.par1
                xbest_param[irepeat, ifold, 3] = best_param.par2

                # training with the best parameters
                print('training')

                time0 = time.time()
                cOptDual = xdatacls.mvm_train(params)
                print('Training time:', time.time() - time0)

                # cls transfers the dual variables to the test procedure
                # compute test

                # check the train accuracy
                print('test on training')

                # $$$ # counts the proportion the ones predicted correctly
                # $$$ # ######################################
                # $$$     deval=col_eval(xdatacls.ieval_type,nrow,isubset_tra, \
                # $$$                      xranges_tra,Zrow)
                # $$$     xresulttr(irepeat,ifold)=deval
                # ######################################
                # check the test accuracy
                print('test on test')
                time0 = time.time()
                cPredict = xdatacls.mvm_test(cOptDual.alpha, params)
                print('Test time:', time.time() - time0)

                # counts the proportion of items predicted correctly
                # ####################################
                time0 = time.time()
                (cEval,icandidate_w,icandidate_b)=mvm_eval(xdatacls.ieval_type, \
                                                  xdatacls.nrow,xdatacls,cPredict.Zrow)
                print('Evaluation time:', time.time() - time0)

                if xdatacls.ieval_type == 0:
                    xresultte[irepeat, ifold, 0] = cEval.accuracy
                    ## prediction of effective categories
                    ## part_accuracy=float(np.sum(np.diag(cEval.xconfusion)[1:]))/ \
                    ##           np.sum(cEval.xconfusion[1:,1:])
                    ## xresultte[irepeat,ifold,1]=part_accuracy
                    xresultte[irepeat, ifold, 1] = cEval.precision
                    xresultte[irepeat, ifold, 2] = cEval.recall
                    xresultte[irepeat, ifold, 3] = cEval.f1
                elif xdatacls.ieval_type == 10:
                    xresultte[irepeat, ifold, 0] = cEval.accuracy
                    xconfusion3[irepeat, ifold] = cEval.xconfusion3
                else:
                    xresultte[irepeat, ifold, 0] = cEval.deval
                icandidate_w = xdatacls.itest[icandidate_w]
                icandidate_b = xdatacls.itest[icandidate_b]
                ireport += 1

                ## print(cEval.xconfusion)
                if xdatacls.ieval_type != 10:
                    for xconfrow in cEval.xconfusion:
                        for ditem in xconfrow:
                            print('%7.0f' % ditem, end='')
                        print()
                    print()
                else:
                    for xtable in cEval.xconfusion3:
                        xsum = np.sum(xtable)
                        if xsum == 0:
                            xsum = 1
                        xtable = 100 * xtable / xsum
                        for xconfrow in xtable:
                            for ditem in xconfrow:
                                print('%8.4f' % ditem, end='')
                            print()
                        print()
                    print()

        # ####################################
                print('*** ipar, repetition, fold ***')
                print(ipar, irepeat, ifold)

                if xdatacls.itestmode == 1:  ## n-fold cross-validation
                    print('Result in one fold and one repetition')
                    ## print('Accuracy on train')
                    ## print(xresulttr[irepeat,ifold])
                    print('Accuracy on test')
                    if xdatacls.ieval_type == 0:
                        print(xresultte[irepeat, ifold])
                    else:
                        print(xresultte[irepeat, ifold, 0])

            print('Result in one repetition')
            print('Mean and std of the accuracy on test')
            print(np.mean(xresultte[irepeat, :, 0]),
                  np.std(xresultte[irepeat, :, 0]))

            sys.stdout.flush()

            if xdatacls.itestmode == 0:  ## active learning
                np.savetxt(fname,
                           xresultte[:ireport, 0, :],
                           delimiter=',',
                           fmt='%6.4f')
            else:
                if xdatacls.ieval_type == 0:
                    np.savetxt(fname,
                               xresultte[:ireport, :, :],
                               delimiter=',',
                               fmt='%6.4f')
                else:
                    np.savetxt(fname,
                               xresultte[:ireport, :, 0],
                               delimiter=',',
                               fmt='%6.4f')

        print('***** Overall result ****')
        print('Mean and std of the accuracy on test + error')
        print(np.mean(xresultte[:, :, 0]), np.std(xresultte[:, :, 0]))

#     if xdatacls.ieval_type==10:
#       confusion_latex(xconfusion3,lfiles)

        print('Average best parameters')
        ##  sfield=dir(best_param)
        xlabels = ('c', 'd', 'par1', 'par2')
        for i in range(nparam):
            ##    print(sfield[i])
            print(xlabels[i],': ',np.mean(xbest_param[:,:,i]), \
                    '(',np.std(xbest_param[:,:,i]),')')

    ## np.savetxt(fname,xresultte[:ireport,0,:],delimiter=',',fmt='%6.4f')
    print('Bye')

    return
Example no. 8
    def mvm_validation_body(self, xdatacls):
        """

    Input:
    xdatacls      data class

    Output:
    best_param    the best kernel parameters found by cross-validation
                  on the training split
    """

        nrow = xdatacls.nrow

        ## construct the data object out of the training items
        xdatacls_val = mvm_mvm_cls.cls_mvm()
        xdatacls.copy(xdatacls_val)

        xparam = cls_empty_class()

        best_param = cls_empty_class()
        best_param.c = 1
        best_param.d = 0
        best_param.par1 = 0
        best_param.par2 = 0

        if self.validation_rkernel in xdatacls_val.dkernels:
            rkernel = xdatacls_val.dkernels[self.validation_rkernel]
        else:
            rkernel = xdatacls_val.XKernel[0]

        kernel_type = rkernel.kernel_params.kernel_type
        kinput = rkernel.crossval

        if kernel_type == 0:
            ip1min = 0
            ip1max = 0
            ip2min = 0
            ip2max = 0
            ip1step = 1
            ip2step = 1
        elif kernel_type in (1, 2):
            ip1min = kinput.par1min
            ip1max = kinput.par1max
            ip2min = kinput.par2min
            ip2max = kinput.par2max
            ip1step = kinput.par1step
            ip2step = kinput.par2step
        elif kernel_type in (3, 31, 32, 41, 53, 5):
            if kinput.nrange > 1:
                if kinput.par1max > kinput.par1min:
                    dpar = np.power(kinput.par1max / kinput.par1min,
                                    1 / (kinput.nrange - 1))
                    ip1max = kinput.nrange
                else:
                    dpar = 1.0
                    ip1max = 1.0
            else:
                ip1max = 1.0
                dpar = 1.0

            ip1min = 1
            ip2min = kinput.par2min
            ip2max = kinput.par2max
            ip1step = 1
            ip2step = kinput.par2step
        else:
            ip1min = 1
            ip1max = 1
            ip2min = 1
            ip2max = 1
            ip1step = 1
            ip2step = 1

    #  vnfold=4 # number of validation folds
        mdata = xdatacls_val.xdata_rel[0].shape[0]
        vnfold = self.vnfold  # number of validation folds
        vxsel = np.floor(np.random.rand(mdata) * vnfold)
        vxsel = vxsel - (vxsel == vnfold)
        ##  vpredtr=np.zeros(vnfold) # valid
        vpred = np.zeros(vnfold)  # validation-test score per fold

        print('C,D,par1,par2,validation test accuracy')

        # scanning the parameter space

        if xdatacls_val.ieval_type in (0, 10, 11):
            xxmax = -np.inf
        else:
            xxmax = np.inf

        penalty = xdatacls_val.penalty.crossval
        crange=np.arange(penalty.par1min,penalty.par1max+penalty.par1step/2, \
                         penalty.par1step)
        drange=np.arange(penalty.par2min,penalty.par2max+penalty.par2step/2, \
                         penalty.par2step)

        p1range = np.arange(ip1min, ip1max + ip1step / 2, ip1step)
        p2range = np.arange(ip2min, ip2max + ip2step / 2, ip2step)

        for iC in crange:
            for iD in drange:
                for ip1 in p1range:
                    for ip2 in p2range:
                        if kernel_type in (3, 31, 32, 41, 53, 5):
                            dpar1 = kinput.par1min * dpar**(ip1 - 1)
                            dpar2 = ip2
                        else:
                            dpar1 = ip1
                            dpar2 = ip2

                        xdatacls_val.penalty.c = iC
                        xdatacls_val.penalty.d = iD
                        rkernel.kernel_params.ipar1 = dpar1
                        rkernel.kernel_params.ipar2 = dpar2

                        for vifold in range(vnfold):

                            xdatacls_val.split_train_test(vxsel, vifold)
                            xdatacls_val.mvm_datasplit()
                            xdatacls_val.xranges_rel=mvm_ranges(xdatacls_val.xdata_tra, \
                                                             xdatacls_val.nrow)
                            xdatacls_val.xranges_rel_test=mvm_ranges(xdatacls_val.xdata_tes, \
                                                             xdatacls_val.nrow)
                            if xdatacls.category == 0 or xdatacls.category == 3:
                                ## pass
                                mvm_glm(xdatacls_val)
                                mvm_ygrid(xdatacls_val)
                            else:
                                mvm_largest_category(xdatacls_val)

                            if self.report == 1:
                                print('validation training')
                            xdatacls_val.mvm_train()

                            # validation test
                            if self.report == 1:
                                print('validation test on validation test')
                            cPredict = xdatacls_val.mvm_test()

                            # counts the proportion of items predicted correctly
                            # ##############################################
                            cEval=mvm_eval(xdatacls_val.ieval_type,nrow,xdatacls_val, \
                                               cPredict.Zrow)[0]
                            if xdatacls_val.ieval_type in (0, 10, 11):
                                if xdatacls_val.ibinary == 0:
                                    vpred[vifold] = cEval.accuracy
                                elif xdatacls_val.ibinary == 1:
                                    vpred[vifold] = cEval.f1
                            else:
                                vpred[vifold] = cEval.deval

                        print('%9.5g'%iC,'%9.5g'%iD,'%9.5g'%dpar1,'%9.5g'%dpar2, \
                              '%9.5g'%(np.mean(vpred)))
                        ## print(iC,iD,dpar1,dpar2,np.mean(vpred))
                        # searching for the best configuration in validation
                        mvpred = np.mean(vpred)

                        if xdatacls_val.ieval_type in (0, 10, 11):
                            if mvpred > xxmax:
                                xxmax = mvpred
                                xparam.c = iC
                                xparam.d = iD
                                xparam.par1 = dpar1
                                xparam.par2 = dpar2
                                print('The best:', xxmax)
                        else:
                            if mvpred < xxmax:
                                xxmax = mvpred
                                xparam.c = iC
                                xparam.d = iD
                                xparam.par1 = dpar1
                                xparam.par2 = dpar2
                                print('The best:', xxmax)

                        sys.stdout.flush()

        best_param = xparam

        return best_param
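For the kernel types scanned geometrically (3, 31, 32, 41, 53, 5), ip1 counts 1..nrange and dpar1 = par1min*dpar**(ip1-1) sweeps log-uniformly from par1min up to par1max. A short sketch of the grid this produces, with toy values:

import numpy as np

par1min, par1max, nrange = 0.1, 10.0, 5
dpar = np.power(par1max/par1min, 1/(nrange - 1))

grid = [par1min*dpar**(k - 1) for k in range(1, nrange + 1)]
print(np.round(grid, 4))   # [ 0.1     0.3162  1.      3.1623 10.    ]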
Example no. 9
  def mvm_validation_body(self,xdatacls):
    """

    Input:
    xdatacls      data class

    Output:
    best_param    the best kernel parameters found by cross-validation
                  on the training split
    """

    nrow=xdatacls.nrow

    ## construct the data object out of the training items
    xdatacls_val=mvm_mvm_cls.cls_mvm()
    xdatacls.copy(xdatacls_val)

    xparam=cls_empty_class()

    best_param=cls_empty_class()
    best_param.c=1
    best_param.d=0
    best_param.par1=0
    best_param.par2=0

    if self.validation_rkernel in xdatacls_val.dkernels:
      rkernel=xdatacls_val.dkernels[self.validation_rkernel]
    else:
      rkernel=xdatacls_val.XKernel[0]

    kernel_type=rkernel.kernel_params.kernel_type
    kinput=rkernel.crossval

    if kernel_type==0:
      ip1min=0
      ip1max=0
      ip2min=0
      ip2max=0
      ip1step=1
      ip2step=1
    elif kernel_type in (1,2):
      ip1min=kinput.par1min
      ip1max=kinput.par1max
      ip2min=kinput.par2min
      ip2max=kinput.par2max
      ip1step=kinput.par1step
      ip2step=kinput.par2step
    elif kernel_type in (3,31,32,41,53,5):
      if kinput.nrange>1:
        if kinput.par1max>kinput.par1min:
          dpar= np.power(kinput.par1max/kinput.par1min,1/(kinput.nrange-1))
          ip1max=kinput.nrange
        else:
          dpar=1.0
          ip1max=1.0
      else:
        ip1max=1.0
        dpar=1.0

      ip1min=1
      ip2min=kinput.par2min
      ip2max=kinput.par2max
      ip1step=1
      ip2step=kinput.par2step
    else: 
      ip1min=1
      ip1max=1
      ip2min=1
      ip2max=1
      ip1step=1
      ip2step=1

  #  vnfold=4 # number of validation folds
    mdata=xdatacls_val.xdata_rel[0].shape[0]
    vnfold=self.vnfold # number of validation folds
    vxsel=np.floor(np.random.rand(mdata)*vnfold)
    vxsel=vxsel-(vxsel==vnfold)
  ##  vpredtr=np.zeros(vnfold) # valid
    vpred=np.zeros(vnfold) # validation-test score per fold

    print('C,D,par1,par2,validation test accuracy')

    # scanning the parameter space

    if xdatacls_val.ieval_type in (0,10,11):
      xxmax=-np.inf
    else:
      xxmax=np.inf

    penalty=xdatacls_val.penalty.crossval
    crange=np.arange(penalty.par1min,penalty.par1max+penalty.par1step/2, \
                     penalty.par1step)
    drange=np.arange(penalty.par2min,penalty.par2max+penalty.par2step/2, \
                     penalty.par2step)

    p1range=np.arange(ip1min,ip1max+ip1step/2,ip1step)
    p2range=np.arange(ip2min,ip2max+ip2step/2,ip2step)

    for iC in crange:
      for iD in drange:
        for ip1 in p1range:
          for ip2 in p2range:
            if kernel_type in (3,31,32,41,53,5): 
              dpar1=kinput.par1min*dpar**(ip1-1)
              dpar2=ip2
            else:
              dpar1=ip1
              dpar2=ip2

            xdatacls_val.penalty.c=iC
            xdatacls_val.penalty.d=iD
            rkernel.kernel_params.ipar1=dpar1
            rkernel.kernel_params.ipar2=dpar2

            for vifold in range(vnfold):

              xdatacls_val.split_train_test(vxsel,vifold)
              xdatacls_val.mvm_datasplit()        
              xdatacls_val.xranges_rel=mvm_ranges(xdatacls_val.xdata_tra, \
                                               xdatacls_val.nrow)
              xdatacls_val.xranges_rel_test=mvm_ranges(xdatacls_val.xdata_tes, \
                                               xdatacls_val.nrow)
              if xdatacls.category==0 or xdatacls.category==3:
                ## pass
                mvm_glm(xdatacls_val)
                mvm_ygrid(xdatacls_val)
              else:
                mvm_largest_category(xdatacls_val)

              if self.report==1:
                print('validation training')
              xdatacls_val.mvm_train()

  # validation test
              if self.report==1:
                print('validation test on validation test')
              cPredict=xdatacls_val.mvm_test() 

  # counts the proportion of items predicted correctly
  # ##############################################
              cEval=mvm_eval(xdatacls_val.ieval_type,nrow,xdatacls_val, \
                                 cPredict.Zrow)[0]
              if xdatacls_val.ieval_type in (0,10,11):
                if xdatacls_val.ibinary==0:
                  vpred[vifold]=cEval.accuracy
                elif xdatacls_val.ibinary==1:
                  vpred[vifold]=cEval.f1
              else:
                vpred[vifold]=cEval.deval

            print('%9.5g'%iC,'%9.5g'%iD,'%9.5g'%dpar1,'%9.5g'%dpar2, \
                  '%9.5g'%(np.mean(vpred)))
            ## print(iC,iD,dpar1,dpar2,np.mean(vpred))
  # searching for the best configuration in validation
            mvpred=np.mean(vpred)

            if xdatacls_val.ieval_type in (0,10,11):
              if mvpred>xxmax:
                xxmax=mvpred
                xparam.c=iC
                xparam.d=iD
                xparam.par1=dpar1
                xparam.par2=dpar2
                print('The best:',xxmax)
            else:
              if mvpred<xxmax:
                xxmax=mvpred
                xparam.c=iC
                xparam.d=iD
                xparam.par1=dpar1
                xparam.par2=dpar2
                print('The best:',xxmax)

            sys.stdout.flush()

    best_param=xparam

    return best_param