def trainnoParallel(self, scaledAlpha, **kwargs):
    """
    Train the kernel hyperparameters by running the optimization algorithm
    from a single random starting point (no parallel restarts).

    Args:
        scaledAlpha: See the definition above.
    """
    dim = self.dimension
    # Draw a random starting point for the hyperparameters.
    alpha = np.random.randn(dim)
    variance = np.random.rand(1)
    # Pack the starting point: sqrt(exp(.)) for the alpha entries,
    # exp(.) for the variance, and 0.0 for the mean.
    st = np.concatenate((np.sqrt(np.exp(alpha)), np.exp(variance), [0.0]))
    args2 = {}
    args2['start'] = st
    # Run the optimizer from this single starting point.
    job = misc.kernOptWrapper(self, **args2)
    temp = job.xOpt
    # Recover the hyperparameters from the solution vector.
    self.alpha = np.sqrt(np.exp(np.array(temp[0:self.dimension])))
    self.variance = np.exp(np.array(temp[self.dimension]))
    self.mu = np.array(temp[self.dimension + 1])
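
# A minimal, self-contained sketch (illustrative only) of the pack/unpack
# transforms used by trainnoParallel: the start vector stores
# sqrt(exp(alpha)) and exp(variance) plus a 0.0 mean, and the same transforms
# are applied to the optimizer's solution vector. The dimension and random
# draws below are hypothetical values, and misc.kernOptWrapper is not called.
if __name__ == "__main__":
    import numpy as np

    dim = 3
    alpha = np.random.randn(dim)   # raw length-scale parameters
    variance = np.random.rand(1)   # raw signal-variance parameter

    # Pack the starting point exactly as trainnoParallel does.
    start = np.concatenate((np.sqrt(np.exp(alpha)), np.exp(variance), [0.0]))

    # Stand-in for job.xOpt: reuse the start vector just to show the unpacking.
    x_opt = start
    alpha_hat = np.sqrt(np.exp(np.array(x_opt[0:dim])))
    variance_hat = np.exp(np.array(x_opt[dim]))
    mu_hat = np.array(x_opt[dim + 1])
    print(alpha_hat, variance_hat, mu_hat)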