def findMin(self, x, y):
    meanfunc = self.model.meanfunc
    covfunc = self.model.covfunc
    likfunc = self.model.likfunc
    inffunc = self.model.inffunc
    hypInArray = self._convert_to_array()
    if isinstance(covfunc, pyGPs.cov.SM):
        Lm = len(meanfunc.hyp)
        Lc = len(covfunc.hyp)

    # initial optimization run from the current hyperparameters
    opt = rt_minimize.rt_minimize(hypInArray, self._nlzAnddnlz, length=-40)
    optimalHyp = deepcopy(opt[0])
    funcValue = opt[1][-1]

    if self.searchConfig:
        searchRange = self.searchConfig.meanRange + \
            self.searchConfig.covRange + self.searchConfig.likRange
        if not (self.searchConfig.num_restarts or self.searchConfig.min_threshold):
            raise Exception('Specify at least one of the stop conditions')
        while True:
            self.trailsCounter += 1  # increase counter
            # TODO: replace with better initialization
            for i in xrange(hypInArray.shape[0]):  # random init of hyp
                hypInArray[i] = np.random.uniform(low=searchRange[i][0],
                                                  high=searchRange[i][1])
            if isinstance(self.model.covfunc, pyGPs.cov.SM):
                hyps = cov.initSMhypers(self.model.covfunc.para[0], x, y)
                hypInArray[Lm:Lm + Lc] = hyps[:]
            # keep this restart only if its value beats the best minimum so far
            try:
                thisopt = rt_minimize.rt_minimize(hypInArray, self._nlzAnddnlz, length=-40)
                if thisopt[1][-1] < funcValue:
                    funcValue = thisopt[1][-1]
                    optimalHyp = thisopt[0]
            except Exception:
                self.errorCounter += 1
                if self.searchConfig.num_restarts and self.errorCounter > self.searchConfig.num_restarts / 2:
                    print "[RTMinimize] %d out of %d trials failed during optimization" % (
                        self.errorCounter, self.trailsCounter)
                    raise Exception("Over half of the trials failed for minimize")
            # stop if num_restarts is exceeded
            if self.searchConfig.num_restarts and self.trailsCounter > self.searchConfig.num_restarts - 1:
                print "[RTMinimize] %d out of %d trials failed during optimization" % (
                    self.errorCounter, self.trailsCounter)
                return optimalHyp, funcValue
            # stop if the provided minimal threshold is reached
            if self.searchConfig.min_threshold and funcValue <= self.searchConfig.min_threshold:
                print "[RTMinimize] %d out of %d trials failed during optimization" % (
                    self.errorCounter, self.trailsCounter)
                return optimalHyp, funcValue

    return optimalHyp, funcValue
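# Usage sketch (illustrative, not from the original source): findMin is normally
# exercised indirectly through a pyGPs GP model, which supplies meanfunc/covfunc/
# likfunc/inffunc and the restart configuration. This assumes the standard pyGPs
# API (pyGPs.GPR, setOptimizer, optimize) and that x, y are numpy training arrays.
def _demo_findMin_via_gpr(x, y):
    import pyGPs
    model = pyGPs.GPR()
    model.setOptimizer("Minimize", num_restarts=30)  # random-restart search, as in findMin above
    model.optimize(x, y)                             # triggers hyperparameter optimization
    return model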
def bocpdGPTlearn(
    X,        # training data
    model,    # the current GP model
    theta_m,  # hyperparameters of the GP model
    theta_h,  # hyperparameters of the hazard function
    dt=1,     # the time step
):
    max_minimize_iter = 30
    num_hazard_params = len(theta_h)

    if model.ScalePrior:
        # alpha from the prior on scale (beta is assumed to be the identity)
        theta_s = model.ScalePrior[0]
    else:
        theta_s = 0

    # pack all hyperparameters into one flat vector for the optimizer
    theta = np.append(np.append(theta_h, theta_m), theta_s)

    (theta, nlml, i) = rt_minimize(
        theta,
        dbocpdGP,
        -max_minimize_iter,
        X,
        model,
        num_hazard_params,
        dt,
    )

    # unpack the optimized vector back into its components
    hazard_params = theta[:num_hazard_params]
    model_params = theta[num_hazard_params:-1]
    scale_params = theta[-1]

    return (hazard_params, model_params, scale_params, nlml[-1])
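# Illustrative sketch (shapes and values are made-up assumptions, not from the
# source): bocpdGPTlearn optimizes one flat vector [theta_h, theta_m, theta_s]
# and slices it back apart after rt_minimize returns, as shown below.
def _demo_theta_packing():
    import numpy as np
    theta_h = np.array([np.log(0.01)])    # hazard-function hyperparameters
    theta_m = np.array([0.5, -1.0, 0.1])  # GP model hyperparameters
    theta_s = 2.0                         # alpha of the scale prior
    theta = np.append(np.append(theta_h, theta_m), theta_s)

    num_hazard_params = len(theta_h)
    hazard_params = theta[:num_hazard_params]    # -> array([log(0.01)])
    model_params = theta[num_hazard_params:-1]   # -> array([0.5, -1.0, 0.1])
    scale_params = theta[-1]                     # -> 2.0
    return hazard_params, model_params, scale_params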