def train(self):
    print("Total number of parameters: %d" % (self.hyp.shape[0]))

    # Gradients from autograd
    NLML = value_and_grad(self.likelihood)

    start_time = timeit.default_timer()
    for i in range(1, self.max_iter + 1):
        # Fetch minibatch
        self.X_batch, self.y_batch = fetch_minibatch(self.X, self.y, self.N_batch)

        # Compute likelihood and gradients
        nlml, D_NLML = NLML(self.hyp)

        # Update hyper-parameters
        self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(
            self.hyp, D_NLML, self.mt_hyp, self.vt_hyp, self.lrate, i)

        if i % self.monitor_likelihood == 0:
            elapsed = timeit.default_timer() - start_time
            print('Iteration: %d, NLML: %.2f, Time: %.2f' % (i, nlml, elapsed))
            start_time = timeit.default_timer()

    nlml, D_NLML = NLML(self.hyp)
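This loop, like the variants below, draws a fresh random minibatch at every iteration through a fetch_minibatch helper that is not shown in this listing. A minimal sketch of what such a helper could look like, assuming it simply subsamples N_batch rows uniformly at random from the paired arrays (the actual sampling scheme in the source may differ, e.g. sampling with replacement):

import numpy as np

def fetch_minibatch(X, y, N_batch):
    # Assumed behavior: pick N_batch row indices uniformly at random,
    # without replacement, and return the corresponding slices of X and y.
    N = X.shape[0]
    idx = np.random.choice(N, N_batch, replace=False)
    return X[idx, :], y[idx, :]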
def train(self):
    # Gradients from autograd
    MSE = value_and_grad(self.MSE)

    for i in range(1, self.max_iter + 1):
        # Fetch minibatch
        self.X_batch, self.Y_batch = fetch_minibatch(self.X, self.Y, self.N_batch)

        # Compute MSE and gradients
        MSE_value, D_MSE = MSE(self.hyp)

        # Update hyper-parameters
        self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(
            self.hyp, D_MSE, self.mt_hyp, self.vt_hyp, self.lrate, i)

        if i % self.monitor_likelihood == 0:
            print("Iteration: %d, MSE: %.5e" % (i, MSE_value))
def train(self):
    # Gradients from autograd
    NLML = value_and_grad(self.likelihood)

    for i in range(1, self.max_iter + 1):
        # Fetch minibatch
        self.X_batch, self.Y_batch = fetch_minibatch(self.X, self.Y, self.N_batch)

        # Compute likelihood and gradients
        NLML_value, D_NLML = NLML(self.hyp)

        # Update hyper-parameters
        self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(
            self.hyp, D_NLML, self.mt_hyp, self.vt_hyp, self.lrate, i)

        if i % self.monitor_likelihood == 0:
            print("Iteration: %d, likelihood: %.2f" % (i, NLML_value))
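The three class-based variants above all read the same attributes from self: the training data, the flattened hyper-parameter vector, the Adam moment accumulators, and a handful of optimization settings. A hypothetical constructor that would make these train methods runnable might look like the following; the attribute names are taken from how the methods use them, but the default values are placeholders rather than values from the source:

import numpy as np

class Model:
    def __init__(self, X, Y, hyp, N_batch=64, max_iter=2000,
                 lrate=1e-3, monitor_likelihood=10):
        # Training data and flattened hyper-parameter vector
        self.X, self.Y = X, Y
        self.hyp = hyp

        # Optimization settings (placeholder defaults)
        self.N_batch = N_batch
        self.max_iter = max_iter
        self.lrate = lrate
        self.monitor_likelihood = monitor_likelihood

        # Adam first- and second-moment accumulators, one entry per hyper-parameter,
        # initialized to zero as in the standalone variant below
        self.mt_hyp = np.zeros(hyp.shape)
        self.vt_hyp = np.zeros(hyp.shape)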
def train():
    X = ModelInfo["X"]
    y = ModelInfo["y"]

    init_params()

    max_iter = ModelInfo["max_iter"]
    N_batch = ModelInfo["N_batch"]
    hyp = ModelInfo["hyp"]
    monitor_likelihood = ModelInfo["monitor_likelihood"]

    # Adam optimizer parameters
    mt_hyp = np.zeros(hyp.shape)
    vt_hyp = np.zeros(hyp.shape)
    lrate = ModelInfo["lrate"]

    print("Total number of parameters: %d" % (hyp.shape[0]))

    # Gradients from autograd
    UB = value_and_grad(likelihood_UB)

    start = time.time()
    for i in range(1, max_iter + 1):
        # Fetch minibatch
        X_batch, y_batch = fetch_minibatch(X, y, N_batch)
        ModelInfo.update({"X_batch": X_batch})
        ModelInfo.update({"y_batch": y_batch})

        # Compute likelihood_UB and gradients
        hyp = ModelInfo["hyp"]
        NLML, D_NLML = UB(hyp)

        # Update hyper-parameters
        hyp, mt_hyp, vt_hyp = stochastic_update_Adam(hyp, D_NLML, mt_hyp, vt_hyp, lrate, i)
        ModelInfo.update({"hyp": hyp})

        if i % monitor_likelihood == 0:
            end = time.time()
            print("Iteration: %d, likelihood_UB: %.2f, elapsed time: %.2f seconds"
                  % (i, NLML, end - start))
            start = time.time()

    NLML, D_NLML = UB(hyp)
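Every variant delegates the parameter update to stochastic_update_Adam, which, judging from its call signature, takes the current parameter vector, its gradient, the first- and second-moment accumulators, the learning rate, and the iteration counter, and returns the three updated quantities. A sketch of a standard Adam step with that interface is given below; the beta1, beta2, and epsilon constants are the usual defaults and are assumptions, not values from the source:

import numpy as np

def stochastic_update_Adam(w, grad_w, mt, vt, lrate, iteration,
                           beta1=0.9, beta2=0.999, epsilon=1e-8):
    # Exponential moving averages of the gradient and the squared gradient
    mt = beta1 * mt + (1.0 - beta1) * grad_w
    vt = beta2 * vt + (1.0 - beta2) * grad_w**2

    # Bias correction using the 1-based iteration counter
    mt_hat = mt / (1.0 - beta1**iteration)
    vt_hat = vt / (1.0 - beta2**iteration)

    # Parameter step
    w = w - lrate * mt_hat / (np.sqrt(vt_hat) + epsilon)

    return w, mt, vt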