# Module-level imports used by the methods below
import math

import numpy as np
import torch
from torch.utils.data import DataLoader

import NN  # project-local module providing Data and DR_Network


def calculate(self, training_file):
    """
    Since the GP standardizes both inputs and outputs, the data is prepared
    here as well. Note: only X is scaled below; Y is used as loaded.
    """
    # Assumes the training data is stored as [X, Y] columns
    train = np.loadtxt(training_file)
    train_x = train[:, :self.mean_var[0]]
    train_y = train[:, self.mean_var[0]:]

    #####################################################
    # Step 1: standardize X; convert Y to a tensor      #
    #####################################################
    X_0 = torch.as_tensor(self.DR_func.Scale(train_x))
    train_y = torch.as_tensor(train_y)

    #####################################################
    # Step 2: train on the training data                #
    #####################################################
    # Batch size: largest power of two <= 10% of the data, capped at 256;
    # fall back to a single full batch for very small datasets.
    if len(X_0) * .1 >= 2:
        num_batch = min(2 ** int(math.log(len(X_0) * .1, 2)), 256)
    else:
        num_batch = len(X_0)

    dataset = NN.Data(X_0, train_y)
    train_loader = DataLoader(dataset=dataset, batch_size=num_batch,
                              shuffle=True)

    self.mean_func.train()
    for epoch in range(self.mean_var[-1]):
        for batch_idx, (inputs, labels) in enumerate(train_loader):
            self.mean_func.optimiser.zero_grad()
            # Get the network's estimate of y
            y_hat = self.mean_func(inputs)
            # Compute the loss and backpropagate
            loss = self.mean_func.error_function(labels, y_hat)
            loss.backward()
            self.mean_func.optimiser.step()
            if batch_idx == 0 and epoch % 100 == 0:
                print("loss: %s" % loss.item())
    self.mean_func.eval()

    print('NN model created')
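# ---------------------------------------------------------------------------
# Sketch: the batch-size heuristic used by both calculate() methods in this
# section. The batch size is the largest power of two no greater than 10% of
# the dataset, capped at 256; datasets too small for that train full-batch.
# `pick_batch_size` is an illustrative helper name, not part of the original
# class, shown only to make the inline arithmetic explicit.
# ---------------------------------------------------------------------------
def pick_batch_size(n_samples, cap=256):
    target = n_samples * 0.1
    if target >= 2:
        # Largest power of two <= target, but never more than `cap`
        return min(2 ** int(math.log(target, 2)), cap)
    # Too few samples for batching: use one full batch
    return n_samples

# e.g. 1000 samples -> target 100 -> 2**6 = 64; 15 samples -> full batch of 15
assert pick_batch_size(1000) == 64
assert pick_batch_size(15) == 15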
def calculate(self, train_x, train_y):
    """
    The NN works best with normalized values; assumes train_x and train_y
    arrive unstandardized.
    """
    dim_in = self.NN_var[0]

    #####################################################
    # Step 1: normalize X                               #
    #####################################################
    X_0 = torch.as_tensor(self.Scale(train_x))
    # After scaling, every input dimension spans [0, 1]
    self.norm_range = np.c_[np.zeros(dim_in), np.ones(dim_in)]

    #####################################################
    # Step 2: set the seed for reproducible results     #
    #####################################################
    torch.random.manual_seed(self.seed_value)

    #####################################################
    # Step 3: create the model                          #
    #####################################################
    self.Model = NN.DR_Network(self.dim_DR, self.NN_var)

    #####################################################
    # Step 4: train on the training data                #
    #####################################################
    # Batch size: largest power of two <= 10% of the data, capped at 256;
    # fall back to a single full batch for very small datasets.
    if len(X_0) * .1 >= 2:
        num_batch = min(2 ** int(math.log(len(X_0) * .1, 2)), 256)
    else:
        num_batch = len(X_0)

    dataset = NN.Data(X_0, torch.as_tensor(train_y))
    train_loader = DataLoader(dataset=dataset, batch_size=num_batch,
                              shuffle=True)

    self.Model.train()
    for epoch in range(self.NN_var[-3]):
        for batch_idx, (inputs, labels) in enumerate(train_loader):
            self.Model.optimiser.zero_grad()
            # Get the DR network's latent code, y estimate, and x reconstruction
            _, y_hat, x_hat = self.Model(inputs)
            # error_function(x estimate, y estimate, true normalized x,
            #                true y, x lower bound, x upper bound, lam, P)
            loss = self.Model.error_function(x_hat, y_hat, inputs, labels,
                                             self.norm_range[:, 0],
                                             self.norm_range[:, 1],
                                             self.NN_var[-2],
                                             self.NN_var[-1])
            loss.backward()
            self.Model.optimiser.step()
            if batch_idx == 0 and epoch % 100 == 0:
                print("loss: %s" % loss.item())
    self.Model.eval()

    self.DR_range = self.Model.DR_range

    print('NN model created')
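# ---------------------------------------------------------------------------
# Sketch: the kind of min-max scaling Scale() is assumed to perform. Each
# input column is mapped onto [0, 1], which is why norm_range above is simply
# a column of zeros (lower bounds) next to a column of ones (upper bounds).
# `minmax_scale` is an illustrative name; the real Scale() may differ.
# ---------------------------------------------------------------------------
def minmax_scale(x):
    lo, hi = x.min(axis=0), x.max(axis=0)
    return (x - lo) / (hi - lo)

x_demo = np.array([[1.0, 10.0], [3.0, 30.0], [5.0, 50.0]])
print(minmax_scale(x_demo))  # each column now spans [0, 1]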