Example no. 1
    def evaluate(self, inp):
        # Scale the raw inputs to [-1, 1] with the offsets/scales stored on the
        # model, run the PyTorch regressor, and map the predictions back to the
        # original output units. Assumes `bf`, `np`, and `torch` are imported
        # at module level.
        inp = bf.scale_data(inp, self.xoffsets, self.xscales, -1, 1)
        inp = torch.tensor(inp).float()
        pred = self.regressor.forward(inp).detach().numpy()
        return bf.unscale_data(pred[:, np.newaxis], self.yoffset, self.yscale, -1, 1)
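The `bf` helpers themselves are not shown in these examples. As context, here is a minimal sketch of what `scale_data`/`unscale_data` could look like for min-max scaling to [lb, ub], assuming `offset` is the feature minimum and `scale` the feature range; the actual definitions in `bf` may differ.

import numpy as np

def scale_data(data, offset, scale, lb, ub):
    # Map data from its original range [offset, offset + scale] to [lb, ub].
    # offset/scale may be per-feature arrays; NumPy broadcasting handles that.
    return lb + (ub - lb) * (data - offset) / scale

def unscale_data(data, offset, scale, lb, ub):
    # Inverse of scale_data: map [lb, ub] back to the original range.
    return offset + scale * (data - lb) / (ub - lb)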
Example no. 2
def scaling_and_dict(inputs, outputs):
    """
    Helper function to build the scaling dictionary to pass to the Model class.
    """
    x_scaled, x_scales, x_offsets = bf.do_scaling(inputs, -1, 1)
    y_offset, y_scale = bf.get_scale(outputs)
    y_scaled = bf.scale_data(outputs, y_offset, y_scale, -1, 1)

    d = {
        "xscales": x_scales,
        "xoffsets": x_offsets,
        "yoffset": y_offset,
        "yscale": y_scale,
    }
    return x_scaled, y_scaled, d
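A hypothetical usage sketch, assuming the `bf` helpers are importable; the array shapes here are invented for illustration and not taken from the examples.

import numpy as np

# Hypothetical raw data: 1000 samples, 3 input features, 1 scalar output each.
inputs = np.random.rand(1000, 3)
outputs = np.random.rand(1000)

x_scaled, y_scaled, scaling_dict = scaling_and_dict(inputs, outputs)
# scaling_dict now holds xscales/xoffsets/yoffset/yscale and can be passed to
# the Model class so that evaluate() can undo the scaling later.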
Example no. 3
    def train_on_split_data(self, input_dict, output_dict, epochs=5000, batch_size=4096, saving=True):
        # Scale each split to [-1, 1] with the offsets/scales stored on the model.
        X_train = bf.scale_data(input_dict["train"], self.xoffsets, self.xscales, -1, 1)
        X_val = bf.scale_data(input_dict["val"], self.xoffsets, self.xscales, -1, 1)
        X_test = bf.scale_data(input_dict["test"], self.xoffsets, self.xscales, -1, 1)
        Y_train = bf.scale_data(output_dict["train"], self.yoffset, self.yscale, -1, 1)
        Y_val = bf.scale_data(output_dict["val"], self.yoffset, self.yscale, -1, 1)
        Y_test = bf.scale_data(output_dict["test"], self.yoffset, self.yscale, -1, 1)

        if saving:
            # Persist the scaled splits alongside the model files.
            np.save(self.path + self.name + "x_train.npy", X_train)
            np.save(self.path + self.name + "y_train.npy", Y_train)

            np.save(self.path + self.name + "x_test.npy", X_test)
            np.save(self.path + self.name + "y_test.npy", Y_test)

            np.save(self.path + self.name + "x_val.npy", X_val)
            np.save(self.path + self.name + "y_val.npy", Y_val)

        # Build and compile the Keras model with the custom quantile regression loss.
        self.model = self.build_model()
        self.model.compile(loss=self.quantile_regression, optimizer='adam')
        self.model.summary()

        # Keep the best weights by validation loss and stop early if it stalls.
        mc = tf.keras.callbacks.ModelCheckpoint(self.path + self.name + 'best_model_checkpoint.h5',
                                                monitor='val_loss', mode='min', save_best_only=True)
        es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=500)

        start = time.time()
        self.model.fit(X_train, Y_train, validation_data=(X_val, Y_val), epochs=epochs,
                       batch_size=batch_size, verbose=0, callbacks=[mc, es, bf.plot_losses])
        stop = time.time()

        self.model.save(self.path + self.name + "Model_Save.h5")
        self.model.save_weights(self.path + self.name + "Model_Weights.h5")

        print('Time to run in minutes was: ', (stop - start) / 60)

        # Predict on the held-out test set and unscale back to original units.
        model_fit = self.model.predict(X_test)
        self.model_fit = bf.unscale_data(model_fit, self.yoffset, self.yscale, -1, 1)

        self.X_test_fullscale = bf.unscale_data(X_test, self.xoffsets, self.xscales, -1, 1)
        self.Y_test_fullscale = bf.unscale_data(Y_test, self.yoffset, self.yscale, -1, 1)
        print("Training Complete")
Example no. 4
    def train_on_split_data(self, input_dict, output_dict, epochs=300, batch_size=4096, saving=True):
        # Scale each split to [-1, 1] with the offsets/scales stored on the model.
        X_train = bf.scale_data(input_dict["train"], self.xoffsets, self.xscales, -1, 1)
        X_test = bf.scale_data(input_dict["test"], self.xoffsets, self.xscales, -1, 1)
        Y_train = bf.scale_data(output_dict["train"], self.yoffset, self.yscale, -1, 1)
        Y_test = bf.scale_data(output_dict["test"], self.yoffset, self.yscale, -1, 1)

        X_train, Y_train = torch.tensor(X_train).float(), torch.tensor(Y_train).float()
        X_test, Y_test = torch.tensor(X_test).float(), torch.tensor(Y_test).float()

        # Wrap the tensors in DataLoaders, using the batch_size argument rather
        # than a hard-coded value.
        ds_train = torch.utils.data.TensorDataset(X_train, Y_train)
        self.dataloader_train = torch.utils.data.DataLoader(ds_train, batch_size=batch_size, shuffle=True)

        ds_test = torch.utils.data.TensorDataset(X_test, Y_test)
        self.dataloader_test = torch.utils.data.DataLoader(ds_test, batch_size=batch_size, shuffle=True)

        if saving:
            # Persist the scaled splits as NumPy arrays alongside the model files.
            np.save(self.path + self.name + "x_train.npy", X_train.numpy())
            np.save(self.path + self.name + "y_train.npy", Y_train.numpy())

            np.save(self.path + self.name + "x_test.npy", X_test.numpy())
            np.save(self.path + self.name + "y_test.npy", Y_test.numpy())

        # Build the Bayesian regressor, move it to the target device, and set up
        # the optimizer and criterion (done in self.config()).
        self.regressor = self.build_model()
        self.regressor.to(self.device)
        self.config()

        start = time.time()

        iteration = 0
        for epoch in range(epochs):
            for i, (datapoints, labels) in enumerate(self.dataloader_train):
                self.optimizer.zero_grad()

                # Sample the ELBO loss (data fit plus KL complexity cost) over a
                # few weight samples, then take a gradient step.
                loss = self.regressor.sample_elbo(inputs=datapoints.to(self.device),
                                                  labels=labels.to(self.device),
                                                  criterion=self.criterion,
                                                  sample_nbr=3,
                                                  complexity_cost_weight=1 / X_train.shape[0])
                loss.backward()
                self.optimizer.step()

                iteration += 1
                if iteration % 100 == 0:
                    # Periodically report how well the predictive confidence
                    # intervals cover the test targets.
                    ic_acc, under_ci_upper, over_ci_lower = self.evaluate_regression(self.regressor,
                                                                                     X_test.to(self.device),
                                                                                     Y_test.to(self.device),
                                                                                     samples=100,
                                                                                     std_multiplier=2)

                    print("CI acc: {:.4f}, CI upper acc: {:.4f}, CI lower acc: {:.4f}".format(ic_acc, under_ci_upper, over_ci_lower))

        stop = time.time()

        print('Time to run in minutes was: ', (stop - start) / 60)
        torch.save(self.regressor.state_dict(), self.path + self.name + "Model_Save.h5")
        print("Training Complete")
Example no. 5
    def evaluate(self, inp):
        # Scale the inputs, predict with the Keras model, and unscale the
        # predictions back to the original output units.
        inpsc = bf.scale_data(inp, self.xoffsets, self.xscales, -1, 1)
        pred = self.model.predict(inpsc)
        return bf.unscale_data(pred, self.yoffset, self.yscale, -1, 1)