Example 1

import os

import vadam
from vadam.datasets import Dataset
from sklearn.preprocessing import StandardScaler
def get_uci_dataset(dataset_name):
    # Load the 20 train/test splits of a UCI dataset bundled with vadam,
    # standardize x and y, and return each split together with the target
    # std (y_scl.scale_[0]) for un-normalizing error metrics later.
    data_path = os.path.join(vadam.__path__[0], 'data')
    ds = []
    for i in range(20):
        dataset = Dataset(data_set=dataset_name + str(i), data_folder=data_path)
        x_train, y_train = dataset.load_full_train_set(use_cuda=False)
        x_test, y_test = dataset.load_full_test_set(use_cuda=False)
        y_train = y_train.unsqueeze(1).double().numpy()
        y_test = y_test.unsqueeze(1).double().numpy()
        x_train = x_train.double().numpy()
        x_test = x_test.double().numpy()
        # Standardize features and targets; assigning the returned arrays
        # avoids relying on copy=False mutating the inputs in place.
        scl = StandardScaler(copy=False)
        x_train = scl.fit_transform(x_train)
        x_test = scl.transform(x_test)
        y_scl = StandardScaler(copy=False)
        y_train = y_scl.fit_transform(y_train)
        y_test = y_scl.transform(y_test)
        ds.append((None, None, x_train, y_train.squeeze(), x_test, y_test.squeeze(), y_scl.scale_[0]))
    return ds
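
A minimal usage sketch, assuming the vadam package bundles the 20 UCI train/test splits this helper expects; the "boston" prefix is a hypothetical placeholder for any dataset whose splits are named <prefix>0 ... <prefix>19 under vadam's data folder:

# Hypothetical usage: load all 20 splits, then unpack the first one.
splits = get_uci_dataset("boston")
_, _, x_train, y_train, x_test, y_test, y_scale = splits[0]
print(x_train.shape, x_test.shape)   # standardized feature matrices
print(y_scale)                       # target std, for un-normalizing RMSE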
Example 2

import os
import pickle

import numpy as np
import torch

# DEFAULT_DATA_FOLDER is assumed to be exported by vadam.datasets alongside Dataset.
from vadam.datasets import Dataset, DEFAULT_DATA_FOLDER
class Experiment():

    def __init__(self, data_set, model_params, train_params, optim_params,
                 evals_per_epoch=1, normalize_x=False, normalize_y=False,
                 results_folder="./results", data_folder=DEFAULT_DATA_FOLDER,
                 use_cuda=torch.cuda.is_available()):

        # Store parameters
        self.data_set = data_set
        self.model_params = model_params
        self.train_params = train_params
        self.optim_params = optim_params
        self.evals_per_epoch = evals_per_epoch
        self.normalize_x = normalize_x
        self.normalize_y = normalize_y
        self.data_folder = data_folder
        self.results_folder = results_folder
        self.use_cuda = use_cuda

        # Set random seed
        seed = train_params['seed']
        torch.manual_seed(seed)
        if self.use_cuda:
            torch.cuda.manual_seed_all(seed)

        # Initialize metric history
        self.objective_history = []

        # Initialize data
        self.data = Dataset(data_set = data_set, data_folder = data_folder)

        ## All subclasses should override:

        # Define folder name for results
        self.folder_name = None

        # Define prediction function
        self.prediction = None

        # Define objective
        self.objective = None

        # Initialize model
        self.model = None

        # Initialize optimizer
        self.optimizer = None

        # Initialize metric history
        self.metric_history = None

        # Initialize final metric
        self.final_metric = None

    def run(self, log_metric_history=True):

        # Prepare
        num_epochs = self.train_params['num_epochs']
        batch_size = self.train_params['batch_size']
        seed = self.train_params['seed']

        # Set random seed
        torch.manual_seed(seed)
        if self.use_cuda:
            torch.cuda.manual_seed_all(seed)

        # Prepare data loader for training
        train_loader = self.data.get_train_loader(batch_size = batch_size)

        # Load full data set for evaluation
        x_train, y_train = self.data.load_full_train_set(use_cuda = self.use_cuda)
        x_test, y_test = self.data.load_full_test_set(use_cuda = self.use_cuda)

        # Compute normalization of x
        if self.normalize_x:
            self.x_means = torch.mean(x_train, dim=0)
            self.x_stds = torch.std(x_train, dim=0)
            self.x_stds[self.x_stds == 0] = 1

        # Compute normalization of y
        if self.normalize_y:
            self.y_mean = torch.mean(y_train)
            self.y_std = torch.std(y_train)
            if self.y_std==0:
                self.y_std = 1
                
        # Set iterations for evaluation: evaluate evals_per_epoch times per
        # epoch, at evenly spaced minibatch indices (at most once per batch)
        num_batches = int(np.ceil(self.data.get_train_size() / batch_size))
        evals_per_epoch = min(self.evals_per_epoch, num_batches)
        eval_iters = np.round((1 + np.arange(evals_per_epoch)) * (num_batches / evals_per_epoch)).astype(int)

        # Train model
        for epoch in range(num_epochs):

            # Set model in training mode
            self.model.train(True)

            # Initialize batch objective accumulator
            batch_objective = []

            for i, (x, y) in enumerate(train_loader):

                # Prepare minibatch
                if self.use_cuda:
                    x, y = x.cuda(), y.cuda()

                # Normalize x and y
                if self.normalize_x:
                    x = (x-self.x_means)/self.x_stds
                if self.normalize_y:
                    y = (y-self.y_mean)/self.y_std

                # Update parameters. Wrapping the forward/backward pass in a
                # closure lets optimizers that re-evaluate the loss several
                # times per step (e.g. via Monte Carlo sampling) call it repeatedly.
                def closure():
                    self.optimizer.zero_grad()
                    logits = self.prediction(x)
                    loss = self.objective(logits, y)
                    loss.backward()
                    return loss
                loss = self.optimizer.step(closure)
                
                if log_metric_history and (i+1) in eval_iters:
                    
                    # Set model in test mode
                    self.model.train(False)
        
                    # Evaluate model
                    with torch.no_grad():
                        self._evaluate_model(self.metric_history, x_train, y_train, x_test, y_test)

                # Store batch objective
                batch_objective.append(loss.detach().cpu().item())                    
            
            # Compute and store average objective from last epoch
            self.objective_history.append(np.mean(batch_objective))

            if log_metric_history:

                # Print progress
                self._print_progress(epoch)

            else:

                # Print average objective from last epoch
                self._print_objective(epoch)

        # Set model in test mode
        self.model.train(False)

        # Evaluate model
        with torch.no_grad():
            self._evaluate_model(self.final_metric, x_train, y_train, x_test, y_test)

    def _evaluate_model(self, metric_dict, x_train, y_train, x_test, y_test):

        ## All subclasses should override:
        raise NotImplementedError

    def _print_progress(self, epoch):

        ## All subclasses should override:
        raise NotImplementedError

    def _print_objective(self, epoch):

        # Print average objective from last epoch
        print('Epoch [{}/{}], Objective: {:.4f}'.format(
                epoch+1,
                self.train_params['num_epochs'],
                self.objective_history[-1]))

    def save(self, save_final_metric=True, save_metric_history=True,
             save_objective_history=True, save_model=True, save_optimizer=True,
             create_folder=True, folder_path=None):

        # Define folder path
        if not folder_path:
            folder_path = self.folder_name

        # Create folder
        if create_folder:
            os.makedirs(folder_path, exist_ok=True)

        # Store state dictionaries for model and optimizer
        if save_model:
            torch.save(self.model.state_dict(), os.path.join(folder_path, 'model.pt'))
        if save_optimizer:
            torch.save(self.optimizer.state_dict(), os.path.join(folder_path, 'optimizer.pt'))

        # Store history
        if save_final_metric:
            with open(os.path.join(folder_path, 'final_metric.pkl'), 'wb') as output:
                pickle.dump(self.final_metric, output)
        if save_metric_history:
            with open(os.path.join(folder_path, 'metric_history.pkl'), 'wb') as output:
                pickle.dump(self.metric_history, output)
        if save_objective_history:
            with open(os.path.join(folder_path, 'objective_history.pkl'), 'wb') as output:
                pickle.dump(self.objective_history, output)

    def load(self, load_final_metric=True, load_metric_history=True,
             load_objective_history=True, load_model=True, load_optimizer=True,
             folder_path=None):

        # Define folder path
        if not folder_path:
            folder_path = self.folder_name
        # Load state dictionaries for model and optimizer
        if load_model:
            state_dict = torch.load(os.path.join(folder_path, 'model.pt'))
            self.model.load_state_dict(state_dict)
            self.model.train(False)
        if load_optimizer:
            state_dict = torch.load(os.path.join(folder_path, 'optimizer.pt'))
            self.optimizer.load_state_dict(state_dict)

        # Load history
        if load_final_metric:
            with open(os.path.join(folder_path, 'final_metric.pkl'), 'rb') as pkl_file:
                self.final_metric = pickle.load(pkl_file)
        if load_metric_history:
            with open(os.path.join(folder_path, 'metric_history.pkl'), 'rb') as pkl_file:
                self.metric_history = pickle.load(pkl_file)
        if load_objective_history:
            with open(os.path.join(folder_path, 'objective_history.pkl'), 'rb') as pkl_file:
                self.objective_history = pickle.load(pkl_file)
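
Because run() relies on attributes that __init__ leaves as None, a concrete experiment must subclass Experiment, fill them in, and implement the two abstract hooks. The sketch below is illustrative only, assuming a plain torch.optim.Adam optimizer, an MSE objective, and hypothetical model_params/optim_params keys ('hidden_sizes', 'act_func', 'learning_rate'); the vadam repository ships its own experiment subclasses.

from vadam.models import MLP

class MLPRegExperiment(Experiment):
    # Hypothetical subclass: fills in the attributes the base class leaves
    # as None and implements the two abstract hooks run() calls.

    def __init__(self, data_set, model_params, train_params, optim_params, **kwargs):
        super().__init__(data_set, model_params, train_params, optim_params, **kwargs)
        self.folder_name = os.path.join(self.results_folder, data_set)
        self.model = MLP(input_size=self.data.num_features,
                         hidden_sizes=model_params['hidden_sizes'],
                         output_size=None,
                         act_func=model_params['act_func'])
        if self.use_cuda:
            self.model = self.model.cuda()
        # run() calls self.prediction(x); squeeze the trailing dim so
        # predictions match the shape of y for the MSE objective.
        self.prediction = lambda x: self.model(x).squeeze(-1)
        self.objective = torch.nn.MSELoss()
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=optim_params['learning_rate'])
        self.metric_history = {'test_mse': []}
        self.final_metric = {'test_mse': []}

    def _evaluate_model(self, metric_dict, x_train, y_train, x_test, y_test):
        # Append test MSE to whichever dict run() passes in
        # (metric_history during training, final_metric at the end).
        mse = self.objective(self.prediction(x_test), y_test).item()
        metric_dict['test_mse'].append(mse)

    def _print_progress(self, epoch):
        print('Epoch [{}/{}], Test MSE: {:.4f}'.format(
            epoch + 1, self.train_params['num_epochs'],
            self.metric_history['test_mse'][-1]))

With such a subclass, exp.run() trains and evaluates the model, and exp.save() persists the model, optimizer, and metric pickles to exp.folder_name.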
Example 3

import torch

# seed, use_cuda, data_set, batch_size, hidden_sizes, and act_func are
# assumed to be defined earlier in the script this snippet was taken from.
torch.manual_seed(seed)
if use_cuda:
    torch.cuda.manual_seed_all(seed)

###############
## Load data ##
###############

from vadam.datasets import Dataset

data = Dataset(data_set=data_set, data_folder="./../vadam/data")
train_loader = data.get_train_loader(batch_size)

x_train, y_train = data.load_full_train_set(use_cuda=use_cuda)
x_test, y_test = data.load_full_test_set(use_cuda=use_cuda)

##################
## Define model ##
##################

from vadam.models import MLP

model = MLP(input_size=data.num_features,
            hidden_sizes=hidden_sizes,
            output_size=None,
            act_func=act_func)
if use_cuda:
    model = model.cuda()

#######################