import numpy as np

from sindy_utils import library_size  # assumed helper module; see the sketch below


def lorenz_coefficients(normalization, poly_order=3):
    # True SINDy coefficient matrix for the Lorenz system (sigma=10, beta=8/3,
    # rho=28), rescaled by the per-variable normalization factors.
    sigma = 10
    beta = 8 / 3
    rho = 28
    Xi = np.zeros((library_size(3, poly_order), 3))
    Xi[1, 0] = -sigma
    Xi[2, 0] = sigma * normalization[0] / normalization[1]
    Xi[1, 1] = rho * normalization[1] / normalization[0]
    Xi[2, 1] = -1
    Xi[6, 1] = -normalization[1] / (normalization[0] * normalization[2])
    Xi[3, 2] = -beta
    Xi[5, 2] = normalization[2] / (normalization[0] * normalization[1])
    return Xi
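Every example on this page calls library_size, whose definition is never shown. Below is a minimal sketch of what it computes, with the signature and behavior inferred from the call sites here (an assumption, not necessarily the authors' exact code): a polynomial library in n variables contains C(n+k-1, k) monomials of each degree k.

from scipy.special import binom


def library_size(n, poly_order, use_sine=False, include_constant=True):
    # Assumed implementation: count the candidate SINDy library functions.
    size = 0
    for k in range(poly_order + 1):
        size += int(binom(n + k - 1, k))  # monomials of degree k in n variables
    if use_sine:
        size += n  # one sin() term per variable
    if not include_constant:
        size -= 1  # drop the degree-0 constant term
    return size

For n=3 and poly_order=3 with a constant term this gives 1 + 3 + 6 + 10 = 20 library functions, consistent with the coefficient indices (up to 6) used above.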
Example 2
import numpy as np

from sindy_utils import library_size  # assumed helper module


def lorenz_coefficients(normalization, poly_order=3, sigma=10., beta=8/3, rho=28.):
    """
    Generate the SINDy coefficient matrix for the Lorenz system.

    Arguments:
        normalization - 3-element list or array specifying the scaling of each Lorenz variable
        poly_order - polynomial order of the SINDy model
        sigma, beta, rho - parameters of the Lorenz system
    """
    Xi = np.zeros((library_size(3, poly_order), 3))
    Xi[1, 0] = -sigma
    Xi[2, 0] = sigma * normalization[0] / normalization[1]
    Xi[1, 1] = rho * normalization[1] / normalization[0]
    Xi[2, 1] = -1
    Xi[6, 1] = -normalization[1] / (normalization[0] * normalization[2])
    Xi[3, 2] = -beta
    Xi[5, 2] = normalization[2] / (normalization[0] * normalization[1])
    return Xi
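With unit normalization the matrix reduces to the standard Lorenz coefficients, which makes a quick sanity check possible (hypothetical usage):

Xi = lorenz_coefficients(np.ones(3))
# Column 0 encodes dx/dt = -sigma*x + sigma*y
# Column 1 encodes dy/dt =  rho*x  - y - x*z
# Column 2 encodes dz/dt = -beta*z + x*y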
Example 3
import os

import numpy as np

from sindy_utils import library_size  # assumed helper module


def get_params_tf(training_data, hyper_params):
    """Build the SINDy autoencoder parameter dict from a dict of hyperparameters."""
    params = {}

    params['input_dim'] = 128
    params['latent_dim'] = 3
    params['model_order'] = 1
    params['poly_order'] = 3
    params['include_sine'] = False
    params['library_dim'] = library_size(params['latent_dim'],
                                         params['poly_order'],
                                         params['include_sine'], True)

    # sequential thresholding parameters
    params['sequential_thresholding'] = True
    params['coefficient_threshold'] = hyper_params['threshold']
    params['threshold_frequency'] = hyper_params['threshold_freq']
    params['coefficient_mask'] = np.ones(
        (params['library_dim'], params['latent_dim']))
    params['coefficient_initialization'] = 'constant'

    # loss function weighting
    params['loss_weight_decoder'] = 1.0
    params['loss_weight_sindy_z'] = hyper_params['eta2']
    params['loss_weight_sindy_x'] = hyper_params['eta1']
    params['loss_weight_sindy_regularization'] = hyper_params['eta3']

    params['activation'] = 'sigmoid'
    params['widths'] = [64, 32]

    # training parameters
    params['epoch_size'] = training_data['x'].shape[0]
    params['batch_size'] = hyper_params['batch_size']
    params['learning_rate'] = hyper_params['lr']

    params['data_path'] = os.getcwd() + '/'
    params['print_progress'] = True
    params['print_frequency'] = hyper_params['model_eval']

    # training time cutoffs
    params['max_epochs'] = hyper_params['epochs']
    params['refinement_epochs'] = hyper_params['epochs_refinement']
    return params
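A hypothetical call, with the hyper_params keys taken from the function body above:

hyper_params = {'threshold': 0.1, 'threshold_freq': 500,
                'eta1': 1e-4, 'eta2': 0.0, 'eta3': 1e-5,
                'batch_size': 1000, 'lr': 1e-3, 'model_eval': 500,
                'epochs': 5000, 'epochs_refinement': 1000}
params = get_params_tf(training_data, hyper_params)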
Example 4
import numpy as np
import tensorflow as tf

from example_pendulum import get_pendulum_data  # assumed module layout
from sindy_utils import library_size  # assumed helper module
from training import train_network

# SET UP PARAMETERS
params = {}

# generate training, validation, testing data
training_data = get_pendulum_data(100)
validation_data = get_pendulum_data(10)

params['N'] = training_data['x'].shape[-1]
params['d'] = 1
params['model_order'] = 2
params['poly_order'] = 3
params['include_sine'] = True
params['l'] = library_size(2 * params['d'], params['poly_order'],
                           params['include_sine'], True)

# set up sequential thresholding
params['sequential_thresholding'] = True
params['coefficient_threshold'] = 0.1
params['threshold_frequency'] = 500
params['coefficient_mask'] = np.ones((params['l'], params['d']))
params['coefficient_initialization'] = 'constant'

# define loss weights
params['loss_weight_decoder'] = 1.0
params['loss_weight_sindy_x'] = 5e-4
params['loss_weight_sindy_z'] = 5e-5
params['loss_weight_sindy_regularization'] = 1e-5

params['activation'] = 'sigmoid'
Example 5
import numpy as np
import tensorflow as tf

from example_lorenz import get_lorenz_data  # assumed module layout
from sindy_utils import library_size  # assumed helper module
from training import train_network

# generate training, validation, testing data
noise_strength = 1e-6
training_data = get_lorenz_data(1024, noise_strength=noise_strength)
validation_data = get_lorenz_data(20, noise_strength=noise_strength)

params = {}

params['input_dim'] = 128
params['latent_dim'] = 3
params['model_order'] = 1
params['poly_order'] = 3
params['include_sine'] = False
params['library_dim'] = library_size(params['latent_dim'], params['poly_order'], params['include_sine'], True)

# sequential thresholding parameters
params['sequential_thresholding'] = True
params['coefficient_threshold'] = 0.1
params['threshold_frequency'] = 500
params['coefficient_mask'] = np.ones((params['library_dim'], params['latent_dim']))
params['coefficient_initialization'] = 'constant'

# loss function weighting
params['loss_weight_decoder'] = 1.0
params['loss_weight_sindy_z'] = 0.0
params['loss_weight_sindy_x'] = 1e-4
params['loss_weight_sindy_regularization'] = 1e-5

params['activation'] = 'sigmoid'
Example 6
import numpy as np
import tensorflow as tf

from example_lorenz import get_lorenz_data  # assumed module layout
from sindy_utils import library_size  # assumed helper module
from training import train_network

# SET UP PARAMETERS
params = {}

# generate training, validation, testing data
training_data = get_lorenz_data(2048)
validation_data = get_lorenz_data(20)

params['N'] = 128
params['d'] = 3
params['model_order'] = 1
params['poly_order'] = 3
params['include_sine'] = False
params['l'] = library_size(params['d'], params['poly_order'], False, True)

# set up sequential thresholding
params['sequential_thresholding'] = True
params['coefficient_threshold'] = 0.1
params['threshold_frequency'] = 500
params['coefficient_mask'] = np.ones((params['l'], params['d']))
params['coefficient_initialization'] = 'constant'

# define loss weights
params['loss_weight_decoder'] = 1.0
params['loss_weight_sindy_z'] = 0.0
params['loss_weight_sindy_x'] = 1e-4
params['loss_weight_sindy_regularization'] = 1e-5

params['activation'] = 'sigmoid'
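Examples 4 through 6 stop after the loss weights; a full run would set the remaining entries (widths, learning rate, epoch counts, as in Example 3) and then invoke the imported trainer. The call below is an assumption based on the import, not code from the original snippets:

results = train_network(training_data, validation_data, params)  # assumed signature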
Example 7
    # Requires numpy as np, pandas as pd, and a library_size helper at module scope.
    def create_data_frame(self, hyper_params_grid):
        """Take a dictionary of hyperparameters. Every entry can be a list or a scalar.
        All lists must have the same length. hyper_params_fix holds the default values;
        they can be overridden through hyper_params_grid, and any hyperparameter that
        is not overridden keeps its default.

        Args:
            hyper_params_grid (dict): hyperparameters to use.

        Returns:
            pandas.DataFrame: hyperparameters for the different models.
        """

        hyper_params_fix = {
            'stop_criterion': ['best_score'],
            'lr': [1e-3],
            'alpha': [0.01],
            'epochs': [0],
            'epochs_refinement': [0],
            'model_name': ['IV'],
            'steps_inner': [20],
            'batch_size': [1000],
            'z_latent': [3],
            'num_train_examples': [50],
            'num_val_examples': [50],
            'eta1': [1e-4],
            'eta2': [0],
            'eta3': [1e-5],
            'poly_order': [3],
            'model_eval': [500],
            'step_sample': [10],
            'include_sine': [False],
            'shuffle': [True],
            'threshold': [0.1],
            'threshold_freq': [500],
            'num_simulations': [20],
            'activate_thresholding': [True],
            'refinement': [False],
            'stop_best_score': [6],
            'training_set': [0],
            'train_num_simulations': [20]
        }
        hyper_params_fix['library_dim'] = [
            library_size(hyper_params_fix['z_latent'][0],
                         hyper_params_fix['poly_order'][0],
                         hyper_params_fix['include_sine'][0])
        ]
        n = 0
        # the number of models is the length of the longest grid entry
        for key in hyper_params_grid.keys():
            hyper_params_fix[key] = hyper_params_grid[key]
            if len(hyper_params_fix[key]) > n:
                n = len(hyper_params_fix[key])
        # broadcast every remaining default (length-1 entry) to one value per model
        for key in hyper_params_fix.keys():
            if len(hyper_params_fix[key]) != n:
                hyper_params_fix[key] = np.repeat(hyper_params_fix[key], n)
        df = pd.DataFrame(hyper_params_fix)
        return df
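Hypothetical usage, assuming an instance of the enclosing (unnamed) class called searcher: a three-model sweep over the SINDy threshold, with all other hyperparameters left at their defaults.

df = searcher.create_data_frame({'threshold': [0.05, 0.1, 0.2]})
print(df[['threshold', 'lr', 'batch_size']])  # three rows, one per model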
Example 8
import torch.nn as nn
from torch.utils.data import DataLoader

# get_rd_data, RDDataset, CNNAutoEncoder, evaluate, and library_size are
# assumed to come from the surrounding reaction-diffusion project code.

POLYORDER = 2
INCLUDE_SIN = False
LATENT_DIM = 2

device = 'cuda:0'

# training / validation / test splits of the reaction-diffusion data
training, val, testing = get_rd_data(random=True)
trainset = RDDataset(training)
testset = RDDataset(testing)

trainloader = DataLoader(trainset, batch_size=8, shuffle=True, num_workers=0)
testloader = DataLoader(testset, batch_size=8, shuffle=False, num_workers=0)

model = CNNAutoEncoder(latent_dim=LATENT_DIM)
model.to(device)

# linear SINDy layer mapping library terms to latent-space derivatives
library_n = library_size(LATENT_DIM, POLYORDER,
                         use_sine=INCLUDE_SIN,
                         include_constant=True)
sindy_model = nn.Linear(library_n, LATENT_DIM, bias=False)
sindy_model.to(device)

evaluate(model, sindy_model, trainloader, train=True, device=device)