"""Hyperparameters"""
# NOTE(review): LOG_DIR is a user-specific absolute path — parameterize for portability.
LOG_DIR = "/Users/nhanitvn/Personal/Learning/DeepLearning/AE_ts/log_tb"

config = {}  # Put all configuration information into the dict
config['num_layers'] = 2  # number of layers of stacked RNN's
config['hidden_size'] = 90  # memory cells in a layer
config['max_grad_norm'] = 5  # maximum gradient norm during training
config['batch_size'] = batch_size = 64
config['learning_rate'] = .005
config['crd'] = 1  # Hyperparameter for future generalization
config['num_l'] = 20  # number of units in the latent space

plot_every = 10  # after _plot_every_ GD steps, there's console output
max_iterations = 1000  # maximum number of iterations
dropout = 0.8  # Dropout rate

"""Load the data"""
# BUG FIX: `direc` was used below but never defined in this variant (NameError).
# Default to the UCR archive location used by the sibling variants of this
# script; override as needed for the local environment.
direc = './UCR_TS_Archive_2015'
X_train, X_val, y_train, y_val = open_data(direc)
N = X_train.shape[0]      # number of training observations
Nval = X_val.shape[0]     # number of validation observations
D = X_train.shape[1]      # dimensions per observation
config['sl'] = sl = D     # sequence length
print('We have %s observations with %s dimensions' % (N, D))

# Organize the classes: shift labels so the smallest class id becomes 0.
num_classes = len(np.unique(y_train))
base = np.min(y_train)  # Check if data is 0-based
if base != 0:
    y_train -= base
    y_val -= base

# Plot data
# and save high quality
plt.savefig('data_examples.eps', format='eps', dpi=1000)
# Hyperparameters and data loading for the time-series autoencoder experiment.
config = dict()  # Put all configuration information into the dict
config['num_layers'] = 2  # number of layers of stacked RNN's
config['hidden_size'] = 90  # memory cells in a layer
config['max_grad_norm'] = 5  # maximum gradient norm during training
config['batch_size'] = batch_size = 64
config['learning_rate'] = .005
config['crd'] = 1  # Hyperparameter for future generalization
config['num_l'] = 20  # number of units in the latent space

plot_every = 100  # after _plot_every_ GD steps, there's console output
max_iterations = 1000  # maximum number of iterations
dropout = 0.8  # Dropout rate

# Load the data
# NOTE(review): hard-coded, user-specific dataset path — confirm before running elsewhere.
X_train, X_val, y_train, y_val = open_data(
    '/home/rob/Dropbox/ml_projects/LSTM/UCR_TS_Archive_2015')
N = X_train.shape[0]      # training set size
Nval = X_val.shape[0]     # validation set size
D = X_train.shape[1]      # dimensions per observation
config['sl'] = sl = D     # sequence length
print('We have %s observations with %s dimensions' % (N, D))

# Organize the classes: labels are shifted so the smallest id maps to 0.
num_classes = len(np.unique(y_train))
base = np.min(y_train)  # Check if data is 0-based
if base != 0:
    y_train -= base
    y_val -= base

# Plot data
# and save high quality
plt.savefig('data_examples.eps', format='eps', dpi=1000)
# Paths (relative to the working directory).
direc = './'
LOG_DIR = './'

# Hyperparameters.
config = {}  # Put all configuration information into the dict
config['num_layers'] = 2  # number of layers of stacked RNN's
config['hidden_size'] = 90  # memory cells in a layer
config['max_grad_norm'] = 5  # maximum gradient norm during training
config['batch_size'] = batch_size = 64
config['learning_rate'] = .005
config['crd'] = 1  # Hyperparameter for future generalization
config['num_l'] = 1  # number of units in the latent space

plot_every = 100  # after _plot_every_ GD steps, there's console output
max_iterations = 10000  # maximum number of iterations
dropout = 0.8  # Dropout rate

"""Load the data"""
X_train, X_val, y_train, y_val = open_data('./UCR_TS_Archive_2015')
# Split off the last column of each sequence into its own array
# (presumably a one-step-ahead target — TODO confirm against the model code).
X_train, X_train_out = X_train[:, :-1], X_train[:, -1]
X_val, X_val_out = X_val[:, :-1], X_val[:, -1]
N = X_train.shape[0]      # training set size
Nval = X_val.shape[0]     # validation set size
D = X_train.shape[1]      # dimensions per observation (after the split above)
config['sl'] = sl = D     # sequence length
print('We have %s observations with %s dimensions' % (N, D))

# Organize the classes: shift labels so the smallest class id becomes 0.
num_classes = len(np.unique(y_train))
base = np.min(y_train)  # Check if data is 0-based
if base != 0:
    y_train -= base
    y_val -= base
LOG_DIR = "./Saved_Model"  # Directory for the logging

# Hyperparameters.
# NOTE(review): unlike the sibling variants, this one sets no config['crd'] —
# verify Model(config) does not require it.
config = dict()  # Put all configuration information into the dict
config['num_layers'] = 2  # number of layers of stacked RNN's
config['hidden_size'] = 90  # memory cells in a layer
config['max_grad_norm'] = 0.5  # maximum gradient norm during training
config['batch_size'] = batch_size = 64
config['learning_rate'] = .005
config['num_l'] = 20  # number of units in the latent space

plot_every = 100  # after _plot_every_ GD steps, there's console output
max_iterations = 100  # maximum number of iterations
dropout = 0.8  # Dropout rate

# Load the data (unlabeled variant: no y arrays here).
X_train, X_val = open_data('./Data/')
N = X_train.shape[0]    # nbr of element in train db
Nval = X_val.shape[0]   # nbr of element in val db
D = X_train.shape[1]    # nbr of columns in train db
config['sl'] = sl = D   # sequence length
print('We have %s observations with %s dimensions' % (N, D))

# Organize the classes
num_classes = 15

"""Training time!"""
model = Model(config)
sess = tf.Session()
# One column per console-output step; two rows of metrics collected during
# training (presumably train/val performance — confirm against the training loop).
perf_collect = np.zeros((2, int(np.floor(max_iterations / plot_every))))