def main(config):
    """Build and return a VRAE with hard-coded input dimensions.

    Reads all hyperparameters from ``config`` and constructs the model for a
    flattened 32x32 input with 3 features per time step.

    Args:
        config: namespace/object exposing the VRAE hyperparameters as
            attributes (hidden_size, hidden_layer_depth, latent_length,
            batch_size, learning_rate, n_epochs, dropout_rate, optimizer,
            cuda, print_every, clip, max_grad_norm, loss, block, dload).

    Returns:
        The constructed (untrained) VRAE instance.
    """
    # NOTE(review): the original body also defined a dead `class Net(nn.Module)`
    # inside this function — never instantiated, no forward(), unreachable by
    # callers (only `vrae` is returned) — removed as dead code.
    # Hard-coded dimensions: 32*32 time steps, 3 features per step.
    # (A later main() in this file derives these from X_train instead.)
    sequence_length = 32 * 32
    number_of_features = 3

    vrae = VRAE(
        sequence_length=sequence_length,
        number_of_features=number_of_features,
        hidden_size=config.hidden_size,
        hidden_layer_depth=config.hidden_layer_depth,
        latent_length=config.latent_length,
        batch_size=config.batch_size,
        learning_rate=config.learning_rate,
        n_epochs=config.n_epochs,
        dropout_rate=config.dropout_rate,
        optimizer=config.optimizer,        # options: ADAM, SGD
        cuda=config.cuda,                  # options: True, False
        print_every=config.print_every,
        clip=config.clip,                  # options: True, False
        max_grad_norm=config.max_grad_norm,
        loss=config.loss,                  # options: SmoothL1Loss, MSELoss
        block=config.block,                # options: LSTM, GRU
        dload=config.dload,
    )
    return vrae
def define_vrae_model(trial, sequence_length, number_of_features):
    """Sample VRAE hyperparameters from an Optuna trial and build the model.

    Args:
        trial: Optuna trial used to sample the search space.
        sequence_length: number of time steps per input sequence.
        number_of_features: number of features per time step.

    Returns:
        The constructed (untrained) VRAE instance.
    """
    # Searched hyperparameters.
    hidden_layer_depth = trial.suggest_int("n_hidden_layers", 1, 2)
    hidden_size = trial.suggest_int("hidden_size", 30, 60, step=10)
    latent_length = trial.suggest_int("latent_length", 7, 21, step=7)
    batch_size = trial.suggest_int("batch_size", 32, 64, step=32)
    learning_rate = trial.suggest_float("learning_rate", 0.0001, 0.0005, step=0.0002)
    n_epochs = trial.suggest_int("n_epochs", 800, 1400, step=200)
    # BUG FIX: this previously sampled under the duplicate name
    # "learning_rate", so Optuna returned the learning-rate sample here
    # instead of an independent dropout-rate parameter.
    dropout_rate = trial.suggest_float("dropout_rate", 0.1, 0.2, step=0.1)
    optimizer = trial.suggest_categorical("optimizer", ["Adam", "SGD"])
    loss = trial.suggest_categorical("loss", ["SmoothL1Loss", "MSELoss"])

    # Fixed (non-searched) settings.
    block = "LSTM"
    dload = './ECG200_model_dir/'
    print_every = 30
    cuda = True
    clip = True          # enable gradient clipping
    max_grad_norm = 5

    vrae = VRAE(
        sequence_length=sequence_length,
        number_of_features=number_of_features,
        hidden_size=hidden_size,
        hidden_layer_depth=hidden_layer_depth,
        latent_length=latent_length,
        batch_size=batch_size,
        learning_rate=learning_rate,
        n_epochs=n_epochs,
        dropout_rate=dropout_rate,
        optimizer=optimizer,
        cuda=cuda,
        print_every=print_every,
        clip=clip,
        max_grad_norm=max_grad_norm,
        loss=loss,
        block=block,
        dload=dload,
    )
    return vrae
def main(config):
    """Build a VRAE sized to the globally-loaded training data.

    NOTE(review): this redefines the earlier ``main`` in the file; the later
    definition wins at import time. Assumes a module-level ``X_train`` array
    of shape (N, sequence_length, number_of_features) — TODO confirm.

    Args:
        config: namespace/object exposing the VRAE hyperparameters as
            attributes.

    Returns:
        The constructed (untrained) VRAE instance.
    """
    # Derive model input dimensions from the training data itself.
    model_kwargs = dict(
        sequence_length=X_train.shape[1],
        number_of_features=X_train.shape[2],
        hidden_size=config.hidden_size,
        hidden_layer_depth=config.hidden_layer_depth,
        latent_length=config.latent_length,
        batch_size=config.batch_size,
        learning_rate=config.learning_rate,
        n_epochs=config.n_epochs,
        dropout_rate=config.dropout_rate,
        optimizer=config.optimizer,        # options: ADAM, SGD
        cuda=config.cuda,                  # options: True, False
        print_every=config.print_every,
        clip=config.clip,                  # options: True, False
        max_grad_norm=config.max_grad_norm,
        loss=config.loss,                  # options: SmoothL1Loss, MSELoss
        block=config.block,                # options: LSTM, GRU
        dload=config.dload,
    )
    return VRAE(**model_kwargs)
# --- Script-level train/encode flow (final loop is truncated in this view) ---
# NOTE(review): relies on X_train, X_val, y_train, y_val, base, and the VRAE
# hyperparameter variables (hidden_size, ..., dload) being defined earlier in
# the file — confirm against the full script.

# Shift labels by `base` (presumably so classes start at 0 — TODO confirm).
y_train -= base
y_val -= base

# Wrap the arrays as unlabeled TensorDatasets (only X is passed; the VRAE
# training here does not consume the labels).
train_dataset = TensorDataset(torch.from_numpy(X_train))
test_dataset = TensorDataset(torch.from_numpy(X_val))

# Derive model input dimensions from the training data.
sequence_length = X_train.shape[1]
number_of_features = X_train.shape[2]

vrae = VRAE(sequence_length=sequence_length,
            number_of_features = number_of_features,
            hidden_size = hidden_size,
            hidden_layer_depth = hidden_layer_depth,
            latent_length = latent_length,
            batch_size = batch_size,
            learning_rate = learning_rate,
            n_epochs = n_epochs,
            dropout_rate = dropout_rate,
            optimizer = optimizer,
            cuda = cuda,
            print_every=print_every,
            clip=clip,
            max_grad_norm=max_grad_norm,
            loss = loss,
            block = block,
            dload = dload)

# Train without intermediate checkpointing, then save the final weights.
vrae.fit(train_dataset, save = False)
vrae.save('vraetc110.pth')

#Traversal Plot
# Encode the validation set into latent vectors.
z_run = vrae.transform(test_dataset)

# NOTE(review): loop body lies beyond this chunk — truncated here.
for i in range(20):