Example #1
def tune(n_times=25):
    # Random-search tuning loop: sample a hyperparameter setting, then train with it.
    for i in range(1, n_times + 1):
        print("Running tuning iteration number {}".format(i))
        hyperparameter_setting = hyperparameters(batch_size=(10, 100),
                                                 num_units=(16, 150),
                                                 embedding_size=(40, 100),
                                                 learning_rate=(.00001, 0.1))
        hyperparameter_setting.batch_size = sample_randomly(
            hyperparameter_setting.batch_size,
            use_log_scale=False,
            sample_int=True,
            from_list=False)
        hyperparameter_setting.num_units = sample_randomly(
            hyperparameter_setting.num_units,
            use_log_scale=False,
            sample_int=True,
            from_list=False)
        hyperparameter_setting.embedding_size = sample_randomly(
            hyperparameter_setting.embedding_size,
            use_log_scale=False,
            sample_int=True,
            from_list=False)
        hyperparameter_setting.learning_rate = sample_randomly(
            hyperparameter_setting.learning_rate,
            use_log_scale=True,
            sample_int=False,
            from_list=False)
        print("The configuration is ", hyperparameter_setting.__dict__)
        train(resume_training=False,
              total_epochs=8,
              test_after_n_iter=10,
              disp_loss_after_n_iter=10,
              merge_summary_after_n_iter=10,
              hyperparameters=hyperparameter_setting)
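
The helpers hyperparameters, sample_randomly, and train used above are project-specific functions that are not part of this listing. As a rough illustration of the sampling step, a minimal sample_randomly-style helper might look like the sketch below; it assumes the search space is a (low, high) tuple or an explicit list, and samples learning-rate-like quantities uniformly in log space. The real helper may differ.

import math
import random

def sample_randomly(space, use_log_scale=False, sample_int=False, from_list=False):
    """Sketch of the sampling helper assumed by the example above."""
    if from_list:
        # discrete choice from an explicit list of candidate values
        return random.choice(space)
    low, high = space
    if use_log_scale:
        # sample uniformly in log space, e.g. for learning rates spanning decades
        value = math.exp(random.uniform(math.log(low), math.log(high)))
    else:
        value = random.uniform(low, high)
    return int(round(value)) if sample_int else value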
Example #2
from os.path import join, expanduser

import torch
from torch.backends import cudnn

import tensorboard_logger
from tensorboard_logger import *
from hyperparameters import hyperparameters

vox_dir = 'vox'

global_step = 0
global_epoch = 0
use_cuda = torch.cuda.is_available()
if use_cuda:
    cudnn.benchmark = False
use_multigpu = None

hparams = hyperparameters()
print(hparams)
fs = hparams.sample_rate

use_assistant = 1
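
# Note: hyperparameters() above comes from the project's own `hyperparameters`
# module, which is not reproduced in this listing. A minimal stand-in, assuming
# it simply stores keyword arguments as attributes and exposes a default
# sample_rate, might look like this (sketch only; names and defaults other than
# those used in these examples are assumptions):
#
#   class hyperparameters:
#       def __init__(self, sample_rate=16000, **kwargs):
#           self.sample_rate = sample_rate          # assumed default
#           for name, value in kwargs.items():      # e.g. batch_size=(10, 100)
#               setattr(self, name, value)
#
#       def __repr__(self):
#           return "hyperparameters({})".format(self.__dict__)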



def train(model, train_loader, val_loader, optimizer,
          init_lr=0.002,
          checkpoint_dir=None, checkpoint_interval=None, nepochs=None,
          clip_thresh=1.0, assistant=None):
    model.train()  # switch the model to training mode
    if use_cuda:
        model = model.cuda()  # move the model to GPU when one is available
    linear_dim = model.linear_dim  # dimensionality of the model's linear output