Code example #1
0
    def __init__(self, config: WaveNetConfig):
        """Build an ensemble of six WaveNets: three that read the input
        forwards and three that read it backwards.

        Args:
            config: WaveNetConfig carrying the architecture hyperparameters
                (channel sizes, stack/layer counts, regularization, etc.).
        """
        super().__init__(config)
        self.models = nn.ModuleList()

        # Deduplicated: the original copy-pasted this loop twice, differing
        # only in the `backwards` flag. Insertion order is preserved —
        # three forward models first, then three backward models.
        for backwards in (False, True):
            for _ in range(3):
                self.models.append(
                    WaveNet(config.input_channels,
                            config.residual_channels,
                            config.out_channels,
                            config.stacks,
                            config.layers_per_stack,
                            config.total_samples,
                            config.l2_lambda,
                            config.bias,
                            config.dropout,
                            config.bayesian,
                            backwards=backwards))

        self.init_weights()
Code example #2
0
File: trainer.py  Project: whl33886/WaveNet-gluon
 def build_model(self):
     """Construct the WaveNet, initialize its parameters on the configured
     context, and set up the Adam trainer and the softmax cross-entropy
     loss used during training.
     """
     self.net = WaveNet(
         mu=self.mu,
         n_residue=self.n_residue,
         n_skip=self.n_skip,
         dilation_depth=self.dilation_depth,
         n_repeat=self.n_repeat,
     )
     # Initialize all network parameters on the target device context.
     self.net.collect_params().initialize(ctx=self.ctx)
     # Adam optimizer with a fixed learning rate of 0.01.
     self.trainer = gluon.Trainer(
         self.net.collect_params(),
         optimizer='adam',
         optimizer_params={'learning_rate': 0.01},
     )
     self.loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
Code example #3
0
File: demo_train.py  Project: bongsang/fast-forecast
from time import time
from utils import make_batch
from models import WaveNet, Generator
from IPython.display import Audio

# Load a single training batch from the example recording.
inputs, targets = make_batch('./voice.wav')

num_time_samples = inputs.shape[1]
num_channels = 1
gpu_fraction = 1

model = WaveNet(
    num_time_samples=num_time_samples,
    num_channels=num_channels,
    gpu_fraction=gpu_fraction,
)

# Preview the raw waveform inside the notebook (44.1 kHz playback).
Audio(inputs.reshape(inputs.shape[1]), rate=44100)

# Time the training run.
start = time()
model.train(inputs, targets)
elapsed = time() - start
print('Training time = {} seconds'.format(elapsed))

# generator = Generator(model)
# input_ = inputs[:, 0:1, 0]
#
# tic = time()
# predictions = generator.run(input_, 32000)
# toc = time()
# print('Generating time = {} seconds'.format(toc-tic))
Code example #4
0
File: run_wavenet.py  Project: leetemil/thesis_code
    # DataLoaders yield variable-length protein sequences; training data is
    # shuffled and optionally reweighted per sequence.
    train_loader = get_variable_length_protein_dataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        use_weights=args.use_weights)
    val_loader = get_variable_length_protein_dataLoader(
        val_data, batch_size=args.batch_size, use_weights=args.use_weights)

    print("Data loaded!")

    # Build the WaveNet; the token vocabulary size (NUM_TOKENS) serves as
    # both the input and output channel count.
    model = WaveNet(input_channels=NUM_TOKENS,
                    residual_channels=args.residual_channels,
                    out_channels=NUM_TOKENS,
                    stacks=args.stacks,
                    layers_per_stack=args.layers,
                    total_samples=train_length,
                    l2_lambda=args.L2,
                    bias=args.bias,
                    dropout=args.dropout,
                    use_bayesian=args.bayesian,
                    backwards=args.backwards).to(device)

    print(model.summary())
    # NOTE(review): L2 regularization appears to be handled inside the model
    # via l2_lambda rather than by the optimizer's weight_decay (see the
    # commented-out argument) — confirm against the WaveNet implementation.
    optimizer = optim.Adam(model.parameters(),
                           lr=args.learning_rate)  #, weight_decay = args.L2)

    # Optional cosine annealing with warm restarts; the restart period
    # doubles after each cycle (T_mult=2).
    if args.anneal_learning_rates:
        T_0 = 1  # Emil: I just picked a small number, no clue if any good
        T_mult = 2
        scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
    else:
Code example #5
0
File: wave_train.py  Project: DuanYDYD/fyp
    # Prediction horizon in days.
    Y_DAYS = 3
    NUM_WORKERS = 0
    LR = 0.0001  #5

    # model parameters
    layer_size = 2
    stack_size = 29
    in_channels = 6  # 6 features
    res_channels = 64

    # paths
    PATHS = crypto_data_paths()
    # Checkpoint path encodes the hyperparameters and the random seed so
    # runs with different settings do not overwrite each other.
    MODEL_PATH = 'weights/wave/crypto/3days/{l}_{s}_{r}_{y}_{seed}'.format(
        l=layer_size, s=stack_size, r=res_channels, y=Y_DAYS, seed=SEED)

    # Build the network and resume from a previously saved checkpoint.
    net = WaveNet(layer_size, stack_size, in_channels, res_channels, Y_DAYS,
                  N_LAGS)
    net.load_state_dict(torch.load(MODEL_PATH))

    # load the dataset
    X_train, y_train, X_test, y_test = create_input_data(PATHS, N_LAGS, Y_DAYS)
    train_dataset = StockDataset(X_train, y_train)
    train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE)
    test_dataset = StockDataset(X_test, y_test)
    test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE)

    # Continue training, then evaluate the saved model on the test split.
    train(net, N_EPOCHS, train_loader, LR, MODEL_PATH)
    eval(net, MODEL_PATH, test_loader)

    # Past experiment results kept for reference:
    #The MSE is  0.00040897312399241256 28 32 se123 1
    #The MSE is  0.0007783590546027762 28 32 se111 1
    #The MSE is  0.0003596830506049608 se 333
Code example #6
0
    val_len = len(validation_data)
    # One "epoch" is defined as nine validation-set lengths of training
    # sequences (a 90/10 split heuristic).
    train_seqs_per_epoch = val_len * 9

    train_loader = get_variable_length_protein_dataLoader(train_data, batch_size = args.batch_size)
    val_loader = get_variable_length_protein_dataLoader(validation_data, batch_size = args.batch_size)
    print("Data loaded!")

    total_samples = 39_069_211 # magic number

    # Fixed: the original mixed tab- and space-indented continuation lines
    # (`bias = ...` and the closing paren); normalized to spaces throughout.
    model = WaveNet(
        input_channels = NUM_TOKENS,
        residual_channels = args.residual_channels,
        out_channels = NUM_TOKENS,
        stacks = args.stacks,
        layers_per_stack = args.layers,
        total_samples = total_samples,
        l2_lambda = args.L2,
        bias = args.bias,
        dropout = args.dropout,
        use_bayesian = args.bayesian,
        backwards = args.backwards,
        multi_gpu = True
    )
    print(model.summary())

    # Wrap for data parallelism before moving to the target device.
    if args.multi_gpu:
        model = nn.DataParallel(model)

    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr = args.learning_rate)
    if args.anneal_learning_rates:
Code example #7
0
def train(args):
    """Train a WaveNet on mu-law quantized audio clips.

    Runs an infinite training loop (stop externally): evaluates on the
    validation set every 1000 iterations and checkpoints model + optimizer
    state every 10000 iterations.

    Args:
        args: parsed command-line namespace (dataset, dataset_dir,
            workspace, filename, batch_size, condition, cuda).
    """

    # Arguments & parameters
    dataset = args.dataset
    dataset_dir = args.dataset_dir
    workspace = args.workspace
    filename = args.filename
    batch_size = args.batch_size  # Use an audio clip as a mini-batch. Must
    # be 1 if audio clips have different lengths.
    condition = args.condition
    cuda = args.cuda

    quantize_bins = config.quantize_bins
    dilations = config.dilations

    # Paths — checkpoints are namespaced by dataset, script filename, and
    # whether global conditioning is used.
    models_dir = os.path.join(workspace, 'models',
                              'dataset={}'.format(dataset), filename,
                              'condition={}'.format(condition))
    create_folder(models_dir)

    # Data generators
    Dataset = get_dataset(dataset)
    train_dataset = Dataset(dataset_dir, data_type='train')
    validate_dataset = Dataset(dataset_dir, data_type='validate')

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               pin_memory=True)

    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  num_workers=1,
                                                  pin_memory=True)

    # Model
    model = WaveNet(
        dilations,
        residual_channels=config.residual_channels,
        dilation_channels=config.dilation_channels,
        skip_channels=config.skip_channels,
        quantize_bins=config.quantize_bins,
        global_condition_channels=config.global_condition_channels,
        global_condition_cardinality=Dataset.global_condition_cardinality,
        use_cuda=cuda)

    if cuda:
        model.cuda()

    # Optimizer
    optimizer = optim.Adam(model.parameters(),
                           lr=1e-3,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           weight_decay=0.)

    train_bgn_time = time.time()
    iteration = 0

    # Infinite loop: the DataLoader is re-iterated each pass.
    while True:
        for (batch_x, global_condition) in train_loader:
            '''batch_x: (batch_size, seq_len)
            global_condition: (batch_size,)
            '''

            print('iteration: {}, input size: {}'.format(
                iteration, batch_x.shape))

            # Evaluate on the validation set every 1000 iterations and log
            # training/validation wall-clock times.
            if iteration % 1000 == 0:
                train_fin_time = time.time()
                evaluate_bgn_time = time.time()
                loss = evaluate(model, validate_loader, condition, cuda)

                print('-----------------')
                logging.info(
                    'iteration: {}, loss: {:.3f}, train_time: {:.3f}, '
                    'validate time: {:.3f} s'.format(
                        iteration, loss, train_fin_time - train_bgn_time,
                        time.time() - evaluate_bgn_time))

                train_bgn_time = time.time()

            # Save a full checkpoint (model + optimizer) every 10000
            # iterations, including iteration 0.
            if iteration % 10000 == 0:
                save_out_dict = {
                    'iteration': iteration,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                }

                save_out_path = os.path.join(
                    models_dir, 'md_{}_iters.tar'.format(iteration))

                torch.save(save_out_dict, save_out_path)
                logging.info('Save model to {}'.format(save_out_path))

            # Move data to GPU (conditioning vector only when enabled).
            if condition:
                global_condition = move_data_to_gpu(global_condition, cuda)
            else:
                global_condition = None

            batch_x = move_data_to_gpu(batch_x, cuda)

            # Prepare input and target data: input drops the last sample;
            # the target is the tail the model can actually predict after
            # its receptive field is consumed.
            batch_input = batch_x[:, 0:-1]
            output_width = batch_input.shape[-1] - model.receptive_field + 1
            batch_target = batch_x[:, -output_width:]

            # Forward
            model.train()
            batch_output = model(batch_input, global_condition)
            loss = _loss_func(batch_output, batch_target)

            # Backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print('loss: {:.3f}'.format(loss.data.cpu().numpy()))

            iteration += 1
Code example #8
0
def generate(args):
    """Sample audio from a trained WaveNet checkpoint and write wav files.

    Loads the checkpoint selected by ``args.iteration``, seeds a
    receptive-field-sized buffer with one random sample per batch item,
    generates ``args.samples`` new samples (fast or slow path), then
    inverts the mu-law quantization and saves one wav per batch item.

    Args:
        args: parsed command-line namespace.
    """

    np.random.seed(1234)

    # Arguments & parameters
    dataset = args.dataset
    workspace = args.workspace
    iteration = args.iteration
    samples = args.samples
    batch_size = args.batch_size
    global_condition = args.global_condition
    fast_generate = args.fast_generate
    cuda = args.cuda
    filename = args.filename

    quantize_bins = config.quantize_bins
    dilations = config.dilations

    # -1 means unconditional generation; otherwise broadcast the single
    # condition id across the whole batch.
    if global_condition == -1:
        condition = False
        global_condition = None
    else:
        condition = True
        global_condition = torch.LongTensor([global_condition] * batch_size)
        # global_condition = torch.LongTensor(np.arange(batch_size))
        if cuda:
            global_condition = global_condition.cuda()

    # Paths — must mirror the layout used by train() when saving.
    model_path = os.path.join(workspace, 'models',
                              'dataset={}'.format(dataset), filename,
                              'condition={}'.format(condition),
                              'md_{}_iters.tar'.format(iteration))

    generated_wavs_dir = os.path.join(workspace, 'generated_wavs',
                                      'dataset={}'.format(dataset), filename,
                                      'condition={}'.format(condition))

    create_folder(generated_wavs_dir)

    # Load model
    model = WaveNet(dilations,
                    residual_channels=config.residual_channels,
                    dilation_channels=config.dilation_channels,
                    skip_channels=config.skip_channels,
                    quantize_bins=config.quantize_bins,
                    global_condition_channels=config.global_condition_channels,
                    global_condition_cardinality=get_dataset(
                        dataset).global_condition_cardinality,
                    use_cuda=cuda)

    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])

    if cuda:
        model.cuda()

    receptive_field = model.receptive_field

    # Init buffer for generation: an all-zero history with a single random
    # sample in the last position, mu-law companded then quantized to
    # integer bins.
    _mulaw = mu_law.MuLaw(mu=quantize_bins)
    _quantize = mu_law.Quantize(quantize=quantize_bins)

    buffer = np.zeros((batch_size, receptive_field))
    buffer[:, -1] = np.random.uniform(-1, 1, batch_size)
    buffer = _mulaw.transform(buffer)
    buffer = _quantize.transform(buffer)
    buffer = torch.LongTensor(buffer)

    if cuda:
        buffer = buffer.cuda()

    # Generate (timed); both paths run under no_grad in eval mode.
    generate_time = time.time()

    if fast_generate:
        with torch.no_grad():
            model.eval()
            audios = model.fast_generate(buffer=buffer,
                                         samples=samples,
                                         global_condition=global_condition)

    else:
        with torch.no_grad():
            model.eval()
            audios = model.slow_generate(buffer=buffer,
                                         global_condition=global_condition,
                                         samples=samples)

    print('Generate_time: {:.3f} s'.format(time.time() - generate_time))

    audios = audios.data.cpu().numpy()

    # Transform to wave: invert quantization and mu-law companding, then
    # write one wav per batch item.
    for n in range(batch_size):
        audio = audios[n]
        audio = _quantize.inverse_transform(audio)
        audio = _mulaw.inverse_transform(audio)
        audio_path = os.path.join(generated_wavs_dir,
                                  'iter_{}_{}.wav'.format(iteration, n))
        write_audio(audio_path, audio, config.sample_rate)
        print('Generate wav to {}'.format(audio_path))