Example 1
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']
    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, load_best=False)

    data_loader = make_data_loader(model.lookback, params)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        predictor.load_state_dict(state_dict)
        generator = Generator(predictor.model, cuda=params['cuda'])
    else:
        raise FileNotFoundError('There is no valid checkpoint.')
    save_path = '/mnt/IDMT-WORKSPACE/DATA-STORE/xiaoyg/samplernn/generated_audio'
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    for i in range(0, 10):
        y = generator(
            1000,
            160000,
            i,
            data_seed=data_loader(val_split, test_split,
                                  eval=True).dataset).float().numpy()
        for j in range(len(y)):
            path = os.path.join(save_path,
                                '{}_{}.wav'.format(class_label[i], j))
            wav = y[j, :].astype(np.int16)
            wav.resize(wav.size, 1)
            write_wav(path, wav, 16000, 'PCM_16')
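Note that `class_label` is used for the output filenames but is not defined anywhere in this snippet; it is presumably a module-level list of the ten class names being generated. A purely hypothetical stand-in that would make the formatting above work:

# Hypothetical placeholder; the real class names come from the surrounding module.
class_label = ['class_{}'.format(k) for k in range(10)]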
Example 2
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)

    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    samples_path = os.path.join(results_path, 'samples')
    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      checkpoints_path,
                      samples_path,
                      params['n_samples'],
                      params['sample_length'],
                      params['sample_rate'],
                      data_loader(val_split, test_split, eval=True),
                      data_loader(test_split, 1, eval=True),
                      cuda=params['cuda'])

    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
    trainer.run(params['epoch_limit'])
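The three `data_loader(...)` calls split the corpus into contiguous train, validation and test ranges derived from the two fractions. A small sketch of that arithmetic, using illustrative values rather than any particular default configuration:

# Illustrative fractions only.
test_frac, val_frac = 0.25, 0.25
test_split = 1 - test_frac            # 0.75
val_split = test_split - val_frac     # 0.5
print((0, val_split))                 # training range, as in data_loader(0, val_split, eval=False)
print((val_split, test_split))        # validation range
print((test_split, 1))                # test range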
Example 3
def main():
    model = SampleRNN(
        frame_sizes=[16, 4], n_rnn=1, dim=1024, learn_h0=True, q_levels=256
    )
    predictor = Predictor(model).cuda()
    predictor.load_state_dict(torch.load('model.tar'))

    generator = Generator(predictor.model, cuda=True)

    t = time()
    samples = generator(5, 16000)
    print('generated in {}s'.format(time() - t))

    write_wav(
        'sample.wav',
        samples.cpu().float().numpy()[0, :],
        sr=16000,
        norm=True
    )
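This example expects `model.tar` to contain a bare state dict for the `Predictor`, since the result of `torch.load` is passed straight to `load_state_dict`. A checkpoint in that shape could be produced roughly like this (a sketch, assuming `predictor` is the `Predictor(model)` instance from a training run; not code taken from the repository):

import torch
# Save only the parameters so that torch.load('model.tar') yields a state dict.
torch.save(predictor.state_dict(), 'model.tar')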
Example 4
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    # Save samplernn parameters in .json for future audio generation
    import json
    with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
        json.dump(params, fp, sort_keys=True, indent=4)

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)

    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
    else:
        trainer.epochs = 0
        trainer.iterations = 0
        torch.save(predictor,
                   os.path.join(checkpoints_path, "pytorch_model.bin"))
    # else:
    #     print("***** Saving fine-tuned model *****")
    #     output_model_file = os.path.join(params['results_path'], "pytorch_model.bin")
    #     if params['cuda']:
    #         torch.save(predictor, output_model_file)
    #     else:
    #         torch.save(predictor, output_model_file)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path, 'samples'),
                        params['n_samples'], params['sample_length'],
                        params['sample_rate'], params['sampling_temperature']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x': 'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y': True
                        }
                    }))

    init_comet(params, trainer)

    trainer.run(params['epoch_limit'])
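The JSON dump near the top of this example exists so that later generation scripts can rebuild an identical network. A sketch of how those saved parameters might be read back (the results directory is a made-up example; only the constructor keys mirror the call above):

import json
import os

results_path = 'results/my_experiment'  # hypothetical path
with open(os.path.join(results_path, 'sample_rnn_params.json')) as fp:
    saved = json.load(fp)

model = SampleRNN(frame_sizes=saved['frame_sizes'],
                  n_rnn=saved['n_rnn'],
                  dim=saved['dim'],
                  learn_h0=saved['learn_h0'],
                  q_levels=saved['q_levels'],
                  weight_norm=saved['weight_norm'])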
Example 5
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    os.environ['CUDA_VISIBLE_DEVICES'] = params['gpu']

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'],
                      dropout=params['dropout'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(
        torch.optim.Adam(predictor.parameters(), lr=params['lr']))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    criterion = sequence_nll_loss_bits

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, params)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        start_epoch = int(epoch)
        global_step = iteration
        predictor.load_state_dict(state_dict)
    else:
        start_epoch = 0
        global_step = 0

    #writer = SummaryWriter("runs/{}-{}".format(params['dataset'], str(datetime.datetime.now()).split('.')[0].replace(' ', '-')))
    writer = SummaryWriter(
        os.path.join(
            results_path, "{}-{}".format(
                params['dataset'],
                str(datetime.datetime.now()).split('.')[0].replace(' ', '-'))))
    dataset_train = data_loader(0, val_split, eval=False)
    dataset_val = data_loader(val_split, test_split, eval=True)
    dataset_test = data_loader(test_split, 1, eval=True)

    generator = Generator(predictor.model, params['cuda'])
    best_val_loss = float('inf')

    for e in range(start_epoch, int(params['epoch_limit'])):
        for i, data in enumerate(dataset_train):

            batch_inputs = data[:-1]
            batch_target = data[-1]

            def wrap(input):
                if torch.is_tensor(input):
                    input = torch.autograd.Variable(input)
                    if params['cuda']:
                        input = input.cuda()
                return input

            batch_inputs = list(map(wrap, batch_inputs))

            batch_target = torch.autograd.Variable(batch_target)
            if params['cuda']:
                batch_target = batch_target.cuda()

            plugin_data = [None, None]

            def closure():
                batch_output = predictor(*batch_inputs)

                loss = criterion(batch_output, batch_target)
                loss.backward()

                if plugin_data[0] is None:
                    plugin_data[0] = batch_output.data
                    plugin_data[1] = loss.data

                return loss

            optimizer.zero_grad()
            optimizer.step(closure)
            train_loss = plugin_data[1]

            # stats: iteration
            writer.add_scalar('train/train loss', train_loss, global_step)
            print("E:{:03d}-S{:05d}: Loss={}".format(e, i, train_loss))
            global_step += 1

        # validation: per epoch
        predictor.eval()
        with torch.no_grad():
            loss_sum = 0
            n_examples = 0
            for data in dataset_val:
                batch_inputs = data[:-1]
                batch_target = data[-1]
                batch_size = batch_target.size()[0]

                def wrap(input):
                    if torch.is_tensor(input):
                        input = torch.autograd.Variable(input)
                        if params['cuda']:
                            input = input.cuda()
                    return input

                batch_inputs = list(map(wrap, batch_inputs))

                batch_target = torch.autograd.Variable(batch_target)
                if params['cuda']:
                    batch_target = batch_target.cuda()

                batch_output = predictor(*batch_inputs)

                loss_sum += criterion(batch_output,
                                      batch_target).item() * batch_size

                n_examples += batch_size

            val_loss = loss_sum / n_examples
            writer.add_scalar('validation/validation loss', val_loss,
                              global_step)
            print("== Validation Step E:{:03d}: Loss={} ==".format(
                e, val_loss))

        predictor.train()

        # saver: epoch
        last_pattern = 'ep{}-it{}'
        best_pattern = 'best-ep{}-it{}'
        if not params['keep_old_checkpoints']:
            pattern = os.path.join(checkpoints_path,
                                   last_pattern.format('*', '*'))
            for file_name in glob(pattern):
                os.remove(file_name)
        torch.save(
            predictor.state_dict(),
            os.path.join(checkpoints_path, last_pattern.format(e,
                                                               global_step)))

        cur_val_loss = val_loss
        if cur_val_loss < best_val_loss:
            pattern = os.path.join(checkpoints_path,
                                   best_pattern.format('*', '*'))
            for file_name in glob(pattern):
                os.remove(file_name)
            torch.save(
                predictor.state_dict(),
                os.path.join(checkpoints_path,
                             best_pattern.format(e, global_step)))
            best_val_loss = cur_val_loss

        generate_sample(generator, params, writer, global_step, results_path,
                        e)

    # generate final results
    generate_sample(generator, params, None, global_step, results_path, 0)
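The training step above passes a closure to `optimizer.step`; `torch.optim.Adam.step` accepts an optional closure that re-evaluates the model and returns the loss, and the `plugin_data` list is only there to capture the batch output and loss outside the closure. Without the closure, the equivalent per-batch update is the familiar PyTorch pattern (same `predictor`, `criterion`, `optimizer` and batch tensors as above):

optimizer.zero_grad()
batch_output = predictor(*batch_inputs)
loss = criterion(batch_output, batch_target)
loss.backward()
optimizer.step()
train_loss = loss.item()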
Example 6
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path,
                                     'samples'), params['n_samples'],
                        params['sample_length'], params['sample_rate']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x': 'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y': True
                        }
                    }))

    init_comet(params, trainer)

    trainer.run(params['epoch_limit'])
Example 7
def main(exp, dataset, **params):
    params = dict(default_params, exp=exp, dataset=dataset, **params)
    print(params)
    storage_client = None
    bucket = None

    path = os.path.join(params['datasets_path'], params['dataset'])

    if params['bucket']:
        storage_client = storage.Client()
        bucket = Bucket(storage_client, params['bucket'])
        preload_dataset(path, storage_client, bucket)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    (quantize, dequantize) = quantizer(params['q_method'])
    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model, dequantize)
    if params['cuda'] is not False:
        print(params['cuda'])
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(
        torch.optim.Adam(predictor.parameters(), lr=params['learning_rate']))

    data_loader = make_data_loader(path, model.lookback, quantize, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, storage_client,
                                           bucket)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(SchedulerPlugin(params['lr_scheduler_step']))

    def upload(file_path):
        if bucket is None:
            return

        name = file_path.replace(os.path.abspath(os.curdir) + '/', '')
        blob = Blob(name, bucket)
        try:
            blob.upload_from_filename(file_path, timeout=300)
        except Exception as e:
            print(str(e))

    trainer.register_plugin(AbsoluteTimeMonitor())

    samples_path = os.path.join(results_path, 'samples')
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints'], upload))
    trainer.register_plugin(
        GeneratorPlugin(samples_path,
                        params['n_samples'],
                        params['sample_length'],
                        params['sample_rate'],
                        params['q_levels'],
                        dequantize,
                        params['sampling_temperature'],
                        upload=upload))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(
            results_path,
            iteration_fields=[
                'training_loss',
                #('training_loss', 'running_avg'),
                'time'
            ],
            epoch_fields=[
                'training_loss', ('training_loss', 'running_avg'),
                'validation_loss', 'test_loss', 'time'
            ],
            plots={
                'loss': {
                    'x': 'iteration',
                    'ys': [
                        'training_loss',
                        # ('training_loss', 'running_avg'),
                        'validation_loss',
                        'test_loss'
                    ],
                    'log_y': True
                }
            }))

    init_comet(params, trainer, samples_path, params['n_samples'],
               params['sample_rate'])

    trainer.run(params['epoch_limit'])
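The `upload` helper names each bucket object by the file's path relative to the current working directory. A quick illustration of that naming on a POSIX path (the path itself is made up):

import os

cwd = os.path.abspath(os.curdir)
file_path = os.path.join(cwd, 'results', 'exp', 'checkpoints', 'best-ep3-it100')
name = file_path.replace(cwd + '/', '')
print(name)  # results/exp/checkpoints/best-ep3-it100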
Example 8
def main(frame_sizes, **params):

    use_cuda = torch.cuda.is_available()

    params = dict(default_params, frame_sizes=frame_sizes, **params)

    # Redefine parameters listed in the experiment directory and separated with '~'
    for i in params['model'].split('/')[1].split('~'):
        param = i.split(':')
        if param[0] in params:
            params[param[0]] = as_type(param[1], type(params[param[0]]))
    # Define npy file names with maximum and minimum values of de-normalized conditioners
    npy_name_min_max_cond = 'npy_datasets/min_max' + params['norm_ind'] * '_ind' + (not params['norm_ind']) * '_joint' \
                            + params['static_spk'] * '_static' + '.npy'

    # Define npy file name with array of unique speakers in dataset
    npy_name_spk_id = 'npy_datasets/spk_id.npy'

    # Get file names from partition's list
    file_names = open(
        str(params['datasets_path']) + 'generate_cond_gina.list',
        'r').read().splitlines()

    spk_names = open(
        str(params['datasets_path']) + 'generate_spk_gina.list',
        'r').read().splitlines()

    datasets_path = os.path.join(params['datasets_path'], params['cond_set'])

    spk = np.load(npy_name_spk_id)

    if len(spk_names) != len(file_names):
        print('Length of the speaker list does not match '
              'the length of the conditioner file list.')
        quit()

    print('Generating', len(file_names), 'audio files')

    for i in range(len(file_names)):
        print('Generating Audio', i)
        print('Generating...', file_names[i])

        # Load CC conditioner
        c = np.loadtxt(datasets_path + file_names[i] + '.cc')

        # Load LF0 conditioner
        f0file = np.loadtxt(datasets_path + file_names[i] + '.lf0')
        f0, _ = interpolation(f0file, -10000000000)
        f0 = f0.reshape(f0.shape[0], 1)

        # Load FV conditioner
        fvfile = np.loadtxt(datasets_path + file_names[i] + '.gv')
        fv, uv = interpolation(fvfile, 1e3)
        num_fv = fv.shape[0]
        uv = uv.reshape(num_fv, 1)
        fv = fv.reshape(num_fv, 1)

        # Load speaker conditioner
        speaker = np.where(spk == spk_names[i])[0][0]

        cond = np.concatenate((c, f0), axis=1)
        cond = np.concatenate((cond, fv), axis=1)
        cond = np.concatenate((cond, uv), axis=1)

        # Load maximum and minimum of de-normalized conditioners
        min_cond = np.load(npy_name_min_max_cond)[0]
        max_cond = np.load(npy_name_min_max_cond)[1]

        # Normalize conditioners with absolute maximum and minimum for each speaker of training partition
        if params['norm_ind']:
            print(
                'Normalizing conditioners for each speaker of training dataset'
            )
            cond = (cond - min_cond[speaker]) / (max_cond[speaker] -
                                                 min_cond[speaker])
        else:
            print('Normalizing conditioners jointly')
            cond = (cond - min_cond) / (max_cond - min_cond)

        print('Shape cond', cond.shape)
        if params['look_ahead']:
            delayed = np.copy(cond)
            delayed[:-1, :] = delayed[1:, :]
            cond = np.concatenate((cond, delayed), axis=1)
            print('Shape cond after look ahead', cond.shape)

        print(cond.shape)
        seed = params.get('seed')
        init_random_seed(seed, use_cuda)

        spk_dim = len([
            i for i in os.listdir(
                os.path.join(params['datasets_path'], params['cond_set']))
            if os.path.islink(
                os.path.join(params['datasets_path'], params['cond_set']) +
                '/' + i)
        ])

        print('Start Generate SampleRNN')
        model = SampleRNN(frame_sizes=params['frame_sizes'],
                          n_rnn=params['n_rnn'],
                          dim=params['dim'],
                          learn_h0=params['learn_h0'],
                          q_levels=params['q_levels'],
                          ulaw=params['ulaw'],
                          weight_norm=params['weight_norm'],
                          cond_dim=params['cond_dim'] *
                          (1 + params['look_ahead']),
                          spk_dim=spk_dim,
                          qrnn=params['qrnn'])
        print(model)

        if use_cuda:
            model = model.cuda()
            predictor = Predictor(model).cuda()
        else:
            predictor = Predictor(model)

        f_name = params['model']
        model_data = load_model(f_name)

        if model_data is None:
            sys.exit('ERROR: Model not found in ' + str(f_name))
        (state_dict, epoch_index, iteration) = model_data
        print('OK: Read model', f_name, '(epoch:', epoch_index, ')')
        print(state_dict)
        predictor.load_state_dict(state_dict)

        original_name = file_names[i].split('/')[1]
        if original_name == "..":
            original_name = file_names[i].split('/')[3]

        generator = RunGenerator(model=model,
                                 sample_rate=params['sample_rate'],
                                 cuda=use_cuda,
                                 epoch=epoch_index,
                                 cond=cond,
                                 spk_list=spk,
                                 speaker=speaker,
                                 checkpoints_path=f_name,
                                 original_name=original_name)

        generator(params['n_samples'], params['sample_length'], cond, speaker)
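The `look_ahead` branch duplicates the conditioner matrix shifted one frame into the future and concatenates it feature-wise, which is why `cond_dim` is multiplied by `(1 + params['look_ahead'])` in the model constructor. A tiny NumPy illustration of that shift-and-concatenate step:

import numpy as np

cond = np.arange(8, dtype=float).reshape(4, 2)  # 4 frames, 2 conditioner features
delayed = np.copy(cond)
delayed[:-1, :] = delayed[1:, :]                # every frame also sees the next frame's values
cond = np.concatenate((cond, delayed), axis=1)
print(cond.shape)                               # (4, 4): feature dimension doubled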
Example 9
def main(exp, frame_sizes, dataset, **params):
    params = dict(
        default_params,
        exp=exp, frame_sizes=frame_sizes, dataset=dataset,
        **params
    )

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    # Save samplernn parameters in .json for future audio generation
    import json
    with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
        json.dump(params, fp, sort_keys=True, indent=4)

    # Model
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        weight_norm=params['weight_norm'],
        batch_size=params['batch_size']
    )
    print("CUDA num: {}".format(torch.cuda.device_count()))
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    model_cnnseq2sample = CNNSeq2SampleRNN(params).cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    data_loader_test = make_data_loader(model.lookback, params, npz_filename=params['npz_filename_test'])
    # test_split = 1 - params['test_frac']
    # val_split = test_split - params['val_frac']

    trainer = Trainer(
        predictor, model_cnnseq2sample, sequence_nll_loss_bits, optimizer,
        # data_loader(0, val_split, eval=False),
        data_loader(0, 1, eval=False),
        cuda=params['cuda']
    )

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    checkpoint_data_cnnseq2sample = load_last_checkpoint(checkpoints_path, model_type='cnnseq2sample')
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        (state_dict_cnnseq2sample, epoch, iteration) = checkpoint_data_cnnseq2sample
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
        model_cnnseq2sample.load_state_dict(state_dict_cnnseq2sample)

    trainer.register_plugin(TrainingLossMonitor(
        smoothing=params['loss_smoothing']
    ))
    trainer.register_plugin(ValidationPlugin(
        # data_loader(val_split, test_split, eval=True),
        # data_loader_test(0, 1, eval=True)
        data_loader_test(0, params['val_frac'], eval=True),
        data_loader_test(params['val_frac'], 1, eval=True)
        # data_loader(test_split, 1, eval=True)
    ))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(SaverPlugin(
        checkpoints_path, params['keep_old_checkpoints']
    ))
    trainer.register_plugin(GeneratorPlugin(
        os.path.join(results_path, 'samples'), params['n_samples'],
        params['sample_length'], params['sample_rate']
    ))
    trainer.register_plugin(
        Logger([
            'training_loss',
            'validation_loss',
            'test_loss',
            'time'
        ])
    )
    trainer.register_plugin(StatsPlugin(
        results_path,
        iteration_fields=[
            'training_loss',
            ('training_loss', 'running_avg'),
            'time'
        ],
        epoch_fields=[
            'validation_loss',
            'test_loss',
            'time'
        ],
        plots={
            'loss': {
                'x': 'iteration',
                'ys': [
                    'training_loss',
                    ('training_loss', 'running_avg'),
                    'validation_loss',
                    'test_loss',
                ],
                'log_y': True
            }
        }
    ))

    trainer.run(params['epoch_limit'])
Example 10
                    type=int,
                    default=8,
                    help='mini-batch size')
args = parser.parse_args()

if __name__ == '__main__':
    if not os.path.isdir(args.test_result_dir):
        os.makedirs(args.test_result_dir)

    # define the model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    pred_model = Predictor(args).to(device)
    pred_model = nn.DataParallel(pred_model)

    # load checkpoint
    pred_model.load_state_dict(torch.load(args.checkpoint_load_file))
    print('Checkpoint is loaded from ' + args.checkpoint_load_file)

    # prepare dataloader for selected dataset
    if args.dataset == 'movingmnist':
        test_dataset = MovingMNIST(args.test_data_dir,
                                   seq_len=args.short_len + args.out_len,
                                   train=False)
        testloader = DataLoader(test_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=args.workers,
                                drop_last=False)
    elif args.dataset == 'kth':
        test_dataset = KTH(args.test_data_dir,
                           seq_len=args.short_len + args.out_len,
Example 11
                        n_rnn=2,
                        dim=1024,
                        learn_h0=True,
                        q_levels=256,
                        weight_norm=True)

if torch.cuda.is_available():
    srnn_model1 = srnn_model1.cuda()

predictor1 = Predictor(srnn_model1)

if torch.cuda.is_available():
    predictor1 = predictor1.cuda()

if torch.cuda.is_available():
    predictor1.load_state_dict(torch.load(modelpath)['model'])
else:
    predictor1.load_state_dict(
        torch.load(modelpath, map_location='cpu')['model'])

print("model loaded successfully!")

generate = Generator(srnn_model1, True)

import time
s_time = time.time()

sys.stderr.write("Generating {} sequences, each of length {}."\
                .format(n_samples, sample_length))
samples = generate(n_samples, sample_length).cpu().float().numpy()
def main(exp, frame_sizes, dataset, **params):
    scheduler = True
    use_cuda = torch.cuda.is_available()
    print('Start Sample-RNN')
    params = dict(
        default_params,
        exp=exp, frame_sizes=frame_sizes, dataset=dataset,
        **params
    )
    seed = params.get('seed')
    init_random_seed(seed, use_cuda)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    spk_dim = len([i for i in os.listdir(os.path.join(params['datasets_path'], params['dataset']))
                   if os.path.islink(os.path.join(params['datasets_path'], params['dataset']) + '/' + i)])

    print('Create model')
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        ulaw=params['ulaw'],
        weight_norm=params['weight_norm'],
        cond_dim=params['cond_dim']*(1+params['look_ahead']),
        spk_dim=spk_dim,
        qrnn=params['qrnn']
    )
    if use_cuda:
        model = model.cuda()
        predictor = Predictor(model).cuda()
    else:
        predictor = Predictor(model)

    print('Done!')
    f_name = params['model']
    if f_name is not None:
        print('pre train with', f_name)
        model_data = load_model(f_name)
        if model_data is None:
            sys.exit('ERROR: Model not found in ' + str(f_name))
        (state_dict, epoch_index, iteration) = model_data
        print('OK: Read model', f_name, '(epoch:', epoch_index, ')')
        print(state_dict)
        predictor.load_state_dict(state_dict)
    print('predictor', predictor)
    for name, param in predictor.named_parameters():
        print(name, param.size())

    optimizer = torch.optim.Adam(predictor.parameters(), lr=params['learning_rate'])
    if params['scheduler']:
        scheduler = MultiStepLR(optimizer, milestones=[15, 35], gamma=0.1)
    optimizer = gradient_clipping(optimizer)
    print('Saving results in path', results_path)
    print('Read data')
    data_loader = make_data_loader(model.lookback, params)
    print('Done!')
    data_model = data_loader('train')

    show_dataset = False
    if show_dataset:
        for i, full in enumerate(data_model):
            print('Data Loader---------------------------------------')
            print('batch', i)
            (data, reset, target, cond) = full           
            print('Data', data.size())
            print('Target', target.size())

    if not params['scheduler']:    
        scheduler = None
    if use_cuda:
        cuda = True
    else:
        cuda = False
    writer = SummaryWriter(log_dir='sample_board')
    trainer = Trainer(
        predictor, sequence_nll_loss_bits, optimizer, data_model, cuda, writer, scheduler
    )

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(TrainingLossMonitor(
        smoothing=params['loss_smoothing']
    ))
    trainer.register_plugin(ValidationPlugin(
        data_loader('validation'),
        data_loader('test'),
        writer
    ))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(SaverPlugin(
        checkpoints_path, params['keep_old_checkpoints']
    ))

    trainer.register_plugin(
        Logger([
            'training_loss',
            'validation_loss',
            'test_loss',
            'time'
        ])
    )

    trainer.register_plugin(StatsPlugin(
        results_path,
        iteration_fields=[
            'training_loss',
            ('training_loss', 'running_avg'),
            'time'
        ],
        epoch_fields=[
            'validation_loss',
            'test_loss',
            'time'
        ],
        plots={
            'loss': {
                'x': 'iteration',
                'ys': [
                    'training_loss',
                    ('training_loss', 'running_avg'),
                    'validation_loss',
                    'test_loss',
                ],
                'log_y': True
            }
        }
    ))
    
    trainer.run(params['epoch_limit'])
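For reference, the `MultiStepLR` schedule configured above multiplies the learning rate by `gamma` at each milestone epoch. A minimal sketch of that behaviour in isolation (the optimizer and its initial learning rate here are placeholders, not the experiment's settings):

import torch
from torch.optim.lr_scheduler import MultiStepLR

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.Adam([param], lr=0.001)        # placeholder lr
sched = MultiStepLR(opt, milestones=[15, 35], gamma=0.1)
for epoch in range(40):
    if epoch in (0, 15, 35):
        print(epoch, opt.param_groups[0]['lr'])  # 0.001, then ~1e-4, then ~1e-5
    opt.step()
    sched.step()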