Example #1
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    # Save samplernn parameters in .json for future audio generation
    import json
    with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
        json.dump(params, fp, sort_keys=True, indent=4)

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)

    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
    else:
        trainer.epochs = 0
        trainer.iterations = 0
        # Ensure the directory exists before the initial save.
        os.makedirs(checkpoints_path, exist_ok=True)
        torch.save(predictor,
                   os.path.join(checkpoints_path, "pytorch_model.bin"))

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path, 'samples'),
                        params['n_samples'], params['sample_length'],
                        params['sample_rate'], params['sampling_temperature']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x':
                            'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y':
                            True
                        }
                    }))

    init_comet(params, trainer)

    trainer.run(params['epoch_limit'])
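
The split arithmetic in this example carves the dataset into three contiguous fractional ranges: [0, val_split) for training, [val_split, test_split) for validation, and [test_split, 1) for testing. make_data_loader's implementation is not shown, so the helper below is a hypothetical sketch of how such fractional bounds typically map onto integer index ranges:

def fractional_slice(n_items, start_frac, end_frac):
    # Map fractional bounds onto an integer index range, e.g. to build
    # a torch.utils.data.Subset. Hypothetical stand-in for the slicing
    # that make_data_loader presumably performs internally.
    return range(int(n_items * start_frac), int(n_items * end_frac))

# With test_frac=0.1 and val_frac=0.1 on a 10-item dataset:
test_split = 1 - 0.1          # 0.9
val_split = test_split - 0.1  # 0.8
print(list(fractional_slice(10, 0, val_split)))           # train: [0..7]
print(list(fractional_slice(10, val_split, test_split)))  # validation: [8]
print(list(fractional_slice(10, test_split, 1)))          # test: [9]
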
Example #2
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path, 'samples'),
                        params['n_samples'], params['sample_length'],
                        params['sample_rate']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x':
                            'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y':
                            True
                        }
                    }))

    init_comet(params, trainer)

    trainer.run(params['epoch_limit'])
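
Every example resumes training through load_last_checkpoint, whose source is not included here. A minimal sketch of the usual pattern, under the assumption that checkpoint file names encode the epoch and iteration (the 'ep{N}-it{N}' naming below is an assumption):

import os
import re

import torch

def load_last_checkpoint(checkpoints_path):
    # Hypothetical reconstruction: return (state_dict, epoch, iteration)
    # for the newest checkpoint, picking the highest (epoch, iteration) pair.
    if not os.path.isdir(checkpoints_path):
        return None
    pattern = re.compile(r'ep(\d+)-it(\d+)')
    best = None
    for name in os.listdir(checkpoints_path):
        match = pattern.search(name)
        if match is None:
            continue
        epoch, iteration = int(match.group(1)), int(match.group(2))
        if best is None or (epoch, iteration) > best[1:]:
            best = (name, epoch, iteration)
    if best is None:
        return None
    state_dict = torch.load(os.path.join(checkpoints_path, best[0]))
    return state_dict, best[1], best[2]
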
Example #3
def main(exp, frame_sizes, dataset, **params):
    params = dict(
        default_params,
        exp=exp, frame_sizes=frame_sizes, dataset=dataset,
        **params
    )

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    # Save samplernn parameters in .json for future audio generation
    import json
    with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
        json.dump(params, fp, sort_keys=True, indent=4)

    # Model
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        weight_norm=params['weight_norm'],
        batch_size=params['batch_size']
    )
    print("CUDA num: {}".format(torch.cuda.device_count()))
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    model_cnnseq2sample = CNNSeq2SampleRNN(params)
    if params['cuda']:
        model_cnnseq2sample = model_cnnseq2sample.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    data_loader_test = make_data_loader(model.lookback, params, npz_filename=params['npz_filename_test'])

    trainer = Trainer(
        predictor, model_cnnseq2sample, sequence_nll_loss_bits, optimizer,
        data_loader(0, 1, eval=False),
        cuda=params['cuda']
    )

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    checkpoint_data_cnnseq2sample = load_last_checkpoint(checkpoints_path, model_type='cnnseq2sample')
    if checkpoint_data is not None and checkpoint_data_cnnseq2sample is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        (state_dict_cnnseq2sample, epoch, iteration) = checkpoint_data_cnnseq2sample
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
        model_cnnseq2sample.load_state_dict(state_dict_cnnseq2sample)

    trainer.register_plugin(TrainingLossMonitor(
        smoothing=params['loss_smoothing']
    ))
    trainer.register_plugin(ValidationPlugin(
        data_loader_test(0, params['val_frac'], eval=True),
        data_loader_test(params['val_frac'], 1, eval=True)
    ))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(SaverPlugin(
        checkpoints_path, params['keep_old_checkpoints']
    ))
    trainer.register_plugin(GeneratorPlugin(
        os.path.join(results_path, 'samples'), params['n_samples'],
        params['sample_length'], params['sample_rate']
    ))
    trainer.register_plugin(
        Logger([
            'training_loss',
            'validation_loss',
            'test_loss',
            'time'
        ])
    )
    trainer.register_plugin(StatsPlugin(
        results_path,
        iteration_fields=[
            'training_loss',
            ('training_loss', 'running_avg'),
            'time'
        ],
        epoch_fields=[
            'validation_loss',
            'test_loss',
            'time'
        ],
        plots={
            'loss': {
                'x': 'iteration',
                'ys': [
                    'training_loss',
                    ('training_loss', 'running_avg'),
                    'validation_loss',
                    'test_loss',
                ],
                'log_y': True
            }
        }
    ))

    trainer.run(params['epoch_limit'])
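
gradient_clipping wraps the freshly built Adam optimizer in all of these examples, but its definition is not part of the listing. One common way to implement such a wrapper is to patch optimizer.step so every update first clips the gradient norm; the sketch below assumes that behavior, and max_norm=1.0 is an assumed default:

from torch.nn.utils import clip_grad_norm_

def gradient_clipping(optimizer, max_norm=1.0):
    # Sketch: clip the gradient norm of each parameter group right
    # before the wrapped optimizer performs its update.
    original_step = optimizer.step

    def step(*args, **kwargs):
        for group in optimizer.param_groups:
            clip_grad_norm_(group['params'], max_norm)
        return original_step(*args, **kwargs)

    optimizer.step = step
    return optimizer
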
Example #4
def main(exp, dataset, **params):
    params = dict(default_params, exp=exp, dataset=dataset, **params)
    print(params)
    storage_client = None
    bucket = None

    path = os.path.join(params['datasets_path'], params['dataset'])

    if params['bucket']:
        storage_client = storage.Client()
        bucket = Bucket(storage_client, params['bucket'])
        preload_dataset(path, storage_client, bucket)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    (quantize, dequantize) = quantizer(params['q_method'])
    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model, dequantize)
    if params['cuda'] is not False:
        print('Using CUDA:', params['cuda'])
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(
        torch.optim.Adam(predictor.parameters(), lr=params['learning_rate']))

    data_loader = make_data_loader(path, model.lookback, quantize, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, storage_client,
                                           bucket)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(SchedulerPlugin(params['lr_scheduler_step']))

    def upload(file_path):
        if bucket is None:
            return

        name = file_path.replace(os.path.abspath(os.curdir) + '/', '')
        blob = Blob(name, bucket)
        try:
            blob.upload_from_filename(file_path, timeout=300)
        except Exception as e:
            print(str(e))

    trainer.register_plugin(AbsoluteTimeMonitor())

    samples_path = os.path.join(results_path, 'samples')
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints'], upload))
    trainer.register_plugin(
        GeneratorPlugin(samples_path,
                        params['n_samples'],
                        params['sample_length'],
                        params['sample_rate'],
                        params['q_levels'],
                        dequantize,
                        params['sampling_temperature'],
                        upload=upload))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(
            results_path,
            iteration_fields=[
                'training_loss',
                #('training_loss', 'running_avg'),
                'time'
            ],
            epoch_fields=[
                'training_loss', ('training_loss', 'running_avg'),
                'validation_loss', 'test_loss', 'time'
            ],
            plots={
                'loss': {
                    'x':
                    'iteration',
                    'ys': [
                        'training_loss',
                        # ('training_loss', 'running_avg'),
                        'validation_loss',
                        'test_loss'
                    ],
                    'log_y':
                    True
                }
            }))

    init_comet(params, trainer, samples_path, params['n_samples'],
               params['sample_rate'])

    trainer.run(params['epoch_limit'])
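
Example #4 selects a (quantize, dequantize) pair through quantizer(params['q_method']). That factory is not shown; the sketch below illustrates the two methods commonly used for SampleRNN-style models, linear binning and mu-law companding, assuming audio amplitudes in [-1, 1] (the method names 'linear' and 'mu-law' are assumptions):

import math

import torch

def quantizer(q_method, q_levels=256):
    # Hypothetical (quantize, dequantize) factory for audio in [-1, 1].
    if q_method == 'linear':
        def quantize(x):
            return ((x + 1) / 2 * (q_levels - 1)).long()

        def dequantize(q):
            return q.float() / (q_levels - 1) * 2 - 1
    elif q_method == 'mu-law':
        mu = float(q_levels - 1)

        def quantize(x):
            # Compand, then bin: y = sign(x) * log(1 + mu|x|) / log(1 + mu)
            y = torch.sign(x) * torch.log1p(mu * x.abs()) / math.log1p(mu)
            return ((y + 1) / 2 * mu).clamp(0, q_levels - 1).long()

        def dequantize(q):
            y = q.float() / mu * 2 - 1
            return torch.sign(y) * ((1 + mu) ** y.abs() - 1) / mu
    else:
        raise ValueError('unknown q_method: ' + str(q_method))
    return quantize, dequantize
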
Example #5
def main(exp, frame_sizes, dataset, **params):
    use_cuda = torch.cuda.is_available()
    print('Start Sample-RNN')
    params = dict(
        default_params,
        exp=exp, frame_sizes=frame_sizes, dataset=dataset,
        **params
    )
    seed = params.get('seed')
    init_random_seed(seed, use_cuda)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    spk_dim = len([i for i in os.listdir(os.path.join(params['datasets_path'], params['dataset']))
                   if os.path.islink(os.path.join(params['datasets_path'], params['dataset']) + '/' + i)])

    print('Create model')
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        ulaw=params['ulaw'],
        weight_norm=params['weight_norm'],
        cond_dim=params['cond_dim']*(1+params['look_ahead']),
        spk_dim=spk_dim,
        qrnn=params['qrnn']
    )
    if use_cuda:
        model = model.cuda()
        predictor = Predictor(model).cuda()
    else:
        predictor = Predictor(model)

    print('Done!')
    f_name = params['model']
    if f_name is not None:
        print('Pre-training with', f_name)
        model_data = load_model(f_name)
        if model_data is None:
            sys.exit('ERROR: Model not found in ' + str(f_name))
        (state_dict, epoch_index, iteration) = model_data
        print('OK: Read model', f_name, '(epoch:', epoch_index, ')')
        predictor.load_state_dict(state_dict)
    print('predictor', predictor)
    for name, param in predictor.named_parameters():
        print(name, param.size())

    optimizer = torch.optim.Adam(predictor.parameters(), lr=params['learning_rate'])
    if params['scheduler']:
        scheduler = MultiStepLR(optimizer, milestones=[15, 35], gamma=0.1)
    else:
        scheduler = None
    optimizer = gradient_clipping(optimizer)
    print('Saving results in path', results_path)
    print('Read data')
    data_loader = make_data_loader(model.lookback, params)
    print('Done!')
    data_model = data_loader('train')

    show_dataset = False
    if show_dataset:
        for i, full in enumerate(data_model):
            print('Data Loader---------------------------------------')
            print('batch', i)
            (data, reset, target, cond) = full
            print('Data', data.size())
            print('Target', target.size())

    writer = SummaryWriter(log_dir='sample_board')
    trainer = Trainer(
        predictor, sequence_nll_loss_bits, optimizer, data_model, use_cuda, writer, scheduler
    )

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(TrainingLossMonitor(
        smoothing=params['loss_smoothing']
    ))
    trainer.register_plugin(ValidationPlugin(
        data_loader('validation'),
        data_loader('test'),
        writer
    ))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(SaverPlugin(
        checkpoints_path, params['keep_old_checkpoints']
    ))

    trainer.register_plugin(
        Logger([
            'training_loss',
            'validation_loss',
            'test_loss',
            'time'
        ])
    )

    trainer.register_plugin(StatsPlugin(
        results_path,
        iteration_fields=[
            'training_loss',
            ('training_loss', 'running_avg'),
            'time'
        ],
        epoch_fields=[
            'validation_loss',
            'test_loss',
            'time'
        ],
        plots={
            'loss': {
                'x': 'iteration',
                'ys': [
                    'training_loss',
                    ('training_loss', 'running_avg'),
                    'validation_loss',
                    'test_loss',
                ],
                'log_y': True
            }
        }
    ))
    
    trainer.run(params['epoch_limit'])
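
Example #5 is the only variant that adds a learning-rate schedule: MultiStepLR(optimizer, milestones=[15, 35], gamma=0.1) multiplies the learning rate by 0.1 at epochs 15 and 35. The Trainer presumably steps it once per epoch; outside such a trainer, the standard usage looks like this (toy model and loop, for illustration only):

import torch
from torch.optim.lr_scheduler import MultiStepLR

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = MultiStepLR(optimizer, milestones=[15, 35], gamma=0.1)

for epoch in range(40):
    # ... one epoch of training here: forward pass, loss.backward(), ...
    optimizer.step()    # placeholder update so the scheduler call order is valid
    scheduler.step()    # lr: 1e-3 at first, 1e-4 after epoch 15, 1e-5 after 35
    if epoch in (14, 15, 34, 35):
        print(epoch, optimizer.param_groups[0]['lr'])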