Example #1
def main(exp, frame_sizes, generate_from, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  generate_from=generate_from,
                  **params)
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        nb_classes=params['nb_classes'],
        weight_norm=params['weight_norm'],
    )
    #    model = SampleRNN([16, 4], 2, 1024, True, 256, True)
    print('Loading saved model from ' + params['generate_from'])
    checkpoint = torch.load(params['generate_from'])
    # Strip the six-character "model." prefix that the training wrapper
    # prepends to every key in the saved state dict.
    temporary_dict = {}
    for k, v in checkpoint.items():
        temporary_dict[k[6:]] = v
    checkpoint = temporary_dict
    model.load_state_dict(checkpoint)
    if not os.path.exists(params['generate_to']):
        os.mkdir(params['generate_to'])
    print(params['cond'])
    generator = GeneratorPlugin(params['generate_to'], params['n_samples'],
                                params['sample_length'], params['sample_rate'],
                                params['nb_classes'], params['cond'])
    generator.register_generate(model.cuda(), params['cuda'])
    generator.epoch(exp)
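A note on the `k[6:]` loop above: it strips the six-character "model." prefix that the training wrapper prepends to state-dict keys. A minimal, equivalent sketch (the helper name is ours, not from the project; str.removeprefix needs Python 3.9+):

def strip_prefix(state_dict, prefix='model.'):
    # removeprefix leaves keys that lack the prefix untouched,
    # unlike the positional slice k[6:].
    return {k.removeprefix(prefix): v for k, v in state_dict.items()}

# checkpoint = strip_prefix(torch.load(params['generate_from']))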
Example #2
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    # Save samplernn parameters in .json for future audio generation
    import json
    with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
        json.dump(params, fp, sort_keys=True, indent=4)

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)

    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
    else:
        trainer.epochs = 0
        trainer.iterations = 0
        torch.save(predictor,
                   os.path.join(checkpoints_path, "pytorch_model.bin"))
    # else:
    #     print("***** Saving fine-tuned model *****")
    #     output_model_file = os.path.join(params['results_path'], "pytorch_model.bin")
    #     if params['cuda']:
    #         torch.save(predictor, output_model_file)
    #     else:
    #         torch.save(predictor, output_model_file)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path, 'samples'),
                        params['n_samples'], params['sample_length'],
                        params['sample_rate'], params['sampling_temperature']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x': 'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y': True
                        }
                    }))

    init_comet(params, trainer)

    trainer.run(params['epoch_limit'])
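Aside: every main() here opens with the dict(default_params, ...) idiom, which merges mappings with right-most precedence, so explicit keyword arguments (the CLI values) override the defaults. A toy illustration with hypothetical values:

default_params = {'n_rnn': 2, 'dim': 1024}
merged = dict(default_params, n_rnn=3)   # keyword arguments win
assert merged == {'n_rnn': 3, 'dim': 1024}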
Example #3
def main(exp, frame_sizes, dataset, **params):
    params = dict(default_params,
                  exp=exp,
                  frame_sizes=frame_sizes,
                  dataset=dataset,
                  **params)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints']))
    trainer.register_plugin(
        GeneratorPlugin(os.path.join(results_path, 'samples'),
                        params['n_samples'], params['sample_length'],
                        params['sample_rate']))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(results_path,
                    iteration_fields=[
                        'training_loss', ('training_loss', 'running_avg'),
                        'time'
                    ],
                    epoch_fields=['validation_loss', 'test_loss', 'time'],
                    plots={
                        'loss': {
                            'x': 'iteration',
                            'ys': [
                                'training_loss',
                                ('training_loss', 'running_avg'),
                                'validation_loss',
                                'test_loss',
                            ],
                            'log_y': True
                        }
                    }))

    init_comet(params, trainer)

    trainer.run(params['epoch_limit'])
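The split arithmetic above carves the dataset into contiguous train/validation/test fractions. A worked example, assuming test_frac = val_frac = 0.1:

test_frac, val_frac = 0.1, 0.1
test_split = 1 - test_frac           # 0.9: samples in [0.9, 1.0) are the test set
val_split = test_split - val_frac    # 0.8: samples in [0.8, 0.9) are validation
# data_loader(0, val_split) therefore trains on the first 80% of the data.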
Example #4
def main(exp, dataset, **params):
    params = dict(default_params, exp=exp, dataset=dataset, **params)
    print(params)
    storage_client = None
    bucket = None

    path = os.path.join(params['datasets_path'], params['dataset'])

    if params['bucket']:
        storage_client = storage.Client()
        bucket = Bucket(storage_client, params['bucket'])
        preload_dataset(path, storage_client, bucket)

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    (quantize, dequantize) = quantizer(params['q_method'])
    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    predictor = Predictor(model, dequantize)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    optimizer = gradient_clipping(
        torch.optim.Adam(predictor.parameters(), lr=params['learning_rate']))

    data_loader = make_data_loader(path, model.lookback, quantize, params)
    test_split = 1 - params['test_frac']
    val_split = test_split - params['val_frac']

    trainer = Trainer(predictor,
                      sequence_nll_loss_bits,
                      optimizer,
                      data_loader(0, val_split, eval=False),
                      cuda=params['cuda'])

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path, storage_client,
                                           bucket)
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)

    trainer.register_plugin(
        TrainingLossMonitor(smoothing=params['loss_smoothing']))
    trainer.register_plugin(
        ValidationPlugin(data_loader(val_split, test_split, eval=True),
                         data_loader(test_split, 1, eval=True)))
    trainer.register_plugin(SchedulerPlugin(params['lr_scheduler_step']))

    def upload(file_path):
        if bucket is None:
            return

        name = file_path.replace(os.path.abspath(os.curdir) + '/', '')
        blob = Blob(name, bucket)
        try:
            blob.upload_from_filename(file_path, timeout=300)
        except Exception as e:
            print(str(e))

    trainer.register_plugin(AbsoluteTimeMonitor())

    samples_path = os.path.join(results_path, 'samples')
    trainer.register_plugin(
        SaverPlugin(checkpoints_path, params['keep_old_checkpoints'], upload))
    trainer.register_plugin(
        GeneratorPlugin(samples_path,
                        params['n_samples'],
                        params['sample_length'],
                        params['sample_rate'],
                        params['q_levels'],
                        dequantize,
                        params['sampling_temperature'],
                        upload=upload))
    trainer.register_plugin(
        Logger(['training_loss', 'validation_loss', 'test_loss', 'time']))
    trainer.register_plugin(
        StatsPlugin(
            results_path,
            iteration_fields=[
                'training_loss',
                #('training_loss', 'running_avg'),
                'time'
            ],
            epoch_fields=[
                'training_loss', ('training_loss', 'running_avg'),
                'validation_loss', 'test_loss', 'time'
            ],
            plots={
                'loss': {
                    'x': 'iteration',
                    'ys': [
                        'training_loss',
                        # ('training_loss', 'running_avg'),
                        'validation_loss',
                        'test_loss'
                    ],
                    'log_y': True
                }
            }))

    init_comet(params, trainer, samples_path, params['n_samples'],
               params['sample_rate'])

    trainer.run(params['epoch_limit'])
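This variant threads a (quantize, dequantize) pair through the data loader and generator. The project's quantizer(QMethod.LINEAR) is not shown on this page; as a rough sketch of what a linear pair typically looks like (our own approximation, not the project's code):

import torch

def linear_quantize(samples, q_levels):
    # Map audio in [-1, 1] onto integer levels 0 .. q_levels - 1.
    samples = (samples.clamp(-1, 1) + 1) / 2
    return (samples * (q_levels - 1)).long()

def linear_dequantize(levels, q_levels):
    # Inverse mapping back to [-1, 1].
    return levels.float() / (q_levels - 1) * 2 - 1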
Example #5
def main(checkpoint, **args):
    task_id = setup_logging(
        'gen', logging.NOTSET if args.get('debug', False) else logging.INFO)

    params = dict(
        {
            'n_rnn': 3,
            'dim': 1024,
            'learn_h0': False,
            'q_levels': 256,
            'weight_norm': True,
            'frame_sizes': [16, 16, 4],
            'sample_rate': 16000,
            'n_samples': 1,
            'sample_length': 16000 * 60 * 4,
            'sampling_temperature': 1,
            'q_method': QMethod.LINEAR,
        },
        exp=checkpoint,
        **args)
    logging.info(str(params))
    logging.info('booting')

    # dataset = storage_client.list_blobs(bucket, prefix=path)
    # for blob in dataset:
    #   blob.download_to_filename(blob.name)
    bucket = None

    if args['bucket']:
        logging.debug('setup google storage bucket {}'.format(args['bucket']))
        storage_client = storage.Client()
        bucket = Bucket(storage_client, args['bucket'])

        preload_checkpoint(checkpoint, storage_client, bucket)

    results_path = os.path.abspath(
        os.path.join(checkpoint, os.pardir, os.pardir, task_id))
    ensure_dir_exists(results_path)

    checkpoint = os.path.abspath(checkpoint)

    tmp_pretrained_state = torch.load(
        checkpoint,
        map_location=lambda storage, loc: storage.cuda(0)
        if args['cuda'] else storage)

    # Load all tensors onto GPU 1
    # torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1))

    pretrained_state = OrderedDict()

    for k, v in tmp_pretrained_state.items():
        # Delete "model." from key names since loading the checkpoint automatically attaches it
        layer_name = k.replace("model.", "")
        pretrained_state[layer_name] = v
        # print("k: {}, layer_name: {}, v: {}".format(k, layer_name, np.shape(v)))

    # Create model with same parameters as used in training
    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    if params['cuda']:
        model = model.cuda()

    # Load pretrained model
    model.load_state_dict(pretrained_state)

    def upload(file_path):
        if bucket is None:
            return

        # remove prefix /app
        name = file_path.replace(os.path.abspath(os.curdir) + '/', '')
        blob = Blob(name, bucket)
        logging.info('uploading {}'.format(name))
        blob.upload_from_filename(file_path)

    (_, dequantize) = quantizer(params['q_method'])
    gen = Gen(Runner(model), params['cuda'])
    gen.register_plugin(
        GeneratorPlugin(results_path, params['n_samples'],
                        params['sample_length'], params['sample_rate'],
                        params['q_levels'], dequantize,
                        params['sampling_temperature'], upload))

    gen.run()
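The map_location lambda above sends every tensor to GPU 0 when CUDA is requested and keeps it on the CPU otherwise. torch.load also accepts a device string, which reads more directly:

import torch

device = 'cuda:0' if args['cuda'] else 'cpu'
tmp_pretrained_state = torch.load(checkpoint, map_location=device)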
Example #6
# Load pretrained model
model.load_state_dict(new_pretrained_state)

# Generate Plugin
num_samples = 1  # params['n_samples']
sample_length = params['sample_length']
sample_rate = params['sample_rate']
sampling_temperature = params['sampling_temperature']

# Override from our options
sample_length = sample_rate * int(options.length)

print("Number samples: {}, sample_length: {}, sample_rate: {}".format(num_samples, sample_length, sample_rate))
print("Generating %d seconds of audio" % (sample_length / sample_rate))
generator = GeneratorPlugin(GENERATED_PATH, num_samples, sample_length, sample_rate, sampling_temperature)

# Call new register function to accept the trained model and the cuda setting
generator.register_generate(model.cuda(), params['cuda'])

# Generate new audio
# $$$ check if we already have generated audio and increment the file name
generator.epoch(OUTPUT_NAME)
GENERATED_FILEPATH = GENERATED_PATH + "ep" + OUTPUT_NAME + "-s1.wav"
print("Saved audio to %s " % GENERATED_FILEPATH)

if options.output:
    print("Moving to %s" % options.output)
    os.rename(GENERATED_FILEPATH, options.output)
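The GENERATED_FILEPATH concatenation assumes GENERATED_PATH ends with a path separator; os.path.join makes that assumption unnecessary. A sketch (the "ep<label>-s1.wav" naming is inferred from the concatenation above, not from GeneratorPlugin's documentation):

import os

GENERATED_FILEPATH = os.path.join(GENERATED_PATH,
                                  "ep{}-s1.wav".format(OUTPUT_NAME))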

Example #7
model = SampleRNN(frame_sizes=params['frame_sizes'],
                  n_rnn=params['n_rnn'],
                  dim=params['dim'],
                  learn_h0=params['learn_h0'],
                  q_levels=params['q_levels'],
                  weight_norm=params['weight_norm'])

# Delete "model." from key names since loading the checkpoint automatically attaches it to the key names
pretrained_state = torch.load(PRETRAINED_PATH)
new_pretrained_state = OrderedDict()

for k, v in pretrained_state.items():
    layer_name = k.replace("model.", "")
    new_pretrained_state[layer_name] = v
    # print("k: {}, layer_name: {}, v: {}".format(k, layer_name, np.shape(v)))

# Load pretrained model
model.load_state_dict(new_pretrained_state)

# Generate Plugin
num_samples = 2  # params['n_samples']
sample_length = params['sample_length']
sample_rate = params['sample_rate']
print("Number samples: {}, sample_length: {}, sample_rate: {}".format(
    num_samples, sample_length, sample_rate))
generator = GeneratorPlugin(GENERATED_PATH, num_samples, sample_length,
                            sample_rate)

# Call new register function to accept the trained model and the cuda setting
generator.register_generate(model.cuda(), params['cuda'])

# Generate new audio
generator.epoch('Test2')
Example #8
# Get initial samples from one test sample and check whether the model re-generates it
audio_filename = dataset_filenames[0]
from librosa.core import load
sr = params['sample_rate']
seq, sr = load(audio_filename, sr=sr, mono=True)
print("Sample rate: {}".format(sr))

# Generate Plugin
num_samples = 6  # params['n_samples']

initial_seq_size = 64 * 100  # presumably has to be a multiple of rnn.n_frame_samples
initial_seq = None
if initial_seq_size > 1:
    init = utils.linear_quantize(torch.from_numpy(seq[0:initial_seq_size]),
                                 params['q_levels'])
    # init = seq[0:initial_seed_size]
    init = np.tile(init, (num_samples, 1))
    initial_seq = torch.LongTensor(init)
    # initial_seed = utils.linear_quantize(initial_seed, params['q_levels'])

sample_length = params['sample_length']
sample_rate = params['sample_rate']
print("Number samples: {}, sample_length: {}, sample_rate: {}".format(
    num_samples, sample_length, sample_rate))
generator = GeneratorPlugin(GENERATED_PATH, num_samples, sample_length,
                            sample_rate)

# Overloads register function to accept the trained model and the cuda setting
generator.register_generate(model.cuda(), params['cuda'])

# Generate new audio
generator.epoch('Test19_{}'.format(initial_seq_size), initial_seed=initial_seq)
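The seeding logic above tiles one quantized sequence across the batch so every generated sample continues the same audio. A self-contained sketch of just that step (sizes hypothetical):

import numpy as np
import torch

num_samples, seed_len = 6, 6400
q_seq = np.zeros(seed_len, dtype=np.int64)   # stand-in for the quantized seed
batch = np.tile(q_seq, (num_samples, 1))     # shape: (num_samples, seed_len)
initial_seq = torch.LongTensor(batch)
assert initial_seq.shape == (num_samples, seed_len)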
Example #9
def main(exp, frame_sizes, dataset, **params):
    params = dict(
        default_params,
        exp=exp, frame_sizes=frame_sizes, dataset=dataset,
        **params
    )

    results_path = setup_results_dir(params)
    tee_stdout(os.path.join(results_path, 'log'))

    # Save samplernn parameters in .json for future audio generation
    import json
    with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
        json.dump(params, fp, sort_keys=True, indent=4)

    # Model
    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        weight_norm=params['weight_norm'],
        batch_size=params['batch_size']
    )
    print("CUDA num: {}".format(torch.cuda.device_count()))
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    model_cnnseq2sample = CNNSeq2SampleRNN(params).cuda()

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    data_loader = make_data_loader(model.lookback, params)
    data_loader_test = make_data_loader(model.lookback, params, npz_filename=params['npz_filename_test'])
    # test_split = 1 - params['test_frac']
    # val_split = test_split - params['val_frac']

    trainer = Trainer(
        predictor, model_cnnseq2sample, sequence_nll_loss_bits, optimizer,
        # data_loader(0, val_split, eval=False),
        data_loader(0, 1, eval=False),
        cuda=params['cuda']
    )

    checkpoints_path = os.path.join(results_path, 'checkpoints')
    checkpoint_data = load_last_checkpoint(checkpoints_path)
    checkpoint_data_cnnseq2sample = load_last_checkpoint(checkpoints_path, model_type='cnnseq2sample')
    if checkpoint_data is not None:
        (state_dict, epoch, iteration) = checkpoint_data
        (state_dict_cnnseq2sample, epoch, iteration) = checkpoint_data_cnnseq2sample
        trainer.epochs = epoch
        trainer.iterations = iteration
        predictor.load_state_dict(state_dict)
        model_cnnseq2sample.load_state_dict(state_dict_cnnseq2sample)

    trainer.register_plugin(TrainingLossMonitor(
        smoothing=params['loss_smoothing']
    ))
    trainer.register_plugin(ValidationPlugin(
        # data_loader(val_split, test_split, eval=True),
        # data_loader_test(0, 1, eval=True)
        data_loader_test(0, params['val_frac'], eval=True),
        data_loader_test(params['val_frac'], 1, eval=True)
        # data_loader(test_split, 1, eval=True)
    ))
    trainer.register_plugin(AbsoluteTimeMonitor())
    trainer.register_plugin(SaverPlugin(
        checkpoints_path, params['keep_old_checkpoints']
    ))
    trainer.register_plugin(GeneratorPlugin(
        os.path.join(results_path, 'samples'), params['n_samples'],
        params['sample_length'], params['sample_rate']
    ))
    trainer.register_plugin(
        Logger([
            'training_loss',
            'validation_loss',
            'test_loss',
            'time'
        ])
    )
    trainer.register_plugin(StatsPlugin(
        results_path,
        iteration_fields=[
            'training_loss',
            ('training_loss', 'running_avg'),
            'time'
        ],
        epoch_fields=[
            'validation_loss',
            'test_loss',
            'time'
        ],
        plots={
            'loss': {
                'x': 'iteration',
                'ys': [
                    'training_loss',
                    ('training_loss', 'running_avg'),
                    'validation_loss',
                    'test_loss',
                ],
                'log_y': True
            }
        }
    ))

    trainer.run(params['epoch_limit'])
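One caveat in the resume logic above: only checkpoint_data is checked for None, so a missing cnnseq2sample checkpoint would raise a TypeError on unpacking. A defensive variant, a sketch reusing the example's own names:

if checkpoint_data is not None and checkpoint_data_cnnseq2sample is not None:
    # Guard both checkpoints; restoring only one of the two models
    # would desynchronize them.
    state_dict, epoch, iteration = checkpoint_data
    state_dict_cnnseq2sample, _, _ = checkpoint_data_cnnseq2sample
    trainer.epochs = epoch
    trainer.iterations = iteration
    predictor.load_state_dict(state_dict)
    model_cnnseq2sample.load_state_dict(state_dict_cnnseq2sample)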