Example #1
def predict(desired_sample_rate, fragment_length, _log, seed, _seed, _config,
            predict_seconds, data_dir, batch_size, fragment_stride,
            nb_output_bins, learn_all_outputs, run_dir,
            predict_use_softmax_as_input, use_ulaw, predict_initial_input,
            **kwargs):
    checkpoint_dir = os.path.join(run_dir, 'checkpoints')
    last_checkpoint = sorted(os.listdir(checkpoint_dir))[-1]
    epoch = int(re.match(r'checkpoint\.(\d+?)-.*', last_checkpoint).group(1))
    _log.info('Using checkpoint from epoch: %s' % epoch)

    sample_dir = os.path.join(run_dir, 'samples')
    if not os.path.exists(sample_dir):
        os.mkdir(sample_dir)

    sample_name = make_sample_name(epoch)
    sample_filename = os.path.join(sample_dir, sample_name)

    _log.info('Saving to "%s"' % sample_filename)

    sample_stream = make_sample_stream(desired_sample_rate, sample_filename)

    model = build_model()
    model.load_weights(os.path.join(checkpoint_dir, last_checkpoint))

    if predict_initial_input != '':
        _log.info('Taking first %d (%.2fs) from \'%s\' as initial input.' %
                  (fragment_length, fragment_length / desired_sample_rate,
                   predict_initial_input))
        wav = dataset.process_wav(desired_sample_rate, predict_initial_input,
                                  use_ulaw)
        outputs = list(dataset.one_hot(wav[0:fragment_length]))
    else:
        _log.info('Taking a sample from the test dataset as initial input.')
        data_generators, _ = dataset.generators(data_dir, desired_sample_rate,
                                                fragment_length, batch_size,
                                                fragment_stride,
                                                nb_output_bins,
                                                learn_all_outputs, use_ulaw)
        outputs = list(next(data_generators['test'])[0][-1])

    # write_samples(sample_stream, outputs)
    # Autoregressive generation: repeatedly feed the most recent
    # fragment_length outputs back in and sample the next value.
    for i in tqdm(range(int(desired_sample_rate * predict_seconds))):
        prediction_seed = np.expand_dims(
            np.array(outputs[i:i + fragment_length]), 0)
        output = model.predict(prediction_seed)
        output_dist = output[0][-1]
        output_val = draw_sample(output_dist)
        if predict_use_softmax_as_input:
            outputs.append(output_dist)
        else:
            outputs.append(output_val)
        write_samples(sample_stream, [output_val])

    sample_stream.close()

    _log.info("Done!")
Example #2
def get_generators(batch_size, data_dir, desired_sample_rate, fragment_length, fragment_stride, learn_all_outputs,
                   nb_output_bins, use_ulaw, test_factor, data_dir_structure, randomize_batch_order, _rnd, random_train_batches):
    if data_dir_structure == 'flat':
        return dataset.generators(data_dir, desired_sample_rate, fragment_length, batch_size,
                                  fragment_stride, nb_output_bins, learn_all_outputs, use_ulaw, randomize_batch_order,
                                  _rnd, random_train_batches)

    elif data_dir_structure == 'vctk':
        return dataset.generators_vctk(data_dir, desired_sample_rate, fragment_length, batch_size,
                                       fragment_stride, nb_output_bins, learn_all_outputs, use_ulaw, test_factor,
                                       randomize_batch_order, _rnd, random_train_batches)
    else:
        raise ValueError('data_dir_structure must be "flat" or "vctk", is %s' % data_dir_structure)
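A hypothetical call site for this dispatcher (the argument values below are illustrative, not taken from the source):

import numpy as np

data_generators, nb_examples = get_generators(
    batch_size=16, data_dir='data', desired_sample_rate=4410,
    fragment_length=1152, fragment_stride=128, learn_all_outputs=True,
    nb_output_bins=256, use_ulaw=True, test_factor=0.1,
    data_dir_structure='vctk', randomize_batch_order=True,
    _rnd=np.random.RandomState(42), random_train_batches=False)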
Example #3
def test_preprocess(desired_sample_rate, fragment_length, _log, seed, _seed,
                    _config, predict_seconds, data_dir, batch_size,
                    fragment_stride, nb_output_bins, learn_all_outputs,
                    run_dir, predict_use_softmax_as_input, use_ulaw, **kwargs):
    sample_dir = 'preprocess_test'
    if not os.path.exists(sample_dir):
        os.mkdir(sample_dir)

    ulaw_str = '_ulaw' if use_ulaw else ''
    sample_filename = os.path.join(sample_dir, 'test1%s.wav' % ulaw_str)
    sample_stream = make_sample_stream(desired_sample_rate, sample_filename)

    data_generators, _ = dataset.generators(data_dir, desired_sample_rate,
                                            fragment_length, batch_size,
                                            fragment_stride, nb_output_bins,
                                            learn_all_outputs, use_ulaw)
    outputs = next(data_generators['test'])[0][batch_size - 1].astype('uint8')

    write_samples(sample_stream, outputs)
    scipy.io.wavfile.write(os.path.join(sample_dir, 'test2%s.wav' % ulaw_str),
                           desired_sample_rate,
                           np.argmax(outputs, axis=-1).astype('uint8'))
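The use_ulaw flag refers to µ-law companding as used in the WaveNet paper; the actual encoder lives in the dataset module and is not shown here. A minimal sketch of the transform, assuming input amplitudes in [-1, 1] and µ = 255:

import numpy as np

def ulaw(x, u=255.0):
    # Logarithmic compression: allocates more of the 256 quantization
    # bins to low amplitudes, where hearing is most sensitive.
    return np.sign(x) * np.log1p(u * np.abs(x)) / np.log1p(u)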
Example #4
def main(run_dir, data_dir, nb_epoch, early_stopping_patience,
         desired_sample_rate, fragment_length, batch_size, fragment_stride,
         nb_output_bins, keras_verbose, _log, seed, _config, debug,
         learn_all_outputs, train_only_in_receptive_field, _run, use_ulaw):
    if run_dir is None:
        run_dir = os.path.join(
            'models',
            datetime.datetime.now().strftime('run_%Y-%m-%d_%H:%M:%S'))
        _config['run_dir'] = run_dir

    print_config(_run)

    _log.info('Running with seed %d' % seed)

    if not debug:
        if os.path.exists(run_dir):
            raise EnvironmentError('Run with seed %d already exists' % seed)
        os.mkdir(run_dir)
        checkpoint_dir = os.path.join(run_dir, 'checkpoints')
        with open(os.path.join(run_dir, 'config.json'), 'w') as f:
            json.dump(_config, f)

    _log.info('Loading data...')
    data_generators, nb_examples = dataset.generators(
        data_dir, desired_sample_rate, fragment_length, batch_size,
        fragment_stride, nb_output_bins, learn_all_outputs, use_ulaw)

    _log.info('Building model...')
    model = build_model(fragment_length)
    model.summary()  # prints directly; summary() returns None, so there is nothing to log

    optim = make_optimizer()
    _log.info('Compiling Model...')

    loss = objectives.categorical_crossentropy
    all_metrics = [
        metrics.categorical_accuracy, metrics.categorical_mean_squared_error
    ]
    if train_only_in_receptive_field:
        loss = skip_out_of_receptive_field(loss)
        all_metrics = [skip_out_of_receptive_field(m) for m in all_metrics]

    model.compile(optimizer=optim, loss=loss, metrics=all_metrics)
    # TODO: Consider gradient weighting making last outputs more important.

    callbacks = [
        ReduceLROnPlateau(patience=early_stopping_patience // 2,
                          cooldown=early_stopping_patience // 4,
                          verbose=1),
        EarlyStopping(patience=early_stopping_patience, verbose=1),
    ]
    if not debug:
        callbacks.extend([
            ModelCheckpoint(os.path.join(
                checkpoint_dir, 'checkpoint.{epoch:05d}-{val_loss:.3f}.hdf5'),
                            save_best_only=True),
            CSVLogger(os.path.join(run_dir, 'history.csv')),
        ])

    if not debug:
        os.mkdir(checkpoint_dir)
        _log.info('Starting Training...')

    model.fit_generator(data_generators['train'],
                        nb_examples['train'],
                        nb_epoch=nb_epoch,
                        validation_data=data_generators['test'],
                        nb_val_samples=nb_examples['test'],
                        callbacks=callbacks,
                        verbose=keras_verbose)
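nb_epoch and nb_val_samples are Keras 1.x keyword names, and fit_generator there counts samples rather than batches. A sketch of the equivalent call under the Keras 2 API, assuming each generator step yields one batch of batch_size samples:

model.fit_generator(data_generators['train'],
                    steps_per_epoch=nb_examples['train'] // batch_size,
                    epochs=nb_epoch,
                    validation_data=data_generators['test'],
                    validation_steps=nb_examples['test'] // batch_size,
                    callbacks=callbacks,
                    verbose=keras_verbose)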