Example #1
def main_train():
    # train the model from the parsed command-line configuration
    args = parse_args()
    config_args, device, checkpoint = transform_args(args)
    train(config_args, device, checkpoint, dataplace=args.dataplace)

    # evaluate on the test set using the checkpoint from the best epoch
    best_checkpoint = misc.get_checkpoint(
        output_folder=config_args['training']['output_folder'],
        epoch="best",
    )
    evaluate(
        config_args=config_args,
        device=device,
        checkpoint=best_checkpoint,
        tempscale=False,
        corruptions=False,
        dataplace=args.dataplace
    )
    LOGGER.error(f"Finish: {config_args['training']['config_path']}")
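The helpers above (parse_args, transform_args, train, evaluate, misc.get_checkpoint) come from the surrounding project. A minimal sketch of what a best-epoch checkpoint lookup could look like, assuming the common PyTorch convention of a model_best.pth file in the output folder; the file names and layout here are assumptions, not the project's actual API:

import os
import torch

def get_checkpoint(output_folder: str, epoch: str = "best"):
    """Load a saved checkpoint; `epoch` is "best" or an epoch number."""
    # ASSUMPTION: checkpoints are saved as model_best.pth / model_epoch_N.pth
    name = "model_best.pth" if epoch == "best" else f"model_epoch_{epoch}.pth"
    path = os.path.join(output_folder, name)
    return torch.load(path, map_location="cpu")  # device-agnostic load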
Example #2
def test_evaluate(deepspeech: DeepSpeech, generator: Iterable,
                  store_path: str) -> pd.DataFrame:
    metrics = evaluate(deepspeech,
                       generator,
                       save_activations=True,
                       store_path=store_path)
    with pd.HDFStore(store_path, mode='r') as store:
        references = store['references']

    assert len(references) == len(metrics) == 12
    assert all(references.columns.values == np.array(
        ['transcript', 'prediction', 'wer', 'cer']))
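The test assumes that evaluate persists a references DataFrame into an HDF5 store. A minimal sketch of the round-trip it relies on, using pandas' HDFStore API (which requires the PyTables package); the store path and sample row are made up for illustration:

import pandas as pd

references = pd.DataFrame(
    [('hello world', 'hello word', 0.5, 0.1)],
    columns=['transcript', 'prediction', 'wer', 'cer'])
with pd.HDFStore('/tmp/evaluation.hdf5', mode='w') as store:
    store.put('references', references)   # written by evaluate in the test
with pd.HDFStore('/tmp/evaluation.hdf5', mode='r') as store:
    print(store['references'].columns.tolist())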
Example #3
def main(store_path: str, features_store_path: str, batch_size: int,
         save_activations: bool, mask: bool, mask_F: int, mask_mf: int,
         mask_T: int, mask_mt: int, mask_ratio_t: float):
    """ Evaluate model using prepared features. """
    deepspeech = load_extended_model(CONFIG_PATH, ALPHABET_PATH, WEIGHTS_PATH)
    generator = DataGenerator.from_prepared_features(
        features_store_path,
        alphabet=deepspeech.alphabet,
        features_extractor=deepspeech.features_extractor,
        batch_size=batch_size,
        mask=mask,
        mask_params=dict(F=mask_F,
                         mf=mask_mf,
                         T=mask_T,
                         mt=mask_mt,
                         ratio_t=mask_ratio_t))
    units = calculate_units(deepspeech.model)
    logger.info(f'Model contains {units // 1e6:.0f}M units ({units})')

    metrics = evaluate(deepspeech, generator, save_activations, store_path)
    logger.info(f'Mean CER: {metrics.cer.mean():.4f}')
    logger.info(f'Mean WER: {metrics.wer.mean():.4f}')
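The mask_F/mask_mf/mask_T/mask_mt/mask_ratio_t parameters look like SpecAugment-style masking (maximum width and count of frequency masks, plus the analogous time-mask settings and a cap on the masked time ratio). A minimal numpy sketch under that assumption; the DataGenerator's actual implementation may differ:

import numpy as np

def mask_features(features, F=15, mf=2, T=20, mt=2, ratio_t=0.3):
    """Zero out random frequency bands and time spans of a (time, freq) array."""
    time_steps, freq_bins = features.shape
    masked = features.copy()
    for _ in range(mf):                          # frequency masks
        f = np.random.randint(0, F + 1)
        f0 = np.random.randint(0, max(1, freq_bins - f))
        masked[:, f0:f0 + f] = 0
    max_t = int(ratio_t * time_steps)            # cap on any single time mask
    for _ in range(mt):                          # time masks
        t = np.random.randint(0, min(T, max_t) + 1)
        t0 = np.random.randint(0, max(1, time_steps - t))
        masked[t0:t0 + t, :] = 0
    return masked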
Example #4
    for base_data_path in to_evaluate:
        print(base_data_path)
        for change, movement_data_path in to_evaluate[base_data_path]:
            # start from zero offsets on every axis, then apply the requested
            # change (which may set only some of the axes)
            real_change = {
                "x": 0,
                "y": 0,
                "z": 0,
                "roll": 0,
                "pitch": 0,
                "yaw": 0,
            }
            real_change.update(change)
            left_side = "x y z: ({} {} {}) φ θ ψ: ({} {} {}),".format(
                real_change['x'], real_change['y'], real_change['z'],
                real_change['roll'], real_change['pitch'], real_change['yaw'])

            results = evaluate(base_data_path,
                               movement_data_path,
                               settings,
                               real_change,
                               CALCULATE_ERROR=False)
            f.write(left_side + ",".join(["{:.3f}".format(x)
                                          for x in results]) + "\n")
            f.flush()

    end = time()
    print("Took:", end - start, "seconds")
    print("done")
Example #5
if __name__ == '__main__':

    # training: either the GMM-UBM pipeline or the plain classifier
    if args.train_ubm:
        data = prepare_data.load_train_file()
        ubm = classify.fit_ubm(data, args.save_path)
        classify.fit_adap(data, ubm, args.save_path)  # adapt the fitted UBM
        args.load_path = args.save_path  # evaluate the freshly trained model
    if args.train:
        data = prepare_data.load_train_file()
        classify.fit(data, args.save_path)
        args.load_path = args.save_path
    if args.validate_ubm:
        data = prepare_data.load_test('val')
        scores = classify.predict_ubm(data, args.load_path)
        Y, Y_pred = evaluate.get_predictions(scores)
        evaluate.evaluate(Y, Y_pred)
        if args.confusion_matrix:
            evaluate.confusion_matrix(Y, Y_pred)
    if args.test_ubm:
        data = prepare_data.load_test('test')
        scores = classify.predict_ubm(data, args.load_path)
        Y, Y_pred = evaluate.get_predictions(scores)
        evaluate.evaluate(Y, Y_pred)
        if args.confusion_matrix:
            evaluate.confusion_matrix(Y, Y_pred)
    if args.validate:
        data = prepare_data.load_test('val')
        scores = classify.predict(data, args.load_path)
        Y, Y_pred = evaluate.get_predictions(scores)
        evaluate.evaluate(Y, Y_pred)
        if args.confusion_matrix:
            evaluate.confusion_matrix(Y, Y_pred)
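The fit_ubm/fit_adap/predict_ubm names suggest a GMM-UBM pipeline: fit a universal background model on pooled training data, then adapt it per class and score test data against the adapted models. A minimal sketch of the UBM step with scikit-learn, assuming MFCC-like frame features; the MAP adaptation and scoring in the real classify module are not reproduced here:

import numpy as np
from sklearn.mixture import GaussianMixture

def fit_ubm(features: np.ndarray, n_components: int = 64) -> GaussianMixture:
    """Fit a diagonal-covariance GMM on pooled training frames."""
    ubm = GaussianMixture(n_components=n_components, covariance_type='diag')
    ubm.fit(features)
    return ubm

ubm = fit_ubm(np.random.randn(1000, 13))   # e.g. 13-dim MFCC frames
print(ubm.score(np.random.randn(10, 13)))  # mean log-likelihood per frame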