Example #1
def main(store_path: str, model_dir: str, features_store_path: str,
         batch_size: int, save_activations: bool):
    """ Evaluate model using prepared features. """
    deepspeech = load(model_dir)
    generator = deepspeech.create_generator(features_store_path,
                                            source='from_prepared_features',
                                            batch_size=batch_size)

    units = calculate_units(deepspeech.model)
    logger.info(f'Model contains: {units//1e6:.0f}M units ({units})')

    metrics = evaluate(deepspeech, generator, save_activations, store_path)
    logger.info(f'Mean CER: {metrics.cer.mean():.4f}')
    logger.info(f'Mean WER: {metrics.wer.mean():.4f}')
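
For reference, a minimal sketch of how this entry point could be exposed on the command line; the flag names below simply mirror the parameters of main and are not taken from the project's own CLI.

import argparse

if __name__ == '__main__':
    # Hypothetical argparse wrapper around main(); the real project may
    # parse its arguments differently.
    parser = argparse.ArgumentParser(description='Evaluate a model on prepared features')
    parser.add_argument('--store_path', required=True)
    parser.add_argument('--model_dir', required=True)
    parser.add_argument('--features_store_path', required=True)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--save_activations', action='store_true')
    args = parser.parse_args()
    main(**vars(args))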
Example #2
def main(store_path: str, model_dir: str, features_store_path: str,
         batch_size: int, save_activations: bool, mask: bool, mask_F: int,
         mask_mf: int, mask_T: int, mask_mt: int, mask_ratio_t: float):
    """ Evaluate model using prepared features. """
    deepspeech = load(model_dir)
    generator = DataGenerator.from_prepared_features(
        features_store_path,
        alphabet=deepspeech.alphabet,
        features_extractor=deepspeech.features_extractor,
        batch_size=batch_size,
        mask=mask,
        mask_params=dict(F=mask_F, mf=mask_mf, T=mask_T,
                         mt=mask_mt, ratio_t=mask_ratio_t)
    )
    units = calculate_units(deepspeech.model)
    logger.info(f'Model contains: {units//1e6:.0f}M units ({units})')

    metrics = evaluate(deepspeech, generator, save_activations, store_path)
    logger.info(f'Mean CER: {metrics.cer.mean():.4f}')
    logger.info(f'Mean WER: {metrics.wer.mean():.4f}')
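
The mask_* arguments look like SpecAugment-style masking knobs (F/mf: frequency mask width and count, T/mt: time mask width and count, ratio_t: cap on the fraction of masked time steps). The sketch below, using NumPy and a hypothetical mask_features helper, only illustrates that interpretation; the generator's actual masking may differ.

import numpy as np

def mask_features(features: np.ndarray, F: int = 5, mf: int = 2,
                  T: int = 10, mt: int = 2, ratio_t: float = 0.2) -> np.ndarray:
    """ Zero out random frequency and time bands in a [time, freq] array. """
    masked = features.copy()
    time_steps, freq_bins = masked.shape
    max_masked_t = int(ratio_t * time_steps)
    for _ in range(mf):                                   # frequency masks
        f = np.random.randint(0, F + 1)
        f0 = np.random.randint(0, max(1, freq_bins - f))
        masked[:, f0:f0 + f] = 0
    for _ in range(mt):                                   # time masks
        t = min(np.random.randint(0, T + 1), max_masked_t)
        t0 = np.random.randint(0, max(1, time_steps - t))
        masked[t0:t0 + t, :] = 0
    return masked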
Example #3
def test_utils_load():
    # The model can also be loaded by name, but then it has to live
    # in the models directory.
    deepspeech = load('tests/models/test')
    assert isinstance(deepspeech, DeepSpeech)
Example #4
from matplotlib import pyplot as plt
from source import utils
plt.rc('text', usetex=True)
plt.rc('font', family='serif')

# Set up data
evaluation_regular, *_ = utils.load('../out/regular/results.bin')
evaluation_robust_1, *_ = utils.load('../out/robust/results.0.1.bin')
evaluation_robust_2, *_ = utils.load('../out/robust/results.0.2.bin')
evaluation_robust_3, *_ = utils.load('../out/robust/results.0.3.bin')
evaluations = [
    evaluation_regular, evaluation_robust_1, evaluation_robust_2,
    evaluation_robust_3
]

no_ticks = {'xticks': [], 'yticks': []}
fig, axes = plt.subplots(4, 4, figsize=[3, 3], subplot_kw=no_ticks)

# Plot the top misclassified samples, one row per model
for ax_row, evaluation in zip(axes, evaluations):
    (epsilon, accuracy, top_misclassified_images,
     misclassified_logits) = evaluation[0]
    for i, ax in enumerate(ax_row):
        try:
            image, logit, adversarial_image = top_misclassified_images[i]
            ax.imshow(1 - adversarial_image[..., 0], cmap='gray')
        except IndexError:
            ax.axis('off')

axes[0, 0].text(-10, 14, 'Regular', size=8, ha="right", va="center")
axes[1, 0].text(-10, 14, r'$\epsilon=0.1$', size=8, ha="right", va="center")
axes[2, 0].text(-10, 14, r'$\epsilon=0.2$', size=8, ha="right", va="center")
axes[3, 0].text(-10, 14, r'$\epsilon=0.3$', size=8, ha="right", va="center")
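
Each evaluation entry above is unpacked as (epsilon, accuracy, top_misclassified_images, misclassified_logits); under that same assumption, a quick textual summary of the loaded results could look like this.

# Assumed tuple layout, matching the unpacking in the plotting loop above.
names = ['regular', 'robust 0.1', 'robust 0.2', 'robust 0.3']
for name, evaluation in zip(names, evaluations):
    for epsilon, accuracy, *_ in evaluation:
        print(f'{name}: accuracy {accuracy:.3f} at epsilon {epsilon:.2f}')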
Example #5
def deepspeech() -> DeepSpeech:
    return load('tests/models/test')

def prepare(name):
    # Load a stored evaluation and keep every fifth entry (starting at
    # index 5), pairing each epsilon with its misclassified logits.
    evaluation_robust, *_ = utils.load(name)
    data = [(epsilon, misclassified_logits)
            for epsilon, *_, misclassified_logits in evaluation_robust[5::5]]
    return data
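
A possible usage of prepare, reusing a result file name from the plotting example above; the path and the later unpacking are only illustrative.

# Hypothetical call; assumes the same (epsilon, ..., misclassified_logits)
# tuple layout as in the plotting example.
data_robust_1 = prepare('../out/robust/results.0.1.bin')
for epsilon, misclassified_logits in data_robust_1:
    print(f'epsilon={epsilon:.2f}: {len(misclassified_logits)} misclassified samples')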