Example #1
  def testMetaLearnerCanPredictWithConvolutionalModel(self):
    model = self.create_convolutional_model()
    wrapper = create_model_wrapper(model)
    meta = create_meta_learner(wrapper, input_type='sequences')
    meta.compile(loss=model.loss, optimizer='adam')

    batch = next(self.generator(return_sequences=True))
    self.assertEqual((1, 1, 2, 1), meta.predict(batch[0]).shape)
Example #2
  def testMetaLearnerCanPredict(self):
    model = self.create_model()
    wrapper = create_model_wrapper(model, sparse=True, num_sparse_params=4)
    meta = create_meta_learner(wrapper)
    meta.compile(loss=model.loss, optimizer='adam')

    batch = next(self.generator())
    self.assertEqual((1, 2, 1), meta.predict(batch[0]).shape)
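Both predict tests exercise only the wiring: wrap the model, build the meta-learner, compile it with the wrapped model's loss, and check that a single predicted batch has the expected shape. The fixture helpers (create_model, create_convolutional_model, generator) are not shown in these snippets; below is a minimal sketch of a compatible generator, with purely illustrative shapes. This is an assumption about the fixture, not the project's actual code:

import numpy as np

def generator(return_sequences=False):
    # Hypothetical fixture: yields (inputs, targets) pairs forever,
    # as Keras generators do. Shapes here are illustrative only; the
    # real fixture builds episodes matching the wrapped model.
    while True:
        shape = (1, 1, 2, 1) if return_sequences else (1, 2, 1)
        x = np.random.rand(*shape).astype('float32')
        yield x, x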
Example #3
  def testMetaLearnerCanOverfitWithConvolutionalModel(self):
    np.random.seed(0)

    model = self.create_convolutional_model()
    wrapper = create_model_wrapper(model)
    meta = create_meta_learner(wrapper, input_type='sequences')
    meta.compile(loss=model.loss, optimizer='adam')

    generator = self.generator(return_sequences=True)
    history = meta.fit_generator(generator, steps_per_epoch=100, epochs=5)

    loss = history.history["loss"]
    self.assertGreater(loss[0], loss[-1])
    self.assertLess(loss[-1], 0.05)
Example #4
  def testMetaLearnerCanOverfit(self):
    np.random.seed(0)

    model = self.create_model()
    wrapper = create_model_wrapper(model)
    meta = create_meta_learner(wrapper)
    meta.compile(loss=model.loss, optimizer='adam')

    generator = self.generator()
    history = meta.fit_generator(generator, steps_per_epoch=100, epochs=5)

    loss = history.history["loss"]
    self.assertGreater(loss[0], loss[-1])
    self.assertLess(loss[-1], 0.05)
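Both overfitting tests follow the same recipe: fix the NumPy seed so the synthetic data stream is reproducible, train the meta-learner for five epochs of 100 steps, and then require that the final loss is both lower than the initial loss and below 0.05, i.e. that the meta-learner can memorise the small training stream.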
Example #5
        return_sequences = False

    if len(sys.argv) >= 12:
        subsampling_factor = int(sys.argv[9])
        left_context = int(sys.argv[10])
        right_context = int(sys.argv[11])
    else:
        subsampling_factor = 1
        left_context = 0
        right_context = 0

    model = load_acoustic_model(model_path, adaptation_type)
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    model.summary()

    wrapper = create_model_wrapper(model)
    meta = create_meta_learner(wrapper, units=20, input_type=input_type)
    meta.compile(
        loss=model.loss,
        optimizer=Adam(),
        metrics=['accuracy']
    )
    meta.summary()

    params = get_model_weights(model)
    num_train_batches, train_generator, num_val_batches, val_generator = load_data(
        params, feats, utt2spk, adapt_pdfs, test_pdfs,
        subsampling_factor=subsampling_factor,
        left_context=left_context,
        right_context=right_context,
        return_sequences=return_sequences)
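The snippet ends once the generators are loaded. With the variables it defines in scope, training would proceed with a standard Keras call; the following is a sketch under the assumption that fit_generator is used, with a made-up epoch count:

# Sketch only: train the meta-learner on the loaded generators.
meta.fit_generator(train_generator,
                   steps_per_epoch=num_train_batches,
                   validation_data=val_generator,
                   validation_steps=num_val_batches,
                   epochs=10)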
Example #6
    output_path = sys.argv[7]
    subsampling_factor = int(sys.argv[8])
    left_context = int(sys.argv[9])
    right_context = int(sys.argv[10])
    num_frames = int(sys.argv[11])
    meta_learner_mode = sys.argv[12]

    num_epochs = 20
    batch_size = 4

    model = load_model(model_path, adaptation_type)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    wrapper = create_model_wrapper(model, batch_size=batch_size)
    meta = create_meta_learner(wrapper,
                               meta_learner_type='lr_per_layer',
                               mode=meta_learner_mode)
    meta.compile(loss=model.loss, optimizer=Adam(), metrics=['accuracy'])
    meta.summary()

    model_params = get_model_weights(model)
    utt_to_adapt_pdfs = load_utt_to_pdfs(adapt_pdfs)
    utt_to_test_pdfs = load_utt_to_pdfs(test_pdfs)

    params_dataset = load_params_generator(model_params)
    params_dataset = params_dataset.batch(batch_size, drop_remainder=True)

    train_dataset = load_dataset_for_maml(train_feats,
                                          utt_to_adapt_pdfs,
Example #7
    m_in.get_layer('maml_1').wrapper.set_weights(
        m_in.get_layer('model_wrapper_2').get_weights())

    try:
        lda = m_in.get_layer('lda_1')
        lda_weights = [x.flatten() for x in lda.get_weights()]
    except ValueError:
        # No LDA layer in the input model; fall back to empty LDA weights.
        lda_weights = []

    maml = m_in.get_layer('maml_1')
    m_out = create_model(maml.wrapper, m_in.get_layer('lda_1'))

    model_weights = np.concatenate(lda_weights +
                                   [maml.get_weights()[0].flatten()])
    set_model_weights(m_out, model_weights, maml.wrapper)

    assert converted_models_produce_correct_output(m_in, m_out)

    m_out.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    m_out.save(model_out)
    m_out.summary()

    adapter = create_adapter(create_model_wrapper(m_out), maml.num_steps,
                             maml.use_lr_per_step, maml.use_kld_regularization,
                             maml.get_weights()[1:])
    adapter.save(meta_out)
    adapter.summary()

    print(maml.get_weights()[1])
    print(models)

    for model_path in models:
        model = keras.models.load_model(model_path,
                                        custom_objects={
                                            'FeatureTransform':
                                            FeatureTransform,
                                            'LHUC': LHUC,
                                            'Renorm': Renorm,
                                            'AdamW': AdamW
                                        })
        params.append(get_model_weights(model))

    params = np.array(params)
    coeffs = np.ones((1, params.shape[0])) / float(params.shape[0])
    average_model = create_model_average(create_model_wrapper(model), params,
                                         coeffs)
    average_model.summary()

    utt_to_spk = load_utt_to_spk(utt2spk)
    utt_to_pdfs = load_utt_to_pdfs(pdfs)

    train_dataset = load_dataset(train_data,
                                 utt_to_spk,
                                 utt_to_pdfs,
                                 chunk_size=8,
                                 subsampling_factor=1,
                                 left_context=left_context,
                                 right_context=right_context)
    train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
    train_dataset = train_dataset.prefetch(1024)
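The example stops once the tf.data pipeline is batched and prefetched. A sketch of how the averaged model could then be trained on it; the compile settings, step count, and epoch count are assumptions, not the script's actual values:

# Sketch: fit the interpolation coefficients of the averaged model.
average_model.compile(loss='sparse_categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
average_model.fit(train_dataset, steps_per_epoch=100, epochs=5)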