def main_train(params, clf: Type[Classifier]):
    """Build, prepare data for, and train a keyword classifier.

    Args:
        params: configuration dict; reads 'output_path', 'batch_size',
            'sample_size', 'audio_path', 'validation_list_path', 'sample',
            'epochs' and 'tensorboard_root'.
        clf: Classifier subclass to instantiate and train.

    Raises:
        ValueError: if the loaded train or validation set is empty.
    """
    model = clf(L, LABELS)
    # Unique run name: model name plus a unix timestamp.
    name = "{}--{}".format(model.name, int(datetime.now().timestamp()))
    print(params)
    checkpoints_path = os.path.join(params['output_path'], name + '_weights')
    os.makedirs(checkpoints_path, exist_ok=True)
    batch_size = params['batch_size']
    n = params['sample_size']

    train_data, validate_data = load_train_data(params['audio_path'],
                                                params['validation_list_path'])
    # Real exceptions instead of asserts: asserts are stripped under `python -O`.
    if len(train_data) == 0:
        raise ValueError("no training data loaded")
    if len(validate_data) == 0:
        raise ValueError("no validation data loaded")

    wav_reader = SimpleWavFileReader(L)
    silence_data = get_silence(train_data, wav_reader)

    # Training chain adds augmentation (noise, shift) on top of the
    # normalization steps shared with the validation chain below.
    train_sound_chain = SoundChain(
        SimpleWavFileReader(L),
        sp.AdjustLenWavProcessor(silence_data, L, L),
        sp.AddNoiseWavProcessor(silence_data, L, L, 20),
        sp.ShiftWavProcessor(silence_data, L, L),
        sp.EmphasisWavProcessor(silence_data, L, L, 0.97),
        sp.NormalizeWavProcessor(silence_data, L, L),
        sp.ReshapeWavProcessor(silence_data, L, L),
        sp.MinMaxWavProcessor(silence_data, L, L, (0, 1)),
    )

    # Validation chain: same normalization, no augmentation.
    valid_sound_chain = SoundChain(
        SimpleWavFileReader(L),
        sp.AdjustLenWavProcessor(silence_data, L, L),
        sp.EmphasisWavProcessor(silence_data, L, L, 0.97),
        sp.NormalizeWavProcessor(silence_data, L, L),
        sp.ReshapeWavProcessor(silence_data, L, L),
        sp.MinMaxWavProcessor(silence_data, L, L, (0, 1)),
    )

    if params['sample']:
        print("Get small sample")
        train_data, validate_data = get_sample_data(train_data, validate_data,
                                                    n)

    train_gen = train_generator(train_data, batch_size, train_sound_chain, n=n)
    validate_gen = valid_generator(validate_data, batch_size,
                                   valid_sound_chain, True)

    model.train(
        train_gen, validate_gen,
        # The 'chekpoints_path' key spelling is part of the options dict that
        # model.train() consumes, so the key is kept verbatim.
        dict(epochs=params['epochs'],
             batch_size=batch_size,
             tensorboard_dir=os.path.join(params['tensorboard_root'], name),
             chekpoints_path=checkpoints_path,
             # Keras expects an integer step count; round up so every
             # sample is covered each epoch (was a float division).
             steps_per_epoch=int(np.ceil(n * len(LABELS) / batch_size)),
             validation_steps=int(np.ceil(validate_data.shape[0] /
                                          batch_size))))
# ===== Beispiel #2 (score: 0) =====
 def test_train_generator(self):
     """train_generator yields a batch of n samples per label with one-hot targets."""
     train_df, _ = load_train_data(self.audio_path,
                                   self.validation_list_path)
     reader = SimpleWavFileReader(L)
     silence = get_silence(train_df, reader)
     chain = SoundChain(
         SimpleWavFileReader(L),
         sp.AdjustLenWavProcessor(silence, L, L),
         sp.EmphasisWavProcessor(silence, L, L, 0.97),
         sp.NormalizeWavProcessor(silence, L, L),
         sp.ReshapeWavProcessor(silence, L, L),
         sp.MinMaxWavProcessor(silence, L, L, (0, 1)),
     )
     samples_per_label = 2
     batch = next(train_generator(train_df, 64, chain, samples_per_label))
     expected_rows = len(LABELS) * samples_per_label
     self.assertEqual(batch[0].shape, (expected_rows, L, 1))
     self.assertEqual(batch[1].shape, (expected_rows, len(LABELS)))
# ===== Beispiel #3 (score: 0) =====
# Freeze the first 89 layers of the (pretrained) backbone so only the
# remaining head layers are updated during fine-tuning.
for layer in model.layers[:89]:
    layer.trainable = False


# In[8]:


model.summary()


# In[9]:


# NOTE(review): `lr` is the legacy Keras argument name (newer releases use
# `learning_rate`) — kept as-is to match the Keras version this notebook targets.
optimizer = Adam(lr=1e-3)
model.compile(optimizer, loss=earth_mover_loss)

# Persist only the best weights (lowest validation loss) to disk.
checkpoint = ModelCheckpoint('weights/mobilenet_weights.h5', monitor='val_loss', verbose=1, save_weights_only=True, save_best_only=True,
                             mode='min')
tensorboard = TensorBoardBatch()
callbacks = [checkpoint, tensorboard]

batchsize = 200
epochs = 20

# Train from generators; the float `//` results are truncated by Keras,
# giving ~250k training / ~5k validation samples per epoch at this batch size.
model.fit_generator(train_generator(batchsize=batchsize),
                    steps_per_epoch=(250000. // batchsize),
                    epochs=epochs, verbose=1, callbacks=callbacks,
                    validation_data=val_generator(batchsize=batchsize),
                    validation_steps=(5000. // batchsize))

# ===== Beispiel #4 (score: 0) =====
        log_dir=tboard_logdir)
    # Save the model every 5 epochs, resuming the epoch counter when
    # training restarts from a checkpoint.
    model_checkpoint_callback = ModelCheckpointer(model_saver,
                                                  save_freq=5,
                                                  epoch_start=initial_epoch)

    # Multiply the learning rate by 0.35 whenever training loss plateaus
    # for 75 epochs, but never drop below 0.1% of the initial rate.
    lr_reduce_callback = ReduceLROnPlateau(
        monitor='loss',  # monitor our training loss. Could also be val_loss
        factor=0.35,
        patience=75,
        verbose=1,
        mode="min",
        min_lr=learning_rate * 0.001)

    # Pin the whole fit loop to the configured device (CPU/GPU string).
    with tf.device(TF_DEVICE):
        mcm_model.fit_generator(
            data.train_generator(data_loader, BATCH_SIZE),
            validation_data=data.test_generator(
                data_loader, len(data_loader.testData)
            ),  #test with the whole testing set, because it is not large.
            validation_steps=1,
            validation_freq=5,  # how many training epochs before we validate.
            epochs=EPOCHS,
            steps_per_epoch=BATCHES_PER_EPOCH,
            callbacks=[
                lr_reduce_callback, tensorboard_callback,
                model_checkpoint_callback
            ],
            initial_epoch=initial_epoch)

        mcm_model.evaluate_generator(data.test_generator(
            data_loader, len(data_loader.testData)),
# ===== Beispiel #5 (score: 0) =====
                             save_best_only=True,
                             mode='min')
tensorboard = TensorBoardBatch()
callbacks = [checkpoint, tensorboard]

# ---- (fine-tune build) extract the output of the 'dense_1' layer ----
layer_name = 'dense_1'
# NOTE(review): `input=`/`output=` are the Keras 1.x Model kwargs (Keras 2
# renamed them `inputs=`/`outputs=`) — kept to match the version in use.
intermediate_layer_model = Model(
    input=base_model.input, output=model_main.get_layer(layer_name).output)
# Attach a fresh softmax classification head on the intermediate features.
outputs_inter = Dense(FLAGS.labels_dim,
                      activation='softmax')(intermediate_layer_model.output)
model_inter = Model(input=base_model.input, output=outputs_inter)
model = model_inter
model.summary()

# ==== [3] Model compilation ====
# Compile with cross-entropy loss; accuracy is reported during training.
model.compile(optimizer=Adam(lr=FLAGS.lr),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# ==== [4] Model training ====
# NOTE(review): these assignments shadow the train_generator/val_generator
# functions with their returned generator objects — fine for a one-shot
# script, but the functions cannot be called again afterwards.
train_generator = train_generator(batchsize=FLAGS.batch_size)
validation_generator = val_generator(batchsize=FLAGS.batch_size)
model.fit_generator(train_generator,
                    steps_per_epoch=(150000. // FLAGS.batch_size),
                    epochs=FLAGS.epochs,
                    verbose=1,
                    callbacks=callbacks,
                    validation_data=validation_generator,
                    validation_steps=(50000. // FLAGS.batch_size))