Example #1
class EvalCallback(Callback):
    """Tensorpack callback that reports evaluation accuracy once per epoch."""

    def _setup_graph(self):
        # Build a predictor wired to the model's eval input/output tensors,
        # and an evaluation dataflow sized from the eval config.
        in_names = get_eval_input_names()
        out_names = get_eval_output_names()
        self.pred = self.trainer.get_predictor(in_names, out_names)
        self.data_loader = DataLoader(audio_meta, hp.eval.batch_size).dataflow()

    def _trigger_epoch(self):
        # NOTE(review): get_data() presumably returns a fresh generator on
        # every call, so each epoch evaluates only a single batch (and possibly
        # the same one each time unless the dataflow shuffles) — confirm this
        # is the intended behavior.
        _, mel_spec, speaker_id = next(self.data_loader.get_data())
        (acc,) = self.pred(mel_spec, speaker_id)
        self.trainer.monitors.put_scalar('eval/accuracy', acc)
Example #2
        # Training dataflow with a large prefetch buffer; worker-thread count
        # scales to roughly 2/3 of the available CPU cores.
        df = DataLoader(audio_meta, hp.train.batch_size).dataflow(nr_prefetch=5000, nr_thread=int(multiprocessing.cpu_count() // 1.5))

    # set logger for event and model saver
    logger.set_logger_dir(hp.logdir)
    # NOTE(review): the hard-coded `True` permanently selects the training
    # path, leaving the `else` branch (dataflow speed benchmark) unreachable —
    # presumably a manual debug toggle; confirm before cleaning up.
    if True:
        # Assemble the tensorpack training configuration: model, queue-based
        # input pipeline over `df`, and the checkpoint/eval callbacks.
        train_conf = TrainConfig(
            model=ClassificationModel(num_classes=audio_meta.num_speaker, **hp.model),
            data=FlexibleQueueInput(df, capacity=500),
            callbacks=[
                ModelSaver(checkpoint_dir=hp.logdir),
                EvalCallback()
            ],
            steps_per_epoch=hp.train.steps_per_epoch,
            # session_config=session_config
        )

        # Resume from an explicit checkpoint if given, otherwise from the
        # latest checkpoint in logdir — unless a restart (args.r) is requested.
        ckpt = args.ckpt if args.ckpt else tf.train.latest_checkpoint(hp.logdir)
        if ckpt and not args.r:
            train_conf.session_init = SaverRestore(ckpt)

        # Restrict visible GPUs to the requested list and size the tower
        # count to match.
        if args.gpu:
            os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
            train_conf.nr_tower = len(args.gpu.split(','))

        # NOTE(review): the trainer is sized from hp.train.num_gpu while
        # nr_tower above is derived from args.gpu — these can disagree when
        # --gpu is passed; verify which setting is authoritative.
        trainer = SyncMultiGPUTrainerReplicated(hp.train.num_gpu)

        launch_train_with_config(train_conf, trainer=trainer)
    else:
        # Benchmark-only path (currently unreachable): measure raw dataflow
        # throughput by draining batches without training.
        df = TestDataSpeed(df, 100000)
        for _ in df.get_data():
            pass