Пример #1
0
    def train(self, restore=True):
        """Adversarially train the generator/discriminator pair.

        Within each epoch, batches where ``count % config.D_ITER > 0``
        update the discriminator and the remaining batches update the
        generator.  Evaluation runs every 5 epochs (and on epoch 1), and
        the checkpoint at config.MODEL_PATH is overwritten whenever mean
        STOI improves.

        Args:
            restore: if truthy, restore weights from config.MODEL_PATH
                instead of initializing variables from scratch.
        """
        logging.info('G max iteration: %d, D iter %d times per G iter' %
                     (config.MAX_ITERATION, config.D_ITER))
        logging.info('model save path: %s' % config.MODEL_PATH)

        tr_gen_g = gen_data.Generator(config.data, 'train', self.g_ext, self.d_ext)
        ev_gen_g = gen_data.Generator(config.data, 'eval', self.g_ext, self.d_ext)

        with self.graph.as_default():
            saver = tf.train.Saver()
            with tf.Session() as sess:
                eval_runner = InnerEvalRunner(self, sess)
                if restore:
                    # Bug fix: log the checkpoint path being restored, not
                    # the boolean `restore` flag (previously printed "True").
                    logging.info('restore model from %s' % config.MODEL_PATH)
                    saver.restore(sess, config.MODEL_PATH)
                else:
                    sess.run(tf.global_variables_initializer())
                self.best_score = -np.inf
                for epoch in range(1, config.MAX_ITERATION + 1):
                    cost_d, cost_g, emd = 0., 0., 0.
                    count = 0.
                    for bat in tr_gen_g.asyn_gen():
                        count += 1
                        if count % config.D_ITER > 0:
                            # Discriminator step: disable G dropout, keep D dropout.
                            feed = {self.mix_feat: bat.mix_feat, self.mix_mag: bat.mix, self.in_clean: bat.clean}
                            nn_model.feed_dropout_keep_prob(feed, self.g_keep, disable=True)
                            nn_model.feed_dropout_keep_prob(feed, self.d_keep)
                            _, c = sess.run(fetches=[self.opt_d, self.d_loss], feed_dict=feed)
                            cost_d += c
                        else:
                            # Generator step: keep G dropout, disable D dropout.
                            feed = {self.mix_feat: bat.mix_feat, self.mix_mag: bat.mix, self.in_clean: bat.clean}
                            nn_model.feed_dropout_keep_prob(feed, self.g_keep)
                            nn_model.feed_dropout_keep_prob(feed, self.d_keep, disable=True)
                            _, c, _emd = sess.run(fetches=[self.opt_g, self.g_loss, self.emd], feed_dict=feed)
                            cost_g += c
                            emd += _emd

                    # Normalize accumulated losses by the approximate number of
                    # D / G steps taken this epoch (max(...) guards against an
                    # empty generator).  NOTE(review): the divisor assumes a
                    # (D_ITER + 1)-batch cycle while the scheduling above cycles
                    # every D_ITER batches -- confirm against training logs.
                    cost_d /= max(1., count) / (config.D_ITER + 1) * config.D_ITER
                    cost_g /= max(1., count) / (config.D_ITER + 1)
                    emd /= max(1., count) / (config.D_ITER + 1)
                    if epoch % 5 == 0 or epoch <= 1:
                        evaler = evaluate.Evaler(ev_gen_g, eval_runner)
                        mean_stoi, mean_pesq, mean_ssnr = evaler.run_eval()
                        logging.info("Epoch: %04d d_cost=%.4f g_cost=%.4f emd=%.4f stoi=%.4f pesq=%.4f "
                                     "ssnr=%.4f" % (epoch, cost_d, cost_g, emd, mean_stoi, mean_pesq, mean_ssnr))

                        if mean_stoi > self.best_score:
                            # Keep only the best-STOI checkpoint.
                            saver.save(sess, config.MODEL_PATH)
                            self.best_score = mean_stoi
                logging.info("Optimization Done! best eval score: %.9f" % self.best_score)
Пример #2
0
 def test(self):
     """Evaluate the restored model on the test set at every configured SNR.

     Restores weights from config.MODEL_PATH, then for each SNR level runs
     STOI/PESQ/SSNR evaluation, writing enhanced outputs beneath
     ``<vpath>/test.out/<snr>`` and logging the per-SNR metrics.
     """
     with self.graph.as_default():
         saver = tf.train.Saver()
         with tf.Session() as sess:
             saver.restore(sess, config.MODEL_PATH)
             runner = InnerEvalRunner(self, sess)
             for snr in config.data['test']['snr']:
                 gen = gen_data.Generator(config.data, 'test', self.g_ext, self.d_ext, snr)
                 out_dir = os.path.join(gen.vpath, 'test.out', str(snr))
                 post = TestPostRunner(out_dir, config.data['samplerate'])
                 mean_stoi, mean_pesq, mean_ssnr = evaluate.Evaler(gen, runner, post).run_eval()
                 logging.info('snr=%3d: stoi=%.4f, pesq=%.4f, ssnr=%.4f' %
                              (snr, mean_stoi, mean_pesq, mean_ssnr))
             # Relies on the loop having run at least once (out_dir bound).
             logging.info('Test output saved in %s' % os.path.dirname(out_dir))
Пример #3
0
    def train(self, restore=True):
        """Train the SENN model, evaluating and checkpointing every epoch.

        Args:
            restore: if truthy, restore weights from config.MODEL_PATH
                instead of initializing variables from scratch.
        """
        logging.info("train SENN with max iteration: %g" %
                     config.MAX_ITERATION)
        logging.info('model save path: %s' % config.MODEL_PATH)

        tr_gen_g = data.gen_data.Generator(config.data, 'train')
        ev_gen_g = data.gen_data.Generator(config.data, 'eval')

        with self.graph.as_default():
            saver = tf.train.Saver()
            # Launch the graph
            with tf.Session() as sess:
                eval_runner = InnerEvalRunner(self, sess)
                # Initializing the variables
                if restore:
                    saver.restore(sess, config.MODEL_PATH)
                else:
                    sess.run(tf.global_variables_initializer())
                self.best_score = -np.inf
                for epoch in range(1, config.MAX_ITERATION + 1):
                    # cost_sum is the raw (un-averaged) total loss for the
                    # epoch; the unused batch counter was removed.
                    cost_sum = 0.
                    for bat in tr_gen_g.asyn_gen():
                        feed = {
                            self.mix_feat: bat.mix_feat,
                            self.clean: bat.clean,
                            self.mix_mag: bat.mix
                        }
                        nn_model.feed_dropout_keep_prob(feed, self.keep)
                        _, c = sess.run(fetches=[self.optimizer, self.cost],
                                        feed_dict=feed)
                        cost_sum += c
                    # The original guard `epoch % 1 == 0 or epoch <= 1` was
                    # always true, so evaluation unconditionally runs each
                    # epoch; the dead condition is removed.
                    evaler = evaluate.Evaler(ev_gen_g, eval_runner)
                    mean_stoi, mean_pesq, mean_ssnr = evaler.run_eval()
                    logging.info(
                        "Epoch: %04d cost=%.9f stoi=%.4f pesq=%.4f ssnr=%.4f"
                        %
                        (epoch, cost_sum, mean_stoi, mean_pesq, mean_ssnr))

                    if mean_stoi > self.best_score:
                        # Keep only the best-STOI checkpoint.
                        saver.save(sess, config.MODEL_PATH)
                        self.best_score = mean_stoi
                logging.info("Optimization Done! best eval score: %.9f" %
                             self.best_score)