Example #1
                # Reconstruction loss minus the classifier term, weighted by
                # the current annealing coefficient (adversarial objective).
                loss = loss_rec - current_alpha * loss_clf
                reset_grad([self.Encoder, self.Decoder])
                loss.backward()
                grad_clip([self.Encoder, self.Decoder], self.hps.max_grad_norm)
                self.ae_opt.step()
                info = {
                    f'{flag}/loss_rec': loss_rec.item(),
                    f'{flag}/G_loss_clf': loss_clf.item(),
                    f'{flag}/alpha': current_alpha,
                    f'{flag}/G_acc': acc,
                }
                slot_value = (iteration + 1, self.hps.iters) + tuple(info.values())
                log = 'G:[%06d/%06d], loss_rec=%.3f, loss_clf=%.2f, alpha=%.2e, acc=%.2f'
                print(log % slot_value)
                if iteration % 100 == 0:
                    for tag, value in info.items():
                        self.logger.scalar_summary(tag, value, iteration + 1)
                if iteration % 1000 == 0 or iteration + 1 == self.hps.iters:
                    self.save_model(model_path, iteration)


if __name__ == '__main__':
    hps = Hps()
    hps.load('./hps/v7.json')
    hps_tuple = hps.get_tuple()
    dataset = myDataset(
        '/home/daniel/Documents/voice_integrador/vctk.h5',
        '/home/daniel/Documents/programacion/multitarget-voice-conversion-vctk/preprocess/speaker_id_by_gender.json')
    data_loader = DataLoader(dataset)
    solver = Solver(hps_tuple, data_loader)
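
The helpers reset_grad and grad_clip called in the training step are not defined in this snippet. A minimal sketch of what they plausibly do in PyTorch (an assumption, not the project's actual implementation):

import torch

def reset_grad(nets):
    # Clear accumulated gradients on every module before backward().
    for net in nets:
        net.zero_grad()

def grad_clip(nets, max_grad_norm):
    # Clip each module's gradient norm in place before the optimizer step.
    for net in nets:
        torch.nn.utils.clip_grad_norm_(net.parameters(), max_grad_norm)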
Example #2
def predict(model, iterator, output_path):
    # Writes one line of space-separated word indices per decoded sequence.
    with open(output_path, 'w') as f_out:
        for batch_x in iterator:  # assumes the iterator yields input batches
            all_result = model.predict_step(batch_x)
            for result in all_result:
                for word_idx in result:
                    f_out.write('{} '.format(word_idx))
                f_out.write('\n')

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-hps_path', default='./hps/cd_v3.json')
    parser.add_argument('-vocab_path', default='/home/jjery2243542/datasets/summary/structured/26693_50_30/vocab.pkl')
    parser.add_argument('-model_path', default='./model/model.ckpt-2999')
    parser.add_argument('-dataset_path', default='/home/jjery2243542/datasets/summary/structured/26693_50_30/giga_40_10.h5')
    parser.add_argument('-dataset_type', default='valid')
    parser.add_argument('-output_path', default='result.txt')
    args = parser.parse_args()
    hps = Hps()
    hps.load(args.hps_path)
    hps_tuple = hps.get_tuple()
    print(hps_tuple)
    vocab = Vocab(args.vocab_path, args.dataset_path + '.unk.json')
    model = PointerModel(hps_tuple, vocab)
    model.load_model(args.model_path)
    dg = DataGenerator(args.dataset_path)
    iterator = dg.iterator(
        batch_size=hps_tuple.batch_size, 
        dataset_type=args.dataset_type, 
        infinite=False, 
        shuffle=False
    )
    predict(model, iterator, args.output_path)
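
The output file holds raw word indices, one decoded sequence per line. Mapping them back to tokens needs the vocabulary's index-to-word table; a hypothetical post-processing sketch (the idx2word dict is an assumption, since the Vocab class above may expose this mapping differently):

def indices_to_text(result_path, idx2word):
    # idx2word: plain dict {index: token}; hypothetical, see note above.
    sentences = []
    with open(result_path) as f:
        for line in f:
            tokens = [idx2word.get(int(i), '<unk>') for i in line.split()]
            sentences.append(' '.join(tokens))
    return sentences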
Example #3
    def valid_step(self, batch_x, batch_y):
        # Evaluation: feed keep-prob 1.0 so dropout is disabled.
        loss = self.sess.run(
            self._log_loss,
            feed_dict={self.x: batch_x, self.y: batch_y, self.kp: 1.0}
        )
        return loss

    def train_step(self, batch_x, batch_y, coverage=False):
        # One optimizer step; switch to the coverage objective when asked.
        if not coverage:
            _, loss = self.sess.run(
                [self._nll_opt, self._log_loss],
                feed_dict={self.x: batch_x, self.y: batch_y, self.kp: self._hps.keep_prob}
            )
        else:
            _, loss = self.sess.run(
                [self._coverage_opt, self._coverage_loss],
                feed_dict={self.x: batch_x, self.y: batch_y, self.kp: self._hps.keep_prob}
            )
        return loss

if __name__ == '__main__':
    vocab = Vocab()
    hps = Hps()
    hps.load('./hps/cd_v3.json')
    hps_tuple = hps.get_tuple()
    model = PointerModel(hps_tuple, vocab)
    model.init()
    print('model build OK')
    model.tt()  # helper defined on PointerModel; not shown in this snippet
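
Both steps toggle dropout through the kp feed: 1.0 disables it at validation, keep_prob enables it during training. A minimal self-contained sketch of that TF1 placeholder pattern, with hypothetical layer sizes and names:

import tensorflow as tf  # TF1 graph API (tf.compat.v1 under TF2)

x = tf.placeholder(tf.float32, [None, 128], name='x')
kp = tf.placeholder(tf.float32, [], name='keep_prob')
h = tf.layers.dense(x, 64, activation=tf.nn.relu)
h = tf.nn.dropout(h, keep_prob=kp)  # exact identity when kp is fed 1.0

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = [[0.0] * 128]
    sess.run(h, feed_dict={x: batch, kp: 1.0})  # evaluation: dropout off
    sess.run(h, feed_dict={x: batch, kp: 0.8})  # training: dropout on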