Example #1
def main(config):
    prepare_dirs_loggers(config, os.path.basename(__file__))

    corpus_client = corpora.NormMultiWozCorpus(config)

    dial_corpus = corpus_client.get_corpus()
    train_dial, valid_dial, test_dial = dial_corpus
    sample_shape = config.batch_size, config.state_noise_dim, config.action_noise_dim
    # evaluator = evaluators.BleuEvaluator(os.path.basename(__file__))
    evaluator = MultiWozEvaluator('SysWoz')
    # create the data loaders that feed the deep models
    train_feed = data_loaders.BeliefDbDataLoaders("Train", train_dial, config)
    valid_feed = data_loaders.BeliefDbDataLoaders("Valid", valid_dial, config)
    test_feed = data_loaders.BeliefDbDataLoaders("Test", test_dial, config)
    model = GanRnnAgent(corpus_client, config)
    load_context_encoder(
        model, os.path.join(config.log_dir, config.encoder_sess, "model_lirl"))

    if config.forward_only:
        test_file = os.path.join(
            config.log_dir, config.load_sess,
            "{}-test-{}.txt".format(get_time(), config.gen_type))
        dump_file = os.path.join(config.log_dir, config.load_sess,
                                 "{}-z.pkl".format(get_time()))
        model_file = os.path.join(config.log_dir, config.load_sess, "model")
    else:
        test_file = os.path.join(
            config.session_dir,
            "{}-test-{}.txt".format(get_time(), config.gen_type))
        dump_file = os.path.join(config.session_dir,
                                 "{}-z.pkl".format(get_time()))
        model_file = os.path.join(config.session_dir, "model")

    if config.use_gpu:
        model.cuda()

    print("Evaluate initial model on Validate set")
    engine.disc_validate(model, valid_feed, config, sample_shape)
    print("Start training")

    if config.forward_only is False:
        try:
            engine.gan_train(model, train_feed, valid_feed, test_feed, config,
                             evaluator)
        except KeyboardInterrupt:
            print("Training stopped by keyboard.")
    print("Trainig Done! Start Testing")
    model.load_state_dict(torch.load(model_file))
    engine.disc_validate(model, valid_feed, config, sample_shape)
    engine.disc_validate(model, test_feed, config, sample_shape)

    # dialog_utils.generate_with_adv(model, test_feed, config, None, num_batch=None)
    # selected_clusters, index_cluster_id_train = utt_utils.latent_cluster(model, train_feed, config, num_batch=None)
    # _, index_cluster_id_test = utt_utils.latent_cluster(model, test_feed, config, num_batch=None)
    # _, index_cluster_id_valid = utt_utils.latent_cluster(model, valid_feed, config, num_batch=None)
    # selected_outs = dialog_utils.selective_generate(model, test_feed, config, selected_clusters)
    # print(len(selected_outs))
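
Across these examples, `sample_shape` is just the tuple (batch_size, state_noise_dim, action_noise_dim) that the validation helpers unpack when drawing generator noise. A minimal sketch of that unpacking, assuming Gaussian noise for both branches (the helper name `sample_noise` is illustrative, not from the repo):

import torch

def sample_noise(sample_shape, use_gpu=False):
    # Unpack the (batch, state_noise, action_noise) tuple built in main().
    batch_size, state_noise_dim, action_noise_dim = sample_shape
    state_z = torch.randn(batch_size, state_noise_dim)    # noise for the state branch
    action_z = torch.randn(batch_size, action_noise_dim)  # noise for the action branch
    if use_gpu:
        state_z, action_z = state_z.cuda(), action_z.cuda()
    return state_z, action_z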
Example #2
def main(config):
    prepare_dirs_loggers(config, os.path.basename(__file__))
    manualSeed = config.seed
    random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    np.random.seed(manualSeed)
    sample_shape = config.batch_size, config.state_noise_dim, config.action_noise_dim

    # evaluator = evaluators.BleuEvaluator(os.path.basename(__file__))
    evaluator = False

    train_feed = WoZGanDataLoaders("train", config)
    valid_feed = WoZGanDataLoaders("val", config)
    test_feed = WoZGanDataLoaders("test", config)


    # action2name = load_action2name(config)
    action2name = None
    corpus_client = None
    # model = GanAgent_AutoEncoder(corpus_client, config, action2name)
    # model = GanAgent_AutoEncoder_Encode(corpus_client, config, action2name)
    # model = GanAgent_AutoEncoder_State(corpus_client, config, action2name)
    if config.gan_type == 'wgan':
        model = WGanAgent_VAE_State(corpus_client, config, action2name)
    else:
        model = GanAgent_VAE_State(corpus_client, config, action2name)
    
    logger.info(summary(model, show_weights=False))
    model.discriminator.apply(weights_init)
    model.generator.apply(weights_init)
    model.vae.apply(weights_init)

    if config.forward_only:
        test_file = os.path.join(config.log_dir, config.load_sess,
                                 "{}-test-{}.txt".format(get_time(), config.gen_type))
        dump_file = os.path.join(config.log_dir, config.load_sess,
                                 "{}-z.pkl".format(get_time()))
        model_file = os.path.join(config.log_dir, config.load_sess, "model")
    else:
        test_file = os.path.join(config.session_dir,
                                 "{}-test-{}.txt".format(get_time(), config.gen_type))
        dump_file = os.path.join(config.session_dir, "{}-z.pkl".format(get_time()))
        model_file = os.path.join(config.session_dir, "model")
        vocab_file = os.path.join(config.session_dir, "vocab.json")
    

    if config.use_gpu:
        model.cuda()

    pred_list = []
    generator_samples = []
    print("Evaluate initial model on Validate set")
    model.eval()
    # policy_validate_for_human(model, valid_feed, config, sample_shape)
    disc_validate(model, valid_feed, config, sample_shape)
    _, sample_batch = gen_validate(model, valid_feed, config, sample_shape, -1)
    generator_samples.append([-1, sample_batch])
    machine_data, human_data = build_fake_data(model, valid_feed, config, sample_shape)

    
    model.train()
    print("Start VAE training")


    # # this is for the training of VAE. If you already have a pretrained model, you can skip this step.
    # if config.forward_only is False:
    #     try:
    #         engine.vae_train(model, train_feed, valid_feed, test_feed, config)
    #     except KeyboardInterrupt:
    #         print("Training stopped by keyboard.")
    # print("AutoEncoder Training Done ! ")
    # load_model_vae(model, config)
    
    
    # this is a pretrained VAE model; you can load it into the current model. TODO: move path to data_args
    path = './logs/2019-09-06T10:50:18.034181-mwoz_gan_vae.py'
    load_model_vae(model, path)
    
    print("Start GAN training")
    
    if config.forward_only is False:
        try:
            engine.gan_train(model, machine_data, train_feed, valid_feed, test_feed, config, evaluator, pred_list, generator_samples)
        except KeyboardInterrupt:
            print("Training stopped by keyboard.")
    print("Reward Model Training Done ! ")
    print("Saved path: {}".format(model_file))
Example #3
def main(config):
    prepare_dirs_loggers(config, os.path.basename(__file__))
    manualSeed = config.seed
    random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    np.random.seed(manualSeed)
    sample_shape = config.batch_size, config.state_noise_dim, config.action_noise_dim

    evaluator = evaluators.BleuEvaluator(os.path.basename(__file__))

    train_feed = WoZGanDataLoaders("train", config)
    valid_feed = WoZGanDataLoaders("val", config)
    test_feed = WoZGanDataLoaders("test", config)


    # action2name = load_action2name(config)
    action2name = None
    corpus_client = None
    if config.gan_type == 'gan' and config.input_type == 'sat':
        model = GanAgent_SAT_WoZ(corpus_client, config, action2name)
    else:
        raise ValueError("No such GAN types: {}".format(config.gan_type))
    logger.info(summary(model, show_weights=False))
    model.discriminator.apply(weights_init)
    model.generator.apply(weights_init)

    if config.state_type=='rnn':
        load_context_encoder(model, os.path.join(config.log_dir, config.encoder_sess, "model_lirl"))

    if config.forward_only:
        test_file = os.path.join(config.log_dir, config.load_sess,
                                 "{}-test-{}.txt".format(get_time(), config.gen_type))
        dump_file = os.path.join(config.log_dir, config.load_sess,
                                 "{}-z.pkl".format(get_time()))
        model_file = os.path.join(config.log_dir, config.load_sess, "model")
    else:
        test_file = os.path.join(config.session_dir,
                                 "{}-test-{}.txt".format(get_time(), config.gen_type))
        dump_file = os.path.join(config.session_dir, "{}-z.pkl".format(get_time()))
        model_file = os.path.join(config.session_dir, "model")
        vocab_file = os.path.join(config.session_dir, "vocab.json")
    
    

    if config.use_gpu:
        model.cuda()

    pred_list = []
    generator_samples = []
    print("Evaluate initial model on Validate set")
    model.eval()
    # policy_validate_for_human(model, valid_feed, config, sample_shape)
    disc_validate(model, valid_feed, config, sample_shape)
    _, sample_batch = gen_validate(model, valid_feed, config, sample_shape, -1)
    generator_samples.append([-1, sample_batch])
    machine_data, human_data = build_fake_data(model, valid_feed, config, sample_shape)

    
    model.train()
    print("Start training")

    if config.forward_only is False:
        try:
            engine.gan_train(model, machine_data, train_feed, valid_feed, test_feed, config, evaluator, pred_list, generator_samples)
        except KeyboardInterrupt:
            print("Training stopped by keyboard.")
    

    # save_data_for_tsne(human_data, machine_data, generator_samples, pred_list, config)
    print("Training Done ! ")
    '''
    model.load_state_dict(torch.load(model_file))
    print("Evaluate final model on Validate set")
    model.eval()
    policy_validate_for_human(model, valid_feed, config, sample_shape)
    disc_validate(model, valid_feed, config, sample_shape)
    gen_validate(model, valid_feed, config, sample_shape)

    print("Evaluate final model on Test set")
    policy_validate_for_human(model, test_feed, config, sample_shape)
    disc_validate(model, test_feed, config, sample_shape)
    gen_validate(model, test_feed, config, sample_shape)
    '''

    # dialog_utils.generate_with_adv(model, test_feed, config, None, num_batch=None)
    # selected_clusters, index_cluster_id_train = utt_utils.latent_cluster(model, train_feed, config, num_batch=None)
    # _, index_cluster_id_test = utt_utils.latent_cluster(model, test_feed, config, num_batch=None)
    # _, index_cluster_id_valid = utt_utils.latent_cluster(model, valid_feed, config, num_batch=None)
    # selected_outs = dialog_utils.selective_generate(model, test_feed, config, selected_clusters)
    # print(len(selected_outs))
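
All of the GAN examples call `model.discriminator.apply(weights_init)` (and likewise for `generator` and `vae` where present) before training. The repo's initializer is not shown here; a common DCGAN-style module-wise initializer, given purely as an assumed illustration, looks like this:

import torch.nn as nn

def weights_init(m):
    # Applied module-by-module through nn.Module.apply().
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0.0)
    elif classname.find('Linear') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)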
Example #4
def main(config):
    prepare_dirs_loggers(config, os.path.basename(__file__))
    manualSeed = config.seed
    random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    np.random.seed(manualSeed)
    sample_shape = config.batch_size, config.state_noise_dim, config.action_noise_dim

    evaluator = evaluators.BleuEvaluator(os.path.basename(__file__))

    train_feed = WoZGanDataLoaders_StateActionEmbed("train", config)
    valid_feed = WoZGanDataLoaders_StateActionEmbed("val", config)
    test_feed = WoZGanDataLoaders_StateActionEmbed("test", config)

    # action2name = load_action2name(config)
    action2name = None
    corpus_client = None

    # model = GanAgent_VAE_StateActioneEmbed(corpus_client, config, action2name)
    model = GanAgent_StateVaeActionSeg(corpus_client, config, action2name)

    logger.info(summary(model, show_weights=False))
    model.discriminator.apply(weights_init)
    model.generator.apply(weights_init)
    model.vae.apply(weights_init)

    if config.forward_only:
        test_file = os.path.join(
            config.log_dir, config.load_sess,
            "{}-test-{}.txt".format(get_time(), config.gen_type))
        dump_file = os.path.join(config.log_dir, config.load_sess,
                                 "{}-z.pkl".format(get_time()))
        model_file = os.path.join(config.log_dir, config.load_sess, "model")
    else:
        test_file = os.path.join(
            config.session_dir,
            "{}-test-{}.txt".format(get_time(), config.gen_type))
        dump_file = os.path.join(config.session_dir,
                                 "{}-z.pkl".format(get_time()))
        model_file = os.path.join(config.session_dir, "model")
        vocab_file = os.path.join(config.session_dir, "vocab.json")

    if config.use_gpu:
        model.cuda()

    pred_list = []
    generator_samples = []
    print("Evaluate initial model on Validate set")
    model.eval()
    # policy_validate_for_human(model,valid_feed, config, sample_shape)
    disc_validate(model, valid_feed, config, sample_shape)
    _, sample_batch = gen_validate(model, valid_feed, config, sample_shape, -1)
    generator_samples.append([-1, sample_batch])
    machine_data, human_data = build_fake_data(model, valid_feed, config,
                                               sample_shape)

    model.train()
    print("Start training")

    if config.forward_only is False:
        try:
            engine.vae_train(model, train_feed, valid_feed, test_feed, config)
        except KeyboardInterrupt:
            print("Training stopped by keyboard.")
    print("AutoEncoder Training Done ! ")
    load_model_vae(model, config)

    # path='logs/2019-09-18T12:20:26.063708-mwoz_gan_vae_StateActionEmbed.py'  # this is embed version
    # path='logs/2019-09-18T12:24:45.517636-mwoz_gan_vae_StateActionEmbed.py'  # this is embed_merged version
    # path='logs/2019-09-18T17:21:35.420069-mwoz_gan_vae_StateActionEmbed.py'  # this is state_vae action_seg version, without hotel domain
    # load_model_vae(model, path)

    if config.forward_only is False:
        try:
            engine.gan_train(model, machine_data, train_feed, valid_feed,
                             test_feed, config, evaluator, pred_list,
                             generator_samples)
        except KeyboardInterrupt:
            print("Training stopped by keyboard.")
    print("Reward Model Training Done ! ")
    print("Saved path: {}".format(model_file))