Example #1
def test():
    # Build the model and both optimisers; their states are overwritten
    # below by the values stored in the checkpoint.
    print('building model...')
    voc = Voc()
    seq2seq = Seq2Seq(voc.num_words).to(args.device)
    param_optimizer = args.optimiser(seq2seq.parameters(),
                                     lr=args.learning_rate)
    # The decoder additionally gets its own optimiser with a scaled
    # learning rate.
    decoder_optimizer = args.optimiser(seq2seq.decoder.parameters(),
                                       lr=args.learning_rate *
                                       args.decoder_ratio)
    print('done')

    if args.param_file is None:
        print('please specify the saved param file.')
        exit(-1)
    else:
        print('loading saved parameters from ' + args.param_file + '...')
        checkpoint = torch.load(args.param_file)
        seq2seq.load_state_dict(checkpoint['model'])
        param_optimizer.load_state_dict(checkpoint['opt'])
        decoder_optimizer.load_state_dict(checkpoint['de_opt'])
        # Restore the vocabulary the checkpointed model was trained with.
        voc = checkpoint['voc']
        print('done')

    print('loading test data...')
    test_set = FruitSeqDataset(voc, dataset_file_path=args.test_file)
    print('done')

    test_seq_acc, test_tok_acc, test_loss = eval_model(seq2seq, test_set)
    print(
        "[TEST]Loss: {:.4f}; Seq-level Accuracy: {:.4f}; Tok-level Accuracy: {:.4f}"
        .format(test_loss, test_seq_acc * 100, test_tok_acc * 100))
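
For context, eval_model is assumed to iterate the batched dataset with gradients disabled and to return sequence-level accuracy, token-level accuracy, and the average loss. A minimal sketch of that contract (the batch layout and the model's return values are assumptions, not the repository's actual code):

import torch

def eval_model(model, dataset):
    # Hypothetical sketch: average the metrics over all batches without
    # tracking gradients; the model(data_batch) outputs are assumed.
    model.eval()
    total_seq_acc, total_tok_acc, total_loss = 0., 0., 0.
    with torch.no_grad():
        for data_batch in dataset:
            seq_acc, tok_acc, loss = model(data_batch)
            total_seq_acc += seq_acc
            total_tok_acc += tok_acc
            total_loss += loss
    model.train()
    num_batches = len(dataset)
    return (total_seq_acc / num_batches, total_tok_acc / num_batches,
            total_loss / num_batches)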
Example #2
def train_generation(model,
                     train_set,
                     dev_set,
                     learn_set,
                     sim_chk_inset,
                     sim_chk_batchset,
                     clip=args.clip,
                     generation_idx=0):
    # Run one generation of iterated learning: play the game, then (unless
    # this is the final generation) transmit the emergent language to
    # freshly initialised agents.
    s_optimiser = args.optimiser(model.speaker.parameters(),
                                 lr=args.learning_rate)
    l_optimiser = args.optimiser(model.listener.parameters(),
                                 lr=args.learning_rate)

    (training_losses, training_acc, training_in_spkh_sim, training_in_msg_sim,
     training_in_lish_sim, eval_acc) = game_play_phase(
         model, train_set, dev_set, sim_chk_inset, sim_chk_batchset,
         s_optimiser, l_optimiser, clip, generation_idx)

    if generation_idx != args.num_generation:
        random.shuffle(learn_set.databatch_set)
        # The current speaker labels the learning set; these reproduced
        # messages train the next generation's speaker.
        reproduced_msg_set, reproduced_msg_masks = \
            knowledge_generation_phase(model, learn_set)
        print('Generation: {}; Message Reproduction Phase Done.'.format(
            generation_idx))

        model.reset_speaker()
        print('Generation: {}; Speaker Reset Done.'.format(generation_idx))
        model.reset_listener()
        print('Generation: {}; Listener Reset Done.'.format(generation_idx))

        # Fresh optimisers for the re-initialised agents.
        s_optimiser = args.optimiser(model.speaker.parameters(),
                                     lr=args.learning_rate)
        l_optimiser = args.optimiser(model.listener.parameters(),
                                     lr=args.learning_rate)

        speaker_learning_phase(model, s_optimiser, learn_set,
                               reproduced_msg_set, reproduced_msg_masks,
                               generation_idx, clip)
        print('Generation: {}; Speaker Learning Phase Done.'.format(
            generation_idx))

        listener_warming_up_phase(model, train_set, dev_set, s_optimiser,
                                  l_optimiser, clip, generation_idx)
        print('Generation: {}; Listener Warming Up Phase Done.'.format(
            generation_idx))

        del reproduced_msg_set
        del reproduced_msg_masks

    return (training_losses, training_acc, training_in_spkh_sim,
            training_in_msg_sim, training_in_lish_sim, eval_acc)
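
A plausible driver for the function above, calling train_generation once per generation; the check against args.num_generation inside it suggests generations run from 0 to args.num_generation inclusive. The loop itself and the record handling are assumptions; the variable names follow the example:

# Hypothetical sketch: chain generations, keeping each generation's records.
all_records = []
for generation_idx in range(args.num_generation + 1):
    records = train_generation(model, train_set, dev_set, learn_set,
                               sim_chk_inset, sim_chk_batchset,
                               clip=args.clip,
                               generation_idx=generation_idx)
    all_records.append(records)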
Example #3
def train():
    print('building vocabulary...')
    voc = Voc()
    print('done')

    print('loading data and building batches...')
    train_set = PairDataset(voc, dataset_file_path=args.train_file, reverse=True)
    dev_set = PairDataset(voc, dataset_file_path=args.dev_file, reverse=True)
    # test_set = PairDataset(voc, dataset_file_path=TEST_FILE_PATH)
    print('done')
        
    if args.param_file is not None:
        print('loading saved parameters from ' + args.param_file + '...')
        checkpoint = torch.load(args.param_file, map_location=args.device)
        train_args = checkpoint['args']
        voc = checkpoint['voc']
        print('done')

        print('arguments for training:')
        print(train_args)

        print('rebuilding model...')

        # Rebuild the model around the restored vocabulary, then load the
        # saved weights and recreate the optimiser.
        model = Set2Seq(voc.num_words).to(args.device)
        model.load_state_dict(checkpoint['model'])
        model_optimiser = train_args.optimiser(model.parameters(),
                                               lr=train_args.learning_rate)
        print('\tdone')
    else:
        print('building model...')
        model = Set2Seq(voc.num_words).to(args.device)
        model_optimiser = args.optimiser(model.parameters(),
                                         lr=args.learning_rate)
        print('done')
    
    print('initialising...')
    start_iteration = 1
    print_loss = 0.
    print_seq_acc = 0.
    print_tok_acc = 0.
    max_dev_seq_acc = 0.
    max_dev_tok_acc = 0.
    training_losses = []
    training_tok_acc = []
    training_seq_acc = []
    training_sim = []
    eval_tok_acc = []
    eval_seq_acc = []
    print('done')

    print('training...')
    for iter in range(start_iteration, args.iter_num+1):

        for idx, data_batch in enumerate(train_set):
            seq_acc, tok_acc, loss = train_epoch(model, data_batch,
                                                 model_optimiser)
            print_loss += loss
            print_seq_acc += seq_acc
            print_tok_acc += tok_acc

        if iter % args.print_freq == 0:
            # Average over the print_freq epochs accumulated since the last
            # report (len(train_set) batches per epoch).
            print_loss_avg = print_loss / (args.print_freq * len(train_set))
            print_seq_acc_avg = print_seq_acc / (args.print_freq * len(train_set))
            print_tok_acc_avg = print_tok_acc / (args.print_freq * len(train_set))

            print("Iteration: {}; Percent complete: {:.1f}%; Avg loss: {:.4f}; Avg seq acc: {:.4f}; Avg tok acc: {:.4f}".format(
                iter, iter / args.iter_num * 100, print_loss_avg, print_seq_acc_avg, print_tok_acc_avg
                ))
            training_seq_acc.append(print_seq_acc_avg)
            training_tok_acc.append(print_tok_acc_avg)
            training_losses.append(print_loss_avg)
            print_seq_acc = 0.
            print_tok_acc = 0.
            print_loss = 0.

        if iter % args.eval_freq == 0:
            dev_seq_acc, dev_tok_acc, dev_loss = eval_model(model, dev_set)
            eval_tok_acc.append(dev_tok_acc)
            eval_seq_acc.append(dev_seq_acc)
            if dev_seq_acc > max_dev_seq_acc:
                max_dev_seq_acc = dev_seq_acc
            if dev_tok_acc > max_dev_tok_acc:
                max_dev_tok_acc = dev_tok_acc

            print("[EVAL]Iteration: {}; Loss: {:.4f}; Avg Seq Acc: {:.4f}; Avg Tok Acc: {:.4f}; Best Seq Acc: {:.4f}".format(
                iter, dev_loss, dev_seq_acc, dev_tok_acc, max_dev_seq_acc))

        
        if iter % args.save_freq == 0:
            path_join = 'speaker_' + str(args.num_words) + '_' + args.msg_mode
            path_join += '_hard' if not args.soft else '_soft'
            directory = os.path.join(args.save_dir, path_join)
            if not os.path.exists(directory):
                os.makedirs(directory)
            torch.save({
                'iteration': iter,
                'model': model.state_dict(),
                'opt': [
                    model_optimiser.state_dict()
                ],
                'loss': loss,
                'voc': voc,
                'args': args,
                'records': {
                    'training_loss': training_losses,
                    'training_tok_acc': training_tok_acc,
                    'training_seq_acc': training_seq_acc,
                    'training_sim': training_sim,
                    'eval_tok_acc': eval_tok_acc,
                    'eval_seq_acc': eval_seq_acc
                }
            }, os.path.join(directory, '{}_{}_{}.tar'.format(args.seed, iter, 'checkpoint')))
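
train_epoch is assumed to run one optimisation step on a single batch and to return the same three metrics the loop accumulates. A minimal sketch under those assumptions (the model's return values and the optional clip argument are hypothetical):

import torch

def train_epoch(model, data_batch, model_optimiser, clip=None):
    # Hypothetical sketch: one forward/backward/step per batch.
    model_optimiser.zero_grad()
    seq_acc, tok_acc, loss = model(data_batch)  # assumed model outputs
    loss.backward()
    if clip is not None:
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
    model_optimiser.step()
    return seq_acc, tok_acc, loss.item()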
Example #4
def train():
    print('building vocabulary...')
    voc = Voc()
    print('done')

    print('loading data and building batches...')
    train_set = ChooseDataset(voc, dataset_file_path=args.train_file)
    dev_set = ChooseDataset(voc, dataset_file_path=args.dev_file)
    print('done')

    if args.param_file is not None:
        print('loading saved parameters from ' + args.param_file + '...')
        checkpoint = torch.load(args.param_file, map_location=args.device)
        train_args = checkpoint['args']
        voc = checkpoint['voc']
        print('done')

        print('arguments for training:')
        print(train_args)

        print('rebuilding model...')

        model = Set2Seq2Choice(voc.num_words).to(args.device)
        model.load_state_dict(checkpoint['model'])
        speaker_optimiser = train_args.optimiser(model.speaker.parameters(),
                                                 lr=train_args.learning_rate)
        listener_optimiser = train_args.optimiser(model.listener.parameters(),
                                                  lr=train_args.learning_rate)
        print('\tdone')
    else:
        print('building model...')
        model = Set2Seq2Choice(voc.num_words).to(args.device)
        speaker_optimiser = args.optimiser(model.speaker.parameters(),
                                           lr=args.learning_rate)
        listener_optimiser = args.optimiser(model.listener.parameters(),
                                            lr=args.learning_rate)
        print('done')

    print('preparing data for testing topological similarity...')
    sim_chk_inset, sim_chk_batchset = get_batches4sim_check(
        voc, args.data_file)
    print('done')

    print('initialising...')
    start_iteration = 1
    print_loss = 0.
    print_acc = 0.
    max_dev_acc = 0.
    training_losses = []
    training_acc = []
    training_in_spkh_sim = []
    training_in_msg_sim = []
    training_in_lish_sim = []
    training_spk_lis_sim = []
    training_mi = []
    eval_acc = []
    print('done')

    # Baseline similarity check before any training (reported as iteration 0).
    in_spk_sim, in_msg_sim, in_lis_sim, spk_lis_sim = sim_check(
        model, sim_chk_inset, sim_chk_batchset)
    mi_sim = mi_check(model, sim_chk_batchset)
    print(
        '[SIM]Iteration: {}; In-SpkH Sim: {:.4f}; In-Msg Sim: {:.4f}; In-LisH Sim: {:.4f}; SpkH-LisH Sim: {:.4f}; In-Msg-MI: {:.4f}'
        .format(0, in_spk_sim, in_msg_sim, in_lis_sim, spk_lis_sim, mi_sim))

    print('training...')
    for iter in range(start_iteration, args.iter_num + 1):
        for idx, data_batch in enumerate(train_set):
            acc, loss = train_epoch(model, data_batch, args.tau,
                                    speaker_optimiser, listener_optimiser)
            print_loss += loss
            print_acc += acc

        if iter % args.print_freq == 0:
            print_loss_avg = print_loss / (args.print_freq * len(train_set))
            print_acc_avg = print_acc / (args.print_freq * len(train_set))
            print(
                "Iteration: {}; Percent complete: {:.1f}%; Avg loss: {:.4f}; Avg acc: {:.4f};"
                .format(iter, iter / args.iter_num * 100, print_loss_avg,
                        print_acc_avg))
            training_acc.append(print_acc_avg)
            training_losses.append(print_loss_avg)
            print_acc = 0.
            print_loss = 0.

        if iter % args.eval_freq == 0:
            dev_acc, dev_loss = eval_model(model, dev_set)
            if dev_acc > max_dev_acc:
                max_dev_acc = dev_acc
            eval_acc.append(dev_acc)
            print(
                "[EVAL]Iteration: {}; Loss: {:.4f}; Avg Acc: {:.4f}; Best Acc: {:.4f}"
                .format(iter, dev_loss, dev_acc, max_dev_acc))

        if iter % args.sim_chk_freq == 0:
            in_spk_sim, in_msg_sim, in_lis_sim, spk_lis_sim = sim_check(
                model, sim_chk_inset, sim_chk_batchset)
            mi_sim = mi_check(model, sim_chk_batchset)
            training_in_spkh_sim.append(in_spk_sim)
            training_in_msg_sim.append(in_msg_sim)
            training_in_lish_sim.append(in_lis_sim)
            training_spk_lis_sim.append(spk_lis_sim)
            training_mi.append(mi_sim)
            print(
                '[SIM]Iteration: {}; In-SpkH Sim: {:.4f}; In-Msg Sim: {:.4f}; In-LisH Sim: {:.4f}; SpkH-LisH Sim: {:.4f}; In-Msg-MI: {:.4f}'
                .format(iter, in_spk_sim, in_msg_sim, in_lis_sim, spk_lis_sim,
                        mi_sim))

        if iter % args.save_freq == 0:
            path_join = 'set2seq2choice_' + str(
                args.num_words) + '_' + args.msg_mode
            path_join += '_hard' if not args.soft else '_soft'
            directory = os.path.join(args.save_dir, path_join)
            if not os.path.exists(directory):
                os.makedirs(directory)
            # NOTE: dev_acc is only defined once the first evaluation has
            # run, so args.eval_freq should divide args.save_freq.
            torch.save(
                {
                    'iteration': iter,
                    'model': model.state_dict(),
                    'opt': [
                        speaker_optimiser.state_dict(),
                        listener_optimiser.state_dict()
                    ],
                    'loss': loss,
                    'voc': voc,
                    'args': args,
                    'records': {
                        'training_loss': training_losses,
                        'training_acc': training_acc,
                        'training_in_spkh_sim': training_in_spkh_sim,
                        'training_in_msg_sim': training_in_msg_sim,
                        'training_in_lish_sim': training_in_lish_sim,
                        'training_spkh_lish_sim': training_spk_lis_sim,
                        'training_mi': training_mi,
                        'eval_acc': eval_acc,
                    }
                },
                os.path.join(
                    directory,
                    '{}_{:.4f}_{}.tar'.format(iter, dev_acc, 'checkpoint')))
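
sim_check is assumed to measure topographic similarity: the correlation between pairwise distances in the input space and pairwise distances in the speaker-hidden, message, and listener-hidden spaces. A minimal sketch of that idea for a single space (the distance functions and the flat input/message lists are assumptions, not the repository's actual code):

from itertools import combinations
from scipy.stats import pearsonr

def topographic_similarity(inputs, messages, dist_in, dist_msg):
    # Hypothetical sketch: correlate pairwise input distances with
    # pairwise message distances over all unordered pairs.
    d_in, d_msg = [], []
    for i, j in combinations(range(len(inputs)), 2):
        d_in.append(dist_in(inputs[i], inputs[j]))
        d_msg.append(dist_msg(messages[i], messages[j]))
    corr, _ = pearsonr(d_in, d_msg)
    return corr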
Example #5
def train():
    print('building vocabulary...')
    voc = Voc()
    print('done')

    print('loading data and building batches...')
    train_set = FruitSeqDataset(voc, dataset_file_path=args.train_file)
    dev_set = FruitSeqDataset(voc, dataset_file_path=args.dev_file)
    # test_set = FruitSeqDataset(voc, dataset_file_path=TEST_FILE_PATH)
    print('done')

    print('building model...')
    seq2seq = Seq2Seq(voc.num_words).to(args.device)
    param_optimizer = args.optimiser(seq2seq.parameters(),
                                     lr=args.learning_rate)
    # The decoder gets a second optimiser whose learning rate is scaled
    # by args.speaker_ratio.
    decoder_optimizer = args.optimiser(seq2seq.decoder.parameters(),
                                       lr=args.learning_rate *
                                       args.speaker_ratio)
    if args.param_file is not None:
        print('\tloading saved parameters from ' + args.param_file + '...')
        checkpoint = torch.load(args.param_file)
        seq2seq.load_state_dict(checkpoint['model'])
        param_optimizer.load_state_dict(checkpoint['opt'])
        decoder_optimizer.load_state_dict(checkpoint['de_opt'])
        voc = checkpoint['voc']
        print('\tdone')
    print('done')

    print('initialising...')
    start_iteration = 1
    print_loss = 0.
    print_seq_acc = 0.
    print_tok_acc = 0.
    max_dev_seq_acc = 0.
    training_losses = []
    training_tok_acc = []
    training_seq_acc = []
    training_sim = []
    eval_tok_acc = []
    eval_seq_acc = []
    print('done')

    print('training...')
    for iter in range(start_iteration, args.iter_num + 1):
        for idx, data_batch in enumerate(train_set):
            seq_acc, tok_acc, loss = train_epoch(seq2seq, data_batch,
                                                 param_optimizer,
                                                 decoder_optimizer)
            print_loss += loss
            print_seq_acc += seq_acc
            print_tok_acc += tok_acc

        if iter % args.print_freq == 0:
            print_loss_avg = print_loss / (args.print_freq * len(train_set))
            print_seq_acc_avg = print_seq_acc / (args.print_freq *
                                                 len(train_set))
            print_tok_acc_avg = print_tok_acc / (args.print_freq *
                                                 len(train_set))
            print(
                "Iteration: {}; Percent complete: {:.1f}%; Avg loss: {:.4f}; Avg seq acc: {:.4f}; Avg tok acc: {:.4f}"
                .format(iter, iter / args.iter_num * 100, print_loss_avg,
                        print_seq_acc_avg, print_tok_acc_avg))
            training_seq_acc.append(print_seq_acc_avg)
            training_tok_acc.append(print_tok_acc_avg)
            training_losses.append(print_loss_avg)
            print_seq_acc = 0.
            print_tok_acc = 0.
            print_loss = 0.

        if iter % args.eval_freq == 0:
            dev_seq_acc, dev_tok_acc, dev_loss = eval_model(seq2seq, dev_set)
            if dev_seq_acc > max_dev_seq_acc:
                max_dev_seq_acc = dev_seq_acc
            eval_seq_acc.append(dev_seq_acc)
            eval_tok_acc.append(dev_tok_acc)

            print(
                "[EVAL]Iteration: {}; Loss: {:.4f}; Avg Seq Acc: {:.4f}; Avg Tok Acc: {:.4f}; Best Seq Acc: {:.4f}"
                .format(iter, dev_loss, dev_seq_acc, dev_tok_acc,
                        max_dev_seq_acc))

        if iter % args.save_freq == 0:
            directory = os.path.join(args.save_dir, 'seq2seq')
            if not os.path.exists(directory):
                os.makedirs(directory)
            torch.save(
                {
                    'iteration': iter,
                    'model': seq2seq.state_dict(),
                    'opt': param_optimizer.state_dict(),
                    'de_opt': decoder_optimizer.state_dict(),
                    'loss': loss,
                    'voc': voc,
                    'args': args,
                    'records': {
                        'training_loss': training_losses,
                        'training_tok_acc': training_tok_acc,
                        'training_seq_acc': training_seq_acc,
                        'training_sim': training_sim,
                        'eval_tok_acc': eval_tok_acc,
                        'eval_seq_acc': eval_seq_acc
                    }
                },
                os.path.join(
                    directory, '{}_{}_{}.tar'.format(args.seed, iter,
                                                     'checkpoint')))
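
The checkpoints written above can be reloaded through the same keys, mirroring the loading branch at the top of the function. A minimal resume sketch (the file path is a placeholder; the fields follow the torch.save call in the example):

# Hypothetical resume sketch using the keys written by the save above.
checkpoint = torch.load('path/to/checkpoint.tar', map_location=args.device)
voc = checkpoint['voc']
seq2seq = Seq2Seq(voc.num_words).to(args.device)
seq2seq.load_state_dict(checkpoint['model'])
param_optimizer = args.optimiser(seq2seq.parameters(), lr=args.learning_rate)
param_optimizer.load_state_dict(checkpoint['opt'])
decoder_optimizer = args.optimiser(seq2seq.decoder.parameters(),
                                   lr=args.learning_rate * args.speaker_ratio)
decoder_optimizer.load_state_dict(checkpoint['de_opt'])
start_iteration = checkpoint['iteration'] + 1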