Example #1
def main(opt):
    # Build dictionary from task data
    if 'pretrained_model' in opt:
        dictionary = None
    else:
        dictionary = build_dict(opt)

    # Build document reader
    doc_reader = DocReaderAgent(opt, word_dict=dictionary)

    # Log params
    logger.info('[ Created with options: ] %s' %
                ''.join(['\n{}\t{}'.format(k, v)
                         for k, v in doc_reader.opt.items()]))

    # Build training world once
    opt['datatype'] = 'train'
    train_world = create_task(opt, doc_reader)
    train_time = Timer()

    # Keep track of best model + how long since the last improvement
    best_valid = 0
    impatience = 0

    logger.info("[ Ok, let's go... ]")
    iteration = 0
    while impatience < opt['patience']:
        # Train...
        logger.info('[ Training for %d iters... ]' % opt['train_interval'])
        train_time.reset()
        for _ in range(opt['train_interval']):
            train_world.parley()
        logger.info('[ Done. Time = %.2f (s) ]' % train_time.time())

        # ...validate!
        valid_metric = validate(opt, doc_reader, iteration)
        if valid_metric > best_valid:
            logger.info(
                '[ Best eval %d: %s = %.4f (old = %.4f) ]' %
                (iteration, opt['valid_metric'], valid_metric, best_valid)
            )
            best_valid = valid_metric
            impatience = 0
            if 'model_file' in opt:
                doc_reader.save(opt['model_file'])

            if valid_metric == 1:
                logger.info('[ Task solved! Stopping. ]')
                break
        else:
            impatience += 1

        iteration += 1
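
Example #1 calls a validate() helper that is not shown. A minimal sketch of what it might look like, assuming it mirrors the evaluation loops in the later examples and reads the target metric name from opt['valid_metric'] (the body here is a guess, not the original helper):

def validate(opt, doc_reader, iteration):
    # Hypothetical implementation: run one pass over the validation set
    # and return the metric named by opt['valid_metric'].
    valid_opt = opt.copy()
    valid_opt['datatype'] = 'valid'
    valid_world = create_task(valid_opt, doc_reader)
    for _ in valid_world:
        valid_world.parley()
    metrics = valid_world.report()
    logger.info('[ Iter %d: %s = %.4f ]' %
                (iteration, opt['valid_metric'], metrics[opt['valid_metric']]))
    return metrics[opt['valid_metric']]
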
Example #2
def main(opt):
    # Check options
    assert 'pretrained_model' in opt
    assert opt['datatype'] in {'test', 'valid'}

    # Load document reader
    doc_reader = DocReaderAgent(opt)

    # Log params
    logger.info(
        '[ Created with options: ] %s' %
        ''.join(['\n{}\t{}'.format(k, v) for k, v in doc_reader.opt.items()]))

    logger.info('[ Running validation... ]')
    valid_world = create_task(opt, doc_reader)
    valid_time = Timer()
    for _ in valid_world:
        valid_world.parley()

    metrics = valid_world.report()
    if 'tasks' in metrics:
        for task, t_metrics in metrics['tasks'].items():
            logger.info('task = %s | EM = %.4f | F1 = %.4f | exs = %d' %
                        (task, t_metrics['accuracy'], t_metrics['f1'],
                         t_metrics['total']))
        logger.info('Overall EM = %.4f | exs = %d' %
                    (metrics['accuracy'], metrics['total']))
    else:
        logger.info('EM = %.4f | F1 = %.4f | exs = %d' %
                    (metrics['accuracy'], metrics['f1'], metrics['total']))
    logger.info('[ Done. Time = %.2f (s) ]' % valid_time.time())
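
The branch on 'tasks' above handles multi-task evaluation: with several tasks combined, report() is assumed to return per-task metrics alongside the overall numbers. A sketch of the expected shape (all values hypothetical):

# Hypothetical shape of report() output for a multi-task run:
metrics = {
    'accuracy': 0.71,   # overall exact match
    'total': 14911,     # total examples across tasks
    'tasks': {
        'squad': {'accuracy': 0.69, 'f1': 0.78, 'total': 10570},
        'newsqa': {'accuracy': 0.46, 'f1': 0.56, 'total': 4341},
    },
}
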
Example #3
def main(opt):
    # Load document reader
    assert 'pretrained_model' in opt
    doc_reader = DocReaderAgent(opt)

    # Log params
    logger.info(
        '[ Created with options: ] %s' %
        ''.join(['\n{}\t{}'.format(k, v) for k, v in doc_reader.opt.items()]))

    while True:
        context = input('Context: ')
        question = input('Question: ')
        observation = {
            'text': '\n'.join([context, question]),
            'episode_done': True
        }
        doc_reader.observe(observation)
        reply = doc_reader.act()
        print('Reply: %s' % reply['text'])
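
The observe/act protocol in Example #3 can also be driven programmatically instead of through input(). A short sketch (context, question, and the printed answer are made up for illustration):

context = 'The quick brown fox jumps over the lazy dog.'
question = 'What does the fox jump over?'
doc_reader.observe({
    'text': context + '\n' + question,
    'episode_done': True,
})
reply = doc_reader.act()
print(reply['text'])  # e.g. 'the lazy dog'
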
Example #4
    logger.info('[ Running validation... ]')
    valid_world = create_task(opt, doc_reader)
    valid_time = Timer()
    for _ in valid_world:
        valid_world.parley()

    metrics = valid_world.report()
    logger.info('EM = %.2f | F1 = %.2f | exs = %d' %
                (metrics['accuracy'], metrics['f1'], metrics['total']))
    logger.info('[ Done. Time = %.2f (s) ]' % valid_time.time())


if __name__ == '__main__':
    # Get command line arguments
    argparser = ParlaiParser()
    DocReaderAgent.add_cmdline_args(argparser)
    opt = argparser.parse_args()

    # Set logging (only stderr)
    logger = logging.getLogger('DrQA')
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p')
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    logger.addHandler(console)

    # Set cuda
    opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
    if opt['cuda']:
        logger.info('[ Using CUDA (GPU %d) ]' % opt['gpu'])
        torch.cuda.set_device(opt['gpu'])
Example #5
def main(opt):
    # Check options
    assert 'pretrained_model' in opt
    assert opt['datatype'] in {'test', 'valid'}

    # Calculate TDNN embedding dim (after applying TDNN to char tensor)
    opt['kernels'] = ''.join(opt['kernels'])
    if isinstance(opt['kernels'], str):
        # convert the string form, e.g. "[(1, 50), (2, 100)]", into a
        # list of kernel tuples; n[1] below is each kernel's filter count
        opt['kernels'] = eval(opt['kernels'])
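    # NOTE (suggestion, not in the original): ast.literal_eval is a safer
    # drop-in for eval when parsing a literal such as "[(1, 50), (2, 100)]":
    #   import ast
    #   opt['kernels'] = ast.literal_eval(opt['kernels'])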

    if opt['add_char2word']:
        opt['NULLWORD_Idx_in_char'] = opt['vocab_size_char'] - 1
        opt['embedding_dim_TDNN'] = 0
        for i, n in enumerate(opt['kernels']):
            opt['embedding_dim_TDNN'] += n[1]

        logger.info('TDNN embedding dim = %d' % (opt['embedding_dim_TDNN']))

    # Open prediction and analysis output files
    f_predict = open(str(opt['model_file']) + '.prediction', 'w')
    f_predict.write('{')
    f_analysis = open(str(opt['model_file']) + '.analysis', 'w')

    # Load document reader
    doc_reader = DocReaderAgent(opt)

    # Log params
    logger.info(
        '[ Created with options: ] %s' %
        ''.join(['\n{}\t{}'.format(k, v) for k, v in doc_reader.opt.items()]))

    logger.info('[ Running validation... ]')
    valid_world = create_task(opt, doc_reader)
    valid_time = Timer()

    # Sentence prediction: disabled by default
    valid_world.agents[1].opt['ans_sent_predict'] = False
    valid_world.agents[1].model.network.opt['ans_sent_predict'] = False
    if opt['ans_sent_predict']:
        valid_world.agents[1].model.input_idx_bdy -= 1

    nExample = 0
    f1_avg_prev = 0
    for _ in valid_world:
        valid_world.parley()
        nExample += 1
        f_predict.write('"' + valid_world.acts[0]['reward'] + '" ')
        f_predict.write('"' + valid_world.acts[1]['text'] + '", ')

        f_analysis.write('Paragraph & Question = ' +
                         valid_world.acts[0]['text'] + '\n')
        f_analysis.write('Prediction = ' + valid_world.acts[1]['text'] + '\n')
        f_analysis.write('Answer = ' + valid_world.agents[0].lastY_prev[0] +
                         '\n')

        f1_avg_cur = valid_world.agents[0].report()['f1']
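        # Recover this example's F1 from consecutive running averages:
        # n * avg_n - (n - 1) * avg_{n-1} is the n-th example's F1.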
        f1_cur = nExample * f1_avg_cur - (nExample - 1) * f1_avg_prev
        f_analysis.write('F1 = ' + str(f1_cur) + '\n')
        f1_avg_prev = f1_avg_cur

    metrics = valid_world.report()
    if 'tasks' in metrics:
        for task, t_metrics in metrics['tasks'].items():
            logger.info('task = %s | EM = %.4f | F1 = %.4f | exs = %d' %
                        (task, t_metrics['accuracy'], t_metrics['f1'],
                         t_metrics['total']))
        logger.info('Overall EM = %.4f | exs = %d' %
                    (metrics['accuracy'], metrics['total']))
    else:
        logger.info('EM = %.4f | F1 = %.4f | exs = %d' %
                    (metrics['accuracy'], metrics['f1'], metrics['total']))
    logger.info('[ Done. Time = %.2f (s) ]' % valid_time.time())

    # Close prediction file
    f_predict.write("}")
    f_predict.close()
    f_analysis.close()
Example #6
def main(opt):
    # Check options
    assert 'pretrained_model' in opt
    assert opt['datatype'] in {'test', 'valid'}

    # Calculate TDNN embedding dim (after applying TDNN to char tensor)
    opt['kernels'] = ''.join(opt['kernels'])
    if isinstance(opt['kernels'], str):
        # convert the string form into a list of kernel tuples
        opt['kernels'] = eval(opt['kernels'])

    if opt['add_char2word']:
        opt['embedding_dim_TDNN'] = 0
        for i, n in enumerate(opt['kernels']):
            opt['embedding_dim_TDNN'] += n[1]

        logger.info('TDNN embedding dim = %d' % (opt['embedding_dim_TDNN']))

    # Open prediction output file
    f_predict = open('exp-squad/' + str(opt['expnum']) + '.prediction', 'w')
    f_predict.write('{')

    # Load document reader
    doc_reader = DocReaderAgent(opt)

    # Log params
    logger.info(
        '[ Created with options: ] %s' %
        ''.join(['\n{}\t{}'.format(k, v) for k, v in doc_reader.opt.items()]))

    logger.info('[ Running validation... ]')
    valid_world = create_task(opt, doc_reader)
    valid_time = Timer()

    # Sentence prediction: disabled by default
    valid_world.agents[1].opt['ans_sent_predict'] = False
    valid_world.agents[1].model.network.opt['ans_sent_predict'] = False
    if opt['ans_sent_predict']:
        valid_world.agents[1].model.input_idx_bdy -= 1

    for _ in valid_world:
        valid_world.parley()

    metrics = valid_world.report()
    if 'tasks' in metrics:
        for task, t_metrics in metrics['tasks'].items():
            logger.info('task = %s | EM = %.4f | F1 = %.4f | exs = %d' %
                        (task, t_metrics['accuracy'], t_metrics['f1'],
                         t_metrics['total']))
        logger.info('Overall EM = %.4f | exs = %d' %
                    (metrics['accuracy'], metrics['total']))
    else:
        logger.info('EM = %.4f | F1 = %.4f | exs = %d' %
                    (metrics['accuracy'], metrics['f1'], metrics['total']))
    logger.info('[ Done. Time = %.2f (s) ]' % valid_time.time())

    # Close prediction file
    f_predict.write("}")
    f_predict.close()
Example #7
def main(opt):

    # Build word dictionary from task data (cached on disk as a pickle)
    dict_path = 'data/MSmarco/dict.word.' + str(opt['vocab_size']) + '.pkl'
    if os.path.isfile(dict_path):
        with open(dict_path, 'rb') as f:
            dictionary = pickle.load(f)
        logger.info('successfully loaded word dictionary')
    else:
        if 'pretrained_model' in opt:
            dictionary = None
        else:
            dictionary = build_dict(opt)
        if dictionary is not None:  # don't cache the None placeholder
            with open(dict_path, 'wb') as f:
                pickle.dump(dictionary, f)

    # Generator dictionary (cached on disk as a pickle)
    gen_dict_path = ('data/MSmarco/dict.word.' +
                     str(opt['vocab_size_generator']) + '.pkl')
    if os.path.isfile(gen_dict_path):
        with open(gen_dict_path, 'rb') as f:
            dictionary_gen = pickle.load(f)
        logger.info('successfully loaded generator dictionary')
    else:
        dictionary_gen = build_dict(opt)
        with open(gen_dict_path, 'wb') as f:
            pickle.dump(dictionary_gen, f)

    # Character dictionary
    dictionary_char=None
    if opt['add_char2word']:
        opt['NULLWORD_Idx_in_char'] = opt['vocab_size_char']-1
        if os.path.isfile(("data/MSmarco/dict.char." + str(opt['vocab_size_char']) + ".pkl")):
            dictionary_char = pickle.load( open( ("data/MSmarco/dict.char." + str(opt['vocab_size_char']) + ".pkl"), "rb") )  # char dictionary
            logger.info('successfully load char dictionary')
        else:
            # Build char dictionary from task data
            dictionary_char = build_dict_char(opt)
            pickle.dump( dictionary_char , open( ("data/MSmarco/dict.char." + str(opt['vocab_size_char']) + ".pkl"), "wb") )

        # Max word length: deriving it from the word dictionary is not a
        # valid choice (it comes out as 25), so it is just set as a
        # hyperparameter in config.py.
        logger.info('maximum word len = %d' % (opt['max_word_len']))

        # Calculate TDNN embedding dim (after applying TDNN to char tensor)
        opt['kernels'] = ''.join(opt['kernels'])
        if isinstance(opt['kernels'], str):
            # convert the string form into a list of kernel tuples
            opt['kernels'] = eval(opt['kernels'])
        opt['embedding_dim_TDNN'] = 0
        for i, n in enumerate(opt['kernels']):
            opt['embedding_dim_TDNN'] += n[1]

        logger.info('TDNN embedding dim = %d' % (opt['embedding_dim_TDNN']))

    # Build document reader
    doc_reader = DocReaderAgent(opt, word_dict=dictionary,
                                gen_dict=dictionary_gen,
                                char_dict=dictionary_char)

    # Log params
    logger.info('[ Created with options: ] %s' %
                ''.join(['\n{}\t{}'.format(k, v)
                         for k, v in doc_reader.opt.items()]))

    # Build training world once
    opt['datatype'] = 'train'
    train_world = create_task(opt, doc_reader)
    train_time = Timer()

    # Keep track of best model + how long since the last improvement
    best_valid = 0
    impatience = 0
    lrate_decay = 0

    logger.info("[ Ok, let's go... ]")
    iteration = 0
    while impatience < opt['patience']:
        
        # Train...
        logger.info('[ Training for %d iters... ]' % opt['train_interval'])
        train_time.reset()
        for n_iter in range(1, opt['train_interval'] + 1):
            train_world.parley()
            # Collect garbage periodically to keep memory in check
            if n_iter % 100 == 0:
                gc.collect()

        # ...validate!
        logger.info('[ Starting validation... ]')
        valid_metric = validate(opt, doc_reader, iteration)
        if valid_metric > best_valid:
            logger.info(
                '[ Best eval %d: %s = %.4f (old = %.4f) ]' %
                (iteration, opt['valid_metric'], valid_metric, best_valid)
            )
            best_valid = valid_metric
            impatience = 0
            if 'model_file' in opt:
                doc_reader.save(opt['model_file'])

            if valid_metric == 1:
                logger.info('[ Task solved! Stopping. ]')
                break
        else:
            if opt['lrate_decay']:
                opt['learning_rate'] *= opt['lrate_decay_factor']
                doc_reader.model.set_lrate(opt['learning_rate'])
                logger.info('[ Decreased learning rate to %.2e ]' %
                            opt['learning_rate'])
                lrate_decay += 1
                if lrate_decay > 10:
                    break
            else:
                impatience += 1
                logger.info('[ Increased impatience to %d ]' % impatience)

        iteration += 1
    logger.info('[ >> Best eval: %s = %.4f ]' % (opt['valid_metric'], best_valid))
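
Example #7 repeats the same pickle-cache pattern three times (word, generator, and character dictionaries). A small helper could collapse that duplication; a sketch with hypothetical names, assuming the builder is passed in as a callable:

def load_or_build_dict(cache_path, build_fn, label):
    # Hypothetical helper: load a cached dictionary from disk,
    # or build it with build_fn() and cache the result.
    if os.path.isfile(cache_path):
        with open(cache_path, 'rb') as f:
            d = pickle.load(f)
        logger.info('successfully loaded %s dictionary' % label)
    else:
        d = build_fn()
        if d is not None:  # don't cache the None placeholder
            with open(cache_path, 'wb') as f:
                pickle.dump(d, f)
    return d

The word-dictionary block then reduces to a single call such as load_or_build_dict('data/MSmarco/dict.word.%d.pkl' % opt['vocab_size'], lambda: None if 'pretrained_model' in opt else build_dict(opt), 'word').
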
Example #8
def main(opt):
    # Check options
    assert 'pretrained_model' in opt
    assert opt['datatype'] in {'test', 'valid'}

    # Calculate TDNN embedding dim (after applying TDNN to char tensor)
    opt['kernels'] = ''.join(opt['kernels'])
    if isinstance(opt['kernels'], str):
        # convert the string form into a list of kernel tuples
        opt['kernels'] = eval(opt['kernels'])

    if opt['add_char2word']:
        opt['NULLWORD_Idx_in_char'] = opt['vocab_size_char'] - 1
        opt['embedding_dim_TDNN'] = 0
        for i, n in enumerate(opt['kernels']):
            opt['embedding_dim_TDNN'] += n[1]

        logger.info('TDNN embedding dim = %d' % (opt['embedding_dim_TDNN']))

    # Load document reader
    doc_reader = DocReaderAgent(opt)
    
    logger.info('[ Running validation... ]')
    valid_world = create_task(opt, doc_reader)
    valid_time = Timer()
      
    nExample = 0
    for _ in valid_world:
        valid_world.parley()
        nExample += 1

        if nExample % 10 == 0:
            text = valid_world.acts[0]['text']
            p = NLP.tokenizer(text.split('\n')[0])  # paragraph tokens
            q = NLP.tokenizer(text.split('\n')[1])  # question tokens
                    
            # Attention weights from the network's attention modules
            qemb = valid_world.agents[1].model.network.qemb_match.get_alpha().squeeze()
            qp_attention = valid_world.agents[1].model.network.qp_match.get_alpha().squeeze()
            pp_attention = valid_world.agents[1].model.network.pp_match.get_alpha().squeeze()
            s_att = valid_world.agents[1].model.network.start_attn.get_alpha()
            e_att = valid_world.agents[1].model.network.end_attn.get_alpha()
            q_merge = valid_world.agents[1].model.network.self_attn.get_alpha()

            fig = plt.figure(1, figsize=(30, 30))
            n_cols = 4
            subplot(qemb, plt, fig, 1, n_cols, 1, p, q, 'q_emb')
            subplot(qp_attention, plt, fig, 1, n_cols, 2, p, q, 'qp-att')
            subplot(torch.cat([s_att, e_att], 0).t(), plt, fig, 1, n_cols, 3,
                    p, ['start', 'end'], valid_world.agents[0].lastY_prev[0])
            subplot(q_merge.t(), plt, fig, 1, n_cols, 4, q, [' '], 'q-merging')

            fig.savefig(str(opt['model_file']) + '_' + str(nExample) + '.png',
                        transparent=True)
            plt.close()

            fig = plt.figure(2, figsize=(30, 30))
            subplot(pp_attention, plt, fig, 1, 1, 1, p, p, 'pp-att')
            fig.savefig(str(opt['model_file']) + '_' + str(nExample) + 'pp.png',
                        transparent=True)
            plt.close()
        
        """
        subplot(qemb.t(), plt, fig, 4, 1, 1, q, p)
        subplot(qp_attention.t(), plt, fig, 4, 1, 2, q, p
        subplot(pp_attention.t(), plt, fig, 4, 1, 3, p, p)
        subplot(torch.cat([s_att, e_att],0), plt, fig, 4, 4, ['start', 'end'], p)
        """      
        
        if nExample == 100:
            break
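
Example #8 relies on a subplot() helper that is not shown. Judging from the call sites, its arguments appear to be (attention matrix, plt, fig, n_rows, n_cols, panel index, row token labels, column token labels, title). A minimal sketch under those assumptions (body and axis orientation are guesses):

def subplot(alpha, plt, fig, n_rows, n_cols, index, row_tokens, col_tokens, title):
    # Hypothetical helper: draw one attention matrix as a heatmap panel.
    data = alpha.data.cpu().numpy() if hasattr(alpha, 'data') else alpha
    ax = fig.add_subplot(n_rows, n_cols, index)
    ax.imshow(data, aspect='auto', interpolation='nearest')
    ax.set_yticks(range(len(row_tokens)))
    ax.set_yticklabels([str(t) for t in row_tokens], fontsize=6)
    ax.set_xticks(range(len(col_tokens)))
    ax.set_xticklabels([str(t) for t in col_tokens], rotation=90, fontsize=6)
    ax.set_title(title)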