Example #1
def train_and_test():
    # Assumes module-level names from the surrounding project: os, loader,
    # model, post_process, optimizers (name -> trainer constructor, a
    # DyNet-style API), and options (parsed command-line arguments).
    (word_vocab, nt_vocab, ter_vocab, act_vocab,
     word_tokens, tree_tokens, tran_actions) = loader.load_data(
        options.data_dir, options.order)

    parser = model.LSTMParser(word_vocab,
                              nt_vocab,
                              ter_vocab,
                              act_vocab,
                              options.word_dim,
                              options.nt_dim,
                              options.ter_dim,
                              options.lstm_dim,
                              options.nlayers,
                              options.order,
                              options.embedding_file,
                              options.attention,
                              options.train_selection,
                              options.test_selection,
                              options.beam_search,
                              options.beam_size)

    # Resume from a saved model if one exists.
    if os.path.exists(options.model_dir):
        parser.load_model(options.model_dir)

    trainer = optimizers[options.optimizer](parser.model)

    i = 0  # global sentence counter across all epochs
    for epoch in range(options.epochs):
        sents = 0
        total_loss = 0.0
        train_size = len(word_tokens['train'])
        for x, y, z in loader.iter_data(word_tokens, tran_actions,
                                        tree_tokens, 'train'):
            loss = parser.train(x, y, z, options)
            sents += 1
            if loss is not None:
                total_loss += loss.scalar_value()
                loss.backward()
                trainer.update()
            e = float(i) / train_size  # fractional epoch, for logging only
            if i % options.print_every == 0:
                print('epoch {}: loss per sentence: {}'.format(
                    e, total_loss / sents))
                sents = 0
                total_loss = 0.0
            i += 1

        print('testing...')
        save_as = '%s/epoch%03d.model' % (options.result_dir, epoch)
        parser.save_model(save_as)
        # Write this epoch's test parses, one per line, to a file named by
        # the global sentence counter.
        rf = open(os.path.join(options.result_dir, str(i)), 'w')
        for x, y, z in loader.iter_data(word_tokens, tran_actions,
                                        tree_tokens, 'test'):
            output_actions, output_tokens = parser.parse(x, y, z)
            output = post_process.recover(output_actions, output_tokens,
                                          options.order)
            output = post_process.format_output(output)
            rf.write(output + '\n')
        rf.close()
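The function reads its hyper-parameters from a module-level `options` object and its trainer from an `optimizers` table, and the scalar_value/backward/update pattern matches DyNet's trainer API. Below is a minimal driver sketch under those assumptions; the flag names mirror the attributes `train_and_test` reads, but the defaults and the choice of DyNet are guesses for illustration, not the original project's settings.

import argparse
import dynet as dy

# Hypothetical optimizer table, as optimizers[options.optimizer](parser.model)
# expects: command-line name -> DyNet trainer constructor.
optimizers = {'sgd': dy.SimpleSGDTrainer,
              'momentum': dy.MomentumSGDTrainer,
              'adam': dy.AdamTrainer}

def build_options():
    ap = argparse.ArgumentParser()
    ap.add_argument('--data_dir', default='data/')
    ap.add_argument('--model_dir', default='models/parser')
    ap.add_argument('--result_dir', default='results')
    ap.add_argument('--order', default='top_down')
    ap.add_argument('--embedding_file', default=None)
    ap.add_argument('--word_dim', type=int, default=100)
    ap.add_argument('--nt_dim', type=int, default=100)
    ap.add_argument('--ter_dim', type=int, default=100)
    ap.add_argument('--lstm_dim', type=int, default=200)
    ap.add_argument('--nlayers', type=int, default=1)
    ap.add_argument('--attention', action='store_true')
    ap.add_argument('--train_selection', default=None)
    ap.add_argument('--test_selection', default=None)
    ap.add_argument('--beam_search', action='store_true')
    ap.add_argument('--beam_size', type=int, default=10)
    ap.add_argument('--optimizer', default='adam')
    ap.add_argument('--epochs', type=int, default=30)
    ap.add_argument('--print_every', type=int, default=100)
    return ap.parse_args()

if __name__ == '__main__':
    options = build_options()  # becomes the module-level options object
    train_and_test()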
Example #2
def find_executable_all(finished_beam, execution_model, kb, order):
    # Collect every logical form in the beam that executes to a non-empty
    # denotation against the knowledge base. `is_list` is a project helper.
    all_lf = []
    for b in finished_beam:
        output_actions, output_tokens = b.output_actions, b.output_tokens
        output = post_process.recover(output_actions, output_tokens, order)
        output = post_process.format_output(output)
        denotation = execution_model.execute(output, kb)
        if is_list(denotation) and len(denotation) > 0:
            all_lf.append((output, denotation))
    return all_lf
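find_executable_all keeps every beam candidate whose logical form executes to a non-empty list, which suits reranking or training over multiple consistent parses. A short consumption sketch; the tie-breaking heuristic below is illustrative only, not the project's.

all_lf = find_executable_all(finished_beam, execution_model, kb, order)
if all_lf:
    # Illustrative heuristic: prefer the candidate with the most specific
    # (smallest) denotation.
    best_output, best_denotation = min(all_lf, key=lambda p: len(p[1]))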
Example #3
def find_executable(finished_beam, execution_model, kb, order):
    # Return the first logical form in the beam that executes to a non-empty
    # denotation. If none executes, fall back to the top-scoring candidate
    # paired with an empty denotation; an empty beam yields (None, []).
    output0 = None
    for bid, b in enumerate(finished_beam):
        output_actions, output_tokens = b.output_actions, b.output_tokens
        output = post_process.recover(output_actions, output_tokens, order)
        output = post_process.format_output(output)
        if bid == 0:
            output0 = output
        denotation = execution_model.execute(output, kb)
        if is_list(denotation) and len(denotation) > 0:
            return output, denotation
    return output0, []
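Unlike find_executable_all, this variant stops at the first executable candidate in beam order (i.e. by model score) and still returns the top-scoring parse when nothing executes, so callers can report a best-effort result. A small sketch of how a caller might distinguish the cases; the print is illustrative.

output, denotation = find_executable(finished_beam, execution_model, kb, order)
if not denotation:
    # Either the beam was empty (output is None) or no candidate executed;
    # output then carries the top-scoring parse as a best-effort fallback.
    print('no executable parse for this sentence:', output)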
Example #4
def find_executable_by_result(finished_beam, execution_model, kb, order,
                              answer):
    # Partition the beam's executable logical forms by whether their
    # denotation overlaps the gold answer set.
    good_lf = []
    bad_lf = []
    for b in finished_beam:
        output_actions, output_tokens = b.output_actions, b.output_tokens
        output = post_process.recover(output_actions, output_tokens, order)
        output = post_process.format_output(output)
        denotation = execution_model.execute(output, kb)
        if is_list(denotation) and len(denotation) > 0:
            if set(denotation) & set(answer):
                good_lf.append((output, denotation))
            else:
                bad_lf.append((output, denotation))
    return good_lf, bad_lf
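Note that a candidate counts as "good" when its denotation merely overlaps the gold answer set, a looser test than exact match. The good/bad split lends itself to denotation-based weak supervision; a hedged sketch follows, where the 1/0 reward scheme is an assumption for illustration, not the project's training objective.

good_lf, bad_lf = find_executable_by_result(finished_beam, execution_model,
                                            kb, order, answer)
# Hypothetical reward assignment for weakly supervised training.
rewards = [(lf, 1.0) for lf, _ in good_lf] + [(lf, 0.0) for lf, _ in bad_lf]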