Example #1
def main(args, local_rank=0):

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)

    vocabs = dict()
    vocabs['src'] = Vocab(args.src_vocab, 0, [BOS, EOS])
    vocabs['tgt'] = Vocab(args.tgt_vocab, 0, [BOS, EOS])

    logger.info(args)
    for name in vocabs:
        logger.info("vocab %s, size %d, coverage %.3f", name,
                    vocabs[name].size, vocabs[name].coverage)

    set_seed(19940117)

    #device = torch.device('cpu')
    torch.cuda.set_device(local_rank)
    device = torch.device('cuda', local_rank)

    logger.info("start building model")
    logger.info("building retriever")
    if args.add_retrieval_loss:
        retriever, another_model = Retriever.from_pretrained(
            args.num_retriever_heads,
            vocabs,
            args.retriever,
            args.nprobe,
            args.topk,
            local_rank,
            load_response_encoder=True)
        matchingmodel = MatchingModel(retriever.model, another_model)
        matchingmodel = matchingmodel.to(device)
    else:
        retriever = Retriever.from_pretrained(args.num_retriever_heads, vocabs,
                                              args.retriever, args.nprobe,
                                              args.topk, local_rank)

    logger.info("building retriever + generator")
    model = RetrieverGenerator(vocabs, retriever, args.share_encoder,
                               args.embed_dim, args.ff_embed_dim,
                               args.num_heads, args.dropout, args.mem_dropout,
                               args.enc_layers, args.dec_layers,
                               args.mem_enc_layers, args.label_smoothing)

    model = model.to(device)

    model.eval()
    dev_data = DataLoader(vocabs,
                          args.dev_data,
                          args.dev_batch_size,
                          for_train=False)
    bleu = validate(device,
                    model,
                    dev_data,
                    beam_size=5,
                    alpha=0.6,
                    max_time_step=10)
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-config", type=str)
    parser.add_argument("-nmt_dir", type=str)
    parser.add_argument("-model_type", type=str)
    parser.add_argument('-gpuid', default=[0], nargs='+', type=int)
    parser.add_argument("-valid_file", type=str)
    parser.add_argument("-train_file", type=str)
    parser.add_argument("-train_score", type=str, default=None)
    parser.add_argument("-src_vocab", type=str)
    parser.add_argument("-tgt_vocab", type=str)
    parser.add_argument("-rg_model", type=str, default=None)
    parser.add_argument("-tg_model", type=str, default=None)
    parser.add_argument("-critic_model", type=str, default=None)

    args = parser.parse_args()
    opt = utils.load_hparams(args.config)
    cuda.set_device(args.gpuid[0])
    if opt.random_seed > 0:
        random.seed(opt.random_seed)
        torch.manual_seed(opt.random_seed)
        np.random.seed(opt.random_seed)

    fields = dict()
    vocab_src = Vocab(args.src_vocab, noST=True)
    vocab_tgt = Vocab(args.tgt_vocab)

    fields['src'] = vocab_wrapper(vocab_src)
    fields['tgt'] = vocab_wrapper(vocab_tgt)

    mask_end = True
    train = Data_Loader(args.train_file,
                        opt.train_batch_size,
                        score=args.train_score,
                        mask_end=mask_end)
    valid = Data_Loader(args.valid_file,
                        opt.train_batch_size,
                        mask_end=mask_end)

    # Build model.
    model, critic, start_epoch_at = build_or_load_model(args, opt, fields)

    check_save_model_path(args, opt)

    # Build optimizer.
    optimR, lr_schedulerR, optimT, lr_schedulerT, optimC, lr_schedulerC = build_optims_and_schedulers(
        model, critic, opt)

    if use_cuda:
        model = model.cuda()
        if opt.use_critic:
            critic = critic.cuda()

    # Do training.
    train_model(opt, model, critic, train, valid, fields, optimR,
                lr_schedulerR, optimT, lr_schedulerT, optimC, lr_schedulerC,
                start_epoch_at)
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-model_type", type=str)
    parser.add_argument("-test_file", type=str)
    parser.add_argument("-tgt_out", type=str)
    parser.add_argument("-model", type=str)
    parser.add_argument('-gpuid', default=[0], nargs='+', type=int)
    parser.add_argument('-src_vocab', type=str)
    parser.add_argument('-tgt_vocab', type=str)
    # argparse's type=bool treats any non-empty string as True, so use a flag instead
    parser.add_argument('-gan', action='store_true', default=False)
    args = parser.parse_args()
    opt = torch.load(args.model)['opt']

    fields = dict()
    vocab_src = Vocab(args.src_vocab, noST=True)
    vocab_tgt = Vocab(args.tgt_vocab)
    fields['src'] = vocab_wrapper(vocab_src)
    fields['tgt'] = vocab_wrapper(vocab_tgt)

    use_cuda = False
    if args.gpuid:
        cuda.set_device(args.gpuid[0])
        use_cuda = True

    if args.model_type == "base":
        model = nmt.model_helper.create_base_model(opt, fields)
    if args.model_type == "bibase":
        model = nmt.model_helper.create_bibase_model(opt, fields)
    if args.model_type == "ref":
        model = nmt.model_helper.create_ref_model(opt, fields)
    if args.model_type == "ev":
        model = nmt.model_helper.create_ev_model(opt, fields)
    if args.model_type == "rg":
        model = nmt.model_helper.create_response_generator(opt, fields)
    if args.model_type == "joint":
        model = nmt.model_helper.create_joint_model(opt, fields)
    print('Loading parameters ...')

    if args.gan:
        ckpt = torch.load(args.model)
        model.load_state_dict(ckpt['generator_dict'])
    else:
        model.load_checkpoint(args.model)

    if use_cuda:
        model = model.cuda()

    scorer = nmt.Scorer(model, fields['tgt'].vocab, None, None, opt)
    mask_end = (args.model_type == 'ev') or (args.model_type == 'joint')
    test_iter = Data_Loader(args.test_file,
                            opt.train_batch_size,
                            train=False,
                            mask_end=mask_end)
    score_file(scorer, test_iter, args.tgt_out, fields, use_cuda)
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-config", type=str)
    parser.add_argument("-nmt_dir", type=str)
    parser.add_argument("-model_type", type=str)
    parser.add_argument('-gpuid', default=[0], nargs='+', type=int)
    parser.add_argument("-valid_file", type=str)
    parser.add_argument("-train_file", type=str)
    parser.add_argument("-train_score", type=str, default=None)
    parser.add_argument("-src_vocab", type=str)
    parser.add_argument("-tgt_vocab", type=str)
    parser.add_argument("-start_point", type=str, default=None)

    args = parser.parse_args()
    opt = utils.load_hparams(args.config)

    if opt.random_seed > 0:
        random.seed(opt.random_seed)
        torch.manual_seed(opt.random_seed)

    fields = dict()
    vocab_src = Vocab(args.src_vocab, noST=True)
    vocab_tgt = Vocab(args.tgt_vocab)
    fields['src'] = vocab_wrapper(vocab_src)
    fields['tgt'] = vocab_wrapper(vocab_tgt)

    train = Data_Loader(args.train_file,
                        opt.train_batch_size,
                        score=args.train_score,
                        mask_end=(args.model_type == "ev"))
    valid = Data_Loader(args.valid_file,
                        opt.train_batch_size,
                        mask_end=(args.model_type == "ev"))

    # Build model.

    model, start_epoch_at = build_or_load_model(args, opt, fields)
    check_save_model_path(args, opt)

    optimG, schedulerG, optimD, schedulerD = build_optims_and_lr_schedulers(
        model, opt)

    if use_cuda:
        model = model.cuda()

    # Do training.
    #pretrain_discriminators(opt, model, train, valid, fields, optimD, schedulerD, start_epoch_at)
    train_model(opt, model, train, valid, fields, optimG, schedulerG, optimD,
                schedulerD, start_epoch_at)
    print("DONE")
    x = 0
    while True:
        x = (x + 1) % 5
Example #5
def main():
    tf.logging.set_verbosity(
        tf.logging.INFO)  # choose what level of logging you want
    tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)  # create a vocabulary

    FLAGS.batch_size = FLAGS.beam_size

    hparam_list = [
        'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
        'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim',
        'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage',
        'cov_loss_wt', 'pointer_gen'
    ]
    hps_dict = {}
    for key, val in FLAGS.__flags.iteritems():  # for each flag
        if key in hparam_list:  # if it's in the list
            hps_dict[key] = val  # add it to the dict
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
    batcher = Batcher(FLAGS.data_path,
                      vocab,
                      hps,
                      single_pass=FLAGS.single_pass)
    tf.set_random_seed(111)  # a seed value for randomness
    decode_model_hps = hps  # This will be the hyperparameters for the decoder model
    decode_model_hps = hps._replace(max_dec_steps=1)
    # The model is configured with max_dec_steps=1 because we only ever run one
    # step of the decoder at a time (to do beam search). Note that the batcher
    # is initialized with max_dec_steps equal to e.g. 100 because the batches
    # need to contain the full summaries.
    model = SummarizationModel(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder.decode()  # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
Example #6
def creatVocab(corpusFile):
    tag_counter = Counter()
    alldatas = read_corpus(corpusFile)
    for inst in alldatas:
        tag_counter[inst.tag] += 1

    return Vocab(tag_counter)
Example #7
def load_ResponseGenerator(load_path):
    ckpt = torch.load(load_path, map_location='cpu')
    model_args = ckpt['args']
    v = set([x.strip() for x in open(model_args.vocab_src).readlines()])

    vocab_src = Vocab(model_args.vocab_src, with_SE=False)
    vocab_tgt = Vocab(model_args.vocab_tgt, with_SE=True)

    model = ResponseGenerator(vocab_src, vocab_tgt, model_args.embed_dim,
                              model_args.hidden_size, model_args.num_layers,
                              model_args.dropout, model_args.input_feed)

    model.load_state_dict(ckpt['model'])
    model = model.cuda()
    model.eval()
    return model, v, vocab_src, vocab_tgt
Example #8
def createEmb(vocab_dir, emb_dir, size):

    vocab = Vocab(vocab_dir, size)

    word_to_id = vocab._word_to_id
    id_to_word = vocab._id_to_word

    with open(emb_dir + "/skipgram_matrice.p", 'rb') as pickle_file:
        emb = pickle.load(pickle_file)

    with open(emb_dir + "/skipgram_labels.p", 'rb') as pickle_file:
        lab = pickle.load(pickle_file)

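    # build an embedding matrix aligned with the vocab: words present in the
    # skip-gram labels get their vector; everything else falls back to "UNK"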
    matrix = np.zeros((len(word_to_id), emb.shape[1]), dtype=np.float32)
    labels = list(lab.values())
    for element in word_to_id:
        if element in labels:
            matrix[word_to_id[element]] = emb[labels.index(element)]
        else:
            matrix[word_to_id[element]] = emb[labels.index("UNK")]

    with open("skipgram_matrix.p", "wb") as file:
        pickle.dump(matrix, file)
    return
Example #9
def main():
    args = get_args()
    vocab = Vocab(args.vocab_path, args.vocab_size)  # create a vocabulary
    hps = get_hps()
    if not args.data_path == "":
        batcher = Batcher(args.data_path, vocab, hps, args.single_pass)
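        # deliberate pdb breakpoints: pause here to inspect a batch interactively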
        import pdb
        pdb.set_trace()
        x = batcher.next_batch()
        import pdb
        pdb.set_trace()
        pass
    else:
        with open(args.json_path) as f:
            art = json.load(f)
        article = neologdn.normalize(art['body'])
        abstract = neologdn.normalize(art['title'])
        m = MeCab('-Owakati')
        parsed_article = m.parse(article)
        abs_words = m.parse(abstract).split()
        ex = B.Example(parsed_article, abs_words, vocab, hps)
        b = B.Batch([ex], hps, vocab)
        import pdb
        pdb.set_trace()
        pass
Example #10
File: main.py  Project: aichunks/NLP
def predict():

    if FLAGS.mode != "predict":
        print("Wrong Function")
        return

    FLAGS.batch_size = FLAGS.beam_size

    hps = prepare_hps()
    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)

    decode_model_hps = hps._replace(max_dec_steps=1)
    generator = Generator(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(generator, None, vocab)

    # text=" tHE PRESIDENT wILL REMAIN IN THE uNITED sTATES TO OVERSE THE AMERICAN RESPONSE TO sYRIA AND TO MONITOR DEVELOPMENTS AROUND THE WORLD. NOW YOU REMEMBER YESTERDAY THE PRESIDENT SENT OUT A SET OUT A TIMELINE OF 24 TO 48 HOURS AS TO WHEN HE MOULD HAKE HIS DECISION KNOWN AS IT RELATES TO THE us REsp ONSE TO THE CRISIS IN sYRIA SO HE COULD GET SOME MORE CLARITY ON THAT TODAY. buT sARAH sANDERS IS NOM HITTING THE FACT THAT THIS TRIP IS CANCELED ON SOME OF ThaT DECISION-MAKING cHRIS."
    # text='''  yOU HAVE SOME BREAKING NEHS ABOUT PRESIDENT CANCELING A SCHEDULED TRIP
    # # THAT HAS PLANNED LATER THIS HEEK. IET'S GO BACK TO THE WHITE hOUSE. nbc'S JEFF
    # # bENNETT, wHAT ARE YOU HEARING.? hEY CHRIS, PRESIDENT tRUMP HAS SET TO MAKE HIS FIRST VISIT OF HIS PRESIDENCY TO lATIN eMERICA LATER THIS HEEK. bUT HE'VE JUST LEARNED FROM WHITE hOUSE DRESS SECRETARY SARAH SANDERS THAT TRIP IS NOM CALLED OFF.
    # # hERE'S THE STATEMENT. SHE SENT OUT MOMENTS AGO PRESIDENT tRUMP HILL NOT ATTEND
    # # HE SUMMIT OF THE eMERICAS IN lIMA PERU OR TRAVEL TO bOGOTA COLOMBIA AS ORIGINALY SCHEDULED AT THE PRESIDENT'S REQUEST. tHE VICE PRESIDENT HILL TRAVEL IN INSTED'''
    text = "-lrb- cnn -rrb- the palestinian authority officially became the 123rd member of the international criminal court on wednesday , a step that gives the court jurisdiction over alleged crimes in palestinian territories . the formal accession was marked with a ceremony at the hague , in the netherlands , where the court is based . the palestinians signed the icc 's founding rome statute in january , when they also accepted its jurisdiction over alleged crimes committed `` in the occupied palestinian territory , including east jerusalem , since june 13 , 2014 . '' later that month , the icc opened a preliminary examination into the situation in palestinian territories , paving the way for possible war crimes investigations against israelis . as members of the court , palestinians may be subject to counter-charges as well . israel and the united states , neither of which is an icc member , opposed the palestinians ' efforts to join the body . but palestinian foreign minister riad al-malki , speaking at wednesday 's ceremony , said it was a move toward greater justice . `` as palestine formally becomes a state party to the rome statute today , the world is also a step closer to ending a long era of impunity and injustice , '' he said , according to an icc news release . `` indeed , today brings us closer to our shared goals of justice and peace . '' judge kuniko ozaki , a vice president of the icc , said acceding to the treaty was just the first step for the palestinians . `` as the rome statute today enters into force for the state of palestine , palestine acquires all the rights as well as responsibilities that come with being a state party to the statute . these are substantive commitments , which can not be taken lightly , '' she said . rights group human rights watch welcomed the development . `` governments seeking to penalize palestine for joining the icc should immediately end their pressure , and countries that support universal acceptance of the court 's treaty should speak out to welcome its membership , '' said balkees jarrah , international justice counsel for the group . `` what 's objectionable is the attempts to undermine international justice , not palestine 's decision to join a treaty to which over 100 countries around the world are members . '' in january , when the preliminary icc examination was opened , israeli prime minister benjamin netanyahu described it as an outrage , saying the court was overstepping its boundaries . the united states also said it `` strongly '' disagreed with the court 's decision . `` as we have said repeatedly , we do not believe that palestine is a state and therefore we do not believe that it is eligible to join the icc , '' the state department said in a statement . it urged the warring sides to resolve their differences through direct negotiations . `` we will continue to oppose actions against israel at the icc as counterproductive to the cause of peace , '' it said . but the icc begs to differ with the definition of a state for its purposes and refers to the territories as `` palestine . '' while a preliminary examination is not a formal investigation , it allows the court to review evidence and determine whether to investigate suspects on both sides . prosecutor fatou bensouda said her office would `` conduct its analysis in full independence and impartiality . '' the war between israel and hamas militants in gaza last summer left more than 2,000 people dead . the inquiry will include alleged war crimes committed since june . 
the international criminal court was set up in 2002 to prosecute genocide , crimes against humanity and war crimes . cnn 's vasco cotovio , kareem khadder and faith karimi contributed to this report ."
    # text="marseille , france -lrb- cnn -rrb- the french prosecutor leading an investigation into the crash of germanwings flight 9525 insisted wednesday that he was not aware of any video footage from on board the plane . marseille prosecutor brice robin told cnn that `` so far no videos were used in the crash investigation . '' he added , `` a person who has such a video needs to immediately give it to the investigators . '' robin 's comments follow claims by two magazines , german daily bild and french paris match , of a cell phone video showing the harrowing final seconds from on board germanwings flight 9525 as it crashed into the french alps . all 150 on board were killed . paris match and bild reported that the video was recovered from a phone at the wreckage site . the two publications described the supposed video , but did not post it on their websites . the publications said that they watched the video , which was found by a source close to the investigation . `` one can hear cries of ` my god ' in several languages , '' paris match reported . `` metallic banging can also be heard more than three times , perhaps of the pilot trying to open the cockpit door with a heavy object . towards the end , after a heavy shake , stronger than the others , the screaming intensifies . then nothing . '' `` it is a very disturbing scene , '' said julian reichelt , editor-in-chief of bild online . an official with france 's accident investigation agency , the bea , said the agency is not aware of any such video . lt. col. jean-marc menichini , a french gendarmerie spokesman in charge of communications on rescue efforts around the germanwings crash site , told cnn that the reports were `` completely wrong '' and `` unwarranted . '' cell phones have been collected at the site , he said , but that they `` had n't been exploited yet . '' menichini said he believed the cell phones would need to be sent to the criminal research institute in rosny sous-bois , near paris , in order to be analyzed by specialized technicians working hand-in-hand with investigators . but none of the cell phones found so far have been sent to the institute , menichini said . asked whether staff involved in the search could have leaked a memory card to the media , menichini answered with a categorical `` no . '' reichelt told `` erin burnett : outfront '' that he had watched the video and stood by the report , saying bild and paris match are `` very confident '' that the clip is real . he noted that investigators only revealed they 'd recovered cell phones from the crash site after bild and paris match published their reports . `` that is something we did not know before . ... overall we can say many things of the investigation were n't revealed by the investigation at the beginning , '' he said . what was mental state of germanwings co-pilot ? german airline lufthansa confirmed tuesday that co-pilot andreas lubitz had battled depression years before he took the controls of germanwings flight 9525 , which he 's accused of deliberately crashing last week in the french alps . lubitz told his lufthansa flight training school in 2009 that he had a `` previous episode of severe depression , '' the airline said tuesday . email correspondence between lubitz and the school discovered in an internal investigation , lufthansa said , included medical documents he submitted in connection with resuming his flight training . 
    # the announcement indicates that lufthansa , the parent company of germanwings , knew of lubitz 's battle with depression , allowed him to continue training and ultimately put him in the cockpit . lufthansa , whose ceo carsten spohr previously said lubitz was 100 % fit to fly , described its statement tuesday as a `` swift and seamless clarification '' and said it was sharing the information and documents -- including training and medical records -- with public prosecutors . spohr traveled to the crash site wednesday , where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside . he saw the crisis center set up in seyne-les-alpes , laid a wreath in the village of le vernet , closer to the crash site , where grieving families have left flowers at a simple stone memorial . menichini told cnn late tuesday that no visible human remains were left at the site but recovery teams would keep searching . french president francois hollande , speaking tuesday , said that it should be possible to identify all the victims using dna analysis by the end of the week , sooner than authorities had previously suggested . in the meantime , the recovery of the victims ' personal belongings will start wednesday , menichini said . among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board . check out the latest from our correspondents . the details about lubitz 's correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and lubitz 's possible motive for downing the jet . a lufthansa spokesperson told cnn on tuesday that lubitz had a valid medical certificate , had passed all his examinations and `` held all the licenses required . '' earlier , a spokesman for the prosecutor 's office in dusseldorf , christoph kumpa , said medical records reveal lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot 's license . kumpa emphasized there 's no evidence suggesting lubitz was suicidal or acting aggressively before the crash . investigators are looking into whether lubitz feared his medical condition would cause him to lose his pilot 's license , a european government official briefed on the investigation told cnn on tuesday . while flying was `` a big part of his life , '' the source said , it 's only one theory being considered . another source , a law enforcement official briefed on the investigation , also told cnn that authorities believe the primary motive for lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems . lubitz 's girlfriend told investigators he had seen an eye doctor and a neuropsychologist , both of whom deemed him unfit to work recently and concluded he had psychological issues , the european government official said . but no matter what details emerge about his previous mental health struggles , there 's more to the story , said brian russell , a forensic psychologist . `` psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they were n't going to keep doing their job and they 're upset about that and so they 're suicidal , '' he said .
    # `` but there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person 's problems . '' germanwings crash compensation : what we know . who was the captain of germanwings flight 9525 ? cnn 's margot haddad reported from marseille and pamela brown from dusseldorf , while laura smith-spark wrote from london . cnn 's frederik pleitgen , pamela boykoff , antonia mortensen , sandrine amiel and anna-maja rappard contributed to this report ."

    data = genrate_Input(text, hps, vocab)

    out = decoder.predict(data)
    print(out)
Example #11
    def _prepare_data(self):

        vocabs = dict()
        vocabs['concept'] = Vocab(self.args.concept_vocab, 5, [CLS])
        vocabs['token'] = Vocab(self.args.token_vocab, 5, [STR, END])
        vocabs['token_char'] = Vocab(self.args.token_char_vocab, 100,
                                     [STR, END])
        vocabs['concept_char'] = Vocab(self.args.concept_char_vocab, 100,
                                       [STR, END])
        vocabs['relation'] = Vocab(self.args.relation_vocab, 5,
                                   [CLS, rCLS, SEL, TL])
        lexical_mapping = LexicalMap()

        for name in vocabs:
            print((name, vocabs[name].size, vocabs[name].coverage))
        return vocabs, lexical_mapping
Example #12
def main(unused_argv):
  set_random_seeds()

  get_datapath()  # resolve the dataset path
  get_steps()  # set the step counts according to data_size

  tf.logging.set_verbosity(tf.logging.INFO)
  print('Now the mode of this model is {}!'.format(FLAGS.mode))

  # if log_dir does not exist, create it.
  if not os.path.exists(FLAGS.log_dir): os.makedirs(FLAGS.log_dir)

  if FLAGS.mode == 'decode':
    FLAGS.branch_batch_size = FLAGS.beam_size  # for beam search
    FLAGS.TS_mode = False

  hps = make_hps() # make a hps namedtuple

  # Vocabulary
  vocab = Vocab(hps.vocab_path, hps.vocab_size)
  # Train or Inference
  if hps.mode == 'train':
    batcher = Batcher(hps.data_path, vocab, hps)
    eval_hps = hps._replace(mode='eval')
    eval_batcher = Batcher(hps.eval_data_path, vocab, eval_hps)

    model = GSNModel(hps, vocab)
    train(model, batcher, eval_batcher, vocab, hps)
  elif hps.mode == 'decode':
    decode_mdl_hps = hps._replace(max_dec_steps=1)
    batcher = Batcher(hps.test_data_path, vocab, decode_mdl_hps)  # for test

    model = GSNModel(decode_mdl_hps, vocab)
    decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder._decode()
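Example #13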
def run(size):
  # print ((unused_argv))
  # if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
  #   raise Exception("Problem with flags: %s" % unused_argv)

  FLAGS.min_dec_steps = size//4
  FLAGS.max_dec_steps = size
  FLAGS.max_enc_steps = size
  tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want
  tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

  # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
  FLAGS.log_root = log_path
  FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
  if not os.path.exists(FLAGS.log_root):
    if FLAGS.mode =="train":
      os.makedirs(FLAGS.log_root)
    else:
      raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))
  print("vocab path is ",FLAGS.vocab_path)
  vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary

  # If in decode mode, set batch_size = beam_size
  # Reason: in decode mode, we decode one example at a time.
  # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
  if FLAGS.mode == 'decode':
    FLAGS.batch_size = FLAGS.beam_size

  # If single_pass=True, check we're in decode mode
  if FLAGS.single_pass and FLAGS.mode!='decode':
    raise Exception("The single_pass flag should only be True in decode mode")

  # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
  hparam_list = ['mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage', 'cov_loss_wt', 'pointer_gen']
  hps_dict = {}
  #print("This is FLAGS -->",FLAGS)
  for val in FLAGS: # for each flag // New modification for TF 1.5
    if val in hparam_list: # if it's in the list
      hps_dict[val] = FLAGS[val].value # add it to the dict // New modification for TF 1.5
  hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
  # Create a batcher object that will create minibatches of data
  batcher = Batcher(FLAGS.data_path, vocab, hps, single_pass=FLAGS.single_pass)

  tf.set_random_seed(111) # a seed value for randomness

  if hps.mode == 'train':
    print("creating model...")
    model = SummarizationModel(hps, vocab)
    setup_training(model, batcher)
  elif hps.mode == 'eval':
    model = SummarizationModel(hps, vocab)
    run_eval(model, batcher, vocab)
  elif hps.mode == 'decode':
    decode_model_hps = hps  # This will be the hyperparameters for the decoder model
    decode_model_hps = hps._replace(max_dec_steps=1)
    # The model is configured with max_dec_steps=1 because we only ever run one
    # step of the decoder at a time (to do beam search). Note that the batcher
    # is initialized with max_dec_steps equal to e.g. 100 because the batches
    # need to contain the full summaries.
    model = SummarizationModel(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder.decode() # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
  else:
    raise ValueError("The 'mode' flag must be one of train/eval/decode")
Example #14
    def __init__(self, model_file_path, data_path, data_class='val'):
        self.data_class = data_class
        if self.data_class not in  ['val', 'test']:
            print("data_class must be 'val' or 'test'.")
            raise ValueError

        # model_file_path e.g. --> ../log/{MODE NAME}/best_model/model_best_XXXXX
        model_name = os.path.basename(model_file_path)
        # log_root e.g. --> ../log/{MODE NAME}/
        log_root = os.path.dirname(os.path.dirname(model_file_path))
        # _decode_dir e.g. --> ../log/{MODE NAME}/decode_model_best_XXXXX/
        self._decode_dir = os.path.join(log_root, 'decode_%s' % (model_name))
        self._rouge_ref_dir = os.path.join(self._decode_dir, 'rouge_ref')
        self._rouge_dec_dir = os.path.join(self._decode_dir, 'rouge_dec_dir')
        self._result_path = os.path.join(self._decode_dir, 'result_%s_%s.txt' \
                                                        % (model_name, self.data_class))
        # remove the result file if it exists
        if os.path.isfile(self._result_path):
            os.remove(self._result_path)
        for p in [self._decode_dir, self._rouge_ref_dir, self._rouge_dec_dir]:
            if not os.path.exists(p):
                os.mkdir(p)

        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(data_path, self.vocab, mode='decode',
                               batch_size=config.beam_size, single_pass=True)
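        # brief pause, presumably to let the batcher's background threads fill the queue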
        time.sleep(5)

        self.model = Model(model_file_path, is_eval=True)
Example #15
def main():

    args.log_root = os.path.join(args.log_root, args.exp_name)
    if not os.path.exists(args.log_root):
        if args.mode == 'train':
            os.mkdir(args.log_root)
        else:
            raise Exception('Logdir does not exist')

    vocab = Vocab(args.vocab_path, args.vocab_size)

    if args.mode == 'decode':
        args.batch_size = args.beam_size

    hparam_list = ['mode', 'batch_size', 'max_dec_steps', 'max_enc_steps', 'pointer_gen']

    hps = {'mode':args.mode,
           'batch_size':args.batch_size,
           'max_dec_steps':args.decode_len,
           'max_enc_steps':args.encode_len,
           'pointer_gen':args.pointer_gen}
    batcher = Batcher(args.data_path, vocab, hps, single_pass=True if args.mode=='decode' else False)


    # todo load dataset
    # todo load vocab, word2id, id2word
    # do train or decode
    model = DCA_Model(args)
Example #16
def _load_model():
    # These imports are slow - lazy import.
    import tensorflow as tf
    from data import Vocab
    from model import Hps, Settings, SummarizationModel

    global _settings, _hps, _vocab, _sess, _model

    # Define settings and hyperparameters
    _settings = Settings(
        embeddings_path='',
        log_root='',
        trace_path='',  # traces/traces_blog',
    )
    _hps = Hps(
        # parameters important for decoding
        attn_only_entities=False,
        batch_size=_beam_size,
        copy_only_entities=False,
        emb_dim=128,
        enc_hidden_dim=200,
        dec_hidden_dim=300,
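        # one decoder step at a time, as beam search requires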
        max_dec_steps=1,
        max_enc_steps=400,
        mode='decode',
        output_vocab_size=20000,
        restrictive_embeddings=False,
        save_matmul=False,
        tied_output=True,
        two_layer_lstm=True,
        # other parameters
        adagrad_init_acc=.1,
        adam_optimizer=True,
        copy_common_loss_wt=0.,
        cov_loss_wt=0.,
        high_attn_loss_wt=0.,
        lr=.15,
        max_grad_norm=2.,
        people_loss_wt=0.,
        rand_unif_init_mag=.02,
        scatter_loss_wt=0.,
        sharp_loss_wt=0.,
        trunc_norm_init_std=1e-4,
    )

    # Define model
    _vocab = Vocab(_vocab_path, _vocab_size)
    _model = SummarizationModel(_settings, _hps, _vocab)
    _model.build_graph()

    # Load model from disk
    saver = tf.train.Saver()
    config = tf.ConfigProto(
        allow_soft_placement=True,
        #intra_op_parallelism_threads=1,
        #inter_op_parallelism_threads=1,
    )
    _sess = tf.Session(config=config)
    ckpt_state = tf.train.get_checkpoint_state(_model_dir)
    saver.restore(_sess, ckpt_state.model_checkpoint_path)
Example #17
def main(unused_argv):
    if len(unused_argv)!=1:
        raise Exception('Problem with flags: %s'%unused_argv)

    FLAGS.log_root=os.path.join(FLAGS.log_root, FLAGS.exp_name)
    if not os.path.exists(FLAGS.log_root):
        raise Exception('log directory %s does not exist.'%FLAGS.log_root)

    vocab=Vocab(FLAGS.vocab_path, FLAGS.vocab_size)

    hparam_list=['mode','lr','adagrad_init_acc','rand_unif_init_mag','trunc_norm_init_std','max_grad_norm','hidden_dim','emb_dim','batch_size','max_dec_steps','max_enc_steps','coverage','cov_loss_wt','pointer_gen']
    hps_dict={}
    for key,val in FLAGS.__flags.iteritems(): # for each flag
        if key in hparam_list: # if it's in the list
            hps_dict[key]=val # add it to the dict
    hps=namedtuple("HParams", hps_dict.keys())(**hps_dict)    

    model=SummarizationModel(hps,vocab)

    result_map=[]
    model.build_graph()
    sess=tf.Session(config=get_config())
    trained_model_folder=os.path.join(FLAGS.log_root,'train')

    evaluation_folder=os.path.join(FLAGS.log_root,'eval')
    ckpt_list=get_ckpt_list(trained_model_folder, max_ckpt_num=FLAGS.max_ckpt_num, interval=FLAGS.interval)
    if os.path.exists(evaluation_folder+os.sep+'result.pkl'):
        result_map=cPickle.load(open(evaluation_folder+os.sep+'result.pkl','rb'))
        ckpt_list_included=[]
        ckpt_list_extra=[]
        for ckpt_file, loss in result_map:
            ckpt_list_included.append(ckpt_file)
        for ckpt_file in ckpt_list:
            if not ckpt_file in ckpt_list_included:
                ckpt_list_extra.append(ckpt_file)
        ckpt_list=ckpt_list_extra
        print('%d ckpt already included in the existing result.pkl, skip ...'%len(ckpt_list_included))
    print('There are %d ckpts to evaluate'%len(ckpt_list))

    for idx,ckpt_file in enumerate(ckpt_list):
        print('Start analyzing checkpoint %d/%d'%(idx+1,len(ckpt_list)))
        saver=tf.train.Saver(max_to_keep=3)
        load_ckpt(saver,sess,os.path.join(trained_model_folder,ckpt_file))
        batcher=Batcher(FLAGS.data_path,vocab,hps,single_pass=True)
        avg_loss=eval(model,batcher,vocab,sess)
        print('check point:%s, Average loss in validation set: %.3f'%(ckpt_file, avg_loss))
        result_map.append([ckpt_file,avg_loss])
        if not os.path.exists(evaluation_folder):
            os.makedirs(evaluation_folder)
        cPickle.dump(result_map,open(evaluation_folder+os.sep+'result.pkl','wb'))

    if sys.version_info.major==2:
        result_map=sorted(result_map,lambda x,y:-1 if x[1]>y[1] else 1)
    else:
        result_map=sorted(result_map,key=lambda x:x[1],reverse=True)
    print('==Summary==')
    for ckpt,avg_loss in result_map:
        print('check point: %s, average loss: %.3f'%(ckpt,avg_loss))
    cPickle.dump(result_map,open(evaluation_folder+os.sep+'result.pkl','wb'))
    print('results saved in %s'%(evaluation_folder+os.sep+'result.pkl'))
Example #18
def run(args, local_rank):
    """ Distributed Synchronous """
    torch.manual_seed(1234)
    vocab = Vocab(args.vocab, min_occur_cnt=args.min_occur_cnt, specials=[])
    if (args.world_size == 1 or dist.get_rank() == 0):
        print ("vocab.size = %d"%vocab.size, flush=True)
    model = BIGLM(local_rank, vocab, args.embed_dim, args.ff_embed_dim,\
                  args.num_heads, args.dropout, args.layers, args.smoothing, args.approx)
    if args.start_from is not None:
        ckpt = torch.load(args.start_from, map_location='cpu')
        model.load_state_dict(ckpt['model'])
    model = model.cuda(local_rank)
   
    if args.world_size > 1:
        torch.manual_seed(1234 + dist.get_rank())
        random.seed(5678 + dist.get_rank())
    
    optimizer = Optim(model.embed_dim, args.lr, args.warmup_steps, torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.998), eps=1e-9))

    if args.start_from is not None:
        optimizer.load_state_dict(ckpt['optimizer'])

    #train_data = DataLoader(vocab, args.train_data+"0"+str(local_rank), args.batch_size, args.max_len, args.min_len)
    train_data = DataLoader(vocab, args.train_data, args.batch_size, args.max_len, args.min_len)
    batch_acm = 0
    acc_acm, nll_acm, ppl_acm, ntokens_acm, nxs, npairs_acm, loss_acm = 0., 0., 0., 0., 0., 0., 0.
    while True:
        model.train()
        for truth, inp, msk in train_data:
            batch_acm += 1
            truth = truth.cuda(local_rank)
            inp = inp.cuda(local_rank)
            msk = msk.cuda(local_rank)

            model.zero_grad()
            res, loss, acc, nll, ppl, ntokens, npairs = model(truth, inp, msk)
            loss_acm += loss.item()
            acc_acm += acc
            nll_acm += nll
            ppl_acm += ppl
            ntokens_acm += ntokens
            npairs_acm += npairs
            nxs += npairs
            
            loss.backward()
            if args.world_size > 1:
                average_gradients(model)
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            
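            # fires once every print_every batches (note -1 % N == N - 1)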
            if (args.world_size==1 or dist.get_rank() ==0) and batch_acm%args.print_every == -1%args.print_every:
                print ('batch_acm %d, loss %.3f, acc %.3f, nll %.3f, ppl %.3f, x_acm %d, lr %.6f'\
                        %(batch_acm, loss_acm/args.print_every, acc_acm/ntokens_acm, \
                        nll_acm/nxs, ppl_acm/nxs, npairs_acm, optimizer._rate), flush=True)
                acc_acm, nll_acm, ppl_acm, ntokens_acm, loss_acm, nxs = 0., 0., 0., 0., 0., 0.
            if (args.world_size==1 or dist.get_rank() ==0) and batch_acm%args.save_every == -1%args.save_every:
                if not os.path.exists(args.save_dir):
                    os.mkdir(args.save_dir)
                torch.save({'args':args, 'model':model.state_dict(), 'optimizer':optimizer.state_dict()}, '%s/epoch%d_batch_%d'%(args.save_dir, train_data.epoch_id, batch_acm))
Example #19
def main(unused_argv):

    if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)

    if FLAGS.dataset_name != "":
        FLAGS.data_path = os.path.join(FLAGS.data_root, FLAGS.dataset_name, FLAGS.dataset_split + '*')

    logging.set_verbosity(logging.INFO) # choose what level of logging you want
    logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.exp_name = FLAGS.exp_name if FLAGS.exp_name != '' else FLAGS.dataset_name
    FLAGS.actual_log_root = FLAGS.log_root
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)

    original_dataset_name = 'xsum' if 'xsum' in FLAGS.dataset_name else 'cnn_dm' if 'cnn_dm' in FLAGS.dataset_name or 'duc_2004' in FLAGS.dataset_name else ''
    vocab = Vocab(FLAGS.vocab_path + '_' + original_dataset_name, FLAGS.vocab_size) # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode!='decode':
        raise Exception("The single_pass flag should only be True in decode mode")

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
    hparam_list = ['mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std',
                   'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps',
                   'max_enc_steps', 'coverage', 'cov_loss_wt', 'pointer_gen', 'lambdamart_input']
    hps_dict = {}
    for key,val in FLAGS.__flags.items(): # for each flag
        if key in hparam_list: # if it's in the list
            hps_dict[key] = val.value # add it to the dict
    hps = namedtuple("HParams", list(hps_dict.keys()))(**hps_dict)

    tf.set_random_seed(113) # a seed value for randomness

    decode_model_hps = hps._replace(
        max_dec_steps=1)  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries

    if len(unused_argv) != 1:  # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)

    np.random.seed(random_seed)
    # batcher = Batcher(None, vocab, hps, single_pass=FLAGS.single_pass)
    # model = SummarizationModel(decode_model_hps, vocab)
    # decoder = BeamSearchDecoder(model, None, vocab)

    # decode_dir = decoder._decode_dir
    ckpt_folder = util.find_largest_ckpt_folder(FLAGS.log_root)
    decode_dir = os.path.join(FLAGS.log_root, ckpt_folder)
    print(decode_dir)
    attn_dir = os.path.join(decode_dir, 'attn_vis_data')

    process_attn_selections(attn_dir, decode_dir, vocab, extraction_eval=True)
Example #20
def init_model(m_path, device, vocab):
    ckpt = torch.load(m_path, map_location='cpu')
    lm_args = ckpt['args']
    lm_vocab = Vocab(vocab, min_occur_cnt=lm_args.min_occur_cnt, specials=[])
    lm_model = BIGLM(device, lm_vocab, lm_args.embed_dim, lm_args.ff_embed_dim, lm_args.num_heads, lm_args.dropout, lm_args.layers, 0.1, lm_args.approx)
    lm_model.load_state_dict(ckpt['model'])
    lm_model = lm_model.to(device)
    lm_model.eval()
    return lm_model, lm_vocab, lm_args
Example #21
def main(unused_argv):
    if len(unused_argv
           ) != 1:  # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)

    tf.logging.set_verbosity(
        tf.logging.INFO)  # choose what level of logging you want
    tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
    if not os.path.exists(FLAGS.log_root):
        if FLAGS.mode == "train":
            os.makedirs(FLAGS.log_root)
        else:
            raise Exception(
                "Logdir %s doesn't exist. Run in train mode to create it." %
                (FLAGS.log_root))

    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)  # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode != 'decode':
        raise Exception(
            "The single_pass flag should only be True in decode mode")

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
    hparam_list = [
        'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
        'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim',
        'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage',
        'cov_loss_wt', 'pointer_gen'
    ]
    hps_dict = {}
    for key, val in FLAGS.__flags.items():  # for each flag
        if key in hparam_list:  # if it's in the list
            hps_dict[key] = val  # add it to the dict
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

    # Create a batcher object that will create minibatches of data
    batcher = Batcher(FLAGS.data_path,
                      vocab,
                      hps,
                      single_pass=FLAGS.single_pass)

    tf.set_random_seed(111)  # a seed value for randomness

    # if hps.mode == 'train':
    print("creating model...")
    model = SummarizationModel(hps, vocab)
    setup_training(model, batcher)
Example #22
def main():
  global model
  global vocab
  global hps
  global decoder

  FLAGS.mode = 'decode'
  FLAGS.vocab_path = '../vocab'
  FLAGS.log_root = '../models'
  FLAGS.exp_name = 'pretrained_model_tf1.2.1'
  FLAGS.max_enc_steps = 400
  FLAGS.max_dec_steps = 120
  FLAGS.coverage = 1
  FLAGS.single_pass = True

  tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want
  tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

  # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
  FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
  if not os.path.exists(FLAGS.log_root):
    if FLAGS.mode=="train":
      os.makedirs(FLAGS.log_root)
    else:
      raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))

  vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary

  # If in decode mode, set batch_size = beam_size
  # Reason: in decode mode, we decode one example at a time.
  # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
  if FLAGS.mode == 'decode':
    FLAGS.batch_size = FLAGS.beam_size

  # If single_pass=True, check we're in decode mode
  if FLAGS.single_pass and FLAGS.mode!='decode':
    raise Exception("The single_pass flag should only be True in decode mode")

  # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
  hparam_list = ['mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage', 'cov_loss_wt', 'pointer_gen']
  hps_dict = {}
  for key,val in FLAGS.__flags.items(): # for each flag
    if key in hparam_list: # if it's in the list
      hps_dict[key] = val # add it to the dict
  hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

  tf.set_random_seed(111) # a seed value for randomness

  decode_model_hps = hps  # This will be the hyperparameters for the decoder model
  decode_model_hps = hps._replace(max_dec_steps=1) # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
  model = SummarizationModel(decode_model_hps, vocab)

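  # placeholder article/abstract strings used to drive a single decode pass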
  abstract = "tim ist toll"
  article = "tim ist toll"

  batcher = Batcher(FLAGS.data_path, vocab, hps, single_pass=FLAGS.single_pass, abstract=abstract, article=article)
  decoder = BeamSearchDecoder(model, batcher, vocab)
Example #23
def loadModel():
    FLAGS.batch_size = FLAGS.beam_size
    hps = prepare_hps()
    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)

    decode_model_hps = hps._replace(max_dec_steps=1)
    generator = Generator(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(generator, None, vocab)
    return decoder, hps, vocab
Example #24
def main(unused_args):
    if len(unused_args) != 1: raise Exception('Problem with flags: %s' % unused_args)

    tf.logging.set_verbosity(tf.logging.INFO)
    tf.logging.info('Starting pointer generator in %s mode...', (FLAGS.mode))

    # setup log directory
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)

    if not os.path.exists(FLAGS.log_root):
        if FLAGS.mode == 'train': os.makedirs(FLAGS.log_root)
        else: raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))

    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    if FLAGS.single_pass and FLAGS.mode != 'decode':
        raise Exception("The single_pass flag should only be True in decode mode")

    if not FLAGS.p_gen:
        assert not FLAGS.coverage and not FLAGS.experiment

    # setup vocabulary
    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size, FLAGS.emb_dim)

    # setup hps
    hps_name = ['mode',
                'hidden_dim', 'batch_size', 'emb_dim', 'max_enc_steps', 'max_dec_steps', 'vocab_size',
                'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trun_norm_init_std', 'max_grad_norm', 'cov_loss_weight',
                'cpu_only', 'p_gen', 'coverage', 'experiment']
    hps = {}

    for k, v in FLAGS.__flags.items():
        if k in hps_name: hps[k] = v.value

    if FLAGS.mode == 'decode':
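        # decoding runs the decoder one step at a time for beam search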
        hps['max_dec_steps'] = 1

    hps = namedtuple('HyperParams', hps.keys())(**hps)

    batcher = Batcher(FLAGS.data_path, vocab, hps, FLAGS.single_pass)

    tf.set_random_seed(13131)

    if FLAGS.mode == 'train':
        model = Model(hps)
        run_training(model, batcher)
    elif FLAGS.mode == 'eval':
        model = Model(hps)
        run_eval(model, batcher)
    elif FLAGS.mode == 'decode':
        model = Model(hps)
        decoder = BeamSearchDecoder(model, batcher, vocab)
        decoder.decode()
    else:
        raise ValueError("The 'mode' flag must be one of train/eval/decode")
Example #25
def build_vocab(exs, args):
    """ Build vocab using training examples. """
    src_lines, tgt_lines = [], []

    for ex in exs:
        src_lines.append(ex['src'])
        tgt_lines.append(ex['trg'])

    vocab = Vocab(src_lines + tgt_lines, args.vocab_limit)
    return vocab
Example #26
def main():
    vocab = Vocab(hps.word_count_path, hps.glove_path, hps.embedding_dim)

    net = PointerNet(hps, vocab.emb_mat)
    net = net.cuda()

    data_batcher = batcher(hps.data_path, vocab, hps, hps.single_pass)
    model_parameters = filter(lambda p: p.requires_grad, net.parameters())
    optimizer = optim.Adam(model_parameters)

    loss_track = []
    global_step = 0
    while True:
        start = time.time()
        batch = next(data_batcher)
        #batch = pickle.load(open('one_batch.pkl', 'rb'))
        paragraph_tensor = torch.tensor(batch.enc_batch, dtype=torch.int64, requires_grad=False).cuda()
        question_tensor = torch.tensor(batch.dec_batch, dtype=torch.int64, requires_grad=False).cuda()
        answer_position_tensor = torch.tensor(batch.ans_indices, dtype=torch.int64, requires_grad=False).cuda()
        target_tensor = torch.tensor(batch.target_batch, dtype=torch.int64, requires_grad=False).cuda()
        
        paragraph_batch_extend_vocab = None
        max_para_oovs = None
        if hps.pointer_gen:
            paragraph_batch_extend_vocab = torch.tensor(batch.enc_batch_extend_vocab, dtype=torch.int64, requires_grad=False).cuda()
            max_para_oovs = batch.max_para_oovs
        
        vocab_scores, vocab_dists, attn_dists, final_dists = net(paragraph_tensor, question_tensor, answer_position_tensor,
                                                                 paragraph_batch_extend_vocab, max_para_oovs)
        
        optimizer.zero_grad()
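        # positions where the target is padding (id 0) are masked out of the loss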
        dec_padding_mask = torch.ne(target_tensor, 0).float().cuda()
        if hps.pointer_gen:
            loss_per_step = []
            for dec_step, dist in enumerate(final_dists):
                # dist = [batch_size, extended_vsize]
                targets = target_tensor[:,dec_step]
                gold_probs = torch.gather(dist, 1, targets.unsqueeze(1)).squeeze()
                losses = -torch.log(gold_probs)
                loss_per_step.append(losses) # a list of [batch_size,]
            loss = mask_and_avg(loss_per_step, dec_padding_mask)
        else:
            # a list of dec_max_len (vocab_scores)
            loss_batch_by_step = F.cross_entropy(torch.cat(vocab_scores, dim=1).reshape(-1, vocab.size()), target_tensor.reshape(-1), size_average=False, reduce=False)
            # loss [batch_size*dec_max_len,]
            loss = torch.sum(loss_batch_by_step * dec_padding_mask.reshape(-1))/torch.sum(dec_padding_mask)
        loss_track.append(loss.item())
        global_step += 1

        loss.backward()
        optimizer.step()
        if global_step % hps.print_every == 0:
            print('Step {:>10}: ave loss: {:>10.4f}, speed: {:.4f}/case'.format(global_step, sum(loss_track)/len(loss_track), (time.time()-start)/hps.batch_size))
            loss_track = []
Example #27
def creatVocab(corpusFile, min_occur_count, tokenizer):
    '''Build a Vocab from the corpus'''
    word_counter = Counter()
    tag_counter = Counter()
    alldatas = read_corpus(corpusFile, tokenizer)
    for inst in alldatas:
        tag_counter[inst.tag] += 1
        if len(tag_counter) == 3:
            break

    return Vocab(word_counter, tag_counter, min_occur_count), alldatas
Example #28
    def __init__(self, model_file_path):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.eval_data_path,
                               self.vocab,
                               mode='eval',
                               batch_size=config.batch_size,
                               single_pass=True)
        self.model_file_path = model_file_path
        time.sleep(5)

        self.model = Model(model_file_path, is_eval=True)
Example #29
    def __init__(self, config):
        self.config = config
        self.step = 0
        self.vocab = Vocab(config.vocab_file, config.vocab_size)
        self.train_data = CNNDMDataset('train', config.data_path, config,
                                       self.vocab)
        self.validate_data = CNNDMDataset('val', config.data_path, config,
                                          self.vocab)
        # self.model = Model(config).to(device)
        # self.optimizer = None
        self.setup(config)

    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path,
                               self.vocab,
                               batch_size=config.batch_size)
        train_dir = os.path.join(config.log_root)
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)