Example #1
def decode_Beam(FLAGS):
    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    #if FLAGS.mode == 'decode':
    #    FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    #if FLAGS.single_pass and FLAGS.mode != 'decode':
    #    raise Exception("The single_pass flag should only be True in decode mode")


    vocab_in, vocab_out = data.load_dict_data(FLAGS)

    FLAGS_batcher = config.retype_FLAGS()

    FLAGS_decode = FLAGS_batcher._asdict()
    FLAGS_decode["max_dec_steps"] = 1
    FLAGS_decode["mode"] = "decode"
    FLAGS_decode = config.generate_nametuple(FLAGS_decode)
    # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
    batcher = Batcher(FLAGS.data_path, vocab_in, vocab_out, FLAGS_batcher, data_file=FLAGS.test_name)

    model = SummarizationModel(FLAGS_decode, vocab_in, vocab_out, batcher)
    decoder = BeamSearchDecoder(model, batcher, vocab_out)
    decoder.decode()
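Most of these examples set batch_size = beam_size and rebuild the model with max_dec_steps=1. The reason, per the comments above: decode mode processes one source example at a time, and each decoder step scores all beam_size partial hypotheses as a single batch. The sketch below illustrates that loop; it is a toy, and model.decode_onestep (returning per-hypothesis top-k next-token ids and log-probabilities) is an assumed stand-in, not the real BeamSearchDecoder API.

# A minimal beam-search sketch, assuming a hypothetical model.decode_onestep(batch)
# that takes the current partial hypotheses and returns, for each one, the top-k
# next-token ids with their log-probabilities. Not the real BeamSearchDecoder API.
def beam_search_sketch(model, start_id, stop_id, beam_size, max_dec_steps):
    hyps = [([start_id], 0.0)]  # (token sequence, cumulative log-prob)
    for _ in range(max_dec_steps):
        # The whole beam is packed into one batch -- the reason decode mode
        # sets batch_size = beam_size in the surrounding examples.
        batch = [tokens for tokens, _ in hyps]
        topk_ids, topk_logps = model.decode_onestep(batch)  # ONE decoder step
        candidates = []
        for (tokens, logp), ids, logps in zip(hyps, topk_ids, topk_logps):
            for tok, tok_logp in zip(ids, logps):
                candidates.append((tokens + [tok], logp + tok_logp))
        # Keep the beam_size best extensions; simplification: stop only once
        # every hypothesis ends in the stop token.
        hyps = sorted(candidates, key=lambda h: h[1], reverse=True)[:beam_size]
        if all(tokens[-1] == stop_id for tokens, _ in hyps):
            break
    return hyps[0][0]  # best-scoring token sequence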
Example #2
def calc_features(cnn_dm_train_data_path, hps, vocab, batcher, save_path):
    if not os.path.exists(save_path): os.makedirs(save_path)
    decode_model_hps = hps  # This will be the hyperparameters for the decoder model
    model = SummarizationModel(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder.calc_importance_features(cnn_dm_train_data_path, hps, save_path,
                                     1000)
Example #3
def main(unused_argv):
    # prints a message if you've entered flags incorrectly
    if len(unused_argv) != 1:
        raise Exception("Problem with flags: %s" % unused_argv)

    hps, vocab = prepare_hps_vocab()

    generator_batcher = Batcher(FLAGS.data_path,
                                vocab,
                                hps,
                                single_pass=FLAGS.single_pass)
    discriminator_batcher = Batcher(FLAGS.data_path,
                                    vocab,
                                    hps,
                                    single_pass=FLAGS.single_pass)

    if hps.mode == 'train':
        generator, discriminator = build_seqgan_graph(hps, vocab)
        setup_training(generator, discriminator, generator_batcher,
                       discriminator_batcher)
    elif hps.mode == 'decode':
        # The model is configured with max_dec_steps=1 because we only ever run one step of
        # the decoder at a time (to do beam search).
        decode_model_hps = hps._replace(max_dec_steps=1)
        generator = SummarizationModel(decode_model_hps, vocab)
        decoder = BeamSearchDecoder(generator, generator_batcher, vocab)
        decoder.decode()
    else:
        raise ValueError("The 'mode' flag must be one of train/decode")
Example #4
def main(args):
    if args.mode == 'prepare':  # python3 run.py  --mode prepare --pointer-gen
        prepare(args)
    elif args.mode == 'train':  # python3 run.py  --mode train -b 100 -o output --gpu 0  --restore
        train(args)
    elif args.mode == 'eval':
        # python3 run.py --mode eval --eval-model
        evaluate(args)
    elif args.mode == 'decode':  #
        # python3 run.py --mode decode --beam-size 10 --decode-model output_big_data/model/model-250000 --decode-dir output_big_data/result --gpu 1
        args.batch_size = args.beam_size
        vocab_encoder = Vocab(args, "encoder_vocab")
        vocab_decoder = Vocab(args, "decoder_vocab")
        vocab_user = User_Vocab(args, name="user_vocab")
        test_file = "./test.data"
        #test_file = os.path.join(args.data, 'chat_data/tmp.data')
        # test_file = os.path.join(args.data, 'news_train_span_50.data')
        batcher = TestBatcher(args, vocab_encoder, vocab_decoder, vocab_user,
                              test_file).batcher()
        if args.cpu:
            with tf.device('/cpu:0'):
                model = CommentModel(args, vocab_decoder)
        else:
            model = CommentModel(args, vocab_decoder)

        decoder = BeamSearchDecoder(args, model, batcher, vocab_decoder)
        decoder.decode()
    elif args.mode == 'debug':
        debug(args)
    else:
        raise RuntimeError(f'mode {args.mode} is invalid.')
Example #5
File: main.py Project: aichunks/NLP
def predict():

    if FLAGS.mode != "predict":
        print("Wrong Function")
        return

    FLAGS.batch_size = FLAGS.beam_size

    hps = prepare_hps()
    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)

    decode_model_hps = hps._replace(max_dec_steps=1)
    generator = Generator(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(generator, None, vocab)

    # text=" tHE PRESIDENT wILL REMAIN IN THE uNITED sTATES TO OVERSE THE AMERICAN RESPONSE TO sYRIA AND TO MONITOR DEVELOPMENTS AROUND THE WORLD. NOW YOU REMEMBER YESTERDAY THE PRESIDENT SENT OUT A SET OUT A TIMELINE OF 24 TO 48 HOURS AS TO WHEN HE MOULD HAKE HIS DECISION KNOWN AS IT RELATES TO THE us REsp ONSE TO THE CRISIS IN sYRIA SO HE COULD GET SOME MORE CLARITY ON THAT TODAY. buT sARAH sANDERS IS NOM HITTING THE FACT THAT THIS TRIP IS CANCELED ON SOME OF ThaT DECISION-MAKING cHRIS."
    # text='''  yOU HAVE SOME BREAKING NEHS ABOUT PRESIDENT CANCELING A SCHEDULED TRIP
    # # THAT HAS PLANNED LATER THIS HEEK. IET'S GO BACK TO THE WHITE hOUSE. nbc'S JEFF
    # # bENNETT, wHAT ARE YOU HEARING.? hEY CHRIS, PRESIDENT tRUMP HAS SET TO MAKE HIS FIRST VISIT OF HIS PRESIDENCY TO lATIN eMERICA LATER THIS HEEK. bUT HE'VE JUST LEARNED FROM WHITE hOUSE DRESS SECRETARY SARAH SANDERS THAT TRIP IS NOM CALLED OFF.
    # # hERE'S THE STATEMENT. SHE SENT OUT MOMENTS AGO PRESIDENT tRUMP HILL NOT ATTEND
    # # HE SUMMIT OF THE eMERICAS IN lIMA PERU OR TRAVEL TO bOGOTA COLOMBIA AS ORIGINALY SCHEDULED AT THE PRESIDENT'S REQUEST. tHE VICE PRESIDENT HILL TRAVEL IN INSTED'''
    text = "-lrb- cnn -rrb- the palestinian authority officially became the 123rd member of the international criminal court on wednesday , a step that gives the court jurisdiction over alleged crimes in palestinian territories . the formal accession was marked with a ceremony at the hague , in the netherlands , where the court is based . the palestinians signed the icc 's founding rome statute in january , when they also accepted its jurisdiction over alleged crimes committed `` in the occupied palestinian territory , including east jerusalem , since june 13 , 2014 . '' later that month , the icc opened a preliminary examination into the situation in palestinian territories , paving the way for possible war crimes investigations against israelis . as members of the court , palestinians may be subject to counter-charges as well . israel and the united states , neither of which is an icc member , opposed the palestinians ' efforts to join the body . but palestinian foreign minister riad al-malki , speaking at wednesday 's ceremony , said it was a move toward greater justice . `` as palestine formally becomes a state party to the rome statute today , the world is also a step closer to ending a long era of impunity and injustice , '' he said , according to an icc news release . `` indeed , today brings us closer to our shared goals of justice and peace . '' judge kuniko ozaki , a vice president of the icc , said acceding to the treaty was just the first step for the palestinians . `` as the rome statute today enters into force for the state of palestine , palestine acquires all the rights as well as responsibilities that come with being a state party to the statute . these are substantive commitments , which can not be taken lightly , '' she said . rights group human rights watch welcomed the development . `` governments seeking to penalize palestine for joining the icc should immediately end their pressure , and countries that support universal acceptance of the court 's treaty should speak out to welcome its membership , '' said balkees jarrah , international justice counsel for the group . `` what 's objectionable is the attempts to undermine international justice , not palestine 's decision to join a treaty to which over 100 countries around the world are members . '' in january , when the preliminary icc examination was opened , israeli prime minister benjamin netanyahu described it as an outrage , saying the court was overstepping its boundaries . the united states also said it `` strongly '' disagreed with the court 's decision . `` as we have said repeatedly , we do not believe that palestine is a state and therefore we do not believe that it is eligible to join the icc , '' the state department said in a statement . it urged the warring sides to resolve their differences through direct negotiations . `` we will continue to oppose actions against israel at the icc as counterproductive to the cause of peace , '' it said . but the icc begs to differ with the definition of a state for its purposes and refers to the territories as `` palestine . '' while a preliminary examination is not a formal investigation , it allows the court to review evidence and determine whether to investigate suspects on both sides . prosecutor fatou bensouda said her office would `` conduct its analysis in full independence and impartiality . '' the war between israel and hamas militants in gaza last summer left more than 2,000 people dead . the inquiry will include alleged war crimes committed since june . 
the international criminal court was set up in 2002 to prosecute genocide , crimes against humanity and war crimes . cnn 's vasco cotovio , kareem khadder and faith karimi contributed to this report ."
    # text="marseille , france -lrb- cnn -rrb- the french prosecutor leading an investigation into the crash of germanwings flight 9525 insisted wednesday that he was not aware of any video footage from on board the plane . marseille prosecutor brice robin told cnn that `` so far no videos were used in the crash investigation . '' he added , `` a person who has such a video needs to immediately give it to the investigators . '' robin 's comments follow claims by two magazines , german daily bild and french paris match , of a cell phone video showing the harrowing final seconds from on board germanwings flight 9525 as it crashed into the french alps . all 150 on board were killed . paris match and bild reported that the video was recovered from a phone at the wreckage site . the two publications described the supposed video , but did not post it on their websites . the publications said that they watched the video , which was found by a source close to the investigation . `` one can hear cries of ` my god ' in several languages , '' paris match reported . `` metallic banging can also be heard more than three times , perhaps of the pilot trying to open the cockpit door with a heavy object . towards the end , after a heavy shake , stronger than the others , the screaming intensifies . then nothing . '' `` it is a very disturbing scene , '' said julian reichelt , editor-in-chief of bild online . an official with france 's accident investigation agency , the bea , said the agency is not aware of any such video . lt. col. jean-marc menichini , a french gendarmerie spokesman in charge of communications on rescue efforts around the germanwings crash site , told cnn that the reports were `` completely wrong '' and `` unwarranted . '' cell phones have been collected at the site , he said , but that they `` had n't been exploited yet . '' menichini said he believed the cell phones would need to be sent to the criminal research institute in rosny sous-bois , near paris , in order to be analyzed by specialized technicians working hand-in-hand with investigators . but none of the cell phones found so far have been sent to the institute , menichini said . asked whether staff involved in the search could have leaked a memory card to the media , menichini answered with a categorical `` no . '' reichelt told `` erin burnett : outfront '' that he had watched the video and stood by the report , saying bild and paris match are `` very confident '' that the clip is real . he noted that investigators only revealed they 'd recovered cell phones from the crash site after bild and paris match published their reports . `` that is something we did not know before . ... overall we can say many things of the investigation were n't revealed by the investigation at the beginning , '' he said . what was mental state of germanwings co-pilot ? german airline lufthansa confirmed tuesday that co-pilot andreas lubitz had battled depression years before he took the controls of germanwings flight 9525 , which he 's accused of deliberately crashing last week in the french alps . lubitz told his lufthansa flight training school in 2009 that he had a `` previous episode of severe depression , '' the airline said tuesday . email correspondence between lubitz and the school discovered in an internal investigation , lufthansa said , included medical documents he submitted in connection with resuming his flight training . 
    # the announcement indicates that lufthansa , the parent company of germanwings , knew of lubitz 's battle with depression , allowed him to continue training and ultimately put him in the cockpit . lufthansa , whose ceo carsten spohr previously said lubitz was 100 % fit to fly , described its statement tuesday as a `` swift and seamless clarification '' and said it was sharing the information and documents -- including training and medical records -- with public prosecutors . spohr traveled to the crash site wednesday , where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside . he saw the crisis center set up in seyne-les-alpes , laid a wreath in the village of le vernet , closer to the crash site , where grieving families have left flowers at a simple stone memorial . menichini told cnn late tuesday that no visible human remains were left at the site but recovery teams would keep searching . french president francois hollande , speaking tuesday , said that it should be possible to identify all the victims using dna analysis by the end of the week , sooner than authorities had previously suggested . in the meantime , the recovery of the victims ' personal belongings will start wednesday , menichini said . among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board . check out the latest from our correspondents . the details about lubitz 's correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and lubitz 's possible motive for downing the jet . a lufthansa spokesperson told cnn on tuesday that lubitz had a valid medical certificate , had passed all his examinations and `` held all the licenses required . '' earlier , a spokesman for the prosecutor 's office in dusseldorf , christoph kumpa , said medical records reveal lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot 's license . kumpa emphasized there 's no evidence suggesting lubitz was suicidal or acting aggressively before the crash . investigators are looking into whether lubitz feared his medical condition would cause him to lose his pilot 's license , a european government official briefed on the investigation told cnn on tuesday . while flying was `` a big part of his life , '' the source said , it 's only one theory being considered . another source , a law enforcement official briefed on the investigation , also told cnn that authorities believe the primary motive for lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems . lubitz 's girlfriend told investigators he had seen an eye doctor and a neuropsychologist , both of whom deemed him unfit to work recently and concluded he had psychological issues , the european government official said . but no matter what details emerge about his previous mental health struggles , there 's more to the story , said brian russell , a forensic psychologist . `` psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they were n't going to keep doing their job and they 're upset about that and so they 're suicidal , '' he said .
    # `` but there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person 's problems . '' germanwings crash compensation : what we know . who was the captain of germanwings flight 9525 ? cnn 's margot haddad reported from marseille and pamela brown from dusseldorf , while laura smith-spark wrote from london . cnn 's frederik pleitgen , pamela boykoff , antonia mortensen , sandrine amiel and anna-maja rappard contributed to this report ."

    data = genrate_Input(text, hps, vocab)

    out = decoder.predict(data)
    print(out)
Example #6
def main(unused_argv):
  set_random_seeds()

  get_datapath() # resolve the dataset path
  get_steps() # set step counts according to data_size

  tf.logging.set_verbosity(tf.logging.INFO)
  print('Now the mode of this model is {}!'.format(FLAGS.mode))

  # if log_dir does not exist, create it.
  if not os.path.exists(FLAGS.log_dir): os.makedirs(FLAGS.log_dir)

  if FLAGS.mode == 'decode':
    FLAGS.branch_batch_size = FLAGS.beam_size  # for beam search
    FLAGS.TS_mode = False

  hps = make_hps() # make a hps namedtuple

  # Vocabulary
  vocab = Vocab(hps.vocab_path, hps.vocab_size)
  # Train or Inference
  if hps.mode == 'train':
    batcher = Batcher(hps.data_path, vocab, hps)
    eval_hps = hps._replace(mode='eval')
    eval_batcher = Batcher(hps.eval_data_path, vocab, eval_hps)

    model = GSNModel(hps, vocab)
    train(model, batcher, eval_batcher, vocab, hps)
  elif hps.mode == 'decode':
    decode_mdl_hps = hps._replace(max_dec_steps=1)
    batcher = Batcher(hps.test_data_path, vocab, decode_mdl_hps)  # for test

    model = GSNModel(decode_mdl_hps, vocab)
    decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder._decode()
Example #7
def run(size):
  # print ((unused_argv))
  # if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
  #   raise Exception("Problem with flags: %s" % unused_argv)

  FLAGS.min_dec_steps = size//4
  FLAGS.max_dec_steps = size
  FLAGS.max_enc_steps = size
  tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want
  tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

  # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
  FLAGS.log_root = log_path
  FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
  if not os.path.exists(FLAGS.log_root):
    if FLAGS.mode =="train":
      os.makedirs(FLAGS.log_root)
    else:
      raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))
  print("vocab path is ",FLAGS.vocab_path)
  vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary

  # If in decode mode, set batch_size = beam_size
  # Reason: in decode mode, we decode one example at a time.
  # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
  if FLAGS.mode == 'decode':
    FLAGS.batch_size = FLAGS.beam_size

  # If single_pass=True, check we're in decode mode
  if FLAGS.single_pass and FLAGS.mode!='decode':
    raise Exception("The single_pass flag should only be True in decode mode")

  # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
  hparam_list = ['mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage', 'cov_loss_wt', 'pointer_gen']
  hps_dict = {}
  #print("This is FLAGS -->",FLAGS)
  for val in FLAGS: # for each flag // New modification for TF 1.5
    if val in hparam_list: # if it's in the list
      hps_dict[val] = FLAGS[val].value # add it to the dict // New modification for TF 1.5
  hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
  # Create a batcher object that will create minibatches of data
  batcher = Batcher(FLAGS.data_path, vocab, hps, single_pass=FLAGS.single_pass)

  tf.set_random_seed(111) # a seed value for randomness

  if hps.mode == 'train':
    print("creating model...")
    model = SummarizationModel(hps, vocab)
    setup_training(model, batcher)
  elif hps.mode == 'eval':
    model = SummarizationModel(hps, vocab)
    run_eval(model, batcher, vocab)
  elif hps.mode == 'decode':
    decode_model_hps = hps  # This will be the hyperparameters for the decoder model
    decode_model_hps = hps._replace(max_dec_steps=1) # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
    model = SummarizationModel(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder.decode() # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
  else:
    raise ValueError("The 'mode' flag must be one of train/eval/decode")
Example #8
def evaluate_model_beam_search(model, src, src_test, trg,
    trg_test, config, beam_size=1, use_cuda=False):
    """
    Evaluate the model using beam search.
    :param model:
    :param src:
    :param src_test:
    :param trg:
    :param trg_test:
    :param config:
    :param beam_size:
    :param use_cuda:
    :return:
    """
    # the test word dicts reuse the src and trg dicts
    src_test['word2id'] = src['word2id']
    src_test['id2word'] = src['id2word']

    trg_test['word2id'] = trg['word2id']
    trg_test['id2word'] = trg['id2word']

    decoder = BeamSearchDecoder(config, model.state_dict(),
                                src_test, trg_test, beam_size=beam_size, use_cuda=use_cuda)
    bleu_score = decoder.translate()
    return bleu_score
Example #9
def main():
    tf.logging.set_verbosity(
        tf.logging.INFO)  # choose what level of logging you want
    tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)  # create a vocabulary

    FLAGS.batch_size = FLAGS.beam_size

    hparam_list = [
        'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
        'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim',
        'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage',
        'cov_loss_wt', 'pointer_gen'
    ]
    hps_dict = {}
    for key, val in FLAGS.__flags.iteritems():  # for each flag
        if key in hparam_list:  # if it's in the list
            hps_dict[key] = val  # add it to the dict
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
    batcher = Batcher(FLAGS.data_path,
                      vocab,
                      hps,
                      single_pass=FLAGS.single_pass)
    tf.set_random_seed(111)  # a seed value for randomness
    decode_model_hps = hps  # This will be the hyperparameters for the decoder model
    decode_model_hps = hps._replace(
        max_dec_steps=1
    )  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
    model = SummarizationModel(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder.decode(
    )  # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
Example #10
class SummaRiser:
    def __init__(self, vocab_path, log_path):
        self.pointer_gen = True
        self.single_pass = True
        self.batch_size = self.beam_size = 4
        self.vocab_size = 50000
        self.vocab_path = vocab_path
        self.log_root = log_path

        # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
        hparam_list = [
            'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
            'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim',
            'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage',
            'cov_loss_wt', 'pointer_gen'
        ]
        hps_dict = {
            'mode': 'decode',
            'lr': 0.15,
            'adagrad_init_acc': 0.1,
            'rand_unif_init_mag': 0.02,
            'trunc_norm_init_std': 1e-4,
            'max_grad_norm': 2.0,
            'hidden_dim': 256,
            'emb_dim': 128,
            'batch_size': self.batch_size,
            'max_dec_steps': 100,
            'max_enc_steps': 500,
            'coverage': 1,
            'cov_loss_wt': 1.0,
            'pointer_gen': True,
            'min_dec_steps': 35,
            'beam_size': self.beam_size
        }

        self.hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
        self.vocab = Vocab(self.vocab_path, self.vocab_size)

        decode_model_hps = self.hps  # This will be the hyperparameters for the decoder model
        decode_model_hps = self.hps._replace(
            max_dec_steps=1
        )  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries

        tf.set_random_seed(111)  # a seed value for randomness
        self.model = SummarizationModel(decode_model_hps, self.vocab)
        self.decoder = BeamSearchDecoder(self.model, self.vocab, True,
                                         self.hps, self.pointer_gen,
                                         self.log_root)

    def summarize(self, articles):
        self.batcher = Batcher(articles,
                               self.vocab,
                               self.hps,
                               single_pass=self.single_pass)
        self.decoder.setBatcher(self.batcher)
        return self.decoder.decode(
            articles
        )  # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
Example #11
def main(unused_argv):
  if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
    raise Exception("Problem with flags: %s" % unused_argv)

  tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want
  tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

  # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
  FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
  if not os.path.exists(FLAGS.log_root):
    if FLAGS.mode=="train":
      os.makedirs(FLAGS.log_root)
    else:
      raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))

  vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary

  # If in decode mode, set batch_size = beam_size
  # Reason: in decode mode, we decode one example at a time.
  # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
  if FLAGS.mode == 'decode':
    FLAGS.batch_size = FLAGS.beam_size

  # If single_pass=True, check we're in decode mode
  if FLAGS.single_pass and FLAGS.mode!='decode':
    raise Exception("The single_pass flag should only be True in decode mode")

  # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
  hparam_list = ['mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage', 'cov_loss_wt', 'pointer_gen']
  hps_dict = {}
  for key,val in FLAGS.__flags.iteritems(): # for each flag
    if key in hparam_list: # if it's in the list
      hps_dict[key] = val # add it to the dict
  hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

  # Create a batcher object that will create minibatches of data
  batcher = Batcher(FLAGS.data_path, vocab, hps, single_pass=FLAGS.single_pass)

  tf.set_random_seed(111) # a seed value for randomness

  if hps.mode == 'train':
    print "creating model..."
    model = SummarizationModel(hps, vocab)
    setup_training(model, batcher)
  elif hps.mode == 'eval':
    model = SummarizationModel(hps, vocab)
    run_eval(model, batcher, vocab)
  elif hps.mode == 'decode':
    decode_model_hps = hps  # This will be the hyperparameters for the decoder model
    decode_model_hps = hps._replace(max_dec_steps=1) # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
    model = SummarizationModel(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder.decode() # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
  else:
    raise ValueError("The 'mode' flag must be one of train/eval/decode")
Example #12
    def __init__(self, vocab_path, log_root):
        self.pointer_gen = True
        self.single_pass = True
        self.batch_size = self.beam_size = 4
        self.vocab_size = 50000
        self.vocab_path = vocab_path
        self.log_root = log_root
        # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
        hparam_list = [
            'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
            'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim',
            'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage',
            'cov_loss_wt', 'pointer_gen'
        ]
        hps_dict = {
            'mode': 'decode',
            'lr': 0.15,
            'adagrad_init_acc': 0.1,
            'rand_unif_init_mag': 0.02,
            'trunc_norm_init_std': 1e-4,
            'max_grad_norm': 2.0,
            'hidden_dim': 256,
            'emb_dim': 128,
            'batch_size': self.batch_size,
            'max_dec_steps': 100,
            'max_enc_steps': 400,
            'coverage': 1,
            'cov_loss_wt': 1.0,
            'pointer_gen': True,
            'min_dec_steps': 35,
            'beam_size': self.beam_size
        }

        self.hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
        self.vocab = Vocab(self.vocab_path, self.vocab_size)
        tf.logging.set_verbosity(
            tf.logging.INFO)  # choose what level of logging you want
        # If in decode mode, set batch_size = beam_size
        # Reason: in decode mode, we decode one example at a time.
        # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.

        decode_model_hps = self.hps  # This will be the hyperparameters for the decoder model
        decode_model_hps = self.hps._replace(
            max_dec_steps=1
        )  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries

        tf.set_random_seed(111)  # a seed value for randomness
        self.model = SummarizationModel(decode_model_hps, self.vocab,
                                        self.log_root)
        self.decoder = BeamSearchDecoder(self.model, self.vocab, True,
                                         self.hps, self.pointer_gen,
                                         self.log_root)
Example #13
def decode():
    config = decode_config()
    vocab = Vocab(config['vocab_file'], config['vocab_size'])
    hps = namedtuple("HParams", config.keys())(**config)
    batcher = Batcher(FLAGS.data_path,
                      vocab,
                      hps,
                      single_pass=config['single_pass'])
    from model import SummarizeModel
    from decode import BeamSearchDecoder
    ### Model
    model = SummarizeModel(**config)
    beamsearch_model = BeamSearchDecoder(model, batcher, vocab)
    beamsearch_model.decode()
Example #14
def main():
    tf.logging.set_verbosity(
        tf.logging.INFO)  # choose what level of logging you want
    args = FLAGS  # get_args()
    vocab = Vocab(args.vocab_path, args.vocab_size)  # create a vocabulary
    hps = get_hps()
    b = json_batch(args.json_path, hps, vocab)
    batcher = MyBatcher(b, vocab, hps, args.single_pass)

    decode_model_hps = hps._replace(max_dec_steps=1)
    model = SummarizationModel(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder.decode()
    import pdb
    pdb.set_trace()
    pass
Example #15
def main():

    tf.logging.set_verbosity(tf.logging.INFO)
    tf.logging.info('Starting seq2seq_attention in %s mode...', (args.mode))

    args.model_path = os.path.join(args.model_path, args.exp_name)
    if not os.path.exists(args.model_path):
        if args.mode == "train":
            os.makedirs(args.model_path)
        else:
            raise Exception(
                "Logdir %s doesn't exist. Run in train mode to create it." %
                (args.model_path))


    # Load the dataset and the source-side vocabulary
    src_vocab = utils.Vocab(args.src_vocab_path, args.src_vocab_size)
    # Load the target-side word vocabulary
    tgt_vocab = utils.Vocab(args.tgt_vocab_path, args.tgt_vocab_size)
    # Batch the dataset, feeding it in through a concurrent input queue
    batcher = Batcher(args.data_path, src_vocab, tgt_vocab, args)

    if args.model == "vanilla":
        model_class = VanillaSeq2seqModel
    elif args.model == "sep_dec":
        model_class = SeparateDecoderModel
    elif args.model == "shd_dec":
        model_class = SharedDecoderModel

    tf.set_random_seed(111)

    if args.mode == 'train':
        model = model_class(args, src_vocab, tgt_vocab)
        setup_training(model, batcher)
    elif args.mode == 'eval':
        model = model_class(args, src_vocab, tgt_vocab)
        run_eval(model, batcher, args.ckpt_id)
    elif args.mode == "decode":
        args.batch_size = args.beam_size
        args.arg_max_dec_steps = 1
        args.kp_max_dec_steps = 1
        model = model_class(args, src_vocab, tgt_vocab)
        decoder = BeamSearchDecoder(model, batcher, src_vocab, tgt_vocab,
                                    args.ckpt_id)
        decoder.decode()
    else:
        raise ValueError("The 'mode' flag must be one of train/eval/decode")
Example #16
def main(unused_argv, sess_config=None, server_target=None):
    # if len(unused_argv) != 1:  # prints a message if you've entered flags incorrectly
    #     raise Exception("Problem with flags: %s" % unused_argv)

    vocab, hps = default_setup()

    if hps.inference:
        print "Inference Mode"
        batcher = RawTextBatcher(FLAGS.data_path,
                                 vocab,
                                 hps,
                                 single_pass=FLAGS.single_pass)

        decode_model_hps = hps  # This will be the hyperparameters for the decoder model
        decode_model_hps = hps._replace(
            max_dec_steps=1
        )  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
        model = SummarizationModel(decode_model_hps, vocab)
        decoder = BeamSearchDecoder(
            model, batcher,
            vocab) if sess_config is None else BeamSearchDecoder(
                model, batcher, vocab, sess_config, server_target)
        decoder.decode(
            withRouge=False
        )  # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
    else:
        # Create a batcher object that will create minibatches of data
        batcher = Batcher(FLAGS.data_path,
                          vocab,
                          hps,
                          single_pass=FLAGS.single_pass)

        if hps.mode == 'train':
            print "creating model..."
            model = SummarizationModel(hps, vocab)
            setup_training(model, batcher)
        elif hps.mode == 'eval':
            model = SummarizationModel(hps, vocab)
            run_eval(model, batcher, vocab)
        elif hps.mode == 'decode':
            decode_model_hps = hps  # This will be the hyperparameters for the decoder model
            decode_model_hps = hps._replace(
                max_dec_steps=1
            )  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
            model = SummarizationModel(decode_model_hps, vocab)
            decoder = BeamSearchDecoder(model, batcher, vocab)
            decoder.decode(
            )  # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
        else:
            raise ValueError(
                "The 'mode' flag must be one of train/eval/decode")
Example #17
def main():
  global model
  global vocab
  global hps
  global decoder

  FLAGS.mode = 'decode'
  FLAGS.vocab_path = '../vocab'
  FLAGS.log_root = '../models'
  FLAGS.exp_name = 'pretrained_model_tf1.2.1'
  FLAGS.max_enc_steps = 400
  FLAGS.max_dec_steps = 120
  FLAGS.coverage = 1
  FLAGS.single_pass = True

  tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want
  tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

  # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
  FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
  if not os.path.exists(FLAGS.log_root):
    if FLAGS.mode=="train":
      os.makedirs(FLAGS.log_root)
    else:
      raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))

  vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary

  # If in decode mode, set batch_size = beam_size
  # Reason: in decode mode, we decode one example at a time.
  # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
  if FLAGS.mode == 'decode':
    FLAGS.batch_size = FLAGS.beam_size

  # If single_pass=True, check we're in decode mode
  if FLAGS.single_pass and FLAGS.mode!='decode':
    raise Exception("The single_pass flag should only be True in decode mode")

  # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
  hparam_list = ['mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage', 'cov_loss_wt', 'pointer_gen']
  hps_dict = {}
  for key,val in FLAGS.__flags.items(): # for each flag
    if key in hparam_list: # if it's in the list
      hps_dict[key] = val # add it to the dict
  hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

  tf.set_random_seed(111) # a seed value for randomness

  decode_model_hps = hps  # This will be the hyperparameters for the decoder model
  decode_model_hps = hps._replace(max_dec_steps=1) # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
  model = SummarizationModel(decode_model_hps, vocab)

  abstract = "tim ist toll"
  article = "tim ist toll"

  batcher = Batcher(FLAGS.data_path, vocab, hps, single_pass=FLAGS.single_pass, abstract=abstract, article=article)
  decoder = BeamSearchDecoder(model, batcher, vocab)
Example #18
def loadModel():
    FLAGS.batch_size = FLAGS.beam_size
    hps = prepare_hps()
    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)

    decode_model_hps = hps._replace(max_dec_steps=1)
    generator = Generator(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(generator, None, vocab)
    return decoder, hps, vocab
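A hypothetical caller for loadModel(), mirroring the predict() flow in Example #5 (genrate_Input and decoder.predict are borrowed from that example and assumed to be in scope; the input text is a placeholder):

decoder, hps, vocab = loadModel()
data = genrate_Input("some article text ...", hps, vocab)  # helper from Example #5
out = decoder.predict(data)
print(out)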
Example #19
def main(unused_argv):
    # if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
    #   raise Exception("Problem with flags: %s" % unused_argv)

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)

    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)  # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
    hparam_list = [
        'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
        'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim',
        'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage',
        'cov_loss_wt', 'pointer_gen'
    ]
    hps_dict = {}
    for key, val in FLAGS.__flags.items():  # for each flag
        if key in hparam_list:  # if it's in the list
            hps_dict[key] = val  # add it to the dict
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

    # Create a batcher object that will create minibatches of data
    batcher = Batcher(FLAGS.data_path,
                      vocab,
                      hps,
                      single_pass=FLAGS.single_pass)

    tf.set_random_seed(111)  # a seed value for randomness

    decode_model_hps = hps  # This will be the hyperparameters for the decoder model
    decode_model_hps = hps._replace(
        max_dec_steps=1
    )  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
    model = SummarizationModel(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(model, batcher, vocab)
    decoder.decode(
    )  # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
Example #20
def inference_on_flink(context, sess_config, server_target):
    vocab, hps = default_setup()
    if hps.inference:
        print "Inference Mode"
        batcher = FlinkInferenceBatcher(context, vocab, hps)
        writer = FlinkWriter(context)
        decode_model_hps = hps  # This will be the hyperparameters for the decoder model
        decode_model_hps = hps._replace(
            max_dec_steps=1
        )  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
        model = SummarizationModel(decode_model_hps, vocab)
        decoder = BeamSearchDecoder(
            model, batcher,
            vocab) if sess_config is None else BeamSearchDecoder(
                model, batcher, vocab, sess_config, server_target, writer)

        decoder.decode(
            withRouge=False
        )  # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
Example #21
def main(unused_argv):
    if len(unused_argv) != 1:
        raise Exception("Problem with flags: %s" % unused_argv)
    if FLAGS.mode not in ['train', 'eval', 'decode']:
        raise ValueError("The 'mode' flag must be one of train/eval/decode")

    tf.logging.set_verbosity(tf.logging.INFO)
    tf.set_random_seed(FLAGS.random_seed)
    print('INFO: Starting seq2seq_attention model in {} mode...'.format(
        FLAGS.mode))
    if not os.path.exists(FLAGS.log_dir): os.makedirs(FLAGS.log_dir)

    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    hparam_list = [
        'mode', 'lr', 'adagrad_acc', 'norm_unif', 'norm_trunc', 'norm_grad',
        'pointer', 'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps',
        'max_enc_steps'
    ]
    hps_dict = {}
    for key, val in FLAGS.__flags.iteritems():
        if key in hparam_list:
            hps_dict[key] = val
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)
    batcher = Batcher(FLAGS.data_path, vocab, hps, onetime=FLAGS.onetime)

    if hps.mode == 'train':
        print('INFO: creating model...')
        model = SummarizationModel(hps, vocab)
        train(model, batcher)
    elif hps.mode == 'eval':
        model = SummarizationModel(hps, vocab)
        cval(model, batcher, vocab)
    elif hps.mode == 'decode':
        decode_mdl_hps = hps
        decode_mdl_hps = hps._replace(max_dec_steps=1)
        model = SummarizationModel(decode_mdl_hps, vocab)
        decoder = BeamSearchDecoder(model, batcher, vocab)
        decoder._decode()
Example #22
File: main.py Project: aichunks/NLP
def main(args):
    # prints a message if you've entered flags incorrectly
    if len(args) != 1:
        raise Exception("Problem with flags: %s" % args)

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode' or FLAGS.mode == 'predict':
        FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode != 'decode' and FLAGS.mode != 'predict':
        raise Exception(
            "The single_pass flag should only be True in decode mode")
    hps = prepare_hps()
    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)
    generator_batcher = Batcher(FLAGS.data_path,
                                vocab,
                                hps,
                                single_pass=FLAGS.single_pass)
    discriminator_batcher = Batcher(FLAGS.data_path,
                                    vocab,
                                    hps,
                                    single_pass=FLAGS.single_pass)

    if hps.mode == "pretrain" or hps.mode == "train":
        generator, discriminator = build_seqgan_graph(hps, vocab)
        setup_training(hps.mode, generator, discriminator, generator_batcher,
                       discriminator_batcher)
    elif hps.mode == 'decode':
        # The model is configured with max_dec_steps=1 because we only ever run one step of
        # the decoder at a time (to do beam search).
        decode_model_hps = hps._replace(max_dec_steps=1)
        generator = Generator(decode_model_hps, vocab)
        decoder = BeamSearchDecoder(generator, generator_batcher, vocab)
        decoder.decode()
    else:
        raise ValueError(
            "The 'mode' flag must be one of pretrain/train/decode")
Example #23
def setup_summarizer(settings):
    tf.logging.set_verbosity(
        tf.logging.INFO)  # choose what level of logging you want
    tf.logging.info('Starting seq2seq_attention ')

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    vocab = Vocab(settings.vocab_path,
                  settings.vocab_size)  # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    FLAGS.batch_size = FLAGS.beam_size

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
    hparam_list = [
        'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
        'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim',
        'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage',
        'cov_loss_wt', 'pointer_gen'
    ]
    hps_dict = {}
    for key, val in FLAGS.__flags.items():  # for each flag
        if key in hparam_list:  # if it's in the list
            hps_dict[key] = val  # add it to the dict
    hps = namedtuple("HParams", list(hps_dict.keys()))(**hps_dict)

    tf.set_random_seed(111)  # a seed value for randomness

    if hps.mode != 'decode':
        raise ValueError("The 'mode' flag must be decode for serving")
    decode_model_hps = hps  # This will be the hyperparameters for the decoder model
    decode_model_hps = hps._replace(
        max_dec_steps=1
    )  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
    serving_device = '/cpu:0'
    model = SummarizationModel(decode_model_hps,
                               vocab,
                               default_device=serving_device)
    decoder = BeamSearchDecoder(model, None, vocab)
    return Summarizer(decoder, vocab=vocab, hps=hps)
Example #24
def main(unused_argv):
    if FLAGS.placeholder:
        tf.logging.info('try to occupy GPU memory!')
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.per_process_gpu_memory_fraction = 0.8
        placeholder_session = tf.Session(config=config)
        limit = placeholder_session.run(
            tf.contrib.memory_stats.BytesLimit()) / 1073741824
        tf.logging.info('occupy GPU memory %f GB', limit)
    if len(unused_argv
           ) != 1:  # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)

    tf.logging.set_verbosity(
        tf.logging.INFO)  # choose what level of logging you want
    tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
    if not os.path.exists(FLAGS.log_root):
        if FLAGS.mode == "train":
            os.makedirs(FLAGS.log_root)
        else:
            raise Exception(
                "Logdir %s doesn't exist. Run in train mode to create it." %
                (FLAGS.log_root))

    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)  # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode' or FLAGS.mode == 'auto_decode':
        FLAGS.batch_size = FLAGS.beam_size

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
    hparam_list = [
        'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
        'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim',
        'batch_size', 'max_dec_steps', 'max_enc_steps', 'max_side_steps',
        'coverage', 'cov_loss_wt', 'pointer_gen', 'epoch_num',
        'current_source_code_zip', 'multi_dec_steps'
    ]
    hps_dict = {}
    for key, val in FLAGS.__flags.items():  # for each flag
        if key in hparam_list:  # if it's in the list
            hps_dict[key] = val.value  # add it to the dict
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

    # save python source code
    current_time_str = datetime.now().strftime('%m-%d-%H-%M')
    FLAGS.current_source_code_zip = os.path.abspath(
        os.path.join(
            FLAGS.log_root,
            'source_code_bak-' + current_time_str + '-' + FLAGS.mode + '.zip'))
    tf.logging.info('saving source code: %s', FLAGS.current_source_code_zip)
    python_list = glob.glob('./*.py')
    zip_file = zipfile.ZipFile(FLAGS.current_source_code_zip, 'w')
    for d in python_list:
        zip_file.write(d)
    zip_file.close()

    # Create a batcher object that will create minibatches of data
    batcher = Batcher(FLAGS.data_path,
                      vocab,
                      hps,
                      single_pass=FLAGS.single_pass)

    tf.set_random_seed(111)  # a seed value for randomness

    if hps.mode == 'train':
        print("creating model...")
        model = SummarizationModel(hps, vocab)
        if FLAGS.placeholder:
            placeholder_session.close()
        setup_training(model, batcher)
    elif hps.mode == 'eval':
        model = SummarizationModel(hps, vocab)
        run_eval(model, batcher, vocab)
    elif hps.mode == 'decode':
        decode_model_hps = hps  # This will be the hyperparameters for the decoder model
        decode_model_hps = hps._replace(
            max_dec_steps=1
        )  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
        model = SummarizationModel(decode_model_hps, vocab)
        decoder = BeamSearchDecoder(model, batcher, vocab)
        decoder.decode(
        )  # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
    elif hps.mode == 'auto_decode':
        decode_model_hps = hps  # This will be the hyperparameters for the decoder model
        decode_model_hps = hps._replace(
            max_dec_steps=1
        )  # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries
        model = SummarizationModel(decode_model_hps, vocab)
        decoder = BeamSearchDecoder(model, batcher, vocab, hps.epoch_num)
        decoder.decode()
    else:
        raise ValueError("The 'mode' flag must be one of train/eval/decode")
Example #25
def main(unused_argv):
    if len(
            unused_argv) != 1:  # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)
    if FLAGS.dataset_name != "":
        FLAGS.data_path = os.path.join(FLAGS.data_root, FLAGS.dataset_name,
                                       FLAGS.dataset_split + '*')
    if not os.path.exists(
            os.path.join(FLAGS.data_root, FLAGS.dataset_name)) or len(
        os.listdir(os.path.join(FLAGS.data_root, FLAGS.dataset_name))) == 0:
        print(
                'No TF example data found at %s so creating it from raw data.' % os.path.join(
            FLAGS.data_root, FLAGS.dataset_name))
        convert_data.process_dataset(FLAGS.dataset_name)

    logging.set_verbosity(logging.INFO)  # choose what level of logging you want
    logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.exp_name = FLAGS.exp_name if FLAGS.exp_name != '' else FLAGS.dataset_name
    FLAGS.actual_log_root = FLAGS.log_root
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)

    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)  # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode != 'decode':
        raise Exception(
            "The single_pass flag should only be True in decode mode")

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
    hparam_list = ['mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
                   'trunc_norm_init_std',
                   'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size',
                   'max_dec_steps',
                   'max_enc_steps', 'coverage', 'cov_loss_wt', 'pointer_gen']
    hps_dict = {}
    for key, val in FLAGS.__flags.items():  # for each flag
        if key in hparam_list:  # if it's in the list
            hps_dict[key] = val.value  # add it to the dict
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

    if FLAGS.pg_mmr or FLAGS.pg_mmr_sim or FLAGS.pg_mmr_diff:

        # Fit the TFIDF vectorizer if not already fitted
        if FLAGS.importance_fn == 'tfidf':
            tfidf_model_path = os.path.join(FLAGS.actual_log_root,
                                            'tfidf_vectorizer',
                                            FLAGS.dataset_name + '.dill')
            if not os.path.exists(tfidf_model_path):
                print('No TFIDF vectorizer model file found at %s, so fitting the model now.'
                      % tfidf_model_path)
                tfidf_vectorizer = fit_tfidf_vectorizer(hps, vocab)
                with open(tfidf_model_path, 'wb') as f:
                    dill.dump(tfidf_vectorizer, f)

        # Train the SVR model on the CNN validation set if not already trained
        if FLAGS.importance_fn == 'svr':
            save_path = os.path.join(FLAGS.data_root, 'svr_training_data')
            importance_model_path = os.path.join(FLAGS.actual_log_root,
                                                 'svr.pickle')
            dataset_split = 'val'
            if not os.path.exists(importance_model_path):
                if not os.path.exists(save_path) or len(os.listdir(save_path)) == 0:
                    print('No importance_feature instances found at %s so creating it from raw data.'
                          % save_path)
                    # max_dec_steps=1 because the decoder is only ever run one step at a time
                    # (beam search); batch_size=100 and mode='calc_features' for feature extraction.
                    decode_model_hps = hps._replace(
                        max_dec_steps=1, batch_size=100, mode='calc_features')
                    cnn_dm_train_data_path = os.path.join(FLAGS.data_root,
                                                          FLAGS.dataset_name,
                                                          dataset_split + '*')
                    batcher = Batcher(cnn_dm_train_data_path, vocab,
                                      decode_model_hps,
                                      single_pass=FLAGS.single_pass,
                                      cnn_500_dm_500=False)
                    calc_features(cnn_dm_train_data_path, decode_model_hps,
                                  vocab, batcher, save_path)

                print('No importance_feature SVR model found at %s so training it now.'
                      % importance_model_path)
                features_list = importance_features.get_features_list(True)
                sent_reps = importance_features.load_data(
                    os.path.join(save_path, dataset_split + '*'), -1)
                print('Loaded %d sentence representations' % len(sent_reps))
                x_y = importance_features.features_to_array(sent_reps,
                                                            features_list)
                train_x, train_y = x_y[:, :-1], x_y[:, -1]
                svr_model = importance_features.run_training(train_x, train_y)
                with open(importance_model_path, 'wb') as f:
                    cPickle.dump(svr_model, f)

    # Create a batcher object that will create minibatches of data
    batcher = Batcher(FLAGS.data_path, vocab, hps,
                      single_pass=FLAGS.single_pass)

    tf.set_random_seed(111)  # a seed value for randomness

    # Start decoding on multi-document inputs
    if hps.mode == 'decode':
        # The model is configured with max_dec_steps=1 because we only ever run one step of
        # the decoder at a time (to do beam search). Note that the batcher is initialized
        # with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries.
        decode_model_hps = hps._replace(max_dec_steps=1)
        model = SummarizationModel(decode_model_hps, vocab)
        decoder = BeamSearchDecoder(model, batcher, vocab)
        # Decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once).
        decoder.decode()
    else:
        raise ValueError("The 'mode' flag must be 'decode' for this entry point")
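Several of these mains set `FLAGS.batch_size = FLAGS.beam_size` before building the batcher. The reason, as the comments say, is that decode mode processes one example at a time but keeps beam_size live hypotheses, so a "batch" is really one article tiled once per hypothesis. A rough numpy sketch of that shape bookkeeping (the token ids are made up):

import numpy as np

beam_size = 4
enc_input = np.array([12, 7, 7, 41, 3])  # one encoded article (hypothetical ids)

# One decode-mode batch repeats the same article once per hypothesis,
# so a single forward pass scores all beam_size hypotheses in parallel.
batch = np.tile(enc_input, (beam_size, 1))
assert batch.shape == (beam_size, enc_input.shape[0])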
예제 #26
0
def main(unused_argv):
    if len(unused_argv) != 1:  # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)

    extractor = 'bert' if FLAGS.use_bert else 'lambdamart'
    pretrained_dataset = FLAGS.dataset_name
    if FLAGS.dataset_name == 'duc_2004':
        pretrained_dataset = 'cnn_dm'
    if FLAGS.singles_and_pairs == 'both':
        FLAGS.exp_name = FLAGS.dataset_name + '_' + FLAGS.exp_name + extractor + '_both'
        FLAGS.pretrained_path = os.path.join(FLAGS.log_root,
                                             pretrained_dataset + '_both')
        dataset_articles = FLAGS.dataset_name
    else:
        FLAGS.exp_name = FLAGS.dataset_name + '_' + FLAGS.exp_name + extractor + '_singles'
        FLAGS.pretrained_path = os.path.join(FLAGS.log_root,
                                             pretrained_dataset + '_singles')
        dataset_articles = FLAGS.dataset_name + '_singles'

    if FLAGS.upper_bound:
        FLAGS.exp_name = FLAGS.exp_name + '_upperbound'
        ssi_list = None  # this is if we are doing the upper bound evaluation (ssi_list comes straight from the groundtruth)
    else:
        my_log_dir = os.path.join(
            log_dir, '%s_%s_%s' %
            (FLAGS.dataset_name, extractor, FLAGS.singles_and_pairs))
        with open(os.path.join(my_log_dir, 'ssi.pkl'), 'rb') as f:
            ssi_list = pickle.load(f)

    print('Running statistics on %s' % FLAGS.exp_name)

    if FLAGS.dataset_name != "":
        FLAGS.data_path = os.path.join(FLAGS.data_root, FLAGS.dataset_name,
                                       FLAGS.dataset_split + '*')
    dataset_dir = os.path.join(FLAGS.data_root, FLAGS.dataset_name)
    if not os.path.exists(dataset_dir) or len(os.listdir(dataset_dir)) == 0:
        raise Exception('No TF example data found at %s.' % dataset_dir)

    logging.set_verbosity(logging.INFO)  # choose what level of logging you want
    logging.info('Starting seq2seq_attention in %s mode...', FLAGS.mode)

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.exp_name = FLAGS.exp_name if FLAGS.exp_name != '' else FLAGS.dataset_name
    FLAGS.actual_log_root = FLAGS.log_root
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)

    print(util.bcolors.OKGREEN + "Experiment path: " + FLAGS.log_root +
          util.bcolors.ENDC)

    if FLAGS.dataset_name == 'duc_2004':
        vocab = Vocab(FLAGS.vocab_path + '_' + 'cnn_dm',
                      FLAGS.vocab_size)  # create a vocabulary
    else:
        vocab = Vocab(FLAGS.vocab_path + '_' + FLAGS.dataset_name,
                      FLAGS.vocab_size)  # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode != 'decode':
        raise Exception(
            "The single_pass flag should only be True in decode mode")

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
    hparam_list = [
        item for item in list(FLAGS.flag_values_dict().keys()) if item != '?'
    ]
    hps_dict = {}
    for key, val in FLAGS.__flags.items():  # for each flag
        if key in hparam_list:  # if it's in the list
            hps_dict[key] = val.value  # add it to the dict
    hps = namedtuple("HParams", list(hps_dict.keys()))(**hps_dict)

    tf.set_random_seed(113)  # a seed value for randomness

    # The model is configured with max_dec_steps=1 because we only ever run one step of
    # the decoder at a time (to do beam search). Note that the batcher is initialized
    # with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries.
    decode_model_hps = hps._replace(max_dec_steps=1)

    start_time = time.time()
    np.random.seed(random_seed)
    source_dir = os.path.join(FLAGS.data_root, dataset_articles)
    source_files = sorted(glob.glob(source_dir + '/' + dataset_split + '*'))

    if 'cnn' in dataset_articles or 'xsum' in dataset_articles:
        total = len(source_files) * 1000  # sharded files hold ~1000 examples each
    else:
        total = len(source_files)
    example_generator = data.example_generator(source_dir + '/' +
                                               dataset_split + '*',
                                               True,
                                               False,
                                               should_check_valid=False)
    # batcher = Batcher(None, vocab, hps, single_pass=FLAGS.single_pass)
    model = SummarizationModel(decode_model_hps, vocab)
    decoder = BeamSearchDecoder(model, None, vocab)
    decoder.decode_iteratively(example_generator, total, names_to_types,
                               ssi_list, hps)
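Each of these drivers freezes its flags into an HParams namedtuple the same way: filter the flag registry against a whitelist, then build an immutable tuple the model cannot mutate mid-run. A standalone sketch of the pattern, with an ordinary dict standing in for the real FLAGS object:

from collections import namedtuple

flag_values = {"mode": "decode", "lr": 0.15, "batch_size": 16,
               "log_root": "/tmp/logs"}        # stand-in for FLAGS.__flags
hparam_list = ["mode", "lr", "batch_size"]     # whitelist of model hyperparameters

hps_dict = {k: v for k, v in flag_values.items() if k in hparam_list}
hps = namedtuple("HParams", list(hps_dict.keys()))(**hps_dict)
assert hps.mode == "decode" and not hasattr(hps, "log_root")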
예제 #27
0
  def main(self, unused_argv):
    if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
      raise Exception("Problem with flags: %s" % unused_argv)

    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
    tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want
    tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

    flags = getattr(FLAGS, "__flags")  # raw flag dict, dumped to config.txt below

    if not os.path.exists(FLAGS.log_root):
      if FLAGS.mode=="train":
        os.makedirs(FLAGS.log_root)
      else:
        raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))

    fw = open('{}/config.txt'.format(FLAGS.log_root),'w')
    for k,v in flags.items():
      fw.write('{}\t{}\n'.format(k,v))
    fw.close()

    self.vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode':
      FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode!='decode':
      raise Exception("The single_pass flag should only be True in decode mode")

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs

    hparam_list = ['mode', 'lr', 'gpu_num',
    'gamma', 'eta', 'zeta', 'fixed_zeta', 'zeta_clipping', 'rl_start_step',
    'fixed_eta', 'reward_function', 'intradecoder', 
    'use_temporal_attention', 'ac_training','rl_training', 'matrix_attention', 'calculate_true_q',
    'enc_hidden_dim', 'dec_hidden_dim', 'k', 
    'scheduled_sampling', 'sampling_probability','fixed_sampling_probability',
    'alpha', 'hard_argmax', 'greedy_scheduled_sampling',
    'adagrad_init_acc', 'rand_unif_init_mag', 
    'trunc_norm_init_std', 'max_grad_norm', 
    'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps',
    'dqn_scheduled_sampling', 'dqn_sleep_time', 'E2EBackProp',
    'coverage', 'cov_loss_wt', 'pointer_gen', 'partial_rewarding']
    hps_dict = {}
    for key,val in flags.items(): # for each flag
      if key in hparam_list: # if it's in the list
        hps_dict[key] = val.value # add it to the dict
    self.hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

    # Create a batcher object that will create minibatches of data
    self.full_batcher = Batcher(FLAGS.full_data_path, self.vocab, self.hps, single_pass=FLAGS.single_pass, decode_after=FLAGS.decode_after)
    self.partial_batcher = Batcher(FLAGS.partial_data_path, self.vocab, self.hps, single_pass=FLAGS.single_pass, decode_after=FLAGS.decode_after)
    tf.set_random_seed(111) # a seed value for randomness

    if self.hps.mode == 'train':
      print("creating model...")
      if FLAGS.rl_training: # merging batches from full and partial datasets
        self.hps = self.hps._replace(batch_size=2 * self.hps.batch_size)
      self.model = SummarizationModel(self.hps, self.vocab)
      self.setup_training()
    elif self.hps.mode == 'eval':
      if FLAGS.rl_training: # merging batches from full and partial datasets
        self.hps = self.hps._replace(batch_size=2 * self.hps.batch_size)
      self.model = SummarizationModel(self.hps, self.vocab)
      self.run_eval()
    elif self.hps.mode == 'decode':
      # The model is configured with max_dec_steps=1 because we only ever run one step of
      # the decoder at a time (to do beam search). Note that the batcher is initialized
      # with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries.
      decode_model_hps = self.hps._replace(max_dec_steps=1)
      print(decode_model_hps)
      model = SummarizationModel(decode_model_hps, self.vocab)
      if FLAGS.partial_decoding:
        decoder = BeamSearchDecoder(model, self.partial_batcher, self.vocab, dqn=None)
      else:
        decoder = BeamSearchDecoder(model, self.full_batcher, self.vocab, dqn=None)
      decoder.decode() # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
    else:
      raise ValueError("The 'mode' flag must be one of train/eval/decode")
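In example #27 above, `rl_training` doubles `batch_size` because each step consumes one batch from the full-data batcher and one from the partial-data batcher, concatenated into a single forward pass. A minimal numpy sketch of that merge (the array shapes are illustrative assumptions):

import numpy as np

batch_size, max_enc_steps = 8, 50
full_batch = np.zeros((batch_size, max_enc_steps), dtype=np.int32)     # from full_batcher
partial_batch = np.ones((batch_size, max_enc_steps), dtype=np.int32)  # from partial_batcher

# The model is built with batch_size * 2, so one run covers both sources.
merged = np.concatenate([full_batch, partial_batch], axis=0)
assert merged.shape == (2 * batch_size, max_enc_steps)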
예제 #28
0
def main(unused_argv):
    # GPU tricks
    if FLAGS.device is None:
        index_of_gpu = get_available_gpu()
        if index_of_gpu < 0:
            index_of_gpu = ''
        FLAGS.device = index_of_gpu
        tf.logging.info(bcolors.OKGREEN + 'using {}'.format(FLAGS.device) +
                        bcolors.ENDC)  # terminal colors
    else:
        index_of_gpu = FLAGS.device
    os.environ["CUDA_VISIBLE_DEVICES"] = str(index_of_gpu)
    tf.logging.info('try to occupy GPU memory!')
    placeholder_session = tf.Session()
    # tf.contrib.memory_stats.BytesLimit() generates an op that measures the total
    # memory (in bytes) of the device; divide by 2**30 to report GB.
    limit = placeholder_session.run(
        tf.contrib.memory_stats.BytesLimit()) / 1073741824
    tf.logging.info('occupy GPU memory %f GB', limit)

    if len(unused_argv) != 1:  # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)

    tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
    if not os.path.exists(FLAGS.log_root):
        if FLAGS.mode == "train":
            os.makedirs(FLAGS.log_root)
        else:
            raise Exception(
                "Logdir %s doesn't exist. Run in train mode to create it." %
                (FLAGS.log_root))
    tf.logging.info("vocab path is %s ", FLAGS.vocab_path)
    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)  # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode != 'decode':
        raise Exception(
            "The single_pass flag should only be True in decode mode")

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
    hparam_list = [
        'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
        'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim',
        'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage',
        'cov_loss_wt', 'pointer_gen'
    ]
    hps_dict = {}
    export_json = {}
    for key, val in FLAGS.__flags.items():
        export_json[key] = val
        if key in hparam_list:
            hps_dict[key] = val
            tf.logging.info('{} {}'.format(key, val))
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
    ######################
    # save parameters and python script
    ######################
    # save parameters
    tf.logging.info('saving parameters')
    current_time_str = datetime.now().strftime('%m-%d-%H-%M')
    json_para_file = open(
        os.path.join(FLAGS.log_root,
                     'flags-' + current_time_str + '-' + FLAGS.mode + '.json'),
        'w')
    json_para_file.write(json.dumps(export_json, indent=4) + '\n')
    json_para_file.close()
    # save python source code
    tf.logging.info('saving source code')
    python_list = glob.glob('./*.py')
    zip_file = zipfile.ZipFile(
        os.path.join(
            FLAGS.log_root,
            'source_code_bak-' + current_time_str + '-' + FLAGS.mode + '.zip'),
        'w')
    for d in python_list:
        zip_file.write(d)
    zip_file.close()

    # Create a batcher object that will create minibatches of data
    batcher = Batcher(FLAGS.data_path,
                      vocab,
                      hps,
                      single_pass=FLAGS.single_pass)

    tf.set_random_seed(111)  # a seed value for randomness

    if hps.mode == 'train':
        tf.logging.info("creating model...")
        model = SummarizationModel(hps, vocab)
        placeholder_session.close()
        setup_training(model, batcher)
    elif hps.mode == 'eval':
        model = SummarizationModel(hps, vocab)
        placeholder_session.close()
        run_eval(model, batcher, vocab)
    elif hps.mode == 'decode':
        # The model is configured with max_dec_steps=1 because we only ever run one step of
        # the decoder at a time (to do beam search). Note that the batcher is initialized
        # with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries.
        decode_model_hps = hps._replace(max_dec_steps=1)
        model = SummarizationModel(decode_model_hps, vocab)
        decoder = BeamSearchDecoder(model, batcher, vocab)
        placeholder_session.close()
        try:
            # Decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once).
            decoder.decode()
        except KeyboardInterrupt:
            tf.logging.info('stop decoding!')
    else:
        raise ValueError("The 'mode' flag must be one of train/eval/decode")
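The bookkeeping in example #28, dumping the flags to JSON and zipping the driver scripts next to the checkpoints, is a useful recipe on its own: it makes any log directory reproducible after the fact. A stdlib-only sketch under assumed placeholder paths:

import glob
import json
import os
import zipfile
from datetime import datetime

log_root = "/tmp/exp"                           # placeholder run directory
os.makedirs(log_root, exist_ok=True)
stamp = datetime.now().strftime('%m-%d-%H-%M')

flags_snapshot = {"mode": "train", "lr": 0.15}  # illustrative flag values
with open(os.path.join(log_root, 'flags-' + stamp + '.json'), 'w') as f:
    json.dump(flags_snapshot, f, indent=4)

# Archive every *.py next to the checkpoints for later inspection.
with zipfile.ZipFile(os.path.join(log_root, 'source_code_bak-' + stamp + '.zip'), 'w') as zf:
    for path in glob.glob('./*.py'):
        zf.write(path)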
예제 #29
0
def run_training_reinforce(hps, retrain_model_hps, eval_hps, model_train, model_decode, batcher, neg_batcher, vocab):
    train_dir = os.path.join(FLAGS.log_root, 'train')
    
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)

    config = util.get_config()
    default_device = tf.device('/cpu:0')

    with default_device:
        G_d = tf.Graph()
        G_t = tf.Graph()
        R = tf.Graph()

        with G_d.as_default():
            model_decode.build_graph()
            model_dvars = tf.get_collection(tf.GraphKeys.VARIABLES)

        with G_t.as_default():
            model_train.build_graph()
            model_tvars = tf.get_collection(tf.GraphKeys.VARIABLES)

        ranker, rank_sess, _ = MultiFeedForwardClassifier.load(
            FLAGS.rank_log_root, graph=R, batch_size=FLAGS.batch_size)

        rank_word_dict, rank_embeddings = ioutils.load_embeddings(
            FLAGS.rank_embed_size, FLAGS.rank_vocab_file,
            generate=False, load_extrac_from=FLAGS.rank_log_root, normalize=True)

        ranker.initialize_embeddings(rank_sess, rank_embeddings)

    decode_saver = tf.train.Saver(model_dvars)
    train_saver = tf.train.Saver(model_tvars, max_to_keep=10)
    model_ckpt_state = tf.train.get_checkpoint_state(train_dir)

    decode_sess = tf.Session(config=config, graph=G_d)
    train_sess = tf.Session(config=config, graph=G_t)
    train_saver = util.restore_model(
        train_sess, train_saver, model_ckpt_state.model_checkpoint_path, model_tvars, save=True)

    decode_saver = util.restore_model(
        decode_sess, decode_saver, model_ckpt_state.model_checkpoint_path, model_dvars)

    decoder = BeamSearchDecoder(model_decode, None, vocab, sess=decode_sess)

    train_step = 0

    hist_batch = []

    while True:
        batch = batcher.next_batch()
        neg_batch = neg_batcher.next_batch()
        pos_tg_len = np.sum(batch.padding_mask, axis=1)
        neg_tg_len = np.sum(neg_batch.padding_mask, axis=1)
        model_ckpt_state = tf.train.get_checkpoint_state(train_dir)
        decode_saver.restore(decode_sess, model_ckpt_state.model_checkpoint_path)
        decoder = BeamSearchDecoder(model_decode, None, vocab, sess=decode_sess)
        pos_gen_out, pos_gen_out_extended_vocab, pos_all_hyp = decoder.sample_batch_wise(
            batch, sampling=FLAGS.sampling, preserve_hyp=True, temp_ratio=FLAGS.temp_ratio)
        neg_gen_out, neg_gen_out_extended_vocab, neg_all_hyp = decoder.sample_batch_wise(
            neg_batch, sampling=FLAGS.sampling, preserve_hyp=True, temp_ratio=FLAGS.temp_ratio)
        
        assert len(pos_gen_out) == hps.batch_size

        pos_decode_len = util.measure_len(pos_gen_out, vocab)
        neg_decode_len = util.measure_len(neg_gen_out, vocab)

        rank_src = util.convert_to_full_vocab(batch.enc_batch_extend_vocab, rank_word_dict, vocab, batch.art_oovs)
        rank_tg = util.convert_to_full_vocab(pos_gen_out_extended_vocab, rank_word_dict, vocab, batch.art_oovs)
        pos_sent_reward = ranker.eval(rank_sess, rank_src, rank_tg, batch.enc_lens, np.array(pos_decode_len)-1)

        rank_src = util.convert_to_full_vocab(
            neg_batch.enc_batch_extend_vocab, rank_word_dict, vocab, neg_batch.art_oovs)
        rank_tg = util.convert_to_full_vocab(
            neg_gen_out_extended_vocab, rank_word_dict, vocab, neg_batch.art_oovs)
        neg_sent_reward = ranker.eval(rank_sess, rank_src, rank_tg, neg_batch.enc_lens, np.array(neg_decode_len)-1)

        sent_reward = np.concatenate((pos_sent_reward, neg_sent_reward), axis=0)
        ###################################################################################
        # train G
        pos_decode_reward = cal_ranker_reward(
            batch, vocab, rank_word_dict, pos_all_hyp, decoder, ranker, rank_sess, pos_tg_len, rank_gt=FLAGS.rank_gt)
        neg_decode_reward = cal_ranker_reward(
            neg_batch, vocab, rank_word_dict, neg_all_hyp, decoder, ranker, rank_sess, neg_tg_len)
        decode_reward = np.concatenate((pos_decode_reward, neg_decode_reward), axis=0)

        msk = np.zeros_like(decode_reward)
        decode_len = np.concatenate((pos_decode_len, neg_decode_len), axis=0)
        
        for b, l in enumerate(decode_len):
            msk[b, :l] = 1

        rewards = decode_reward * msk / FLAGS.num_mc 

        sent_reward = util.rank_sentence(sent_reward, scale=FLAGS.sent_scale)
        rewards = util.rescale_reward(rewards, sent_reward, msk, scale=FLAGS.token_scale)

        rewards = rewards * msk

        dec_inp, target, padding = util.prepare_retrain_data(
            hps, pos_gen_out, pos_gen_out_extended_vocab, vocab, pos_decode_len)
        neg_dec_inp, neg_target, neg_padding = util.prepare_retrain_data(
            hps, neg_gen_out, neg_gen_out_extended_vocab, vocab, neg_decode_len)

        dec_inp = np.concatenate((dec_inp, dec_inp, neg_dec_inp), axis=0)
        target = np.concatenate((target, target, neg_target), axis=0)
        padding = np.concatenate((padding, padding, neg_padding), axis=0)
        rewards = np.concatenate((
            rewards[:FLAGS.batch_size], 
            np.ones(FLAGS.batch_size), 
            rewards[FLAGS.batch_size:]
            ))
        rslts = model_train.run_train_step_with_reward(
            train_sess, batch, dec_inp, target, padding, rewards, 
            neg_batch=neg_batch, temp_ratio=FLAGS.temp_ratio)

        train_step = rslts['global_step']
        train_saver.save(
            train_sess, 
            os.path.join(train_dir, 'model.ckpt'),
            global_step=train_step
            )

        if train_step % 50 == 0:
            train_saver.save(
                train_sess, os.path.join(save_dir, 'model.ckpt'),
                global_step=train_step
                )

            eval_loss = 0
            eval_batcher = SimpleBatcher(FLAGS.eval_data_path, vocab, eval_hps)
            eval_batcher_sz = SimpleBatcher(FLAGS.eval_data_path, vocab, eval_hps)

            eval_saver.restore(eval_sess, model_ckpt_state.model_checkpoint_path)
            for _ in range(eval_batcher.num_batch):
                eval_batch = eval_batcher.next_batch()
                eval_loss += model_eval.run_eval_step(eval_sess, eval_batch)['loss']

            score = 0
            sample_score = 0
            rouge_score = 0
            rouge2_score = 0
            rougel_score = 0
            bleu_score = 0

            for j in range(eval_batcher_sz.num_batch):
                eval_batch = eval_batcher_sz.next_batch()
                tg_len = np.sum(eval_batch.padding_mask, axis=1)
                _, sample_out_extended_vocab = decoder.sample_batch_wise(
                    eval_batch, sampling=FLAGS.rank_sampling, temp_ratio=FLAGS.sample_temp_ratio)
                sample_len = util.measure_len(sample_out_extended_vocab, vocab)
                gen_out, gen_out_extended_vocab = decoder.sample_batch_wise(
                    eval_batch)  # assumed call, mirroring the sample_batch_wise usage above
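The reward plumbing in example #29 leans on one masking idiom: rewards come back per decoder step, but every sampled summary has its own length, so steps past a sequence's end must contribute nothing before rescaling. A small numpy sketch of that mask:

import numpy as np

max_dec_steps = 6
decode_len = np.array([3, 5])               # per-example generated lengths
rewards = np.random.rand(2, max_dec_steps)  # per-step rewards from the ranker

msk = np.zeros_like(rewards)
for b, l in enumerate(decode_len):
    msk[b, :l] = 1.0                        # keep only real decoding steps

masked_rewards = rewards * msk
assert masked_rewards[0, 3:].sum() == 0.0   # padding steps earn zero reward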
예제 #30
0
def main(_):
    tf.gfile.MakeDirs(FLAGS.output_dir)
    tf.logging.set_verbosity(tf.logging.INFO)  # choose what level of logging you want
    tf.logging.info('Starting seq2seq_attention in %s mode...', FLAGS.mode)

    # create a vocabulary
    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode not in ['decode', 'test']:
        raise Exception(
            "The single_pass flag should only be True in decode or test mode")

    run_config = tf.contrib.tpu.RunConfig(
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps)

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
    hparam_list = [
        'init_checkpoint', 'mode', 'learning_rate', 'adagrad_init_acc',
        'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm',
        'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps',
        'max_enc_steps', 'coverage', 'cov_loss_wt', 'pointer_gen',
        'num_train_steps', 'use_tpu', 'num_warmup_steps'
    ]
    hps_dict = {}
    for key, val in FLAGS.__flags.items():  # for each flag
        if key in hparam_list:  # if it's in the list
            hps_dict[key] = val  # add it to the dict
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

    # Create a batcher object that will create minibatches of data
    batcher = Batcher(FLAGS.input_file,
                      vocab,
                      hps,
                      single_pass=FLAGS.single_pass)

    if hps.mode.value == 'train':
        tf.logging.info("***** Running training *****")
        tf.logging.info("  Batch size = %d", FLAGS.batch_size)

        model = SummarizationModel(hps, vocab)
        setup_training(model, batcher)

    elif hps.mode.value == 'eval':
        tf.logging.info("***** Running evaluation *****")
        tf.logging.info("  Batch size = %d", FLAGS.batch_size)
        model = SummarizationModel(hps, vocab)
        run_eval(model, batcher, vocab)

    elif hps.mode.value == 'decode':
        decode_model_hps = hps._replace(max_dec_steps=1)
        model = SummarizationModel(decode_model_hps, vocab)
        decoder = BeamSearchDecoder(model, batcher, vocab)
        # When we set single_pass=True (default), decode the dataset exactly once
        decoder.decode()

    else:
        raise ValueError("The 'mode' flag must be one of train/eval/decode")
예제 #31
0
def main(unused_argv):
    if len(unused_argv) != 1:  # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)

    tf.logging.set_verbosity(tf.logging.INFO)  # choose what level of logging you want
    tf.logging.info('Starting seq2seq_attention in %s mode...', FLAGS.mode)

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
    if not os.path.exists(FLAGS.log_root):
        if FLAGS.mode == "train":
            os.makedirs(FLAGS.log_root)
        else:
            raise Exception(
                "Logdir %s doesn't exist. Run in train mode to create it." %
                (FLAGS.log_root))

    vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)  # create a vocabulary

    need_stop_words = (
        (FLAGS.pointer_gen and
         (FLAGS.co_occurrence or FLAGS.prev_relation or FLAGS.co_occurrence_h or
          FLAGS.co_occurrence_i or (FLAGS.coverage and FLAGS.coverage_weighted))) or
        FLAGS.attention_weighted or FLAGS.markov_attention or
        FLAGS.markov_attention_contribution)
    stop_word_ids = get_stop_word_ids(
        FLAGS.stop_words_path, vocab) if need_stop_words else None

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode != 'decode':
        raise Exception(
            "The single_pass flag should only be True in decode mode")

    # if FLAGS.prev_relation and not FLAGS.co_occurrence:
    #   raise Exception("The co_occurrence flag should be True when the prev_relation flag is True")

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs
    hparam_list = [
        'top_ten_kept', 'decode_only', 'generation_only', 'copy_only',
        'occurrence_window_size', 'max_title_len', 'title_engaged',
        'title_guided', 'ref_dir', 'tagger_encoding', 'tagger_attention',
        'source_siding_bridge', 'target_siding_bridge', 'language', 'dropout',
        'optimizer', 'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag',
        'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim',
        'batch_size', 'beam_depth', 'max_dec_steps', 'max_enc_steps',
        'max_keyphrase_num', 'attention_weighted', 'coverage',
        'coverage_weighted', 'coverage_weighted_expansion', 'co_occurrence',
        'prev_relation', 'co_occurrence_h', 'co_occurrence_i', 'cov_loss_wt',
        'pointer_gen', 'cell_type', 'markov_attention',
        'markov_attention_contribution', 'markov_attention_contribution_used_x'
    ]
    hps_dict = {}
    for key, val in FLAGS.__flags.items():  # for each flag
        if key in hparam_list:  # if it's in the list
            hps_dict[key] = val  # add it to the dict
    hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

    # Create a batcher object that will create minibatches of data
    batcher = Batcher(FLAGS.data_path,
                      vocab,
                      hps,
                      single_pass=FLAGS.single_pass,
                      stop_words=stop_word_ids)

    tf.set_random_seed(111)  # a seed value for randomness

    if hps.mode == 'train':
        print("creating model...")
        model = SummarizationModel(hps, vocab)
        setup_training(model, batcher)
    elif hps.mode == 'eval':
        model = SummarizationModel(hps, vocab)
        run_eval(model, batcher, vocab)
    elif hps.mode == 'decode':
        # The model is configured with max_dec_steps=1 because we only ever run one step of
        # the decoder at a time (to do beam search). Note that the batcher is initialized
        # with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries.
        decode_model_hps = hps._replace(max_dec_steps=1)
        model = SummarizationModel(decode_model_hps, vocab)
        decoder = BeamSearchDecoder(model, batcher, vocab)
        # Decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once).
        decoder.decode()
    else:
        raise ValueError("The 'mode' flag must be one of train/eval/decode")
예제 #32
0
  def main(self, unused_argv):
    if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly
      raise Exception("Problem with flags: %s" % unused_argv)

    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
    tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want
    tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

    flags = getattr(FLAGS, "__flags")  # raw flag dict, dumped to config.txt below

    if not os.path.exists(FLAGS.log_root):
      if FLAGS.mode=="train":
        os.makedirs(FLAGS.log_root)
      else:
        raise Exception("Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root))

    fw = open('{}/config.txt'.format(FLAGS.log_root), 'w')
    for k, v in flags.items():
      fw.write('{}\t{}\n'.format(k, v))
    fw.close()

    self.vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.
    if FLAGS.mode == 'decode':
      FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode!='decode':
      raise Exception("The single_pass flag should only be True in decode mode")

    # Make a namedtuple hps, containing the values of the hyperparameters that the model needs

    hparam_list = ['mode', 'lr', 'gpu_num',
    #'sampled_greedy_flag', 
    'gamma', 'eta', 
    'fixed_eta', 'reward_function', 'intradecoder', 
    'use_temporal_attention', 'ac_training','rl_training', 'matrix_attention', 'calculate_true_q',
    'enc_hidden_dim', 'dec_hidden_dim', 'k', 
    'scheduled_sampling', 'sampling_probability','fixed_sampling_probability',
    'alpha', 'hard_argmax', 'greedy_scheduled_sampling',
    'adagrad_init_acc', 'rand_unif_init_mag', 
    'trunc_norm_init_std', 'max_grad_norm', 
    'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps',
    'dqn_scheduled_sampling', 'dqn_sleep_time', 'E2EBackProp',
    'coverage', 'cov_loss_wt', 'pointer_gen']
    hps_dict = {}
    for key,val in flags.items(): # for each flag
      if key in hparam_list: # if it's in the list
        hps_dict[key] = val.value # add it to the dict
    if FLAGS.ac_training:
      hps_dict.update({'dqn_input_feature_len':(FLAGS.dec_hidden_dim)})
    self.hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)
    # creating all the required parameters for DDQN model.
    if FLAGS.ac_training:
      hparam_list = ['lr', 'dqn_gpu_num', 
      'dqn_layers', 
      'dqn_replay_buffer_size', 
      'dqn_batch_size', 
      'dqn_target_update',
      'dueling_net',
      'dqn_polyak_averaging',
      'dqn_sleep_time',
      'dqn_scheduled_sampling',
      'max_grad_norm']
      hps_dict = {}
      for key,val in flags.items(): # for each flag
        if key in hparam_list: # if it's in the list
          hps_dict[key] = val.value # add it to the dict
      hps_dict.update({'dqn_input_feature_len':(FLAGS.dec_hidden_dim)})
      hps_dict.update({'vocab_size':self.vocab.size()})
      self.dqn_hps = namedtuple("HParams", hps_dict.keys())(**hps_dict)

    # Create a batcher object that will create minibatches of data
    self.batcher = Batcher(FLAGS.data_path, self.vocab, self.hps, single_pass=FLAGS.single_pass, decode_after=FLAGS.decode_after)

    tf.set_random_seed(111) # a seed value for randomness

    if self.hps.mode == 'train':
      print("creating model...")
      self.model = SummarizationModel(self.hps, self.vocab)
      if FLAGS.ac_training:
        # current DQN with parameters \Psi
        self.dqn = DQN(self.dqn_hps, 'current')
        # target DQN with parameters \Psi^{\prime}
        self.dqn_target = DQN(self.dqn_hps, 'target')
      self.setup_training()
    elif self.hps.mode == 'eval':
      self.model = SummarizationModel(self.hps, self.vocab)
      if FLAGS.ac_training:
        self.dqn = DQN(self.dqn_hps,'current')
        self.dqn_target = DQN(self.dqn_hps,'target')
      self.run_eval()
    elif self.hps.mode == 'decode':
      # The model is configured with max_dec_steps=1 because we only ever run one step of
      # the decoder at a time (to do beam search). Note that the batcher is initialized
      # with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries.
      decode_model_hps = self.hps._replace(max_dec_steps=1)
      model = SummarizationModel(decode_model_hps, self.vocab)
      if FLAGS.ac_training:
        # We need our target DDQN network for collecting Q-estimation at each decoder step.
        dqn_target = DQN(self.dqn_hps,'target')
      else:
        dqn_target = None
      decoder = BeamSearchDecoder(model, self.batcher, self.vocab, dqn=dqn_target)
      decoder.decode() # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
    else:
      raise ValueError("The 'mode' flag must be one of train/eval/decode")
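The current/target DQN pair in example #32 (parameters \Psi and \Psi') follows the standard double-DQN setup: the target network trails the current one, either by periodic copy or, when `dqn_polyak_averaging` is set, by an exponential moving average. A framework-free sketch of the Polyak update; `tau` here is an assumed smoothing coefficient, not a flag from the snippet:

import numpy as np

tau = 0.001                                         # assumed smoothing coefficient
psi = {"w": np.random.randn(4, 4)}                  # current DQN parameters
psi_target = {k: v.copy() for k, v in psi.items()}  # target DQN parameters

def polyak_update(current, target, tau):
    """target <- tau * current + (1 - tau) * target, applied per tensor."""
    for name in target:
        target[name] = tau * current[name] + (1.0 - tau) * target[name]

polyak_update(psi, psi_target, tau)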