Example No. 1
def train_model(args):
    trace('making vocabularies ...')
    src_vocab = Vocabulary.new(gens.word_list(args.source), args.vocab)
    trg_vocab = Vocabulary.new(gens.word_list(args.target), args.vocab)

    trace('making model ...')
    model = EncoderDecoderModel.new(src_vocab, trg_vocab, args.embed, args.hidden)

    for epoch in range(args.epoch):
        trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
        trained = 0
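        # The corpus generators are re-created every epoch; gens.sorted_parallel
        # pairs source and target sentences and gens.batch groups them into
        # minibatches of size args.minibatch.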
        gen1 = gens.word_list(args.source)
        gen2 = gens.word_list(args.target)
        gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * args.minibatch), args.minibatch)
        model.init_optimizer()

        for src_batch, trg_batch in gen3:
            src_batch = fill_batch(src_batch)
            trg_batch = fill_batch(trg_batch)
            K = len(src_batch)
            hyp_batch = model.train(src_batch, trg_batch)

            for k in range(K):
                trace('epoch %3d/%3d, sample %8d' % (epoch + 1, args.epoch, trained + k + 1))
                trace('  src = ' + ' '.join([x if x != '</s>' else '*' for x in src_batch[k]]))
                trace('  trg = ' + ' '.join([x if x != '</s>' else '*' for x in trg_batch[k]]))
                trace('  hyp = ' + ' '.join([x if x != '</s>' else '*' for x in hyp_batch[k]]))

            trained += K

        trace('saving model ...')
        model.save(args.model + '.%03d' % (epoch + 1))

    trace('finished.')
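
Most of the translation examples on this page pad every minibatch with fill_batch before it is fed to the model. The helper itself is not shown here, so the signature and the extra end-of-sentence token below are assumptions; a minimal sketch, assuming '</s>' doubles as the padding symbol, could look like this:

def fill_batch(batch, token='</s>'):
    # Pad every sentence to the length of the longest one in the batch, plus one
    # trailing end-of-sentence marker, so the minibatch becomes rectangular.
    max_len = max(len(x) for x in batch)
    return [x + [token] * (max_len - len(x) + 1) for x in batch]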
Example No. 2
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(word_lists, args.vocab)
  phrase_set = set()
  semi_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semi_set |= set(extract_semi_labels(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semi_vocab = Vocabulary.new([list(semi_set)], len(semi_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semi_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
      args.vocab, args.embed, args.queue, args.stack,
      len(phrase_set), len(semi_set),
  )
  if USE_GPU:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr = 0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0
    
    for samples in batch(zip(word_lists, op_lists), args.minibatch):
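      # The loss is accumulated over the whole minibatch before a single
      # backward pass and optimizer update (batch() is assumed to yield the
      # samples as parallel tuples, which zip(*samples) re-pairs below).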
      parser.zerograds()
      loss = my_zeros((), np.float32)

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1
      
      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semi_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
Example No. 3
    def train_model(self):
        trace('making vocabularies ...')
        src_vocab = Vocabulary.new(gens.word_list(self.source), self.vocab)
        trg_vocab = Vocabulary.new(gens.word_list(self.target), self.vocab)

        trace('making model ...')
        model = self.new(src_vocab, trg_vocab, self.embed, self.hidden, self.parameter_dict)

        random_number = random.randint(0, self.minibatch - 1)
        for i_epoch in range(self.epoch):
            trace('epoch %d/%d: ' % (i_epoch + 1, self.epoch))
            trained = 0
            gen1 = gens.word_list(self.source)
            gen2 = gens.word_list(self.target)
            gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * self.minibatch), self.minibatch)
            model.init_optimizer()

            for src_batch, trg_batch in gen3:
                src_batch = fill_batch(src_batch)
                trg_batch = fill_batch(trg_batch)
                K = len(src_batch)
                hyp_batch = model.train(src_batch, trg_batch)

                if trained == 0:
                    self.print_out(random_number, i_epoch, trained, src_batch, trg_batch, hyp_batch)

                trained += K

            trace('saving model ...')
            model.save("ChainerMachineTranslation" + '.%03d' % (i_epoch + 1))

        trace('finished.')
Example No. 4
def train_model(args):
    trace('making vocabularies ...')
    src_vocab = Vocabulary.new(gens.word_list(args.source), args.vocab)
    trg_vocab = Vocabulary.new(gens.word_list(args.target), args.vocab)

    trace('making model ...')
    model = EncoderDecoderModel.new(src_vocab, trg_vocab, args.embed, args.hidden)

    for epoch in range(args.epoch):
        trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
        trained = 0
        gen1 = gens.word_list(args.source)
        gen2 = gens.word_list(args.target)
        gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * args.minibatch), args.minibatch)
        model.init_optimizer()

        for src_batch, trg_batch in gen3:
            src_batch = fill_batch(src_batch)
            trg_batch = fill_batch(trg_batch)
            K = len(src_batch)
            hyp_batch = model.train(src_batch, trg_batch)

            for k in range(K):
                trace('epoch %3d/%3d, sample %8d' % (epoch + 1, args.epoch, trained + k + 1))
                trace('  src = ' + ' '.join([x if x != '</s>' else '*' for x in src_batch[k]]))
                trace('  trg = ' + ' '.join([x if x != '</s>' else '*' for x in trg_batch[k]]))
                trace('  hyp = ' + ' '.join([x if x != '</s>' else '*' for x in hyp_batch[k]]))

            trained += K

        trace('saving model ...')
        model.save(args.model + '.%03d' % (epoch + 1))

    trace('finished.')
Example No. 5
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(word_lists, args.vocab)
  phrase_set = set()
  semi_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semi_set |= set(extract_semi_labels(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semi_vocab = Vocabulary.new([list(semi_set)], len(semi_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
  op_lists = [convert_op_list(x, phrase_vocab, semi_vocab) for x in op_lists]

  trace('start training ...')
  parser = Parser(
      args.vocab, args.embed, args.queue, args.stack,
      len(phrase_set), len(semi_set),
  )
  if USE_GPU:
    parser.to_gpu()
  opt = optimizers.AdaGrad(lr = 0.005)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(5))

  for epoch in range(args.epoch):
    n = 0
    
    for samples in batch(zip(word_lists, op_lists), args.minibatch):
      parser.zerograds()
      loss = my_zeros((), np.float32)

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0)
        n += 1
      
      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semi_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

  trace('finished.')
Example No. 6
def train(args):
    trace('making vocabularies ...')
    src_vocab = Vocabulary.new(gens.word_list(args.source), args.vocab)
    trg_vocab = Vocabulary.new(gens.word_list(args.target), args.vocab)

    trace('making model ...')
    attmt = AttentionMT(args.vocab, args.embed, args.hidden)
    if args.use_gpu:
        attmt.to_gpu()

    for epoch in range(args.epoch):
        trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
        trained = 0
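        # A fresh AdaGrad optimizer with gradient clipping is set up at the
        # start of every epoch in this example.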
        gen1 = gens.word_list(args.source)
        gen2 = gens.word_list(args.target)
        gen3 = gens.batch(
            gens.sorted_parallel(gen1, gen2, 100 * args.minibatch),
            args.minibatch)
        opt = optimizers.AdaGrad(lr=0.01)
        opt.setup(attmt)
        opt.add_hook(optimizer.GradientClipping(5))

        for src_batch, trg_batch in gen3:
            src_batch = fill_batch(src_batch)
            trg_batch = fill_batch(trg_batch)
            K = len(src_batch)
            hyp_batch, loss = forward(src_batch, trg_batch, src_vocab,
                                      trg_vocab, attmt, True, 0)
            loss.backward()
            opt.update()

            for k in range(K):
                trace('epoch %3d/%3d, sample %8d' %
                      (epoch + 1, args.epoch, trained + k + 1))
                trace(
                    '  src = ' +
                    ' '.join([x if x != '</s>' else '*'
                              for x in src_batch[k]]))
                trace(
                    '  trg = ' +
                    ' '.join([x if x != '</s>' else '*'
                              for x in trg_batch[k]]))
                trace(
                    '  hyp = ' +
                    ' '.join([x if x != '</s>' else '*'
                              for x in hyp_batch[k]]))

            trained += K

        trace('saving model ...')
        prefix = args.model + '.%03d' % (epoch + 1)
        src_vocab.save(prefix + '.srcvocab')
        trg_vocab.save(prefix + '.trgvocab')
        attmt.save_spec(prefix + '.spec')
        serializers.save_hdf5(prefix + '.weights', attmt)

    trace('finished.')
Example No. 7
    def train(self):
        """
        Train method.
        If a word2vec model is provided, its weights are copied into the
        embedding and decoder layers before training.
        The optimizer is AdaGrad.
        """
        trace('making vocabularies ...')
        src_vocab = Vocabulary.new(gens.word_list(self.source), self.vocab)
        trg_vocab = Vocabulary.new(gens.word_list(self.target), self.vocab)

        trace('making model ...')
        self.attention_dialogue = AttentionDialogue(self.vocab, self.embed,
                                                    self.hidden, self.XP)
        if self.word2vecFlag:
            self.copy_model(self.word2vec, self.attention_dialogue.emb)
            self.copy_model(self.word2vec,
                            self.attention_dialogue.dec,
                            dec_flag=True)

        for epoch in range(self.epoch):
            trace('epoch %d/%d: ' % (epoch + 1, self.epoch))
            trained = 0
            gen1 = gens.word_list(self.source)
            gen2 = gens.word_list(self.target)
            gen3 = gens.batch(
                gens.sorted_parallel(gen1, gen2, 100 * self.minibatch),
                self.minibatch)
            opt = optimizers.AdaGrad(lr=0.01)
            opt.setup(self.attention_dialogue)
            opt.add_hook(optimizer.GradientClipping(5))

            random_number = random.randint(0, self.minibatch - 1)
            for src_batch, trg_batch in gen3:
                src_batch = fill_batch(src_batch)
                trg_batch = fill_batch(trg_batch)
                K = len(src_batch)
                hyp_batch, loss = self.forward_implement(
                    src_batch, trg_batch, src_vocab, trg_vocab,
                    self.attention_dialogue, True, 0)
                loss.backward()
                opt.update()

                self.print_out(random_number, epoch, trained, src_batch,
                               trg_batch, hyp_batch)

                trained += K

        trace('saving model ...')
        prefix = self.model
        model_path = APP_ROOT + "/model/" + prefix
        src_vocab.save(model_path + '.srcvocab')
        trg_vocab.save(model_path + '.trgvocab')
        self.attention_dialogue.save_spec(model_path + '.spec')
        serializers.save_hdf5(model_path + '.weights', self.attention_dialogue)

        trace('finished.')
Example No. 8
def train(args):
    if os.path.exists("./model/vocab.bin"):
        src_vocab = Vocabulary.load("./model/vocab.bin")
    else:
        src_vocab = Vocabulary.new(gens.word_list(args.source), args.n_vocab)
        src_vocab.save('./model/vocab.bin')
    if os.path.exists("./model/tag.bin"):
        trg_tag = Vocabulary.load("./model/tag.bin")
    else:
        trg_tag = Vocabulary.new(gens.word_list(args.target), args.n_tag)
        trg_tag.save('./model/tag.bin')
    print("vocab_len:{}".format(len(src_vocab)))
    print("tag_len:{}".format(len(trg_tag)))
    encdec = BiEncDecLSTM(args.n_vocab, args.layer, args.embed, args.hidden,
                          args.n_tag)
    optimizer = optimizers.Adam()
    optimizer.setup(encdec)

    for e_i in range(args.epoch):
        tt_list = [[src_vocab.stoi(char) for char in char_arr]
                   for char_arr in gens.word_list(args.source_tr)]
        tag_list = [
            trg_tag.stoi(tag[0]) for tag in gens.word_list(args.target_tr)
        ]
        print("{}:{}".format(len(tt_list), len(tag_list)))
        assert len(tt_list) == len(tag_list)
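        # Shuffle the sample order every epoch through a shared index
        # permutation so the character sequences and their tags stay aligned.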
        ind_arr = [ri for ri in range(len(tt_list))]
        random.shuffle(ind_arr)
        tt_now = (tt_list[ri] for ri in ind_arr)
        tag_now = (tag_list[ri] for ri in ind_arr)
        tt_gen = gens.batch(tt_now, args.batchsize)
        tag_gen = gens.batch(tag_now, args.batchsize)

        for tt, tag in zip(tt_gen, tag_gen):
            y_ws = encdec(tt)

            teac_arr = [src_vocab.itos(t) for t in tt[0]]
            pred_arr = [trg_tag.itos(y_each.data.argmax(0)) for y_each in y_ws]
            print("teach:{}:{}:{}".format(teac_arr, trg_tag.itos(tag[0]),
                                          pred_arr[0]))
            tag = xp.array(tag, dtype=xp.int32)
            loss = F.softmax_cross_entropy(y_ws, tag)

            encdec.cleargrads()
            loss.backward()
            optimizer.update()

            # loss.backward()
            # optimizer.target.cleargrads()
            # loss.backward()
            # loss.unchain_backward()
            # optimizer.update()

        serializers.save_npz('./model/attn_tag_model_{}.npz'.format(e_i),
                             encdec)
Example No. 9
    def train(self):
        """
        Train method.
        If a word2vec model is provided, its weights are copied into the
        embedding and decoder layers before training.
        The optimizer is AdaGrad.
        """
        trace("making vocabularies ...")
        src_vocab = Vocabulary.new(gens.word_list(self.source), self.vocab)
        trg_vocab = Vocabulary.new(gens.word_list(self.target), self.vocab)

        trace("making model ...")
        self.attention_dialogue = AttentionDialogue(self.vocab, self.embed, self.hidden, self.XP)
        if self.word2vecFlag:
            self.copy_model(self.word2vec, self.attention_dialogue.emb)
            self.copy_model(self.word2vec, self.attention_dialogue.dec, dec_flag=True)

        for epoch in range(self.epoch):
            trace("epoch %d/%d: " % (epoch + 1, self.epoch))
            trained = 0
            gen1 = gens.word_list(self.source)
            gen2 = gens.word_list(self.target)
            gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * self.minibatch), self.minibatch)
            opt = optimizers.AdaGrad(lr=0.01)
            opt.setup(self.attention_dialogue)
            opt.add_hook(optimizer.GradientClipping(5))

            random_number = random.randint(0, self.minibatch - 1)
            for src_batch, trg_batch in gen3:
                src_batch = fill_batch(src_batch)
                trg_batch = fill_batch(trg_batch)
                K = len(src_batch)
                hyp_batch, loss = self.forward_implement(
                    src_batch, trg_batch, src_vocab, trg_vocab, self.attention_dialogue, True, 0
                )
                loss.backward()
                opt.update()

                self.print_out(random_number, epoch, trained, src_batch, trg_batch, hyp_batch)

                trained += K

        trace("saving model ...")
        prefix = self.model
        model_path = APP_ROOT + "/model/" + prefix
        src_vocab.save(model_path + ".srcvocab")
        trg_vocab.save(model_path + ".trgvocab")
        self.attention_dialogue.save_spec(model_path + ".spec")
        serializers.save_hdf5(model_path + ".weights", self.attention_dialogue)

        trace("finished.")
Example No. 10
    def train(self):
        trace('making vocabularies ...')
        src_vocab = Vocabulary.new(gens.word_list(self.source), self.vocab)
        trg_vocab = Vocabulary.new(gens.word_list(self.target), self.vocab)

        trace('making model ...')
        encdec = EncoderDecoder(self.vocab, self.embed, self.hidden)
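        # Optionally initialize the encoder and decoder weights from a
        # pre-trained word2vec model before training.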
        if self.word2vecFlag:
            self.copy_model(self.word2vec, encdec.enc)
            self.copy_model(self.word2vec, encdec.dec, dec_flag=True)

        for epoch in range(self.epoch):
            trace('epoch %d/%d: ' % (epoch + 1, self.epoch))
            trained = 0
            gen1 = gens.word_list(self.source)
            gen2 = gens.word_list(self.target)
            gen3 = gens.batch(
                gens.sorted_parallel(gen1, gen2, 100 * self.minibatch),
                self.minibatch)
            opt = optimizers.AdaGrad(lr=0.01)
            opt.setup(encdec)
            opt.add_hook(optimizer.GradientClipping(5))

            random_number = random.randint(0, self.minibatch - 1)
            for src_batch, trg_batch in gen3:
                src_batch = fill_batch(src_batch)
                trg_batch = fill_batch(trg_batch)
                K = len(src_batch)
                # If you use the IPython notebook you have to use the forward function
                # hyp_batch, loss = self.forward(src_batch, trg_batch, src_vocab, trg_vocab, encdec, True, 0)
                hyp_batch, loss = self.forward_implement(
                    src_batch, trg_batch, src_vocab, trg_vocab, encdec, True,
                    0)
                loss.backward()
                opt.update()

                self.print_out(random_number, epoch, trained, src_batch,
                               trg_batch, hyp_batch)

                trained += K

        trace('saving model ...')
        prefix = self.model
        src_vocab.save(prefix + '.srcvocab')
        trg_vocab.save(prefix + '.trgvocab')
        encdec.save_spec(prefix + '.spec')
        serializers.save_hdf5(prefix + '.weights', encdec)

        trace('finished.')
Example No. 11
    def train(self):
        trace('making vocabularies ...')
        src_vocab = Vocabulary.new(gens.word_list(self.source), self.vocab)
        trg_vocab = Vocabulary.new(gens.word_list(self.target), self.vocab)

        trace('making model ...')
        encdec = EncoderDecoder(self.vocab, self.embed, self.hidden)
        if self.word2vecFlag:
            self.copy_model(self.word2vec, encdec.enc)
            self.copy_model(self.word2vec, encdec.dec, dec_flag=True)
        else:
            encdec = self.encdec

        for epoch in range(self.epoch):
            trace('epoch %d/%d: ' % (epoch + 1, self.epoch))
            trained = 0
            gen1 = gens.word_list(self.source)
            gen2 = gens.word_list(self.target)
            gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * self.minibatch), self.minibatch)
            opt = optimizers.AdaGrad(lr = 0.01)
            opt.setup(encdec)
            opt.add_hook(optimizer.GradientClipping(5))

            random_number = random.randint(0, self.minibatch - 1)
            for src_batch, trg_batch in gen3:
                src_batch = fill_batch(src_batch)
                trg_batch = fill_batch(trg_batch)
                K = len(src_batch)
                hyp_batch, loss = self.forward(src_batch, trg_batch, src_vocab, trg_vocab, encdec, True, 0)
                loss.backward()
                opt.update()

                if trained == 0:
                    self.print_out(random_number, epoch, trained, src_batch, trg_batch, hyp_batch)

                trained += K

        trace('saving model ...')
        prefix = self.model
        src_vocab.save(prefix + '.srcvocab')
        trg_vocab.save(prefix + '.trgvocab')
        encdec.save_spec(prefix + '.spec')
        serializers.save_hdf5(prefix + '.weights', encdec)

        trace('finished.')
Example No. 12
def train(args):
  trace('making vocabularies ...')
  src_vocab = Vocabulary.new(gens.word_list(args.source), args.vocab)
  trg_vocab = Vocabulary.new(gens.word_list(args.target), args.vocab)

  trace('making model ...')
  attmt = AttentionMT(args.vocab, args.embed, args.hidden)
  if args.use_gpu:
    attmt.to_gpu()

  for epoch in range(args.epoch):
    trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
    trained = 0
    gen1 = gens.word_list(args.source)
    gen2 = gens.word_list(args.target)
    gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * args.minibatch), args.minibatch)
    opt = optimizers.AdaGrad(lr = 0.01)
    opt.setup(attmt)
    opt.add_hook(optimizer.GradientClipping(5))

    for src_batch, trg_batch in gen3:
      src_batch = fill_batch(src_batch)
      trg_batch = fill_batch(trg_batch)
      K = len(src_batch)
      hyp_batch, loss = forward(src_batch, trg_batch, src_vocab, trg_vocab, attmt, True, 0)
      loss.backward()
      opt.update()

      for k in range(K):
        trace('epoch %3d/%3d, sample %8d' % (epoch + 1, args.epoch, trained + k + 1))
        trace('  src = ' + ' '.join([x if x != '</s>' else '*' for x in src_batch[k]]))
        trace('  trg = ' + ' '.join([x if x != '</s>' else '*' for x in trg_batch[k]]))
        trace('  hyp = ' + ' '.join([x if x != '</s>' else '*' for x in hyp_batch[k]]))

      trained += K

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    src_vocab.save(prefix + '.srcvocab')
    trg_vocab.save(prefix + '.trgvocab')
    attmt.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', attmt)

  trace('finished.')
Example No. 13
    def setCateg(self, args):
        categ_name = "./{}/categ_{}.bin".format(args.dataname, args.dataname)
        if os.path.exists(categ_name):
            categ_vocab = Vocabulary.load(categ_name)
        else:
            set_cat = set()
            for word_arr in gens.word_list(args.category):
                for word in word_arr:
                    set_cat.add(word)
            n_categ = len(set_cat) + 3
            print("n_categ:{}".format(n_categ))
            categ_vocab = Vocabulary.new(gens.word_list(args.category), n_categ)
            categ_vocab.save(categ_name)
        self.categ_vocab = categ_vocab
        return categ_vocab
Example No. 14
def train_model(args):
    train_begin = time.time()
    trace('making vocabularies ...')
    vocab = Vocabulary.new(gens.letter_list(args.corpus), args.vocab) 

    trace('begin training ...')
    model = TransSegmentationModel.new(vocab, args.context, args.hidden, args.labels, args.eta)

    for epoch in range(args.epoch):
        epoch_beg = time.time() 
        trace('START epoch %d/%d: ' % (epoch + 1, args.epoch))
        trained = 0
        total_loss = 0

        model.init_optimizer()

        with open(args.corpus) as fp:
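            # Each non-empty corpus line provides the gold segmentation (its
            # spaces) and the raw letter sequence that the model is trained on.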
            for text in fp:
                word_list = text.split()
                if not word_list:
                    continue

                text = ' '.join(word_list)
                letters = ''.join(word_list)
                labels, accum_loss_f = model.train(text)
                total_loss += accum_loss_f
                trained += 1
                hyp = make_hyp(letters, labels)
                
                """for 1sentence output
                trace("accum_loss : %lf"% (accum_loss_f))
                trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
                trace('trained %d: '% trained)
                trace(text)
                trace(hyp)
                """
                """
                if trained % 100 == 0:
                    trace('  %8d' % trained)
                """
        trace('FINISHED epoch %d/%d: ' % (epoch + 1, args.epoch))
        trace('total_loss : %lf'%total_loss)
        trace('saving model ...')
        model.save(args.model + '.%03d' % (epoch + 1))
        epoch_time = time.time() - epoch_beg
        trace('elapsed_time/1epoch : %lf'%epoch_time)

    trace('finished.')
    elapsed_time = time.time() - train_begin
    trace('train_time : %lf'%elapsed_time)
    trace('')
Example No. 15
def train_model(args):
    train_begin = time.time()
    trace('making vocabularies ...')
    vocab = Vocabulary.new(gens.letter_list(args.corpus), args.vocab)

    trace('begin training ...')
    model = TransSegmentationModel.new(vocab, args.context, args.hidden,
                                       args.labels, args.eta)

    for epoch in range(args.epoch):
        epoch_beg = time.time()
        trace('START epoch %d/%d: ' % (epoch + 1, args.epoch))
        trained = 0
        total_loss = 0

        model.init_optimizer()

        with open(args.corpus) as fp:
            for text in fp:
                word_list = text.split()
                if not word_list:
                    continue

                text = ' '.join(word_list)
                letters = ''.join(word_list)
                labels, accum_loss_f = model.train(text)
                total_loss += accum_loss_f
                trained += 1
                hyp = make_hyp(letters, labels)
                """for 1sentence output
                trace("accum_loss : %lf"% (accum_loss_f))
                trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
                trace('trained %d: '% trained)
                trace(text)
                trace(hyp)
                """
                """
                if trained % 100 == 0:
                    trace('  %8d' % trained)
                """
        trace('FINISHED epoch %d/%d: ' % (epoch + 1, args.epoch))
        trace('total_loss : %lf' % total_loss)
        trace('saving model ...')
        model.save(args.model + '.%03d' % (epoch + 1))
        epoch_time = time.time() - epoch_beg
        trace('elapsed_time/1epoch : %lf' % epoch_time)

    trace('finished.')
    elapsed_time = time.time() - train_begin
    trace('train_time : %lf' % elapsed_time)
    trace('')
Example No. 16
    def setVocab(self, args):
        vocab_name = "./{}/vocab_{}.bin".format(args.dataname, args.dataname)
        if os.path.exists(vocab_name):
            src_vocab = Vocabulary.load(vocab_name)
        else:
            set_vocab = set()
            for word_arr in gens.word_list(args.source):
                for word in word_arr:
                    set_vocab.add(word)
            n_vocab = len(set_vocab) + 3
            print("n_vocab:{}".format(n_vocab))
            print("arg_vocab:{}".format(args.n_vocab))
            src_vocab = Vocabulary.new(gens.word_list(args.source), args.n_vocab)
            src_vocab.save(vocab_name)
        self.vocab = src_vocab
        return src_vocab
Example No. 17
    def train_model(self):
        trace('making vocabularies ...')
        src_vocab = Vocabulary.new(gens.word_list(self.source), self.vocab)
        trg_vocab = Vocabulary.new(gens.word_list(self.target), self.vocab)

        trace('making model ...')
        model = self.new(src_vocab, trg_vocab, self.embed, self.hidden,
                         self.parameter_dict)

        random_number = random.randint(0, self.minibatch - 1)
        for i_epoch in range(self.epoch):
            trace('epoch %d/%d: ' % (i_epoch + 1, self.epoch))
            trained = 0
            gen1 = gens.word_list(self.source)
            gen2 = gens.word_list(self.target)
            gen3 = gens.batch(
                gens.sorted_parallel(gen1, gen2, 100 * self.minibatch),
                self.minibatch)
            model.init_optimizer()

            for src_batch, trg_batch in gen3:
                src_batch = fill_batch(src_batch)
                trg_batch = fill_batch(trg_batch)
                K = len(src_batch)
                hyp_batch = model.train(src_batch, trg_batch)

                if trained == 0:
                    self.print_out(random_number, i_epoch, trained, src_batch,
                                   trg_batch, hyp_batch)

                trained += K

            trace('saving model ...')
            model.save("ChainerMachineTranslation" + '.%03d' %
                       (i_epoch + 1))

        trace('finished.')
Example No. 18
def train_model(args):
    trace("making vocabularies ...")
    vocab = Vocabulary.new(gens.letter_list(args.corpus), args.vocab)

    trace("start training ...")
    model = SegmentationModel.new(vocab, args.context, args.hidden)

    for epoch in range(args.epoch):
        trace("epoch %d/%d: " % (epoch + 1, args.epoch))
        trained = 0

        model.init_optimizer()

        with open(args.corpus) as fp:
            for text in fp:
                word_list = text.split()
                if not word_list:
                    continue

                text = " ".join(word_list)
                letters = "".join(word_list)
                scores = model.train(text)
                trained += 1
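                # make_hyp turns the scores returned by model.train into a
                # hypothesised segmentation of the letter sequence (used here
                # only for logging).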
                hyp = make_hyp(letters, scores)

                trace(trained)
                trace(text)
                trace(hyp)
                trace(" ".join("%+.1f" % x for x in scores))

                if trained % 100 == 0:
                    trace("  %8d" % trained)

        trace("saving model ...")
        model.save(args.model + ".%03d" % (epoch + 1))

    trace("finished.")
Example No. 19
def train_model(args):
    trace('making vocabularies ...')
    vocab = Vocabulary.new(gens.letter_list(args.corpus), args.vocab)

    trace('start training ...')
    model = RNNSegmentationModel.new(vocab, args.embed, args.hidden)

    for epoch in range(args.epoch):
        trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
        trained = 0

        model.init_optimizer()

        with open(args.corpus) as fp:
            for text in fp:
                word_list = text.split()
                if not word_list:
                    continue

                text = ' '.join(word_list)
                letters = ''.join(word_list)
                scores = model.train(text)
                trained += 1
                hyp = make_hyp(letters, scores)
                
                trace(trained)
                trace(text)
                trace(hyp)
                trace(' '.join('%+.1f' % x for x in scores))
                
                if trained % 100 == 0:
                    trace('  %8d' % trained)

        trace('saving model ...')
        model.save(args.model + '.%03d' % (epoch + 1))

    trace('finished.')
Example No. 20
def train(args):
    trace('loading corpus ...')
    with open(args.source) as fp:
        trees = [make_tree(l) for l in fp]

    trace('extracting leaf nodes ...')
    word_lists = [extract_words(t) for t in trees]
    lower_lists = [[w.lower() for w in words] for words in word_lists]

    trace('extracting gold operations ...')
    op_lists = [make_operations(t) for t in trees]

    trace('making vocabulary ...')
    word_vocab = Vocabulary.new(lower_lists, args.vocab)
    phrase_set = set()
    semiterminal_set = set()
    for tree in trees:
        phrase_set |= set(extract_phrase_labels(tree))
        semiterminal_set |= set(extract_semiterminals(tree))
    phrase_vocab = Vocabulary.new([list(phrase_set)],
                                  len(phrase_set),
                                  add_special_tokens=False)
    semiterminal_vocab = Vocabulary.new([list(semiterminal_set)],
                                        len(semiterminal_set),
                                        add_special_tokens=False)

    trace('converting data ...')
    word_lists = [convert_word_list(x, word_vocab) for x in word_lists]
    op_lists = [
        convert_op_list(x, phrase_vocab, semiterminal_vocab) for x in op_lists
    ]

    trace('start training ...')
    parser = Parser(
        args.vocab,
        args.embed,
        args.char_embed,
        args.queue,
        args.stack,
        args.srstate,
        len(phrase_set),
        len(semiterminal_set),
    )
    if args.use_gpu:
        parser.to_gpu()
    opt = optimizers.SGD(lr=0.1)
    opt.setup(parser)
    opt.add_hook(optimizer.GradientClipping(10))
    opt.add_hook(optimizer.WeightDecay(0.0001))

    batch_set = list(zip(word_lists, op_lists))

    for epoch in range(args.epoch):
        n = 0
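        # Reshuffle the (word_list, op_list) training pairs at the start of
        # every epoch.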
        random.shuffle(batch_set)

        for samples in batch(batch_set, args.minibatch):
            parser.zerograds()
            loss = XP.fzeros(())

            for word_list, op_list in zip(*samples):
                trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
                loss += parser.forward_train(word_list, op_list)
                n += 1

            loss.backward()
            opt.update()

        trace('saving model ...')
        prefix = args.model + '.%03d' % (epoch + 1)
        word_vocab.save(prefix + '.words')
        phrase_vocab.save(prefix + '.phrases')
        semiterminal_vocab.save(prefix + '.semiterminals')
        parser.save_spec(prefix + '.spec')
        serializers.save_hdf5(prefix + '.weights', parser)

        opt.lr *= 0.92

    trace('finished.')
Example No. 21
def train(args):
  trace('loading corpus ...')
  with open(args.source) as fp:
    trees = [make_tree(l) for l in fp]

  trace('extracting leaf nodes ...')
  word_lists = [extract_words(t) for t in trees]
  lower_lists = [[w.lower() for w in words] for words in word_lists]

  trace('extracting gold operations ...')
  op_lists = [make_operations(t) for t in trees]

  trace('making vocabulary ...')
  word_vocab = Vocabulary.new(lower_lists, args.vocab)
  phrase_set = set()
  semiterminal_set = set()
  for tree in trees:
    phrase_set |= set(extract_phrase_labels(tree))
    semiterminal_set |= set(extract_semiterminals(tree))
  phrase_vocab = Vocabulary.new([list(phrase_set)], len(phrase_set), add_special_tokens=False)
  semiterminal_vocab = Vocabulary.new([list(semiterminal_set)], len(semiterminal_set), add_special_tokens=False)

  trace('converting data ...')
  word_lists = [to_vram_words(convert_word_list(x, word_vocab)) for x in word_lists]
  op_lists = [to_vram_ops(convert_op_list(x, phrase_vocab, semiterminal_vocab)) for x in op_lists]

  trace('start training ...')
  parser = Parser(
    args.vocab, args.embed, args.char_embed, args.queue,
    args.stack, args.srstate, len(phrase_set), len(semiterminal_set),
  )
  if args.use_gpu:
    parser.to_gpu()
  opt = optimizers.SGD(lr = 0.1)
  opt.setup(parser)
  opt.add_hook(optimizer.GradientClipping(10))
  opt.add_hook(optimizer.WeightDecay(0.0001))

  batch_set = list(zip(word_lists, op_lists))

  for epoch in range(args.epoch):
    n = 0
    random.shuffle(batch_set)
    
    for samples in batch(batch_set, args.minibatch):
      parser.zerograds()
      loss = XP.fzeros(())
      embed_cache = {}

      for word_list, op_list in zip(*samples):
        trace('epoch %3d, sample %6d:' % (epoch + 1, n + 1))
        loss += parser.forward(word_list, op_list, 0, embed_cache)
        n += 1
      
      loss.backward()
      opt.update()

    trace('saving model ...')
    prefix = args.model + '.%03d' % (epoch + 1)
    word_vocab.save(prefix + '.words')
    phrase_vocab.save(prefix + '.phrases')
    semiterminal_vocab.save(prefix + '.semiterminals')
    parser.save_spec(prefix + '.spec')
    serializers.save_hdf5(prefix + '.weights', parser)

    opt.lr *= 0.92

  trace('finished.')
Example No. 22
def train(args):
    trace('making vocabularies ...')
    src_vocab = Vocabulary.new(gens.input_word_list(), args.vocab)
    trg_vocab = Vocabulary.new(gens.output_word_list(), args.vocab)
    trace('making model ...')
    encdec = EncoderDecoder(args.vocab, args.embed, args.hidden)

    if args.load_model != "":
        # Resume from a previously saved model: reload its vocabularies,
        # network spec, and HDF5 weights.
        print("loading model %s ..." % args.load_model)
        src_vocab = Vocabulary.load(args.load_model + '.srcvocab')
        trg_vocab = Vocabulary.load(args.load_model + '.trgvocab')
        encdec = EncoderDecoder.load_spec(args.load_model + '.spec')
        serializers.load_hdf5(args.load_model + '.weights', encdec)

    if args.use_gpu:
        encdec.to_gpu()

    for epoch in range(args.epoch):
        trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
        trained = 0
        gen1 = gens.input_word_list()
        gen2 = gens.output_word_list()
        gen3 = gens.batch(
            gens.sorted_parallel(gen1, gen2, 100 * args.minibatch),
            args.minibatch)
        opt = optimizers.AdaGrad(lr=0.01)
        opt.setup(encdec)
        opt.add_hook(optimizer.GradientClipping(5))

        for src_batch, trg_batch in gen3:
            src_batch = fill_batch(src_batch)
            trg_batch = fill_batch(trg_batch)
            K = len(src_batch)
            hyp_batch, loss = forward(src_batch, trg_batch, src_vocab,
                                      trg_vocab, encdec, True, 0)
            loss.backward()
            opt.update()

            for k in range(K):
                trace('epoch %3d/%3d, sample %8d' %
                      (epoch + 1, args.epoch, trained + k + 1))
                trace(
                    '  src = ' +
                    ' '.join([x if x != '</s>' else '*'
                              for x in src_batch[k]]))
                trace(
                    '  trg = ' +
                    ' '.join([x if x != '</s>' else '*'
                              for x in trg_batch[k]]))
                trace(
                    '  hyp = ' +
                    ' '.join([x if x != '</s>' else '*'
                              for x in hyp_batch[k]]))

            trained += K

        if epoch % args.model_save_timing == 0:

            trace('saving model ...')
            prefix = args.model + '.%03d' % (epoch + 1)
            src_vocab.save(prefix + '.srcvocab')
            trg_vocab.save(prefix + '.trgvocab')
            encdec.save_spec(prefix + '.spec')
            serializers.save_hdf5(prefix + '.weights', encdec)

    trace('finished.')