Code Example #1
File: lexicalize.py  Project: tonydeep/tgen
    def train(self, train_sents, valid_sents=None):
        """Train the RNNLM on the given data (list of lists of tokens).
        @param train_sents: training data (list of lists of tokens, lexicalized)
        @param valid_sents: validation data (list of lists of tokens, lexicalized, may be None \
            if no validation should be performed)
        """
        self._init_training(train_sents, valid_sents)

        top_perp = float('nan')

        for iter_no in xrange(1, self.passes + 1):
            # preparing parameters
            iter_alpha = self.alpha * np.exp(-self.alpha_decay * iter_no)
            self._train_order = range(len(self._train_data))
            if self.randomize:
                rnd.shuffle(self._train_order)
            # training
            self._training_pass(iter_no, iter_alpha)

            # validation
            if (self.validation_freq and iter_no > self.min_passes
                    and iter_no % self.validation_freq == 0):
                perp = self._valid_perplexity()
                log_info("Perplexity: %.3f" % perp)
                # if we have the best model so far, save it as a checkpoint (overwrite previous)
                if math.isnan(top_perp) or perp < top_perp:
                    top_perp = perp
                    self._save_checkpoint()

        self._restore_checkpoint()  # restore the best parameters so far
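
A quick numeric check of the learning-rate schedule above (the values of alpha and alpha_decay are illustrative assumptions, not tgen defaults): the exponential decay shrinks the rate smoothly with the pass number.

    import numpy as np

    alpha, alpha_decay = 0.1, 0.1  # illustrative values, not tgen defaults
    for iter_no in (1, 5, 10):
        iter_alpha = alpha * np.exp(-alpha_decay * iter_no)
        print(iter_no, round(iter_alpha, 4))  # 1 -> 0.0905, 5 -> 0.0607, 10 -> 0.0368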
Code Example #2
 def train(self, das_file, ttree_file, data_portion=1.0):
     """Run training on the given training data."""
     self._init_training(das_file, ttree_file, data_portion)
     for iter_no in xrange(1, self.passes + 1):
         self.train_order = range(len(self.train_trees))
         if self.randomize:
             rnd.shuffle(self.train_order)
         self._training_pass(iter_no)
Code Example #3
File: classif.py  Project: pdsujnow/tgen
 def train(self, das_file, ttree_file, data_portion=1.0):
     """Run training on the given training data."""
     self._init_training(das_file, ttree_file, data_portion)
     for iter_no in xrange(1, self.passes + 1):
         self.train_order = range(len(self.train_trees))
         if self.randomize:
             rnd.shuffle(self.train_order)
         self._training_pass(iter_no)
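
Code examples #2 and #3 show the bare training skeleton shared by most tgen trainers: every pass rebuilds train_order and, if randomize is set, shuffles it so each pass visits the examples in a fresh order. A minimal self-contained sketch of this pattern (the names below are generic, not tgen's):

    import random as rnd

    def train(examples, passes=5, randomize=True):
        """Run `passes` passes over `examples` in a (re)shuffled index order."""
        for pass_no in range(1, passes + 1):
            train_order = list(range(len(examples)))  # materialize the indices
            if randomize:
                rnd.shuffle(train_order)  # in-place shuffle of the index list
            for idx in train_order:
                _ = examples[idx]  # a real trainer would update its model here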
Code Example #4
File: tfclassif.py  Project: yylong711/tgen
    def train(self, das, trees, data_portion=1.0, valid_das=None, valid_trees=None):
        """Run training on the given training data.

        @param das: name of source file with training DAs, or list of DAs
        @param trees: name of source file with corresponding trees/sentences, or list of trees
        @param data_portion: portion of the training data to be used (defaults to 1.0)
        @param valid_das: validation data DAs
        @param valid_trees: list of lists of corresponding paraphrases (same length as valid_das)
        """

        log_info('Training reranking classifier...')

        # initialize training
        self._init_training(das, trees, data_portion)
        if self.mode in ['tokens', 'tagged_lemmas'] and valid_trees is not None:
            valid_trees = [self._tokens_to_flat_trees(paraphrases,
                                                      use_tags=self.mode == 'tagged_lemmas')
                           for paraphrases in valid_trees]

        # start training
        top_comb_cost = float('nan')

        for iter_no in xrange(1, self.passes + 1):
            self.train_order = range(len(self.train_trees))
            if self.randomize:
                rnd.shuffle(self.train_order)
            pass_cost, pass_diff = self._training_pass(iter_no)

            if self.validation_freq and iter_no > self.min_passes and iter_no % self.validation_freq == 0:

                valid_diff = 0
                if valid_das:
                    valid_diff = np.sum([np.sum(self.dist_to_da(d, t))
                                         for d, t in zip(valid_das, valid_trees)])

                # cost combining validation and training data performance
                # (+ "real" cost with negligible weight)
                comb_cost = 1000 * valid_diff + 100 * pass_diff + pass_cost
                log_info('Combined validation cost: %8.3f' % comb_cost)

                # if we have the best model so far, save it as a checkpoint (overwrite previous)
                if math.isnan(top_comb_cost) or comb_cost < top_comb_cost:
                    top_comb_cost = comb_cost
                    self._save_checkpoint()

        # restore last checkpoint (best performance on devel data)
        self.restore_checkpoint()
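
The weighting in comb_cost makes the three terms dominate in order. For example, with valid_diff = 2, pass_diff = 5 and pass_cost = 300 (illustrative numbers), comb_cost = 1000·2 + 100·5 + 300 = 2800: a single point of validation error outweighs the entire raw training cost, which acts only as a tie-breaker.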
Code Example #5
    def train(self, das_file, ttree_file, data_portion=1.0,
              context_file=None, validation_files=None):
        """
        The main training process – initialize and perform a specified number of
        training passes, validating every couple iterations.

        @param das_file: training data file with DAs
        @param ttree_file: training data file with output t-trees/sentences
        @param data_portion: portion of training data to be actually used, defaults to 1.0
        @param context_file: path to training file with contexts (trees/sentences)
        @param validation_files: paths to validation data (DAs, trees/sentences, possibly contexts)
        """
        # load and prepare data and initialize the neural network
        self._init_training(das_file, ttree_file, data_portion, context_file,
                            validation_files)

        # do the training passes
        for iter_no in xrange(1, self.passes + 1):

            self.train_order = range(len(self.train_enc))
            if self.randomize:
                rnd.shuffle(self.train_order)

            self._training_pass(iter_no)

            # validate every couple iterations
            if iter_no % self.validation_freq == 0 and self.validation_size > 0:

                cur_train_out = self.process_das(self.train_das[:self.batch_size])
                log_info("Current train output:\n" +
                         "\n".join([" ".join(n.t_lemma for n in tree.nodes[1:])
                                    if self.use_tokens
                                    else unicode(tree)
                                    for tree in cur_train_out]))

                cur_valid_out = self.process_das(self.valid_das[:self.batch_size])
                cur_cost = self._compute_valid_cost(cur_valid_out, self.valid_trees)
                log_info("Current validation output:\n" +
                         "\n".join([" ".join(n.t_lemma for n in tree.nodes[1:])
                                    if self.use_tokens
                                    else unicode(tree)
                                    for tree in cur_valid_out]))
                log_info('IT %d validation cost: %5.4f' % (iter_no, cur_cost))

                # if we have the best model so far, save it as a checkpoint (overwrite previous)
                if math.isnan(self.top_k_costs[0]) or cur_cost < self.top_k_costs[0]:
                    self._save_checkpoint()

                if self._should_stop(iter_no, cur_cost):
                    log_info("Stoping criterion met.")
                    break
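
The excerpt tracks the best validation costs in top_k_costs and leaves the stopping decision to _should_stop. A generic sketch of that pattern, i.e. top-k cost tracking with patience-based stopping (an illustration only, not tgen's actual _should_stop logic):

    import math

    class EarlyStopper:
        """Track the k best validation costs; stop after `patience`
        validations without a new best (a sketch, not tgen's code)."""

        def __init__(self, k=5, min_passes=1, patience=10):
            self.top_k_costs = [float('nan')] * k  # NaN marks an empty slot
            self.best_iter = 0
            self.min_passes = min_passes
            self.patience = patience

        def record(self, iter_no, cost):
            # replace the worst stored cost if the new one beats it
            if math.isnan(self.top_k_costs[-1]) or cost < self.top_k_costs[-1]:
                self.top_k_costs[-1] = cost
                self.top_k_costs.sort(key=lambda c: (math.isnan(c), c))
                if cost == self.top_k_costs[0]:
                    self.best_iter = iter_no  # new overall best

        def should_stop(self, iter_no):
            return (iter_no > self.min_passes
                    and iter_no - self.best_iter >= self.patience)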
Code Example #6
File: lexicalize.py  Project: qjay612/tgen
    def train(self, train_sents):
        """Train the RNNLM on the given data (list of lists of tokens).
        @param train_sents: training data (list of lists of tokens, lexicalized)
        """
        self._init_training(train_sents)

        for iter_no in xrange(1, self.passes + 1):
            iter_alpha = self.alpha * np.exp(-self.alpha_decay * iter_no)
            self._train_order = range(len(self._train_data))
            if self.randomize:
                rnd.shuffle(self._train_order)
            self._training_pass(iter_no, iter_alpha)
Code Example #7
 def exposed_training_pass(self, w, pass_no, rnd_seed, data_offset, data_len):
     """(Worker) Run one pass over a part of the training data.
     @param w: initial perceptron weights (pickled)
     @param pass_no: pass number (for logging purposes)
     @param rnd_seed: random generator seed for shuffling training examples
     @param data_offset: training data portion start
     @param data_len: training data portion size
     @return: updated perceptron weights after passing the selected data portion (pickled)
     """
     log_info('Training pass %d with data portion %d + %d' %
              (pass_no, data_offset, data_len))
     # use the local ranker instance
     ranker = self.ranker_inst
     # import current feature weights
     tstart = time.time()
     ranker.set_weights(pickle.loads(w))
     log_info('Weights loading: %f secs.' % (time.time() - tstart))
     # save rest of the training data to temporary variables, set just the
     # required portion for computation
     all_train_das = ranker.train_das
     ranker.train_das = ranker.train_das[data_offset:data_offset + data_len]
     all_train_trees = ranker.train_trees
     ranker.train_trees = ranker.train_trees[data_offset:data_offset + data_len]
     all_train_feats = ranker.train_feats
     ranker.train_feats = ranker.train_feats[data_offset:data_offset + data_len]
     all_train_sents = ranker.train_sents
     ranker.train_sents = ranker.train_sents[data_offset:data_offset + data_len]
     all_train_order = ranker.train_order
     ranker.train_order = range(len(ranker.train_trees))
     if ranker.randomize:
         rnd.seed(rnd_seed)
         rnd.shuffle(ranker.train_order)
     # do the actual computation (update w)
     ranker._training_pass(pass_no)
     # return the rest of the training data to member variables
     ranker.train_das = all_train_das
     ranker.train_trees = all_train_trees
     ranker.train_feats = all_train_feats
     ranker.train_sents = all_train_sents
     ranker.train_order = all_train_order
     # return the result of the computation
     log_info('Training pass %d / %d / %d done.' % (pass_no, data_offset, data_len))
     tstart = time.time()
     dump = pickle.dumps((ranker.get_weights(), ranker.get_diagnostics()), pickle.HIGHEST_PROTOCOL)
     log_info('Weights saving: %f secs.' % (time.time() - tstart))
     return dump
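
On the head node, the counterpart of this worker method splits the training data into portions, ships the current pickled weights to every worker, and merges the weights that come back. A minimal sketch of that protocol (the even split and the plain averaging below are assumptions for illustration; the RPC transport and tgen's actual merging strategy are not shown):

    import pickle

    import numpy as np

    def run_distributed_pass(workers, weights, pass_no, data_size, rnd_seed=42):
        """Run one pass with `data_size` examples split evenly across workers
        and return the element-wise average of the returned weights."""
        portion = data_size // len(workers)
        new_weights = []
        for i, worker in enumerate(workers):
            offset = i * portion
            # the last worker also takes the remainder of the data
            length = portion if i < len(workers) - 1 else data_size - offset
            dump = worker.exposed_training_pass(
                pickle.dumps(weights, pickle.HIGHEST_PROTOCOL),
                pass_no, rnd_seed, offset, length)
            w, _diagnostics = pickle.loads(dump)
            new_weights.append(w)
        return np.mean(new_weights, axis=0)  # assumes NumPy weight vectors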
Code Example #8
File: rank.py  Project: UFAL-DSG/tgen
 def train(self, das_file, ttree_file, data_portion=1.0):
     """Run training on the given training data."""
     self._init_training(das_file, ttree_file, data_portion)
     for iter_no in xrange(1, self.passes + 1):
         self.train_order = range(len(self.train_trees))
         if self.randomize:
             rnd.shuffle(self.train_order)
         log_info("Train order: " + str(self.train_order))
         self._training_pass(iter_no)
         if self.evaluator.tree_accuracy() == 1:  # if tree accuracy is 1, we won't learn anything anymore
             break
     # averaged perceptron – average the weights obtained after each pass
     if self.averaging is True:
         self.set_weights_iter_average()
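
set_weights_iter_average applies the standard averaged-perceptron trick: snapshot the weight vector after every pass and use the mean of the snapshots as the final model, which smooths out late-pass oscillations. A self-contained sketch of the idea (attribute and method names other than set_weights_iter_average are assumptions):

    import numpy as np

    class AveragedPerceptronWeights:
        """Sketch of per-pass weight snapshots plus final averaging."""

        def __init__(self, dim):
            self.w = np.zeros(dim)
            self._pass_snapshots = []

        def end_of_pass(self):
            # remember the weights as they stood at the end of this pass
            self._pass_snapshots.append(self.w.copy())

        def set_weights_iter_average(self):
            # replace the working weights by their per-pass average
            self.w = np.mean(self._pass_snapshots, axis=0)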
Code Example #9
File: seq2seq.py  Project: pdsujnow/tgen
    def train(self, das_file, ttree_file, data_portion=1.0,
              context_file=None, validation_files=None):
        """
        The main training process – initialize and perform a specified number of
        training passes, validating every couple iterations.

        @param das_file: training data file with DAs
        @param ttree_file: training data file with output t-trees/sentences
        @param data_portion: portion of training data to be actually used, defaults to 1.0
        @param context_file: path to training file with contexts (trees/sentences)
        @param validation_files: paths to validation data (DAs, trees/sentences, possibly contexts)
        """
        # load and prepare data and initialize the neural network
        self._init_training(das_file, ttree_file, data_portion, context_file, validation_files)

        # do the training passes
        for iter_no in xrange(1, self.passes + 1):

            self.train_order = range(len(self.train_enc))
            if self.randomize:
                rnd.shuffle(self.train_order)

            self._training_pass(iter_no)

            # validate every couple iterations
            if iter_no % self.validation_freq == 0 and self.validation_size > 0:

                cur_train_out = self.process_das(self.train_das[:self.batch_size])
                log_info("Current train output:\n" +
                         "\n".join([" ".join(n.t_lemma for n in tree.nodes[1:])
                                    if self.use_tokens
                                    else unicode(tree)
                                    for tree in cur_train_out]))

                cur_valid_out = self.process_das(self.valid_das[:self.batch_size])
                cur_cost = self._compute_valid_cost(cur_valid_out, self.valid_trees)
                log_info("Current validation output:\n" +
                         "\n".join([" ".join(n.t_lemma for n in tree.nodes[1:])
                                    if self.use_tokens
                                    else unicode(tree)
                                    for tree in cur_valid_out]))
                log_info('IT %d validation cost: %5.4f' % (iter_no, cur_cost))

                # if we have the best model so far, save it as a checkpoint (overwrite previous)
                if math.isnan(self.top_k_costs[0]) or cur_cost < self.top_k_costs[0]:
                    self._save_checkpoint()

                if self._should_stop(iter_no, cur_cost):
                    log_info("Stoping criterion met.")
                    break
Code Example #10
 def train(self, das_file, ttree_file, data_portion=1.0):
     """Run training on the given training data."""
     self._init_training(das_file, ttree_file, data_portion)
     for iter_no in range(1, self.passes + 1):
         self.train_order = list(range(len(self.train_trees)))
         if self.randomize:
             rnd.shuffle(self.train_order)
         log_info("Train order: " + str(self.train_order))
         self._training_pass(iter_no)
         if self.evaluator.tree_accuracy() == 1:  # if tree accuracy is 1, we won't learn anything anymore
             break
     # averaged perceptron – average the weights obtained after each pass
     if self.averaging is True:
         self.set_weights_iter_average()
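
This variant is the Python 3 port of code example #8: xrange is gone, and range() now returns a lazy object, so the index list must be materialized with list(range(...)) before rnd.shuffle can permute it in place:

    import random as rnd

    order = list(range(5))  # Python 3: shuffle needs a mutable list
    rnd.shuffle(order)      # rnd.shuffle(range(5)) would raise TypeError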
Code Example #11
File: tfclassif.py  Project: UFAL-DSG/tgen
    def train(self, das, trees, data_portion=1.0, valid_das=None, valid_trees=None):
        """Run training on the given training data.

        @param das: name of source file with training DAs, or list of DAs
        @param trees: name of source file with corresponding trees/sentences, or list of trees
        @param data_portion: portion of the training data to be used (defaults to 1.0)
        @param valid_das: validation data DAs
        @param valid_trees: list of lists of corresponding paraphrases (same length as valid_das)
        """

        log_info('Training reranking classifier...')

        # initialize training
        self._init_training(das, trees, data_portion)
        if self.mode in ['tokens', 'tagged_lemmas'] and valid_trees is not None:
            valid_trees = [self._tokens_to_flat_trees(paraphrases,
                                                      use_tags=self.mode == 'tagged_lemmas')
                           for paraphrases in valid_trees]

        # start training
        top_comb_cost = float('nan')

        for iter_no in xrange(1, self.passes + 1):
            self.train_order = range(len(self.train_trees))
            if self.randomize:
                rnd.shuffle(self.train_order)
            pass_cost, pass_diff = self._training_pass(iter_no)

            if self.validation_freq and iter_no > self.min_passes and iter_no % self.validation_freq == 0:

                valid_diff = 0
                if valid_das:
                    valid_diff = np.sum([np.sum(self.dist_to_da(d, t))
                                         for d, t in zip(valid_das, valid_trees)])

                # cost combining validation and training data performance
                # (+ "real" cost with negligible weight)
                comb_cost = 1000 * valid_diff + 100 * pass_diff + pass_cost
                log_info('Combined validation cost: %8.3f' % comb_cost)

                # if we have the best model so far, save it as a checkpoint (overwrite previous)
                if math.isnan(top_comb_cost) or comb_cost < top_comb_cost:
                    top_comb_cost = comb_cost
                    self._save_checkpoint()

        # restore last checkpoint (best performance on devel data)
        self.restore_checkpoint()
Code Example #12
 def exposed_training_pass(self, w, pass_no, rnd_seed, data_offset, data_len):
     """(Worker) Run one pass over a part of the training data.
     @param w: initial perceptron weights (pickled)
     @param pass_no: pass number (for logging purposes)
     @param rnd_seed: random generator seed for shuffling training examples
     @param data_offset: training data portion start
     @param data_len: training data portion size
     @return: updated perceptron weights after passing the selected data portion (pickled)
     """
     log_info('Training pass %d with data portion %d + %d' %
              (pass_no, data_offset, data_len))
     # use the local ranker instance
     ranker = self.ranker_inst
     # import current feature weights
     tstart = time.time()
     ranker.set_weights(pickle.loads(w))
     log_info('Weights loading: %f secs.' % (time.time() - tstart))
     # save rest of the training data to temporary variables, set just the
     # required portion for computation
     all_train_das = ranker.train_das
     ranker.train_das = ranker.train_das[data_offset:data_offset + data_len]
     all_train_trees = ranker.train_trees
     ranker.train_trees = ranker.train_trees[data_offset:data_offset + data_len]
     all_train_feats = ranker.train_feats
     ranker.train_feats = ranker.train_feats[data_offset:data_offset + data_len]
     all_train_sents = ranker.train_sents
     ranker.train_sents = ranker.train_sents[data_offset:data_offset + data_len]
     all_train_order = ranker.train_order
     ranker.train_order = range(len(ranker.train_trees))
     if ranker.randomize:
         rnd.seed(rnd_seed)
         rnd.shuffle(ranker.train_order)
     # do the actual computation (update w)
     ranker._training_pass(pass_no)
     # return the rest of the training data to member variables
     ranker.train_das = all_train_das
     ranker.train_trees = all_train_trees
     ranker.train_feats = all_train_feats
     ranker.train_sents = all_train_sents
     ranker.train_order = all_train_order
     # return the result of the computation
     log_info('Training pass %d / %d / %d done.' % (pass_no, data_offset, data_len))
     tstart = time.time()
     dump = pickle.dumps((ranker.get_weights(), ranker.get_diagnostics()), pickle.HIGHEST_PROTOCOL)
     log_info('Weights saving: %f secs.' % (time.time() - tstart))
     return dump