def training_start(self, model, data):
        """Run the full training loop for ``model`` on ``data``.

        Per epoch: trains, computes dev loss, decodes the dev set,
        lexicalizes and saves the predictions, optionally runs external
        evaluation and saves the model weights.  After all epochs the
        learning curve (and, optionally, scores/plots) is written to
        ``self.model_dir``.

        :param model: the seq2seq model to train (moved to GPU here if
            ``self.use_cuda``)
        :param data: data container providing ``train``/``dev`` splits,
            ``vocab``, ``lexicalizations`` and ``fnames``
        """
        training_start_time = time.time()
        logger.info("Start training")

        # Print a model summary to make sure everything is ok with it
        model_summary = torch_summarize(model)
        logger.debug(model_summary)

        evaluator = BaseEvaluator(self.config)
        logger.debug("Preparing training data")

        train_batches = data.prepare_training_data(data.train, self.batch_size)
        dev_batches = data.prepare_training_data(data.dev, self.batch_size)

        id2word = data.vocab.id2tok
        dev_lexicalizations = data.lexicalizations['dev']
        dev_multi_ref_fn = '%s.multi-ref' % data.fnames['dev']

        # Move the model to GPU *before* creating the optimizer, so the
        # optimizer is constructed over the device-resident parameters
        # (see: http://pytorch.org/docs/stable/optim.html)
        if self.use_cuda:
            model = model.cuda()

        self.set_optimizer(model, self.config['optimizer'])
        self.set_train_criterion(len(id2word), PAD_ID)

        for epoch_idx in range(1, self.n_epochs + 1):

            epoch_start = time.time()
            pred_fn = os.path.join(self.model_dir, 'predictions.epoch%d' % epoch_idx)

            train_loss = self.train_epoch(epoch_idx, model, train_batches)
            dev_loss = self.compute_val_loss(model, dev_batches)

            # Decode the dev set (prediction mode) and lexicalize token ids
            predicted_ids, attention_weights = evaluator.evaluate_model(model, data.dev[0], data.uni_mr['dev'])
            predicted_tokens = evaluator.lexicalize_predictions(predicted_ids,
                                                                dev_lexicalizations,
                                                                id2word)

            save_predictions_txt(predicted_tokens, pred_fn)
            self.record_loss(train_loss, dev_loss)

            if self.evaluate_prediction:
                self.run_external_eval(dev_multi_ref_fn, pred_fn)

            if self.save_model:
                save_model(model, os.path.join(self.model_dir, 'weights.epoch%d' % epoch_idx))

            logger.info('Epoch %d/%d: time=%s' % (epoch_idx, self.n_epochs, asMinutes(time.time() - epoch_start)))

        self.plot_lcurve()

        if self.evaluate_prediction:
            score_fname = os.path.join(self.model_dir, 'scores.csv')
            scores = self.get_scores_to_save()
            save_scores(scores, self.score_file_header, score_fname)
            self.plot_training_results()

        logger.info('End training time=%s' % (asMinutes(time.time() - training_start_time)))
    def training_start(self, model, data, evaluator, nlgen):
        """Train ``model``, tracking the best epoch by dev-set metrics.

        Per epoch: trains, computes dev loss, predicts on the raw dev
        data with ``nlgen``, scores the predictions with ``evaluator``
        (BLEU + edit distance, averaged), and remembers the weights of
        the best-scoring epoch.  Afterwards the best weights, the score
        CSV and the learning-curve plot are written to ``self.model_dir``.

        :param model: the model to train
        :param data: data container with vectorized splits, vocab and fnames
        :param evaluator: records losses and computes external metrics
        :param nlgen: generator used for prediction-mode decoding
        """
        logger.debug("Preparing training data")

        dev_data_fname = data.fnames.dev_fn
        # NOTE: assert is stripped under `python -O`; kept as in the
        # original code for consistency.
        assert os.path.exists(dev_data_fname), logger.error(
            'File %s does not exist', dev_data_fname)

        # dev data for evaluation
        dev_data_ref_fname = data.fnames.dev_ref_fn
        dev_data_raw = read_conll_data_file(data.fnames.dev_fn)
        logger.info('Saving Syn reference --> %s', data.fnames.dev_ref_fn)
        save_txt(itemlist=conll2snt(dev_data_raw), fname=dev_data_ref_fname)

        train_batches = data.batchify_vectorized_data(
            data.train, self.batch_size)  # [(np_x, np_y_1hot), ...]
        dev_batches = data.batchify_vectorized_data(data.dev, self.batch_size)

        # need to move the model before setting the optimizer
        # see: http://pytorch.org/docs/stable/optim.html
        if self.use_cuda:
            model.cuda()

        self.set_optimizer(model, self.config['optimizer'])
        self.set_train_criterion(len(data.vocab.id2tok), PAD_ID)

        training_start_time = time.time()
        logger.info("Start training")

        best_score = 0
        best_model_fn = None
        best_weights = None

        for epoch_idx in range(1, self.n_epochs + 1):
            epoch_start = time.time()
            logger.info('Epoch %d/%d', epoch_idx, self.n_epochs)

            # compute loss on train and dev data
            train_loss = self.train_epoch(epoch_idx, model, train_batches)
            dev_loss = self.compute_val_loss(model, dev_batches)
            evaluator.record_loss(train_loss, dev_loss)

            # run on dev data in prediction mode (no oracle decoding)
            predictions_fname = self.get_predictions_fname(epoch_idx)
            depgraphs = nlgen.predict_from_raw_data(model, dev_data_raw,
                                                    data.vocab)
            nlgen.save_predictions(depgraphs, predictions_fname)

            # evaluate using metrics
            scores = evaluator.external_metric_eval(ref_fn=dev_data_ref_fname,
                                                    pred_fn=predictions_fname)
            avg_score = (scores.bleu + scores.edist) / 2
            model_fn = os.path.join(
                self.model_dir, 'weights.epoch%d_%0.3f_%0.3f' %
                (epoch_idx, scores.bleu, scores.edist))

            if avg_score > best_score:
                best_score = avg_score
                best_model_fn = model_fn
                # state_dict() returns *references* to the live parameter
                # tensors; clone them so later epochs do not overwrite the
                # snapshot in place.
                best_weights = {k: v.detach().clone()
                                for k, v in model.state_dict().items()}

            logger.debug('Time = %s', asMinutes(time.time() - epoch_start))

        logger.info('Total training time=%s' %
                    (asMinutes(time.time() - training_start_time)))

        self.best_model_fn = best_model_fn
        if best_weights is not None:
            logger.debug('Saving model to --> %s', best_model_fn)
            torch.save(best_weights, best_model_fn)
        else:
            # No epoch improved on the initial score; avoid torch.save(None, None)
            logger.warning('No best model found; skipping weight saving')

        score_fname = os.path.join(self.model_dir, 'scores.csv')
        scores = evaluator.get_scores_to_save()
        evaluator.save_scores(scores, self.score_file_header, score_fname)

        evaluator.plot_lcurve(fname=os.path.join(self.model_dir, "lcurve.pdf"),
                              title=self.model_type)
    def training_start(self, model, data, evaluator, morph_gen_algo, *args,
                       **kwargs):
        """Train a morphological generation ``model``, keeping the best epoch.

        Per epoch: trains, computes dev loss, predicts surface forms on
        the dev batches, saves predictions and error dumps, and scores
        the predictions by string-match accuracy.  The weights of the
        best-accuracy epoch, the score CSV and the learning-curve plot
        are written to ``self.model_dir``.

        :param model: the model to train; must expose ``output_size``
        :param data: data container with train/dev splits, vocab and fnames
        :param evaluator: records losses and computes string-match accuracy
        :param morph_gen_algo: used to save the predicted forms
        """
        logger.debug("Preparing training data")

        dev_data_fname = data.fnames.dev_fn
        # NOTE: assert is stripped under `python -O`; kept as in the
        # original code for consistency.
        assert os.path.exists(dev_data_fname), logger.error(
            'File %s does not exist', dev_data_fname)

        train_batches = data.batchify_training_data(data.train,
                                                    self.batch_size,
                                                    is_dev_data=False)
        dev_batches = data.batchify_training_data(data.dev,
                                                  self.batch_size,
                                                  is_dev_data=True)

        # need to move the model before setting the optimizer
        # see: http://pytorch.org/docs/stable/optim.html
        if self.use_cuda:
            model.cuda()

        self.set_optimizer(model, self.config['optimizer'])
        self.set_train_criterion(model.output_size, data.vocab.PAD_ID)

        training_start_time = time.time()
        logger.info("Start training")

        best_score = 0
        best_model_fn = None
        best_weights = None

        for epoch_idx in range(1, self.n_epochs + 1):
            epoch_start = time.time()
            logger.info('Epoch %d/%d', epoch_idx, self.n_epochs)

            # compute loss on train and dev data
            train_loss = self.train_epoch(epoch_idx, model, train_batches)
            dev_loss = self.compute_val_loss(model, dev_batches)
            evaluator.record_loss(train_loss, dev_loss)

            # run on dev data in prediction mode (no oracle decoding)
            predictions_fname = self.get_predictions_fname(epoch_idx)
            predicted_forms = self.predict_epoch(model, dev_batches,
                                                 data.vocab)
            morph_gen_algo.save_predictions(predicted_forms, predictions_fname)

            # evaluate using metrics
            errors, accuracy, accuracy_nocase = evaluator.compute_string_match(
                data.dev_references, predicted_forms)
            logger.info('String match accuracy: %0.4f', accuracy)
            logger.info('String match accuracy (ignore case): %0.4f',
                        accuracy_nocase)

            model_fn = os.path.join(
                self.model_dir,
                'weights.epoch%d_%0.3f' % (epoch_idx, accuracy))
            if accuracy > best_score:
                best_score = accuracy
                best_model_fn = model_fn
                # state_dict() returns *references* to the live parameter
                # tensors; clone them so later epochs do not overwrite the
                # snapshot in place.
                best_weights = {k: v.detach().clone()
                                for k, v in model.state_dict().items()}

            errors_fn = os.path.join(
                self.model_dir, 'morph-model.epoch%d.error-dg.pkl' % epoch_idx)
            evaluator.save_errors(errors=errors, fname=errors_fn)

            logger.debug('Time = %s', asMinutes(time.time() - epoch_start))

        logger.info('Total training time=%s' %
                    (asMinutes(time.time() - training_start_time)))

        self.best_model_fn = best_model_fn
        if best_weights is not None:
            logger.debug('Saving model to --> %s', best_model_fn)
            torch.save(best_weights, best_model_fn)
        else:
            # No epoch improved on the initial score; avoid torch.save(None, None)
            logger.warning('No best model found; skipping weight saving')

        evaluator.plot_lcurve(fname=os.path.join(self.model_dir, "lcurve.pdf"),
                              title=self.model_type)

        # Saving scores
        score_fname = os.path.join(self.model_dir, 'scores.csv')
        scores = evaluator.get_scores_to_save()
        evaluator.save_scores(scores, self.score_file_header, score_fname)