Example #1
0
    def training_states_and_actions(
        self, trackers: List[DialogueStateTracker], domain: Domain
    ) -> Tuple[List[List[Optional[Dict[Text, float]]]], List[List[Text]]]:
        """Transforms list of trackers to lists of states and actions.

        Training data is padded up to the max_history with -1.
        """

        trackers_as_states = []
        trackers_as_actions = []

        # from multiple states that create equal featurizations
        # we only need to keep one.
        hashed_examples = set()

        logger.debug("Creating states and action examples from "
                     "collected trackers (by {}({}))..."
                     "".format(
                         type(self).__name__,
                         type(self.state_featurizer).__name__))
        pbar = tqdm(trackers,
                    desc="Processed trackers",
                    disable=is_logging_disabled())
        for tracker in pbar:
            states = self._create_states(tracker, domain, True)

            idx = 0
            for event in tracker.applied_events():
                if isinstance(event, ActionExecuted):
                    if not event.unpredictable:
                        # only actions which can be
                        # predicted at a story's start
                        sliced_states = self.slice_state_history(
                            states[:idx + 1], self.max_history)

                        if self.remove_duplicates:
                            hashed = self._hash_example(
                                sliced_states, event.action_name)

                            # only continue with tracker_states that created a
                            # hashed_featurization we haven't observed
                            if hashed not in hashed_examples:
                                hashed_examples.add(hashed)
                                trackers_as_states.append(sliced_states)
                                trackers_as_actions.append([event.action_name])
                        else:
                            trackers_as_states.append(sliced_states)
                            trackers_as_actions.append([event.action_name])

                        pbar.set_postfix({
                            "# actions":
                            "{:d}".format(len(trackers_as_actions))
                        })
                    idx += 1

        logger.debug("Created {} action examples.".format(
            len(trackers_as_actions)))

        return trackers_as_states, trackers_as_actions
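
A minimal standalone sketch of the two techniques this variant relies on, max_history slicing and hash-based deduplication of (states, action) pairs; the helper names and the plain-dict state representation are illustrative assumptions, not the Rasa API:

import json
from typing import Dict, List, Optional, Text

def slice_state_history(
    states: List[Optional[Dict[Text, float]]], slice_length: Optional[int]
) -> List[Optional[Dict[Text, float]]]:
    # keep only the last `slice_length` states (padding is omitted in this sketch)
    return states if not slice_length else states[-slice_length:]

def hash_example(states: List[Optional[Dict[Text, float]]], action: Text) -> int:
    # hash a (states, action) pair so exact duplicates can be skipped
    return hash((json.dumps(states, sort_keys=True), action))

hashed_examples = set()
trackers_as_states = []
trackers_as_actions = []
for states, action in [
    ([{"intent_greet": 1.0}], "utter_greet"),
    ([{"intent_greet": 1.0}], "utter_greet"),  # duplicate featurization, skipped
]:
    sliced_states = slice_state_history(states, 2)
    hashed = hash_example(sliced_states, action)
    if hashed not in hashed_examples:
        hashed_examples.add(hashed)
        trackers_as_states.append(sliced_states)
        trackers_as_actions.append([action])
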
Example #2
0
    def training_states_and_actions(
        self, trackers: List[DialogueStateTracker], domain: Domain
    ) -> Tuple[List[List[State]], List[List[Text]]]:
        """Transforms list of trackers to lists of states and actions.

        Training data is padded up to the length of the longest dialogue with -1.

        Args:
            trackers: The trackers to transform
            domain: The domain

        Returns:
            A tuple of list of states and list of actions.
        """

        trackers_as_states = []
        trackers_as_actions = []

        logger.debug(
            "Creating states and action examples from "
            "collected trackers (by {}({}))..."
            "".format(type(self).__name__, type(self.state_featurizer).__name__)
        )
        pbar = tqdm(
            trackers,
            desc="Processed trackers",
            disable=common_utils.is_logging_disabled(),
        )
        for tracker in pbar:
            states = self._create_states(tracker, domain)

            delete_first_state = False
            actions = []
            for event in tracker.applied_events():
                if not isinstance(event, ActionExecuted):
                    continue

                if not event.unpredictable:
                    # only actions which can be
                    # predicted at a story's start
                    actions.append(event.action_name or event.action_text)
                else:
                    # unpredictable actions can be
                    # only the first in the story
                    if delete_first_state:
                        raise InvalidStory(
                            f"Found two unpredictable actions in one story "
                            f"'{tracker.sender_id}'. Check your story files."
                        )
                    delete_first_state = True

            if delete_first_state:
                states = states[1:]

            trackers_as_states.append(states[:-1])
            trackers_as_actions.append(actions)

        return trackers_as_states, trackers_as_actions
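
The handling of an unpredictable first action, used above and in the next example, boils down to dropping the first state so that input states and target actions stay aligned. A toy sketch with invented event and state values:

# (action_name, unpredictable) pairs as they would appear among applied events
events = [("action_listen", True), ("utter_greet", False), ("action_listen", False)]
states = ["s0", "s1", "s2", "s3"]  # one state before each action plus a final state

delete_first_state = False
actions = []
for name, unpredictable in events:
    if unpredictable:
        # only the first action in a story may be unpredictable
        delete_first_state = True
    else:
        actions.append(name)

if delete_first_state:
    states = states[1:]

assert len(states[:-1]) == len(actions)  # aligned input states and target actions
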
Example #3
0
    def training_states_and_actions(
            self, trackers: List[DialogueStateTracker],
            domain: Domain) -> Tuple[List[List[Dict]], List[List[Text]]]:
        """Transforms list of trackers to lists of states and actions.

        Training data is padded up to the length of the longest dialogue with -1.
        """

        trackers_as_states = []
        trackers_as_actions = []

        logger.debug("Creating states and action examples from "
                     "collected trackers (by {}({}))..."
                     "".format(
                         type(self).__name__,
                         type(self.state_featurizer).__name__))
        pbar = tqdm(trackers,
                    desc="Processed trackers",
                    disable=is_logging_disabled())
        for tracker in pbar:
            states = self._create_states(tracker,
                                         domain,
                                         is_binary_training=True)

            delete_first_state = False
            actions = []
            for event in tracker.applied_events():
                if isinstance(event, ActionExecuted):
                    if not event.unpredictable:
                        # only actions which can be
                        # predicted at a story's start
                        actions.append(event.action_name)
                    else:
                        # unpredictable actions can be
                        # only the first in the story
                        if delete_first_state:
                            raise Exception("Found two unpredictable "
                                            "actions in one story."
                                            "Check your story files.")
                        else:
                            delete_first_state = True

            if delete_first_state:
                states = states[1:]

            trackers_as_states.append(states[:-1])
            trackers_as_actions.append(actions)

        self.max_len = self._calculate_max_len(trackers_as_actions)
        logger.debug("The longest dialogue has {} actions.".format(
            self.max_len))

        return trackers_as_states, trackers_as_actions
Example #4
0
    def _create_lookup_from_states(
        self,
        trackers_as_states: List[List[Dict]],
        trackers_as_actions: List[List[Text]],
    ) -> Dict[Text, Text]:
        """Creates lookup dictionary from the tracker represented as states.

        Args:
            trackers_as_states: representation of the trackers as a list of states
            trackers_as_actions: representation of the trackers as a list of actions

        Returns:
            lookup dictionary
        """

        lookup = {}

        if not trackers_as_states:
            return lookup

        if self.max_history:
            assert len(trackers_as_states[0]) == self.max_history, (
                f"Trying to memorizefeaturized data with {len(trackers_as_states[0])} "
                f"historic turns. Expected: {self.max_history}")

        assert len(trackers_as_actions[0]) == 1, (
            f"The second dimension of trackers_as_action should be 1, "
            f"instead of {len(trackers_as_actions[0])}")

        ambiguous_feature_keys = set()

        pbar = tqdm(
            zip(trackers_as_states, trackers_as_actions),
            desc="Processed actions",
            disable=is_logging_disabled(),
        )
        for states, actions in pbar:
            action = actions[0]

            feature_key = self._create_feature_key(states)

            if feature_key not in ambiguous_feature_keys:
                if feature_key in lookup.keys():
                    if lookup[feature_key] != action:
                        # delete contradicting example created by
                        # partial history augmentation from memory
                        ambiguous_feature_keys.add(feature_key)
                        del lookup[feature_key]
                else:
                    lookup[feature_key] = action
            pbar.set_postfix({"# examples": "{:d}".format(len(lookup))})

        return lookup
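
The lookup construction above follows a simple memoization rule: keep one action per feature key and drop any key that ever maps to contradicting actions. A self-contained sketch of just that rule (the feature keys here are arbitrary strings for illustration):

from typing import Dict, List, Text, Tuple

def build_lookup(examples: List[Tuple[Text, Text]]) -> Dict[Text, Text]:
    lookup: Dict[Text, Text] = {}
    ambiguous_feature_keys = set()
    for feature_key, action in examples:
        if feature_key in ambiguous_feature_keys:
            continue
        if feature_key in lookup and lookup[feature_key] != action:
            # contradicting example, e.g. created by partial history augmentation
            ambiguous_feature_keys.add(feature_key)
            del lookup[feature_key]
        elif feature_key not in lookup:
            lookup[feature_key] = action
    return lookup

# a contradicting pair removes the "greet" key entirely
assert build_lookup([("greet", "utter_greet"), ("greet", "utter_bye")]) == {}
assert build_lookup([("bye", "utter_bye"), ("bye", "utter_bye")]) == {"bye": "utter_bye"}
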
Example #5
0
    def _add_states_to_lookup(
        self, trackers_as_states, trackers_as_actions, domain, online=False
    ) -> None:
        """Add states to lookup dict"""
        if not trackers_as_states:
            return

        assert len(trackers_as_states[0]) == self.max_history, (
            "Trying to mem featurized data with {} historic turns. Expected: "
            "{}".format(len(trackers_as_states[0]), self.max_history)
        )

        assert len(trackers_as_actions[0]) == 1, (
            "The second dimension of trackers_as_action should be 1, "
            "instead of {}".format(len(trackers_as_actions[0]))
        )

        ambiguous_feature_keys = set()

        pbar = tqdm(
            zip(trackers_as_states, trackers_as_actions),
            desc="Processed actions",
            disable=is_logging_disabled(),
        )
        for states, actions in pbar:
            action = actions[0]

            feature_key = self._create_feature_key(states)
            feature_item = domain.index_for_action(action)

            if feature_key not in ambiguous_feature_keys:
                if feature_key in self.lookup.keys():
                    if self.lookup[feature_key] != feature_item:
                        if online:
                            logger.info(
                                "Original stories are "
                                "different for {} -- {}\n"
                                "Memorized the new ones for "
                                "now. Delete contradicting "
                                "examples after exporting "
                                "the new stories."
                                "".format(states, action)
                            )
                            self.lookup[feature_key] = feature_item
                        else:
                            # delete contradicting example created by
                            # partial history augmentation from memory
                            ambiguous_feature_keys.add(feature_key)
                            del self.lookup[feature_key]
                else:
                    self.lookup[feature_key] = feature_item
            pbar.set_postfix({"# examples": "{:d}".format(len(self.lookup))})
Example #6
0
    def generate(self) -> List[TrackerWithCachedStates]:
        if self.config.remove_duplicates and self.config.unique_last_num_states:
            logger.debug("Generated trackers will be deduplicated "
                         "based on their unique last {} states."
                         "".format(self.config.unique_last_num_states))

        self._mark_first_action_in_story_steps_as_unpredictable()

        active_trackers = defaultdict(list)

        init_tracker = TrackerWithCachedStates(
            "",
            self.domain.slots,
            max_event_history=self.config.tracker_limit,
            domain=self.domain,
        )
        active_trackers[STORY_START].append(init_tracker)

        # trackers that are sent to a featurizer
        finished_trackers = []
        # keep story end trackers separately for augmentation
        story_end_trackers = []

        phase = 0  # one phase is one traversal of all story steps.
        min_num_aug_phases = 3 if self.config.augmentation_factor > 0 else 0
        logger.debug(
            "Number of augmentation rounds is {}".format(min_num_aug_phases))

        # placeholder to track gluing process of checkpoints
        used_checkpoints = set()
        previous_unused = set()
        everything_reachable_is_reached = False

        # we will continue generating data until we have reached all
        # checkpoints that seem to be reachable. This is a heuristic,
        # if we did not reach any new checkpoints in an iteration, we
        # assume we have reached all and stop.
        while not everything_reachable_is_reached or phase < min_num_aug_phases:
            phase_name = self._phase_name(everything_reachable_is_reached,
                                          phase)

            num_active_trackers = self._count_trackers(active_trackers)

            if num_active_trackers:
                logger.debug("Starting {} ... (with {} trackers)"
                             "".format(phase_name, num_active_trackers))
            else:
                logger.debug("There are no trackers for {}".format(phase_name))
                break

            # track unused checkpoints for this phase
            unused_checkpoints = set()  # type: Set[Text]

            pbar = tqdm(
                self.story_graph.ordered_steps(),
                desc="Processed Story Blocks",
                disable=is_logging_disabled(),
            )
            for step in pbar:
                incoming_trackers = []  # type: List[TrackerWithCachedStates]
                for start in step.start_checkpoints:
                    if active_trackers[start.name]:
                        ts = start.filter_trackers(active_trackers[start.name])
                        incoming_trackers.extend(ts)
                        used_checkpoints.add(start.name)
                    elif start.name not in used_checkpoints:
                        # need to skip - there was no previous step that
                        # had this start checkpoint as an end checkpoint
                        # it will be processed in next phases
                        unused_checkpoints.add(start.name)

                if not incoming_trackers:
                    # if there are no trackers,
                    # we can skip the rest of the loop
                    continue

                # these are the trackers that reached this story
                # step and that need to handle all events of the step

                if self.config.remove_duplicates:
                    incoming_trackers, end_trackers = self._remove_duplicate_trackers(
                        incoming_trackers)
                    # append end trackers to finished trackers
                    finished_trackers.extend(end_trackers)

                if everything_reachable_is_reached:
                    # augmentation round
                    incoming_trackers = self._subsample_trackers(
                        incoming_trackers,
                        self.config.max_number_of_augmented_trackers)

                # update progress bar
                pbar.set_postfix(
                    {"# trackers": "{:d}".format(len(incoming_trackers))})

                trackers, end_trackers = self._process_step(
                    step, incoming_trackers)
                # add end trackers to finished trackers
                finished_trackers.extend(end_trackers)

                # update our tracker dictionary with the trackers
                # that handled the events of the step and
                # that can now be used for further story steps
                # that start with the checkpoint this step ended with

                for end in step.end_checkpoints:

                    start_name = self._find_start_checkpoint_name(end.name)

                    active_trackers[start_name].extend(trackers)

                    if start_name in used_checkpoints:
                        # add end checkpoint as unused
                        # if this checkpoint was processed as
                        # start one before
                        unused_checkpoints.add(start_name)

                if not step.end_checkpoints:
                    unique_ends = self._remove_duplicate_story_end_trackers(
                        trackers)
                    story_end_trackers.extend(unique_ends)

            num_finished = len(finished_trackers) + len(story_end_trackers)
            logger.debug("Finished phase ({} training samples found).".format(
                num_finished))

            # prepare next round
            phase += 1

            if not everything_reachable_is_reached:
                # check if we reached all nodes that can be reached
                # if we reached at least one more node this round
                # than last one, we assume there is still
                # something left to reach and we continue

                unused_checkpoints = self._add_unused_end_checkpoints(
                    set(active_trackers.keys()), unused_checkpoints,
                    used_checkpoints)
                active_trackers = self._filter_active_trackers(
                    active_trackers, unused_checkpoints)
                num_active_trackers = self._count_trackers(active_trackers)

                everything_reachable_is_reached = (unused_checkpoints
                                                   == previous_unused
                                                   or num_active_trackers == 0)
                previous_unused = unused_checkpoints

                if everything_reachable_is_reached:
                    # should happen only once

                    previous_unused -= used_checkpoints
                    # add trackers with unused checkpoints
                    # to finished trackers
                    for start_name in previous_unused:
                        finished_trackers.extend(active_trackers[start_name])

                    logger.debug("Data generation rounds finished.")
                    logger.debug("Found {} unused checkpoints".format(
                        len(previous_unused)))
                    phase = 0
                else:
                    logger.debug("Found {} unused checkpoints "
                                 "in current phase."
                                 "".format(len(unused_checkpoints)))
                    logger.debug("Found {} active trackers "
                                 "for these checkpoints."
                                 "".format(num_active_trackers))

            if everything_reachable_is_reached:
                # augmentation round, so we process only
                # story end checkpoints
                # reset used checkpoints
                used_checkpoints = set()  # type: Set[Text]

                # generate active trackers for augmentation
                active_trackers = self._create_start_trackers_for_augmentation(
                    story_end_trackers)

        finished_trackers.extend(story_end_trackers)
        self._issue_unused_checkpoint_notification(previous_unused)
        logger.debug("Found {} training trackers.".format(
            len(finished_trackers)))

        if self.config.augmentation_factor > 0:
            augmented_trackers, original_trackers = [], []
            for t in finished_trackers:
                if t.is_augmented:
                    augmented_trackers.append(t)
                else:
                    original_trackers.append(t)
            augmented_trackers = self._subsample_trackers(
                augmented_trackers,
                self.config.max_number_of_augmented_trackers)
            logger.debug("Subsampled to {} augmented training trackers."
                         "".format(len(augmented_trackers)))
            logger.debug("There are {} original trackers.".format(
                len(original_trackers)))
            finished_trackers = original_trackers + augmented_trackers

        return finished_trackers
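
The outer loop above stops on a fixed-point heuristic: once a phase discovers no checkpoints beyond those already unused in the previous phase (or no trackers remain active), everything reachable is assumed to be reached. A toy sketch of that idea on a tiny checkpoint graph (the step data is invented for illustration):

from typing import Set, Text

# each story step connects a start checkpoint to an end checkpoint
steps = [("STORY_START", "greeted"), ("greeted", "asked"), ("asked", "END")]

reached: Set[Text] = {"STORY_START"}
previous_unused: Set[Text] = set()
while True:
    unused_checkpoints = {start for start, _ in steps if start not in reached}
    for start, end in steps:
        if start in reached:
            reached.add(end)
    # stop once the set of unused checkpoints no longer changes between phases
    if unused_checkpoints == previous_unused:
        break
    previous_unused = unused_checkpoints
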
Example #7
0
    def fit(
        self,
        model_data: RasaModelData,
        epochs: int,
        batch_size: Union[List[int], int],
        evaluate_on_num_examples: int,
        evaluate_every_num_epochs: int,
        batch_strategy: Text,
        silent: bool = False,
        eager: bool = False,
    ) -> None:
        """Fit model data"""

        tf.random.set_seed(self.random_seed)
        np.random.seed(self.random_seed)

        disable = silent or is_logging_disabled()

        evaluation_model_data = None
        if evaluate_on_num_examples > 0:
            if not disable:
                logger.info(
                    f"Validation accuracy is calculated every "
                    f"{evaluate_every_num_epochs} epochs."
                )

            model_data, evaluation_model_data = model_data.split(
                evaluate_on_num_examples, self.random_seed
            )

        (
            train_dataset_function,
            tf_train_on_batch_function,
        ) = self._get_tf_train_functions(eager, model_data, batch_strategy)
        (
            evaluation_dataset_function,
            tf_evaluation_on_batch_function,
        ) = self._get_tf_evaluation_functions(eager, evaluation_model_data)

        val_results = {}  # validation is not performed every epoch
        progress_bar = tqdm(range(epochs), desc="Epochs", disable=disable)

        training_steps = 0

        for epoch in progress_bar:
            epoch_batch_size = self.linearly_increasing_batch_size(
                epoch, batch_size, epochs
            )

            training_steps = self._batch_loop(
                train_dataset_function,
                tf_train_on_batch_function,
                epoch_batch_size,
                True,
                training_steps,
                self.train_summary_writer,
            )

            if self.tensorboard_log_on_epochs:
                self._log_metrics_for_tensorboard(epoch, self.train_summary_writer)

            postfix_dict = self._get_metric_results()

            if evaluate_on_num_examples > 0:
                if self._should_evaluate(evaluate_every_num_epochs, epochs, epoch):
                    self._batch_loop(
                        evaluation_dataset_function,
                        tf_evaluation_on_batch_function,
                        epoch_batch_size,
                        False,
                        training_steps,
                        self.test_summary_writer,
                    )

                    if self.tensorboard_log_on_epochs:
                        self._log_metrics_for_tensorboard(
                            epoch, self.test_summary_writer
                        )

                    val_results = self._get_metric_results(prefix="val_")

                postfix_dict.update(val_results)

            progress_bar.set_postfix(postfix_dict)

        if self.model_summary_file is not None:
            self._write_model_summary()

        self._training = None  # training phase should be defined when building a graph
        if not disable:
            logger.info("Finished training.")
Example #8
0
    def _train_tf(
        self,
        X: np.ndarray,
        Y: np.ndarray,
        intents_for_X: np.ndarray,
        loss: "Tensor",
        is_training: "Tensor",
        train_op: "Tensor",
    ) -> None:
        """Train tf graph"""

        self.session.run(tf.global_variables_initializer())

        if self.evaluate_on_num_examples:
            logger.info("Accuracy is updated every {} epochs"
                        "".format(self.evaluate_every_num_epochs))

        pbar = tqdm(range(self.epochs),
                    desc="Epochs",
                    disable=is_logging_disabled())
        train_acc = 0
        last_loss = 0
        for ep in pbar:
            indices = np.random.permutation(len(X))

            batch_size = self._linearly_increasing_batch_size(ep)
            batches_per_epoch = len(X) // batch_size + int(
                len(X) % batch_size > 0)

            ep_loss = 0
            for i in range(batches_per_epoch):
                end_idx = (i + 1) * batch_size
                start_idx = i * batch_size
                batch_a = X[indices[start_idx:end_idx]]
                batch_pos_b = Y[indices[start_idx:end_idx]]
                intents_for_b = intents_for_X[indices[start_idx:end_idx]]
                # add negatives
                batch_b = self._create_batch_b(batch_pos_b, intents_for_b)

                sess_out = self.session.run(
                    {
                        "loss": loss,
                        "train_op": train_op
                    },
                    feed_dict={
                        self.a_in: batch_a,
                        self.b_in: batch_b,
                        is_training: True,
                    },
                )
                ep_loss += sess_out.get("loss") / batches_per_epoch

            if self.evaluate_on_num_examples:
                if (ep == 0 or (ep + 1) % self.evaluate_every_num_epochs == 0
                        or (ep + 1) == self.epochs):
                    train_acc = self._output_training_stat(
                        X, intents_for_X, is_training)
                    last_loss = ep_loss

                pbar.set_postfix({
                    "loss": "{:.3f}".format(ep_loss),
                    "acc": "{:.3f}".format(train_acc),
                })
            else:
                pbar.set_postfix({"loss": "{:.3f}".format(ep_loss)})

        if self.evaluate_on_num_examples:
            logger.info("Finished training embedding classifier, "
                        "loss={:.3f}, train accuracy={:.3f}"
                        "".format(last_loss, train_acc))
Example #9
0
def train_tf_dataset(
    train_init_op: "tf.Operation",
    eval_init_op: "tf.Operation",
    batch_size_in: "tf.Tensor",
    loss: "tf.Tensor",
    acc: "tf.Tensor",
    train_op: "tf.Tensor",
    session: "tf.Session",
    is_training: "tf.Session",
    epochs: int,
    batch_size: Union[List[int], int],
    evaluate_on_num_examples: int,
    evaluate_every_num_epochs: int,
) -> None:
    """Train tf graph"""

    session.run(tf.global_variables_initializer())

    if evaluate_on_num_examples:
        logger.info("Validation accuracy is calculated every {} epochs"
                    "".format(evaluate_every_num_epochs))
    pbar = tqdm(range(epochs), desc="Epochs", disable=is_logging_disabled())

    train_loss = 0
    train_acc = 0
    val_loss = 0
    val_acc = 0
    for ep in pbar:

        ep_batch_size = linearly_increasing_batch_size(ep, batch_size, epochs)

        session.run(train_init_op, feed_dict={batch_size_in: ep_batch_size})

        ep_train_loss = 0
        ep_train_acc = 0
        batches_per_epoch = 0
        while True:
            try:
                _, batch_train_loss, batch_train_acc = session.run(
                    [train_op, loss, acc], feed_dict={is_training: True})
                batches_per_epoch += 1
                ep_train_loss += batch_train_loss
                ep_train_acc += batch_train_acc

            except tf.errors.OutOfRangeError:
                break

        train_loss = ep_train_loss / batches_per_epoch
        train_acc = ep_train_acc / batches_per_epoch

        postfix_dict = {
            "loss": "{:.3f}".format(train_loss),
            "acc": "{:.3f}".format(train_acc),
        }

        if eval_init_op is not None:
            if (ep + 1) % evaluate_every_num_epochs == 0 or (ep + 1) == epochs:
                val_loss, val_acc = output_validation_stat(
                    eval_init_op,
                    loss,
                    acc,
                    session,
                    is_training,
                    batch_size_in,
                    ep_batch_size,
                )

            postfix_dict.update({
                "val_loss": "{:.3f}".format(val_loss),
                "val_acc": "{:.3f}".format(val_acc),
            })

        pbar.set_postfix(postfix_dict)

    final_message = ("Finished training embedding policy, "
                     "train loss={:.3f}, train accuracy={:.3f}"
                     "".format(train_loss, train_acc))
    if eval_init_op is not None:
        final_message += (
            ", validation loss={:.3f}, validation accuracy={:.3f}"
            "".format(val_loss, val_acc))
    logger.info(final_message)
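
All of these loops report progress the same way: a tqdm bar that is silenced when logging is disabled and updated via set_postfix. A minimal reproduction (the is_logging_disabled helper below is an assumed stand-in for the Rasa utility of the same name):

import logging
from tqdm import tqdm

def is_logging_disabled() -> bool:
    # assumed stand-in: treat anything above INFO as "logging disabled"
    return logging.getLogger().getEffectiveLevel() > logging.INFO

pbar = tqdm(range(5), desc="Epochs", disable=is_logging_disabled())
for epoch in pbar:
    train_loss = 1.0 / (epoch + 1)
    pbar.set_postfix({"loss": "{:.3f}".format(train_loss)})
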
Example #10
0
    def training_states_and_actions(
        self, trackers: List[DialogueStateTracker], domain: Domain
    ) -> Tuple[List[List[State]], List[List[Text]]]:
        """Transforms list of trackers to lists of states and actions.

        Training data is padded up to the length of the longest dialogue with -1.

        Args:
            trackers: The trackers to transform
            domain: The domain

        Returns:
            A tuple of list of states and list of actions.
        """

        trackers_as_states = []
        trackers_as_actions = []

        # from multiple states that create equal featurizations
        # we only need to keep one.
        hashed_examples = set()

        logger.debug(
            "Creating states and action examples from "
            "collected trackers (by {}({}))..."
            "".format(type(self).__name__, type(self.state_featurizer).__name__)
        )
        pbar = tqdm(
            trackers,
            desc="Processed trackers",
            disable=common_utils.is_logging_disabled(),
        )
        for tracker in pbar:
            states = self._create_states(tracker, domain)

            states_length_for_action = 0
            for event in tracker.applied_events():
                if not isinstance(event, ActionExecuted):
                    continue

                states_length_for_action += 1

                # use only actions which can be predicted at a story's start
                if event.unpredictable:
                    continue

                sliced_states = self.slice_state_history(
                    states[:states_length_for_action], self.max_history
                )
                if self.remove_duplicates:
                    hashed = self._hash_example(
                        sliced_states, event.action_name or event.action_text, tracker
                    )

                    # only continue with tracker_states that created a
                    # hashed_featurization we haven't observed
                    if hashed not in hashed_examples:
                        hashed_examples.add(hashed)
                        trackers_as_states.append(sliced_states)
                        trackers_as_actions.append(
                            [event.action_name or event.action_text]
                        )
                else:
                    trackers_as_states.append(sliced_states)
                    trackers_as_actions.append([event.action_name or event.action_text])

                pbar.set_postfix({"# actions": "{:d}".format(len(trackers_as_actions))})

        logger.debug("Created {} action examples.".format(len(trackers_as_actions)))

        return trackers_as_states, trackers_as_actions
Example #11
0
    def train(self, training_data, config, **kwargs):
        # type: (TrainingData, Optional[RasaNLUModelConfig], **Any) -> None
        """Train the embedding intent classifier on a data set."""

        intent_dict = self._create_intent_dict(training_data)

        if len(intent_dict) < 2:
            logger.error("Can not train an intent classifier. "
                         "Need at least 2 different classes. "
                         "Skipping training of intent classifier.")
            return

        self.inv_intent_dict = {v: k for k, v in intent_dict.items()}
        self.encoded_all_intents = self._create_encoded_intents(intent_dict)

        X, Y, intents_for_X = self._prepare_data_for_training(
            training_data, intent_dict)

        num_classes = len(intent_dict)

        self.graph = tf.Graph()
        with self.graph.as_default():

            self.a_in = tf.placeholder(tf.float32, (None, X.shape[-1]),
                                       name='a')
            self.b_in = tf.placeholder(tf.float32, (None, Y.shape[-1]),
                                       name='b')

            is_training = tf.placeholder_with_default(False, shape=())

            self.drop_out = tf.placeholder(tf.float32, (), name='drop_out')

            # Create a graph for training
            logits_train = conv_net(self.a_in,
                                    num_classes,
                                    self.num_hidden_layers,
                                    self.hidden_layer_size,
                                    self.C2,
                                    self.drop_out,
                                    is_training=True)

            # Define loss and optimizer
            # (with train logits, for dropout to take effect)
            loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=logits_train,
                    labels=self.b_in)) + tf.losses.get_regularization_loss()

            self.y_predict = tf.nn.softmax(logits_train)

            train_op = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(loss)

            # train tensorflow graph
            config_proto = self.get_config_proto(self.component_config)
            self.session = tf.Session(graph=self.graph, config=config_proto)
            self.session.run(tf.global_variables_initializer())

            pbar = tqdm(range(self.epochs),
                        desc="Epochs",
                        disable=is_logging_disabled())
            train_acc = 0
            last_loss = 0
            for ep in pbar:
                indices = np.random.permutation(len(X))

                batch_size = self.batch_size
                batches_per_epoch = (len(X) // batch_size +
                                     int(len(X) % batch_size > 0))

                ep_loss = 0
                for i in range(batches_per_epoch):
                    end_idx = (i + 1) * batch_size
                    start_idx = i * batch_size
                    batch_a = X[indices[start_idx:end_idx]]
                    batch_b = Y[indices[start_idx:end_idx]]

                    sess_out = self.session.run(
                        {
                            'loss': loss,
                            'train_op': train_op
                        },
                        feed_dict={
                            self.a_in: batch_a,
                            self.b_in: batch_b,
                            is_training: True,
                            self.drop_out: self.droprate
                        })

                    ep_loss += sess_out.get('loss') / batches_per_epoch

                if self.evaluate_on_num_examples:
                    if (ep == 0
                            or (ep + 1) % self.evaluate_every_num_epochs == 0
                            or (ep + 1) == self.epochs):
                        train_acc = self._output_training_stat(
                            X, intents_for_X, is_training)
                        last_loss = ep_loss

                        pbar.set_postfix({
                            "loss": "{:.3f}".format(ep_loss),
                            "acc": "{:.3f}".format(train_acc)
                        })
                else:
                    pbar.set_postfix({"loss": "{:.3f}".format(ep_loss)})