def do(self, *args):
        """Plot the in/out/forget gate activations of every layer for one
        text window drawn from the training stream.

        Lays the plots out on a 3-row (in / out / forget) by ``layers``-column
        grid, two curves per subplot (gate units 0 and 1 of batch element 0).
        """
        # First `text_length` timesteps of the first batch element (time x 1).
        init_ = next(self.main_loop.epoch_iterator)["features"][
            0:self.text_length, 0:1]
        # time x batch
        whole_sentence_code = init_
        vocab = get_character(self.dataset)
        # Decode the character codes of the window (kept for debugging).
        whole_sentence = ''.join(vocab[whole_sentence_code[:, 0]])

        # One activation array per layer; assumed time x batch x units
        # from the indexing below — TODO confirm against generate_in.
        last_output_in = self.generate_in(init_)
        last_output_out = self.generate_out(init_)
        last_output_forget = self.generate_forget(init_)
        layers = len(last_output_in)

        time = last_output_in[0].shape[0]

        # Matplotlib subplot indices are 1-based and row-major, so gate row
        # g, layer column i lives at index g * layers + i + 1.  The previous
        # indices (i * 3, i * 3 + 1, i * 3 + 2) started at the invalid index
        # 0 (ValueError) and only matched the grid shape when layers == 3.
        for i in range(layers):
            gate_outputs = (last_output_in, last_output_out,
                            last_output_forget)
            for row, gate_output in enumerate(gate_outputs):
                plt.subplot(3, layers, row * layers + i + 1)
                plt.plot(np.arange(time), gate_output[i][:, 0, 0])
                plt.plot(np.arange(time), gate_output[i][:, 0, 1])
        plt.show()
    def do(self, *args):
        """Visualize the gate activations (in / out / forget) of each layer
        on a window of text taken from the training stream.

        Grid layout: 3 rows (one per gate) x ``layers`` columns; each
        subplot shows units 0 and 1 of batch element 0 over time.
        """
        # time x batch slice: first `text_length` steps, first example only.
        init_ = next(
            self.main_loop.epoch_iterator)["features"][0:self.text_length, 0:1]
        # time x batch
        whole_sentence_code = init_
        vocab = get_character(self.dataset)
        # Human-readable form of the window (kept for debugging).
        whole_sentence = ''.join(vocab[whole_sentence_code[:, 0]])

        # Per-layer activations; indexing below implies time x batch x units
        # — TODO confirm against the generate_* functions.
        last_output_in = self.generate_in(init_)
        last_output_out = self.generate_out(init_)
        last_output_forget = self.generate_forget(init_)
        layers = len(last_output_in)

        time = last_output_in[0].shape[0]

        # Bug fix: subplot numbering is 1-based and row-major, so the slot
        # for gate row g and layer column i is g * layers + i + 1.  The old
        # code used i * 3, i * 3 + 1, i * 3 + 2, which begins at the invalid
        # index 0 and is only a valid layout when layers == 3.
        for i in range(layers):
            for row, activations in enumerate(
                    (last_output_in, last_output_out, last_output_forget)):
                plt.subplot(3, layers, row * layers + i + 1)
                plt.plot(np.arange(time), activations[i][:, 0, 0])
                plt.plot(np.arange(time), activations[i][:, 0, 1])
        plt.show()
    def do(self, *args):
        """Sample a continuation of a seed sentence from the model and log
        it; when a plotting path is configured, also plot the per-step
        softmax probabilities of the generated characters.
        """
        # init is TIME X 1.  In interactive mode the main loop's
        # epoch_iterator is not accessible, so pull a fresh iterator
        # directly from the data stream.
        if self.interactive_mode:
            # TEMPORARY HACK
            stream_iterator = self.main_loop.data_stream.get_epoch_iterator()
            init_ = next(stream_iterator)[0][0:self.initial_text_length, 0:1]
        else:
            batch = next(self.main_loop.epoch_iterator)
            init_ = batch["features"][0:self.initial_text_length, 0:1]

        inputs_ = init_
        all_output_probabilities = []
        logger.info("\nGeneration:")
        for _ in range(self.generation_length):
            # time x batch x features (1 x 1 x vocab_size):
            # keep only the newest timestep of the model output.
            newest_output = self.generate(inputs_)[0][-1:, :, :]
            # time x features (1 x vocab_size); [0] removes the batch dim
            probabilities = softmax(newest_output[0])
            all_output_probabilities.append(probabilities)
            # 1 x 1 — greedy when configured for argmax, stochastic otherwise
            greedy = self.softmax_sampling == 'argmax'
            drawn_char = sample(probabilities, greedy)
            inputs_ = np.vstack([inputs_, drawn_char])

        # time x batch
        whole_sentence_code = inputs_
        vocab = get_character(self.dataset)
        # Decode the full (seed + generated) code sequence to text.
        whole_sentence = ''.join(vocab[whole_sentence_code[:, 0]])
        logger.info(whole_sentence[:init_.shape[0]] + ' ...')
        logger.info(whole_sentence)

        if self.ploting_path is not None:
            # Stack the per-step (1 x vocab) rows into one dense table.
            all_output_probabilities_array = np.zeros(
                (self.generation_length, all_output_probabilities[0].shape[1]))
            for step, step_probs in enumerate(all_output_probabilities):
                all_output_probabilities_array[step] = step_probs
            probability_plot(all_output_probabilities_array,
                             whole_sentence[init_.shape[0]:],
                             vocab, self.ploting_path)
    def do(self, *args):
        """Generate text character-by-character from a seed window, log the
        result, and optionally plot each generation step's probability
        distribution to ``self.ploting_path``.
        """
        # init is TIME X 1.
        # In interactive mode self.main_loop.epoch_iterator is not
        # accessible, hence the fallback to the data stream itself.
        if self.interactive_mode:
            # TEMPORARY HACK
            epoch_it = self.main_loop.data_stream.get_epoch_iterator()
            init_ = next(epoch_it)[0][0:self.initial_text_length, 0:1]
        else:
            init_ = next(self.main_loop.epoch_iterator)["features"][
                0:self.initial_text_length, 0:1]

        inputs_ = init_
        step_probabilities = []
        logger.info("\nGeneration:")
        for _ in range(self.generation_length):
            # time x batch x features (1 x 1 x vocab_size):
            # last timestep of the model's output sequence.
            last_step = self.generate(inputs_)[0][-1:, :, :]
            # time x features (1 x vocab_size); indexing [0] drops one dim
            probs = softmax(last_step[0])
            step_probabilities.append(probs)
            # 1 x 1 next-character code (argmax or random draw)
            next_code = sample(probs, self.softmax_sampling == 'argmax')
            inputs_ = np.vstack([inputs_, next_code])

        # time x batch
        whole_sentence_code = inputs_
        vocab = get_character(self.dataset)
        # Map each character code back to its character and concatenate.
        decoded = [vocab[code] for code in whole_sentence_code[:, 0]]
        whole_sentence = ''.join(decoded)
        logger.info(whole_sentence[:init_.shape[0]] + ' ...')
        logger.info(whole_sentence)

        if self.ploting_path is not None:
            # Fill a (steps x vocab) table row by row from the step probs.
            table = np.zeros((self.generation_length,
                              step_probabilities[0].shape[1]))
            for row_index, probs in enumerate(step_probabilities):
                table[row_index] = probs
            probability_plot(table, whole_sentence[init_.shape[0]:], vocab,
                             self.ploting_path)