    def __init__(self, **kwargs):
        super().__init__(include_accuracy=True, **kwargs)

        # Define encoder
        with tf.variable_scope('encoder'):
            cell_encoder = rnn_cell(num_units=self.config.num_units,
                                    num_cells=self.config.num_cells,
                                    keep_prob=self.keep_prob_ph,
                                    add_dropout=self.add_dropout)
            # Alternative encoder (commented out): plain bidirectional encoder
            # self.encoder = BidirectionalEncoder(
            #     cell=cell_encoder,
            #     parallel_iterations=self.parallel_iterations,
            #     swap_memory=self.swap_memory
            # )

            self.encoder = Char2WordEncoder(
                cell=cell_encoder,
                parallel_iterations=self.parallel_iterations,
                swap_memory=self.swap_memory)

        # Define decoder
        with tf.variable_scope('decoder'):
            cell_decoder = rnn_cell(
                # Doubled because the encoder is bidirectional.
                num_units=2 * self.config.num_units,
                num_cells=self.config.num_cells,
                keep_prob=self.keep_prob_ph,
                add_dropout=self.add_dropout)
            self.decoder = AttentionDecoder(
                cell=cell_decoder,
                max_iterations=self.config.max_dec_seq_length + 1,
                infer_token_prob=self.infer_token_prob,
                use_scheduled_sampling=self.config.scheduled_sampling,
                alphabet=self.alphabet,
                parallel_iterations=self.parallel_iterations,
                swap_memory=self.swap_memory)

        # Inputs for the one-step-at-a-time decoding
        self.decoder_inputs = tf.placeholder(tf.int32,
                                             shape=[None],
                                             name='decoder_inputs')
        self.decoder_state = tf.placeholder(
            tf.float32,
            shape=[None, 2, self.config.num_cells, 2 * self.config.num_units],
            name='decoder_state')
        self.decoder_attention = tf.placeholder(
            tf.float32,
            shape=[None, 2 * self.config.num_units],
            name='decoder_attention')
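        # A minimal sketch (assumption) of how these placeholders could be fed
        # during step-wise inference; the fetched op names below are
        # illustrative, not part of this class:
        #
        #     token = start_token_ids        # [batch]
        #     state = initial_state          # [batch, 2, num_cells, 2 * num_units]
        #     attention = initial_attention  # [batch, 2 * num_units]
        #     for _ in range(self.config.max_dec_seq_length + 1):
        #         token, state, attention = sess.run(
        #             step_fetches,
        #             feed_dict={self.decoder_inputs: token,
        #                        self.decoder_state: state,
        #                        self.decoder_attention: attention})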

        if not self.prediction_mode:
            self.mixed_loss_maintainer = MixedLossMaintainer(
                epochs_filename=self.config.epochs_mixed_filename,
                verbose=False)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Define decoder
        with tf.variable_scope('decoder'):
            cell_decoder = rnn_cell(num_units=self.config.num_units,
                                    num_cells=self.config.num_cells,
                                    keep_prob=self.keep_prob_ph,
                                    add_dropout=self.add_dropout)

            self.decoder = Decoder(
                cell=cell_decoder,
                max_iterations=self.config.max_dec_seq_length + 1,
                infer_token_prob=self.infer_token_prob,
                alphabet=self.alphabet,
                use_scheduled_sampling=False,
                check_seq_lengths=True,
                parallel_iterations=self.parallel_iterations,
                swap_memory=self.swap_memory)

        # Inputs for the one-step-at-a-time decoding
        self.decoder_inputs = tf.placeholder(tf.int32,
                                             shape=[None],
                                             name='decoder_inputs')
        self.decoder_state = tf.placeholder(
            tf.float32,
            shape=[None, 2, self.config.num_cells, self.config.num_units],
            name='decoder_state')
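        # Shape note (assumption): the size-2 axis of decoder_state presumably
        # holds the (c, h) pair of an LSTM state for each of the num_cells
        # layers. One inference step would then feed the previous token ids
        # through decoder_inputs and the running state through decoder_state,
        # roughly (fetched op name is illustrative):
        #
        #     token, state = sess.run(step_fetches,
        #                             feed_dict={self.decoder_inputs: token,
        #                                        self.decoder_state: state})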
    def __init__(self, initial_state_attention=False, **kwargs):
        # Parameters
        self.initial_state_attention = initial_state_attention

        RNN.__init__(self, **kwargs)

        # Define encoder
        with tf.variable_scope('encoder'):
            cell_encoder = rnn_cell(num_units=self.config.num_units,
                                    num_cells=self.config.num_cells,
                                    keep_prob=self.keep_prob_ph,
                                    add_dropout=self.add_dropout)
            self.encoder = Char2WordEncoder(
                cell=cell_encoder,
                parallel_iterations=self.parallel_iterations,
                swap_memory=self.swap_memory)

        # Define decoder
        with tf.variable_scope('decoder'):
            cell_decoder = rnn_cell(
                # Doubled because the encoder is bidirectional.
                num_units=2 * self.config.num_units,
                num_cells=self.config.num_cells,
                keep_prob=self.keep_prob_ph,
                add_dropout=self.add_dropout)
            self.decoder = AttentionDecoder(
                cell=cell_decoder,
                max_iterations=self.config.max_dec_seq_length + 1,
                infer_token_prob=self.infer_token_prob,
                use_scheduled_sampling=self.config.scheduled_sampling,
                alphabet=self.alphabet,
                parallel_iterations=self.parallel_iterations,
                swap_memory=self.swap_memory)

        # Inputs for the one-step-at-a-time decoding
        self.decoder_inputs = tf.placeholder(tf.int32,
                                             shape=[None],
                                             name='decoder_inputs')
        self.decoder_state = tf.placeholder(
            tf.float32,
            shape=[None, 2, self.config.num_cells, 2 * self.config.num_units],
            name='decoder_state')
        self.decoder_attention = tf.placeholder(
            tf.float32,
            shape=[None, 2 * self.config.num_units],
            name='decoder_attention')
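        # Note (assumption): decoder_attention presumably carries the previous
        # attention context vector between decoding steps; its width matches
        # the concatenated forward/backward encoder outputs (2 * num_units),
        # and it is fed back alongside decoder_inputs and decoder_state.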
    def __init__(self, **kwargs):
        RNN.__init__(self, include_accuracy=True, **kwargs)

        # Define encoder
        with tf.variable_scope('encoder'):
            cell_encoder = rnn_cell(num_units=self.config.num_units,
                                    num_cells=self.config.num_cells,
                                    keep_prob=self.keep_prob_ph,
                                    add_dropout=self.add_dropout)

            self.encoder = BidirectionalEncoder(
                cell=cell_encoder,
                parallel_iterations=self.parallel_iterations,
                swap_memory=self.swap_memory)