Code example #1
import torch.nn as nn

import layers  # project-local module defining EncoderRNN, ContextRNN, DecoderRNN, FeedForward


class HRED(nn.Module):
    def __init__(self, config):
        super(HRED, self).__init__()

        self.config = config
        # Utterance-level encoder: encodes each utterance into a fixed-size state.
        self.encoder = layers.EncoderRNN(config.vocab_size,
                                         config.embedding_size,
                                         config.encoder_hidden_size,
                                         config.rnn,
                                         config.num_layers,
                                         config.bidirectional,
                                         config.dropout)

        # Flattened size of the encoder's final hidden state (all layers and directions).
        context_input_size = (config.num_layers
                              * config.encoder_hidden_size
                              * self.encoder.num_directions)

        # Optionally feed emotion and InferSent feature vectors to the context encoder.
        if config.context_input_only:
            context_input_size += (config.emo_output_size
                                   + config.infersent_output_size)

        # Context-level encoder: tracks the dialogue state across utterances.
        self.context_encoder = layers.ContextRNN(context_input_size,
                                                 config.context_size,
                                                 config.rnn,
                                                 config.num_layers,
                                                 config.dropout)

        # Utterance decoder: generates the response token by token.
        self.decoder = layers.DecoderRNN(config.vocab_size,
                                         config.embedding_size,
                                         config.decoder_hidden_size,
                                         config.rnncell,
                                         config.num_layers,
                                         config.dropout,
                                         config.word_drop,
                                         config.max_unroll,
                                         config.sample,
                                         config.temperature,
                                         config.beam_size)

        # Projects the context vector to the decoder's initial hidden state.
        self.context2decoder = layers.FeedForward(config.context_size,
                                                  config.num_layers * config.decoder_hidden_size,
                                                  num_layers=1,
                                                  activation=config.activation)

        # Share the embedding matrix between encoder and decoder.
        if config.tie_embedding:
            self.decoder.embedding = self.encoder.embedding
        
        # Optional head that predicts emotion features from the context.
        if config.emotion:
            self.context2emoji = layers.FeedForward(
                config.context_size, config.emo_output_size,
                num_layers=config.emo_num_layers,
                hidden_size=config.emo_embedding_size,
                activation=config.emo_activation)

        # Optional head that predicts InferSent sentence embeddings from the context.
        if config.infersent:
            self.context2infersent = layers.FeedForward(
                config.context_size, config.infersent_output_size,
                num_layers=config.infersent_num_layers,
                hidden_size=config.infersent_embedding_size,
                activation=config.infersent_activation,
                no_activation_last_layer=True)
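
For orientation, a minimal instantiation sketch follows. Every attribute name mirrors one read in `__init__` above; the concrete values, the use of `argparse.Namespace` as the config container, and the exact types `layers` expects for `rnn`/`rnncell`/`activation` (string vs. module class) are assumptions for illustration, not taken from the source.

from argparse import Namespace

# Hypothetical config: field names mirror the attributes accessed in
# __init__ above; the values are illustrative only.
config = Namespace(
    vocab_size=20000, embedding_size=500,
    encoder_hidden_size=1000, decoder_hidden_size=1000,
    context_size=1000, rnn='gru', rnncell='gru',
    num_layers=1, bidirectional=True, dropout=0.2,
    word_drop=0.0, max_unroll=30, sample=False,
    temperature=1.0, beam_size=5, activation='Tanh',
    context_input_only=False, tie_embedding=True,
    emotion=False, infersent=False,
    emo_output_size=64, infersent_output_size=4096)

model = HRED(config)  # builds the encoder, context encoder, and decoder as above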
Code example #2
import torch
import torch.nn as nn

import layers  # project-local module defining EncoderRNN, ContextRNN, DecoderRNN, FeedForward


class VHCR(nn.Module):
    def __init__(self, config):
        super(VHCR, self).__init__()

        self.config = config
        # Utterance-level encoder: encodes each utterance into a fixed-size state.
        self.encoder = layers.EncoderRNN(config.vocab_size,
                                         config.embedding_size,
                                         config.encoder_hidden_size,
                                         config.rnn,
                                         config.num_layers,
                                         config.bidirectional,
                                         config.dropout)

        # Flattened size of the encoder's final hidden state (all layers and directions).
        context_inference_size = (config.num_layers
                                  * config.encoder_hidden_size
                                  * self.encoder.num_directions)
        # Input to the utterance-level posterior: encoder state, context state,
        # and the conversation-level latent z_conv.
        posterior_input_size = (config.num_layers
                                * config.encoder_hidden_size
                                * self.encoder.num_directions
                                + config.context_size
                                + config.z_conv_size)
                                
        # Optionally feed emotion and InferSent feature vectors to both networks.
        if config.context_input_only:
            context_inference_size += (config.emo_output_size
                                       + config.infersent_output_size)
            posterior_input_size += (config.emo_output_size
                                     + config.infersent_output_size)
        
        # The context encoder additionally conditions on the conversation latent z_conv.
        context_encoder_size = context_inference_size + config.z_conv_size

        self.context_encoder = layers.ContextRNN(context_encoder_size,
                                                 config.context_size,
                                                 config.rnn,
                                                 config.num_layers,
                                                 config.dropout)

        # Learned placeholder vector substituted for utterances dropped during
        # training (utterance drop regularization).
        self.unk_sent = nn.Parameter(torch.randn(context_encoder_size - config.z_conv_size))

        # Maps z_conv to the context encoder's initial hidden state.
        self.z_conv2context = layers.FeedForward(config.z_conv_size,
                                                 config.num_layers * config.context_size,
                                                 num_layers=1,
                                                 activation=config.activation)

        # Bidirectional RNN over utterance encodings, used to infer the
        # conversation-level posterior.
        self.context_inference = layers.ContextRNN(context_inference_size,
                                                   config.context_size,
                                                   config.rnn,
                                                   config.num_layers,
                                                   config.dropout,
                                                   bidirectional=True)

        # Utterance decoder: generates the response token by token.
        self.decoder = layers.DecoderRNN(config.vocab_size,
                                         config.embedding_size,
                                         config.decoder_hidden_size,
                                         config.rnncell,
                                         config.num_layers,
                                         config.dropout,
                                         config.word_drop,
                                         config.max_unroll,
                                         config.sample,
                                         config.temperature,
                                         config.beam_size)

        # Decoder is initialized from the concatenation [context; z_sent; z_conv].
        self.context2decoder = layers.FeedForward(
            config.context_size + config.z_sent_size + config.z_conv_size,
            config.num_layers * config.decoder_hidden_size,
            num_layers=1,
            activation=config.activation)

        # Softplus keeps the predicted variances positive.
        self.softplus = nn.Softplus()

        # Conversation-level posterior q(z_conv | utterances): a shared hidden
        # layer followed by linear heads for the mean and (pre-Softplus) variance.
        self.conv_posterior_h = layers.FeedForward(
            config.num_layers * self.context_inference.num_directions * config.context_size,
            config.context_size,
            num_layers=2,
            hidden_size=config.context_size,
            activation=config.activation)
        self.conv_posterior_mu = nn.Linear(config.context_size,
                                           config.z_conv_size)
        self.conv_posterior_var = nn.Linear(config.context_size,
                                            config.z_conv_size)

        # Utterance-level prior p(z_sent | context, z_conv).
        self.sent_prior_h = layers.FeedForward(config.context_size + config.z_conv_size,
                                               config.context_size,
                                               num_layers=1,
                                               hidden_size=config.z_sent_size,
                                               activation=config.activation)
        self.sent_prior_mu = nn.Linear(config.context_size,
                                       config.z_sent_size)
        self.sent_prior_var = nn.Linear(config.context_size,
                                        config.z_sent_size)

        # Utterance-level posterior q(z_sent | encoder state, context, z_conv).
        self.sent_posterior_h = layers.FeedForward(
            posterior_input_size,
            config.context_size,
            num_layers=2,
            hidden_size=config.context_size,
            activation=config.activation)
        self.sent_posterior_mu = nn.Linear(config.context_size,
                                           config.z_sent_size)
        self.sent_posterior_var = nn.Linear(config.context_size,
                                            config.z_sent_size)

        # Share the embedding matrix between encoder and decoder.
        if config.tie_embedding:
            self.decoder.embedding = self.encoder.embedding

        # Optional head that predicts emotion features from the context.
        if config.emotion:
            self.context2emoji = layers.FeedForward(
                config.context_size, config.emo_output_size,
                num_layers=config.emo_num_layers,
                hidden_size=config.emo_embedding_size,
                activation=config.emo_activation)

        # Optional head that predicts InferSent sentence embeddings from the context.
        if config.infersent:
            self.context2infersent = layers.FeedForward(
                config.context_size, config.infersent_output_size,
                num_layers=config.infersent_num_layers,
                hidden_size=config.infersent_embedding_size,
                activation=config.infersent_activation,
                no_activation_last_layer=True)
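
The `*_mu`/`*_var` heads above are the usual ingredients of the reparameterization trick: the variance head's raw output is passed through `self.softplus` to make it positive, and a latent sample is drawn as mu + sqrt(var) * eps. A minimal sketch of that step follows; the helper name `sample_z` is an assumption, and the forward pass in the source may organize this differently.

import torch
import torch.nn as nn

def sample_z(mu, var_logits, softplus=nn.Softplus()):
    # Hypothetical helper illustrating the reparameterization trick.
    # var_logits is the raw output of a *_var linear head; Softplus
    # turns it into a valid (positive) variance, matching self.softplus above.
    var = softplus(var_logits)
    eps = torch.randn_like(mu)        # standard normal noise
    return mu + var.sqrt() * eps      # differentiable sample from N(mu, var)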