Example #1
0
    def _init_exprs(self):
        """Build the symbolic expressions for this supervised model.

        Populates ``self.exprs`` with the ``inpt``/``target`` tensor3
        variables (and ``imp_weight`` when enabled), the forward-pass
        expressions of the variance-propagating RNN, and the supervised
        loss expressions.
        """
        self.exprs = {'inpt': T.tensor3('inpt'),
                      'target': T.tensor3('target')}
        # Dummy test values for Theano's compute_test_value debugging;
        # (5, 2, ...) is an arbitrary small (time, batch, feature) shape
        # -- assumed axis order, TODO confirm.
        self.exprs['inpt'].tag.test_value = np.zeros((5, 2, self.n_inpt)
            ).astype(theano.config.floatX)
        self.exprs['target'].tag.test_value = np.zeros((5, 2, self.n_output)
            ).astype(theano.config.floatX)

        if self.imp_weight:
            # Optional per-element importance weights for the loss.
            self.exprs['imp_weight'] = T.tensor3('imp_weight')
            self.exprs['imp_weight'].tag.test_value = np.zeros(
                (5, 2, self.n_output)).astype(theano.config.floatX)


        P = self.parameters
        n_layers = len(self.n_hiddens)
        # Collect the per-layer parameters via their registered names.
        hidden_to_hiddens = [getattr(P, 'hidden_to_hidden_%i' % i)
                             for i in range(n_layers - 1)]
        recurrents = [getattr(P, 'recurrent_%i' % i)
                      for i in range(n_layers)]
        initial_hidden_means = [getattr(P, 'initial_hidden_means_%i' % i)
                                for i in range(n_layers)]
        # Squaring plus a small constant keeps the initial variances
        # strictly positive.
        initial_hidden_vars = [getattr(P, 'initial_hidden_vars_%i' % i) ** 2 + 1e-4
                               for i in range(n_layers)]
        hidden_biases = [getattr(P, 'hidden_bias_%i' % i)
                         for i in range(n_layers)]

        if self.skip_to_out:
            # Direct connections from each hidden layer and the input to
            # the output layer.
            skip_to_outs = [getattr(P, 'hidden_%i_to_out' % i)
                            for i in range(n_layers)]
            in_to_out = P.in_to_out
        else:
            in_to_out = skip_to_outs = None

        # The input is treated as deterministic, i.e. zero variance.
        inpt_var = T.zeros_like(self.exprs['inpt'])

        p_dropouts = ([self.p_dropout_inpt]
                      + self.p_dropout_hiddens
                      + [self.p_dropout_hidden_to_out])

        # use_varprop_at entries are cast to int so they act as 0/1
        # scale switches for variance propagation per layer.
        hidden_var_scales_sqrt = [int(i) for i in self.use_varprop_at[:-1]]
        out_var_scale_sqrt = int(self.use_varprop_at[-1])

        self.exprs.update(varprop_rnn.exprs(
            self.exprs['inpt'], inpt_var, P.in_to_hidden, hidden_to_hiddens,
            P.hidden_to_out, hidden_biases,
            hidden_var_scales_sqrt, initial_hidden_means, initial_hidden_vars,
            recurrents, P.out_bias, out_var_scale_sqrt,
            self.hidden_transfers, self.out_transfer,
            in_to_out=in_to_out, skip_to_outs=skip_to_outs,
            p_dropouts=p_dropouts, hotk_inpt=False))


        # The loss treats axis 2 as the feature axis; imp_weight is
        # False when importance weighting is disabled.
        imp_weight = False if not self.imp_weight else self.exprs['imp_weight']
        self.exprs.update(varprop_supervised_loss(
            self.exprs['target'], self.exprs['output'],
            self.loss, 2, imp_weight=imp_weight))
Example #2
0
    def _recog_exprs(self, inpt):
        """Build and return the expressions of the recognition model."""
        params = self.parameters.recog
        depth = len(self.n_hiddens_recog)

        # Per-layer parameter lookups by registered name.
        inter_weights = [getattr(params, 'hidden_to_hidden_%i' % i)
                         for i in range(depth - 1)]
        biases = [getattr(params, 'hidden_bias_%i' % i) for i in range(depth)]
        init_means = [getattr(params, 'initial_hidden_means_%i' % i)
                      for i in range(depth)]
        # Square + epsilon keeps the initial variances positive.
        init_vars = [getattr(params, 'initial_hidden_vars_%i' % i) ** 2 + 1e-4
                     for i in range(depth)]
        rec_weights = [getattr(params, 'recurrent_%i' % i)
                       for i in range(depth)]

        rates = [params.p_dropout.inpt]
        rates += params.p_dropout.hiddens
        rates.append(params.p_dropout.hidden_to_out)

        # Reparametrize to assert the rates lie in (0.01, 0.5).
        rates = [T.nnet.sigmoid(r) * 0.49 + 0.01 for r in rates]

        exprs = vprnn.exprs(
            inpt, T.zeros_like(inpt), params.in_to_hidden, inter_weights,
            params.hidden_to_out, biases, [1] * len(biases),
            init_means, init_vars, rec_weights, params.out_bias, 1,
            self.recog_transfers, self.assumptions.statify_latent,
            p_dropouts=rates)

        # TODO also integrate variance!
        last_hidden = exprs['hidden_mean_%i' % (depth - 1)]

        # Shift one step in time: prepend a zero frame, drop the last.
        shifted = T.concatenate(
            [T.zeros_like(last_hidden[:1]), last_hidden[:-1]])

        # Hic sunt dracones.
        # If we do not keep this line, Theano will die with a segfault.
        exprs['shortcut'] = T.set_subtensor(
            T.zeros_like(shifted)[:, :, :], shifted)

        return exprs
Example #3
0
    def _init_exprs(self):
        """Set up the input/target variables, the variance propagating
        RNN forward pass and the supervised loss in ``self.exprs``."""
        self.exprs = {'inpt': T.tensor3('inpt'), 'target': T.tensor3('target')}
        pars = self.parameters
        depth = len(self.n_hiddens)

        lateral = [getattr(pars, 'hidden_to_hidden_%i' % i)
                   for i in range(depth - 1)]
        rec = [getattr(pars, 'recurrent_%i' % i) for i in range(depth)]
        init_hiddens = [getattr(pars, 'initial_hiddens_%i' % i)
                        for i in range(depth)]
        biases = [getattr(pars, 'hidden_bias_%i' % i) for i in range(depth)]

        if not self.skip_to_out:
            in_to_out = skip_to_outs = None
        else:
            # Direct layer-to-output and input-to-output connections.
            skip_to_outs = [getattr(pars, 'hidden_%i_to_out' % i)
                            for i in range(depth)]
            in_to_out = pars.in_to_out

        # The input is deterministic, hence zero variance.
        inpt_var = T.zeros_like(self.exprs['inpt'])

        drop_rates = [self.p_dropout_inpt]
        drop_rates.extend(self.p_dropout_hiddens)
        drop_rates.append(self.p_dropout_hidden_to_out)

        # 0/1 switches toggling variance propagation per layer.
        var_scales = [int(flag) for flag in self.use_varprop_at[:-1]]
        out_var_scale = int(self.use_varprop_at[-1])

        self.exprs.update(varprop_rnn.exprs(
            self.exprs['inpt'], inpt_var, pars.in_to_hidden, lateral,
            pars.hidden_to_out, biases, var_scales, init_hiddens, rec,
            pars.out_bias, out_var_scale, self.hidden_transfers,
            self.out_transfer, in_to_out=in_to_out,
            skip_to_outs=skip_to_outs, p_dropouts=drop_rates,
            hotk_inpt=False))

        self.exprs.update(varprop_supervised_loss(
            self.exprs['target'], self.exprs['output'], self.loss, 2))
Example #4
0
    def _gen_exprs(self, inpt):
        """Return the expressions of the generative model.

        ``inpt`` appears to carry the latent code in its first
        ``self.n_latent`` features and the shortcut in the remainder --
        assumed layout, TODO confirm against the caller.
        """
        P = self.parameters.gen

        n_layers = len(self.n_hiddens_gen)
        hidden_to_hiddens = [
            getattr(P, 'hidden_to_hidden_%i' % i) for i in range(n_layers - 1)
        ]
        hidden_biases = [
            getattr(P, 'hidden_bias_%i' % i) for i in range(n_layers)
        ]
        initial_hidden_means = [
            getattr(P, 'initial_hidden_means_%i' % i) for i in range(n_layers)
        ]
        # Squaring plus a small constant keeps the variances positive.
        initial_hidden_vars = [
            getattr(P, 'initial_hidden_vars_%i' % i)**2 + 1e-4
            for i in range(n_layers)
        ]
        recurrents = [getattr(P, 'recurrent_%i' % i) for i in range(n_layers)]

        # Build an element-wise dropout-rate tensor over the input; the
        # latent part and the shortcut part currently share the same
        # scalar rate self.p_dropout_inpt.
        p_dropout_inpt = T.zeros_like(inpt[:, :, :self.n_latent])
        p_dropout_inpt = T.fill(p_dropout_inpt, self.p_dropout_inpt)

        p_dropout_shortcut = T.zeros_like(inpt[:, :, self.n_latent:])
        p_dropout_shortcut = T.fill(p_dropout_shortcut, self.p_dropout_inpt)

        p_dropout_inpt = T.concatenate([p_dropout_inpt, p_dropout_shortcut],
                                       axis=2)

        p_dropouts = [p_dropout_inpt] + self.p_dropout_hiddens
        # Fall back to the last hidden layer's rate when no explicit
        # hidden-to-out rate is configured.
        if self.p_dropout_hidden_to_out is None:
            p_dropouts.append(self.p_dropout_hiddens[-1])
        else:
            p_dropouts.append(self.p_dropout_hidden_to_out)

        exprs = vprnn.exprs(inpt,
                            T.zeros_like(inpt),
                            P.in_to_hidden,
                            hidden_to_hiddens,
                            P.hidden_to_out,
                            hidden_biases, [1 for _ in hidden_biases],
                            initial_hidden_means,
                            initial_hidden_vars,
                            recurrents,
                            P.out_bias,
                            1,
                            self.gen_transfers,
                            self.assumptions.statify_visible,
                            p_dropouts=p_dropouts)

        return exprs
Example #5
0
    def _recog_exprs(self, inpt):
        """Return the expressions of the recognition model."""
        P = self.parameters.recog

        n_layers = len(self.n_hiddens_recog)
        hidden_to_hiddens = [getattr(P, 'hidden_to_hidden_%i' % i)
                             for i in range(n_layers - 1)]
        hidden_biases = [getattr(P, 'hidden_bias_%i' % i)
                         for i in range(n_layers)]
        initial_hidden_means = [getattr(P, 'initial_hidden_means_%i' % i)
                                for i in range(n_layers)]
        # Squaring plus a small constant keeps the variances positive.
        initial_hidden_vars = [getattr(P, 'initial_hidden_vars_%i' % i) ** 2 + 1e-4
                               for i in range(n_layers)]
        recurrents = [getattr(P, 'recurrent_%i' % i)
                      for i in range(n_layers)]

        p_dropouts = (
            [P.p_dropout.inpt] + P.p_dropout.hiddens
            + [P.p_dropout.hidden_to_out])

        # Reparametrize to assert the rates lie in (0.01, 0.5):
        # sigmoid maps to (0, 1), so sigmoid * 0.49 + 0.01 is (0.01, 0.5).
        p_dropouts = [T.nnet.sigmoid(i) * 0.49 + 0.01 for i in p_dropouts]

        exprs = vprnn.exprs(
            inpt, T.zeros_like(inpt), P.in_to_hidden, hidden_to_hiddens, P.hidden_to_out,
            hidden_biases, [1 for _ in hidden_biases],
            initial_hidden_means, initial_hidden_vars,
            recurrents,
            P.out_bias, 1, self.recog_transfers, self.assumptions.statify_latent,
            p_dropouts=p_dropouts)

        # TODO also integrate variance!
        # Shortcut is the last hidden layer's mean, shifted one step in
        # time (zero frame prepended, last frame dropped).
        to_shortcut = exprs['hidden_mean_%i' % (n_layers - 1)]

        shortcut = T.concatenate([T.zeros_like(to_shortcut[:1]),
                                  to_shortcut[:-1]])

        # Hic sunt dracones.
        # If we do not keep this line, Theano will die with a segfault.
        shortcut_empty = T.set_subtensor(T.zeros_like(shortcut)[:, :, :], shortcut)

        exprs['shortcut'] = shortcut_empty

        return exprs
Example #6
0
    def _init_exprs(self):
        """Wire up the symbolic input/target variables, the variance
        propagating RNN forward pass, and the supervised loss."""
        self.exprs = {
            'inpt': T.tensor3('inpt'),
            'target': T.tensor3('target'),
        }
        pars = self.parameters
        depth = len(self.n_hiddens)

        lateral_weights = [getattr(pars, 'hidden_to_hidden_%i' % i)
                           for i in range(depth - 1)]
        recurrent_weights = [getattr(pars, 'recurrent_%i' % i)
                             for i in range(depth)]
        start_hiddens = [getattr(pars, 'initial_hiddens_%i' % i)
                         for i in range(depth)]
        biases = [getattr(pars, 'hidden_bias_%i' % i) for i in range(depth)]

        if not self.skip_to_out:
            in_to_out = skip_to_outs = None
        else:
            # Direct layer-to-output and input-to-output connections.
            skip_to_outs = [getattr(pars, 'hidden_%i_to_out' % i)
                            for i in range(depth)]
            in_to_out = pars.in_to_out

        # Deterministic input, therefore zero variance.
        input_variance = T.zeros_like(self.exprs['inpt'])

        dropout_rates = [self.p_dropout_inpt]
        dropout_rates += self.p_dropout_hiddens
        dropout_rates += [self.p_dropout_hidden_to_out]

        # Per-layer 0/1 switches for variance propagation.
        layer_var_scales = [int(flag) for flag in self.use_varprop_at[:-1]]
        output_var_scale = int(self.use_varprop_at[-1])

        self.exprs.update(varprop_rnn.exprs(
            self.exprs['inpt'], input_variance, pars.in_to_hidden,
            lateral_weights, pars.hidden_to_out, biases, layer_var_scales,
            start_hiddens, recurrent_weights, pars.out_bias,
            output_var_scale, self.hidden_transfers, self.out_transfer,
            in_to_out=in_to_out, skip_to_outs=skip_to_outs,
            p_dropouts=dropout_rates, hotk_inpt=False))

        self.exprs.update(varprop_supervised_loss(
            self.exprs['target'], self.exprs['output'], self.loss, 2))
Example #7
0
    def _gen_exprs(self, inpt):
        """Return the expressions of the generative model.

        Parameters
        ----------
        inpt : Theano tensor3
            Appears to carry the latent code in its first
            ``self.n_latent`` features and the shortcut in the remainder
            -- assumed layout, TODO confirm against the caller.

        Returns
        -------
        Dictionary of Theano expressions as produced by ``vprnn.exprs``.
        """
        P = self.parameters.gen

        n_layers = len(self.n_hiddens_gen)
        hidden_to_hiddens = [getattr(P, 'hidden_to_hidden_%i' % i)
                             for i in range(n_layers - 1)]
        hidden_biases = [getattr(P, 'hidden_bias_%i' % i)
                         for i in range(n_layers)]
        initial_hidden_means = [getattr(P, 'initial_hidden_means_%i' % i)
                                for i in range(n_layers)]
        # NOTE(review): unlike the sibling models, the initial variances
        # are used as-is here (no ``** 2 + 1e-4`` positivity transform);
        # verify this is intentional.
        initial_hidden_vars = [getattr(P, 'initial_hidden_vars_%i' % i)
                               for i in range(n_layers)]
        recurrents = [getattr(P, 'recurrent_%i' % i)
                      for i in range(n_layers)]

        # Element-wise dropout rates over the input; the latent part and
        # the shortcut part currently share self.p_dropout_inpt.
        p_dropout_inpt = T.zeros_like(inpt[:, :, :self.n_latent])
        p_dropout_inpt = T.fill(p_dropout_inpt, self.p_dropout_inpt)

        p_dropout_shortcut = T.zeros_like(inpt[:, :, self.n_latent:])
        p_dropout_shortcut = T.fill(p_dropout_shortcut, self.p_dropout_inpt)

        p_dropout_inpt = T.concatenate([p_dropout_inpt, p_dropout_shortcut],
                                       axis=2)

        p_dropouts = [p_dropout_inpt] + self.p_dropout_hiddens
        # Fall back to the last hidden layer's rate when no explicit
        # hidden-to-out rate is configured.
        if self.p_dropout_hidden_to_out is None:
            p_dropouts.append(self.p_dropout_hiddens[-1])
        else:
            p_dropouts.append(self.p_dropout_hidden_to_out)

        exprs = vprnn.exprs(
            inpt, T.zeros_like(inpt), P.in_to_hidden, hidden_to_hiddens, P.hidden_to_out,
            hidden_biases, [1 for _ in hidden_biases],
            initial_hidden_means, initial_hidden_vars,
            recurrents,
            P.out_bias, 1, self.gen_transfers, self.assumptions.statify_visible,
            p_dropouts=p_dropouts)

        return exprs
Example #8
0
    def _init_exprs(self):
        """Build the symbolic expressions for this supervised model.

        Populates ``self.exprs`` with the ``inpt``/``target`` tensor3
        variables (and ``imp_weight`` when enabled), the forward-pass
        expressions of the variance-propagating RNN, and the supervised
        loss expressions.
        """
        self.exprs = {'inpt': T.tensor3('inpt'), 'target': T.tensor3('target')}
        # Dummy test values for Theano's compute_test_value debugging;
        # (5, 2, ...) is an arbitrary small (time, batch, feature) shape
        # -- assumed axis order, TODO confirm.
        self.exprs['inpt'].tag.test_value = np.zeros(
            (5, 2, self.n_inpt)).astype(theano.config.floatX)
        self.exprs['target'].tag.test_value = np.zeros(
            (5, 2, self.n_output)).astype(theano.config.floatX)

        if self.imp_weight:
            # Optional per-element importance weights for the loss.
            self.exprs['imp_weight'] = T.tensor3('imp_weight')
            self.exprs['imp_weight'].tag.test_value = np.zeros(
                (5, 2, self.n_output)).astype(theano.config.floatX)

        P = self.parameters
        n_layers = len(self.n_hiddens)
        # Collect the per-layer parameters via their registered names.
        hidden_to_hiddens = [
            getattr(P, 'hidden_to_hidden_%i' % i) for i in range(n_layers - 1)
        ]
        recurrents = [getattr(P, 'recurrent_%i' % i) for i in range(n_layers)]
        initial_hidden_means = [
            getattr(P, 'initial_hidden_means_%i' % i) for i in range(n_layers)
        ]
        # Squaring plus a small constant keeps the initial variances
        # strictly positive.
        initial_hidden_vars = [
            getattr(P, 'initial_hidden_vars_%i' % i)**2 + 1e-4
            for i in range(n_layers)
        ]
        hidden_biases = [
            getattr(P, 'hidden_bias_%i' % i) for i in range(n_layers)
        ]

        if self.skip_to_out:
            # Direct connections from each hidden layer and the input to
            # the output layer.
            skip_to_outs = [
                getattr(P, 'hidden_%i_to_out' % i) for i in range(n_layers)
            ]
            in_to_out = P.in_to_out
        else:
            in_to_out = skip_to_outs = None

        # The input is treated as deterministic, i.e. zero variance.
        inpt_var = T.zeros_like(self.exprs['inpt'])

        p_dropouts = ([self.p_dropout_inpt] + self.p_dropout_hiddens +
                      [self.p_dropout_hidden_to_out])

        # use_varprop_at entries are cast to int so they act as 0/1
        # scale switches for variance propagation per layer.
        hidden_var_scales_sqrt = [int(i) for i in self.use_varprop_at[:-1]]
        out_var_scale_sqrt = int(self.use_varprop_at[-1])

        self.exprs.update(
            varprop_rnn.exprs(self.exprs['inpt'],
                              inpt_var,
                              P.in_to_hidden,
                              hidden_to_hiddens,
                              P.hidden_to_out,
                              hidden_biases,
                              hidden_var_scales_sqrt,
                              initial_hidden_means,
                              initial_hidden_vars,
                              recurrents,
                              P.out_bias,
                              out_var_scale_sqrt,
                              self.hidden_transfers,
                              self.out_transfer,
                              in_to_out=in_to_out,
                              skip_to_outs=skip_to_outs,
                              p_dropouts=p_dropouts,
                              hotk_inpt=False))

        # The loss treats axis 2 as the feature axis; imp_weight is
        # False when importance weighting is disabled.
        imp_weight = False if not self.imp_weight else self.exprs['imp_weight']
        self.exprs.update(
            varprop_supervised_loss(self.exprs['target'],
                                    self.exprs['output'],
                                    self.loss,
                                    2,
                                    imp_weight=imp_weight))