Example #1
0
    def _init_exprs(self):
        """Build the symbolic expression graph of this variance-propagating MLP.

        Populates ``self.exprs`` with the input-mean/target placeholders, the
        forward-pass expressions produced by ``varprop_mlp.exprs`` and the
        supervised loss expressions.
        """
        params = self.parameters
        inpt_mean = T.matrix('inpt_mean')
        target = T.matrix('target')
        self.exprs = {'inpt_mean': inpt_mean, 'target': target}

        n_layers = len(self.n_hiddens)
        # Weights between consecutive hidden layers: one fewer than layers.
        weights = [getattr(params, 'hidden_to_hidden_%i' % idx)
                   for idx in range(n_layers - 1)]
        biases = [getattr(params, 'hidden_bias_%i' % idx)
                  for idx in range(n_layers)]

        # Broadcast the (scalar) input variance over the input mean's shape.
        inpt_var = T.zeros_like(inpt_mean) + self.inpt_var

        forward = varprop_mlp.exprs(
            inpt_mean, inpt_var,
            params.in_to_hidden, weights,
            params.hidden_to_out, biases,
            [1] * len(biases), params.out_bias, 1,
            self.hidden_transfers, self.out_transfer,
            self.p_dropout_inpt, self.p_dropout_hiddens)
        self.exprs.update(forward)

        # Alias so generic code that expects an 'inpt' entry keeps working.
        self.exprs['inpt'] = inpt_mean

        self.exprs.update(varprop_supervised_loss(
            target, self.exprs['output'], self.loss))
Example #2
0
File: mlp.py  Project: m0r17z/thesis
    def _init_exprs(self):
        """Set up the symbolic expressions of this variance-propagating MLP.

        Fills ``self.exprs`` with placeholders for the input mean and target,
        the forward-pass expressions from ``varprop_mlp.exprs`` and the
        supervised loss terms.
        """
        params = self.parameters
        inpt_mean = T.matrix('inpt_mean')
        target = T.matrix('target')
        self.exprs = {'inpt_mean': inpt_mean, 'target': target}

        n_layers = len(self.n_hiddens)
        weights = [getattr(params, 'hidden_to_hidden_%i' % idx)
                   for idx in range(n_layers - 1)]
        biases = [getattr(params, 'hidden_bias_%i' % idx)
                  for idx in range(n_layers)]

        # Broadcast the scalar input variance over the input mean's shape.
        inpt_var = T.zeros_like(inpt_mean) + self.inpt_var

        self.exprs.update(varprop_mlp.exprs(
            inpt_mean, inpt_var, target,
            params.in_to_hidden, weights,
            params.hidden_to_out, biases,
            [1] * len(biases), params.out_bias, 1,
            self.hidden_transfers, self.out_transfer,
            self.p_dropout_inpt, self.p_dropout_hiddens))

        # Alias so generic code that expects an 'inpt' entry keeps working.
        self.exprs['inpt'] = inpt_mean

        self.exprs.update(varprop_supervised_loss(
            target, self.exprs['output'], self.loss))
Example #3
0
File: rnn.py  Project: RuinCakeLie/breze
    def _init_exprs(self):
        """Construct the symbolic expressions of this variance-propagating RNN.

        Populates ``self.exprs`` with input/target placeholders (carrying
        Theano test values), optional importance weights, the recurrent
        forward pass from ``varprop_rnn.exprs`` and the supervised loss.
        """
        inpt = T.tensor3('inpt')
        target = T.tensor3('target')
        self.exprs = {'inpt': inpt, 'target': target}

        # Attach test values so Theano's compute_test_value mode can
        # shape-check the graph as it is built.
        floatx = theano.config.floatX
        inpt.tag.test_value = np.zeros((5, 2, self.n_inpt)).astype(floatx)
        target.tag.test_value = np.zeros((5, 2, self.n_output)).astype(floatx)

        if self.imp_weight:
            imp = T.tensor3('imp_weight')
            imp.tag.test_value = np.zeros((5, 2, self.n_output)).astype(floatx)
            self.exprs['imp_weight'] = imp

        params = self.parameters
        n_layers = len(self.n_hiddens)
        weights = [getattr(params, 'hidden_to_hidden_%i' % idx)
                   for idx in range(n_layers - 1)]
        rec_weights = [getattr(params, 'recurrent_%i' % idx)
                       for idx in range(n_layers)]
        init_means = [getattr(params, 'initial_hidden_means_%i' % idx)
                      for idx in range(n_layers)]
        # Square to keep variances non-negative; small epsilon for stability.
        init_vars = [getattr(params, 'initial_hidden_vars_%i' % idx) ** 2 + 1e-4
                     for idx in range(n_layers)]
        biases = [getattr(params, 'hidden_bias_%i' % idx)
                  for idx in range(n_layers)]

        if self.skip_to_out:
            skip_to_outs = [getattr(params, 'hidden_%i_to_out' % idx)
                            for idx in range(n_layers)]
            in_to_out = params.in_to_out
        else:
            in_to_out = skip_to_outs = None

        # Inputs are treated as deterministic: zero variance.
        inpt_var = T.zeros_like(inpt)

        p_dropouts = ([self.p_dropout_inpt]
                      + self.p_dropout_hiddens
                      + [self.p_dropout_hidden_to_out])

        # 0/1 flags selecting where variance propagation is active.
        hidden_var_scales_sqrt = [int(flag) for flag in self.use_varprop_at[:-1]]
        out_var_scale_sqrt = int(self.use_varprop_at[-1])

        self.exprs.update(varprop_rnn.exprs(
            inpt, inpt_var, params.in_to_hidden, weights,
            params.hidden_to_out, biases,
            hidden_var_scales_sqrt, init_means, init_vars,
            rec_weights, params.out_bias, out_var_scale_sqrt,
            self.hidden_transfers, self.out_transfer,
            in_to_out=in_to_out, skip_to_outs=skip_to_outs,
            p_dropouts=p_dropouts, hotk_inpt=False))

        imp_weight = self.exprs['imp_weight'] if self.imp_weight else False
        self.exprs.update(varprop_supervised_loss(
            target, self.exprs['output'],
            self.loss, 2, imp_weight=imp_weight))
Example #4
0
File: rnn.py  Project: osdf/breze
    def _init_exprs(self):
        """Build the symbolic expressions of this variance-propagating RNN.

        Fills ``self.exprs`` with input/target placeholders, the recurrent
        forward pass from ``varprop_rnn.exprs`` and the supervised loss.
        """
        inpt = T.tensor3('inpt')
        target = T.tensor3('target')
        self.exprs = {'inpt': inpt, 'target': target}
        params = self.parameters

        n_layers = len(self.n_hiddens)
        weights = [getattr(params, 'hidden_to_hidden_%i' % idx)
                   for idx in range(n_layers - 1)]
        rec_weights = [getattr(params, 'recurrent_%i' % idx)
                       for idx in range(n_layers)]
        init_hiddens = [getattr(params, 'initial_hiddens_%i' % idx)
                        for idx in range(n_layers)]
        biases = [getattr(params, 'hidden_bias_%i' % idx)
                  for idx in range(n_layers)]

        if self.skip_to_out:
            skip_to_outs = [getattr(params, 'hidden_%i_to_out' % idx)
                            for idx in range(n_layers)]
            in_to_out = params.in_to_out
        else:
            in_to_out = skip_to_outs = None

        # Inputs are deterministic, so their variance is zero.
        inpt_var = T.zeros_like(inpt)

        p_dropouts = ([self.p_dropout_inpt]
                      + self.p_dropout_hiddens
                      + [self.p_dropout_hidden_to_out])

        # 0/1 flags selecting where variance propagation is active.
        hidden_var_scales_sqrt = [int(flag) for flag in self.use_varprop_at[:-1]]
        out_var_scale_sqrt = int(self.use_varprop_at[-1])

        self.exprs.update(varprop_rnn.exprs(
            inpt, inpt_var, params.in_to_hidden, weights,
            params.hidden_to_out, biases,
            hidden_var_scales_sqrt, init_hiddens,
            rec_weights, params.out_bias, out_var_scale_sqrt,
            self.hidden_transfers, self.out_transfer,
            in_to_out=in_to_out, skip_to_outs=skip_to_outs,
            p_dropouts=p_dropouts, hotk_inpt=False))

        self.exprs.update(varprop_supervised_loss(
            target, self.exprs['output'], self.loss, 2))
Example #5
0
File: rnn.py  Project: osdf/breze
    def _init_exprs(self):
        """Wire up the symbolic expressions of this variance-propagating RNN."""
        self.exprs = {'inpt': T.tensor3('inpt'), 'target': T.tensor3('target')}
        params = self.parameters
        n_layers = len(self.n_hiddens)

        def layer_params(fmt, count):
            # Collect per-layer parameter views by name pattern.
            return [getattr(params, fmt % idx) for idx in range(count)]

        weights = layer_params('hidden_to_hidden_%i', n_layers - 1)
        rec_weights = layer_params('recurrent_%i', n_layers)
        init_hiddens = layer_params('initial_hiddens_%i', n_layers)
        biases = layer_params('hidden_bias_%i', n_layers)

        if self.skip_to_out:
            skip_to_outs = layer_params('hidden_%i_to_out', n_layers)
            in_to_out = params.in_to_out
        else:
            in_to_out = skip_to_outs = None

        # Deterministic inputs: zero variance everywhere.
        inpt_var = T.zeros_like(self.exprs['inpt'])

        p_dropouts = ([self.p_dropout_inpt]
                      + self.p_dropout_hiddens
                      + [self.p_dropout_hidden_to_out])

        # 0/1 flags selecting where variance propagation is active.
        hidden_var_scales_sqrt = [int(flag) for flag in self.use_varprop_at[:-1]]
        out_var_scale_sqrt = int(self.use_varprop_at[-1])

        self.exprs.update(varprop_rnn.exprs(
            self.exprs['inpt'], inpt_var, params.in_to_hidden, weights,
            params.hidden_to_out, biases,
            hidden_var_scales_sqrt, init_hiddens,
            rec_weights, params.out_bias, out_var_scale_sqrt,
            self.hidden_transfers, self.out_transfer,
            in_to_out=in_to_out, skip_to_outs=skip_to_outs,
            p_dropouts=p_dropouts, hotk_inpt=False))

        self.exprs.update(varprop_supervised_loss(
            self.exprs['target'], self.exprs['output'], self.loss, 2))
Example #6
0
File: rnn.py  Project: gabobert/breze
    def _init_exprs(self):
        """Assemble the symbolic expressions of this variance-propagating RNN.

        Creates input/target placeholders with Theano test values, optional
        importance weights, the recurrent forward pass built by
        ``varprop_rnn.exprs`` and the supervised loss expressions.
        """
        inpt = T.tensor3('inpt')
        target = T.tensor3('target')
        self.exprs = {'inpt': inpt, 'target': target}

        # Test values let Theano's compute_test_value mode shape-check the
        # graph while it is being built.
        floatx = theano.config.floatX
        inpt.tag.test_value = np.zeros((5, 2, self.n_inpt)).astype(floatx)
        target.tag.test_value = np.zeros((5, 2, self.n_output)).astype(floatx)

        if self.imp_weight:
            imp = T.tensor3('imp_weight')
            imp.tag.test_value = np.zeros((5, 2, self.n_output)).astype(floatx)
            self.exprs['imp_weight'] = imp

        params = self.parameters
        n_layers = len(self.n_hiddens)
        weights = [getattr(params, 'hidden_to_hidden_%i' % idx)
                   for idx in range(n_layers - 1)]
        rec_weights = [getattr(params, 'recurrent_%i' % idx)
                       for idx in range(n_layers)]
        init_means = [getattr(params, 'initial_hidden_means_%i' % idx)
                      for idx in range(n_layers)]
        # Square to keep variances non-negative; small epsilon for stability.
        init_vars = [getattr(params, 'initial_hidden_vars_%i' % idx) ** 2 + 1e-4
                     for idx in range(n_layers)]
        biases = [getattr(params, 'hidden_bias_%i' % idx)
                  for idx in range(n_layers)]

        if self.skip_to_out:
            skip_to_outs = [getattr(params, 'hidden_%i_to_out' % idx)
                            for idx in range(n_layers)]
            in_to_out = params.in_to_out
        else:
            in_to_out = skip_to_outs = None

        # Inputs are deterministic: zero variance.
        inpt_var = T.zeros_like(inpt)

        p_dropouts = ([self.p_dropout_inpt]
                      + self.p_dropout_hiddens
                      + [self.p_dropout_hidden_to_out])

        # 0/1 flags selecting where variance propagation is active.
        hidden_var_scales_sqrt = [int(flag) for flag in self.use_varprop_at[:-1]]
        out_var_scale_sqrt = int(self.use_varprop_at[-1])

        self.exprs.update(varprop_rnn.exprs(
            inpt, inpt_var, params.in_to_hidden, weights,
            params.hidden_to_out, biases,
            hidden_var_scales_sqrt, init_means, init_vars,
            rec_weights, params.out_bias, out_var_scale_sqrt,
            self.hidden_transfers, self.out_transfer,
            in_to_out=in_to_out, skip_to_outs=skip_to_outs,
            p_dropouts=p_dropouts, hotk_inpt=False))

        imp_weight = self.exprs['imp_weight'] if self.imp_weight else False
        self.exprs.update(varprop_supervised_loss(
            target, self.exprs['output'],
            self.loss, 2, imp_weight=imp_weight))