Example #1
def test_linear_regression():
    inpt = T.matrix('inpt')
    inpt.tag.test_value = np.zeros((3, 10))
    target = T.matrix('target')
    target.tag.test_value = np.zeros((3, 2))

    l = AffineNonlinear(inpt, 10, 2, 'tanh')

    loss = squared(target, l.output).sum(1).mean()

    m = SupervisedModel(inpt=inpt, target=target, output=l.output, loss=loss,
                        parameters=l.parameters)

    f_predict = m.function([m.inpt], m.output)
    f_loss = m.function([m.inpt, m.target], m.loss)

    X = np.zeros((20, 10))
    Z = np.zeros((20, 2))

    Y = f_predict(X)

    assert Y.shape == (20, 2), 'output has wrong shape'

    l = f_loss(X, Z)

    assert np.array(l).ndim == 0, 'loss is not a scalar'
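
The regression test above, like the rest of the snippets on this page, leans on a few conventions that are easy to miss: np and T are the usual aliases for NumPy and theano.tensor, the model-building classes (AffineNonlinear, SupervisedModel, SupervisedLoss, ParameterSet, ...) appear to come from the breze library (their exact import paths depend on the installed version and are omitted here), and the tag.test_value assignments only do anything when Theano's test-value mechanism is switched on. A minimal setup sketch under those assumptions:

import numpy as np
import theano
import theano.tensor as T

# The .tag.test_value assignments only take effect when compute_test_value is
# enabled; 'raise' makes shape mismatches fail at graph-construction time.
theano.config.compute_test_value = 'raise'
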
    def _init_exprs(self):
        inpt = tensor5('inpt')
        inpt.tag.test_value = np.zeros((
            2, self.image_depth, self.n_channel,
            self.image_height, self.image_width
        ))

        target = T.matrix('target')
        target.tag.test_value = np.zeros((
            2, self.n_output
        ))

        parameters = ParameterSet()

        if self.dropout:
            self.p_dropout_inpt = .2
            self.p_dropout_hiddens = [.5] * len(self.n_hiddens_full)
        else:
            self.p_dropout_inpt = None
            self.p_dropout_hiddens = None

        self.conv_net = cnn3d.ConvNet3d(
            inpt=inpt, image_height=self.image_height,
            image_width=self.image_width, image_depth=self.image_depth,
            n_channel=self.n_channel, n_hiddens_conv=self.n_hiddens_conv,
            filter_shapes=self.filter_shapes, pool_shapes=self.pool_shapes,
            n_hiddens_full=self.n_hiddens_full,
            hidden_transfers_conv=self.hidden_transfers_conv,
            hidden_transfers_full=self.hidden_transfers_full, n_output=self.n_output,
            out_transfer=self.out_transfer,
            border_modes=self.border_modes,
            declare=parameters.declare,
            implementation=self.implementation,
            dropout=self.dropout, p_dropout_inpt=self.p_dropout_inpt,
            p_dropout_hiddens=self.p_dropout_hiddens
        )

        output = self.conv_net.output

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        if not self.dropout:
            loss_id = self.loss_ident
        else:
            loss_id = lookup(self.loss_ident, vp_loss)

        self.loss_layer = SupervisedLoss(
            target, output, loss=loss_id,
            imp_weight=imp_weight, declare=parameters.declare
        )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        self.exprs['imp_weight'] = imp_weight
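
The 3D convolution examples on this page call a tensor5 constructor that is not defined in the snippets themselves. Theano ships tensor3 and tensor4 helpers, and a 5-dimensional symbolic variable can be built the same way from a TensorType; a minimal sketch, with the dtype taken from theano.config.floatX (an assumption):

import theano
import theano.tensor as T

# 5D input type; the test value above uses the axis order
# (batch, depth, channels, height, width).
tensor5 = T.TensorType(theano.config.floatX, (False,) * 5)
inpt = tensor5('inpt')
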
Example #3
    def _init_exprs(self):
        inpt = T.matrix("inpt")
        target = T.matrix("target")
        parameters = ParameterSet()
        if theano.config.compute_test_value:
            inpt.tag.test_value = np.empty((2, self.n_inpt))
            target.tag.test_value = np.empty((2, self.n_output))

        self.mlp = neural.Mlp(
            inpt,
            self.n_inpt,
            self.n_hiddens,
            self.n_output,
            self.hidden_transfers,
            self.out_transfer,
            declare=parameters.declare,
        )

        if self.imp_weight:
            imp_weight = T.matrix("imp_weight")
            if theano.config.compute_test_value:
                imp_weight.tag.test_value = np.empty((2, self.n_output))
        else:
            imp_weight = None

        self.loss_layer = SupervisedLoss(
            target, self.mlp.output, loss=self.loss_ident,
            imp_weight=imp_weight, declare=parameters.declare
        )

        SupervisedModel.__init__(
            self, inpt=inpt, target=target, output=self.mlp.output,
            loss=self.loss_layer.total, parameters=parameters
        )
        self.exprs["imp_weight"] = imp_weight
Example #5
    def _init_exprs(self):
        inpt, target, imp_weight = self._init_inpts()

        if self.pooling:
            comp_dim = 1
        else:
            comp_dim = 2

        parameters = ParameterSet()

        self.rnn = neural.Rnn(inpt,
                              self.n_inpt,
                              self.n_hiddens,
                              self.n_output,
                              self.hidden_transfers,
                              self.out_transfer,
                              pooling=self.pooling,
                              declare=parameters.declare)

        self.loss_layer = simple.SupervisedLoss(target,
                                                self.rnn.output,
                                                loss=self.loss_ident,
                                                imp_weight=imp_weight,
                                                declare=parameters.declare,
                                                comp_dim=comp_dim)

        SupervisedModel.__init__(self,
                                 inpt=inpt,
                                 target=target,
                                 output=self.rnn.output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        if self.imp_weight:
            self.exprs['imp_weight'] = imp_weight
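
In the recurrent examples, comp_dim selects the axis along which the loss coordinates are compared: with pooling the network emits one output vector per sequence, so the comparison runs over axis 1; without pooling the time axis is kept and the comparison runs over axis 2. A shape sketch under the usual time-major (time, sample, feature) layout for sequence data, which is an assumption here, with placeholder sizes:

import numpy as np

n_timesteps, n_samples, n_inpt, n_output = 7, 4, 5, 3   # placeholder sizes

# Without pooling: sequences in, sequences out -> comp_dim == 2.
X_seq = np.zeros((n_timesteps, n_samples, n_inpt))
Z_seq = np.zeros((n_timesteps, n_samples, n_output))

# With pooling: sequences in, one vector per sequence out -> comp_dim == 1.
Z_pooled = np.zeros((n_samples, n_output))
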
Example #6
    def _init_exprs(self):
        inpt, target, imp_weight = self._init_inpts()

        parameters = ParameterSet()

        self.rnn = neural.FastDropoutRnn(
            inpt,
            self.n_inpt, self.n_hiddens, self.n_output,
            self.hidden_transfers, self.out_transfer,
            p_dropout_inpt=self.p_dropout_inpt,
            p_dropout_hiddens=self.p_dropout_hiddens,
            p_dropout_hidden_to_out=self.p_dropout_hidden_to_out,
            pooling=self.pooling,
            declare=parameters.declare)

        f_loss = lookup(self.loss_ident, vp_loss)
        output = T.concatenate(self.rnn.outputs, 2)
        self.loss_layer = simple.SupervisedLoss(
            target, output, loss=f_loss,
            imp_weight=imp_weight,
            declare=parameters.declare,
            comp_dim=2)

        SupervisedModel.__init__(
            self, inpt=inpt, target=target, output=output,
            loss=self.loss_layer.total,
            parameters=parameters)

        if self.imp_weight:
            self.exprs['imp_weight'] = imp_weight
Example #7
    def _init_exprs(self):
        inpt, target, imp_weight = self._init_inpts()

        if self.pooling:
            comp_dim = 1
        else:
            comp_dim = 2

        parameters = ParameterSet()

        self.rnn = neural.Rnn(
            inpt,
            self.n_inpt, self.n_hiddens, self.n_output,
            self.hidden_transfers, self.out_transfer,
            pooling=self.pooling,
            declare=parameters.declare)

        self.loss_layer = simple.SupervisedLoss(
            target, self.rnn.output, loss=self.loss_ident,
            imp_weight=imp_weight,
            declare=parameters.declare,
            comp_dim=comp_dim)

        SupervisedModel.__init__(
            self, inpt=inpt, target=target, output=self.rnn.output,
            loss=self.loss_layer.total,
            parameters=parameters)

        if self.imp_weight:
            self.exprs['imp_weight'] = imp_weight
Example #8
    def _init_exprs(self):
        inpt = T.tensor4('inpt')
        inpt.tag.test_value = np.zeros((
            2, self.n_channel, self.image_height, self.image_width))
        target = T.matrix('target')
        target.tag.test_value = np.zeros((
            2, self.n_output))
        parameters = ParameterSet()

        self.cnn = neural.SimpleCnn2d(
            inpt,
            self.image_height, self.image_width,
            self.n_channel, self.n_hiddens, self.filter_shapes, self.n_output,
            self.hidden_transfers, self.out_transfer,
            declare=parameters.declare)

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        self.loss_layer = SupervisedLoss(
            target, self.cnn.output, loss=self.loss_ident,
            imp_weight=imp_weight,
            declare=parameters.declare,
        )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=self.cnn.output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)
        self.exprs['imp_weight'] = imp_weight
    def _init_exprs(self):
        inpt = tensor5('inpt')
        #inpt.tag.test_value = np.zeros((
        #    2, self.image_depth, self.n_channel,
        #    self.image_height, self.image_width
        #))

        target = T.tensor3('target')
        #target.tag.test_value = np.zeros((
        #    2,self.image_depth*self.image_width*self.image_height, self.n_output
        #))

        parameters = ParameterSet()

        self.conv_net = cnn3d.FCN(
            inpt=inpt, image_height=self.image_height,
            image_width=self.image_width, image_depth=self.image_depth,
            n_channel=self.n_channel, n_hiddens_conv=self.n_hiddens_conv,
            hidden_transfers_conv=self.hidden_transfers_conv,
            n_hiddens_upconv=self.n_hiddens_upconv,
            hidden_transfers_upconv=self.hidden_transfers_upconv,
            d_filter_shapes=self.down_filter_shapes,
            u_filter_shapes=self.up_filter_shapes,
            down_pools=self.down_pools,
            up_pools=self.up_pools,
            out_transfer=self.out_transfer,
            b_modes_down=self.bm_down,
            b_modes_up=self.bm_up,
            implementation=self.implementation,
            strides_down=self.strides_d,
            up_factors=self.up_factors,
            declare=parameters.declare
        )

        output = self.conv_net.output

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        self.loss_layer = SupervisedLoss(
            target, output, loss=self.loss_ident,
            imp_weight=imp_weight, declare=parameters.declare
        )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=output,
                                 loss=self.loss_layer.sample_wise.mean(),
                                 parameters=parameters)

        self.exprs['imp_weight'] = imp_weight
    def _init_exprs(self):
        inpt = tensor5('inpt')
        target = T.tensor3('target')

        parameters = ParameterSet()

        self.conv_net = cnn3d.SequentialModel(
            inpt=inpt, image_height=self.image_height,
            image_width=self.image_width, image_depth=self.image_depth,
            n_channels=self.n_channels, out_transfer=self.out_transfer,
            layer_vars=self.layer_vars, using_bn=self.using_bn,
            declare=parameters.declare
        )

        output = self.conv_net.output

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        if self.loss_id is not None:
            self.loss_layer = SupervisedLoss(
                target, output, loss=self.loss_id,
                imp_weight=imp_weight, declare=parameters.declare
            )
        else:
            self._make_loss_layer(
                lv=self.loss_layer_def, target=target,
                imp_weight=imp_weight, declare=parameters.declare
            )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        self.exprs['imp_weight'] = imp_weight
        if self.regularize:
            self.exprs['true_loss'] = self.exprs['loss'].copy()
            if self.l2 is not None:
                l2_reg = T.sum(T.sqr(self.parameters.flat)) * self.l2 / 2
                self.exprs['loss'] += l2_reg
            if self.l1 is not None:
                l1_reg = T.sum(T.abs_(self.parameters.flat)) * self.l1
                self.exprs['loss'] += l1_reg
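
The block above keeps the unpenalized data term around as exprs['true_loss'] and adds an L2 (ridge) and/or L1 (lasso) penalty on the flat parameter vector to exprs['loss'], so training optimizes the regularized objective while the plain data loss can still be monitored. A sketch of how the two could be compared on a dummy batch, assuming model is a hypothetical instance of the class above constructed with regularize=True, using the same function helper as the test at the top of the page:

f_data_loss = model.function([model.inpt, model.target], model.exprs['true_loss'])
f_train_loss = model.function([model.inpt, model.target], model.exprs['loss'])
# For non-zero parameters, f_train_loss(X, Z) exceeds f_data_loss(X, Z) by
# l2 / 2 * sum(theta_i ** 2) + l1 * sum(|theta_i|).
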
Example #11
    def _init_exprs(self):
        inpt = T.tensor4('inpt')
        inpt.tag.test_value = np.zeros(
            (2, self.n_channel, self.image_height, self.image_width))
        target = T.matrix('target')
        target.tag.test_value = np.zeros((2, self.n_output))
        parameters = ParameterSet()

        self.lenet = neural.Lenet(
            inpt,
            self.image_height,
            self.image_width,
            self.n_channel,
            self.n_hiddens_conv,
            self.filter_shapes,
            self.pool_shapes,
            self.n_hiddens_full,
            self.hidden_transfers_conv,
            self.hidden_transfers_full,
            self.n_output,
            self.out_transfer,
            declare=parameters.declare,
        )

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        self.loss_layer = SupervisedLoss(
            target,
            self.lenet.output,
            loss=self.loss_ident,
            imp_weight=imp_weight,
            declare=parameters.declare,
        )

        SupervisedModel.__init__(self,
                                 inpt=inpt,
                                 target=target,
                                 output=self.lenet.output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)
        self.exprs['imp_weight'] = imp_weight
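
Both 2D convolutional examples (SimpleCnn2d and Lenet above) declare the input as a T.tensor4 whose test value is laid out as (batch, channels, height, width), with a (batch, n_output) target. A dummy batch with matching layout, using placeholder sizes:

import numpy as np
import theano

n_channel, image_height, image_width, n_output = 3, 32, 32, 10   # placeholder sizes
X = np.zeros((2, n_channel, image_height, image_width), dtype=theano.config.floatX)
Z = np.zeros((2, n_output), dtype=theano.config.floatX)
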
Example #12
    def _init_exprs(self):
        inpt = T.matrix('inpt')
        target = T.matrix('target')
        parameters = ParameterSet()

        if theano.config.compute_test_value:
            inpt.tag.test_value = np.empty((2, self.n_inpt))
            target.tag.test_value = np.empty((2, self.n_output))

        self.mlp = neural.FastDropoutMlp(inpt,
                                         self.n_inpt,
                                         self.n_hiddens,
                                         self.n_output,
                                         self.hidden_transfers,
                                         self.out_transfer,
                                         self.p_dropout_inpt,
                                         self.p_dropout_hiddens,
                                         declare=parameters.declare)

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
            if theano.config.compute_test_value:
                imp_weight.tag.test_value = np.empty((2, self.n_output))
        else:
            imp_weight = None

        output = T.concatenate(self.mlp.outputs, 1)

        self.loss_layer = SupervisedLoss(
            target,
            output,
            loss=lookup(self.loss_ident, vp_loss),
            imp_weight=imp_weight,
            declare=parameters.declare,
        )

        SupervisedModel.__init__(self,
                                 inpt=inpt,
                                 target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)
        self.exprs['imp_weight'] = imp_weight
    def _init_exprs(self):
        inpt = tensor5('inpt')
        inpt.tag.test_value = np.zeros((
            2, self.image_depth, self.n_channel,
            self.image_height, self.image_width
        ))

        target = T.matrix('target')
        target.tag.test_value = np.zeros((
            2, self.n_output
        ))

        parameters = ParameterSet()
       
        self.lenet = cnn3d.Lenet3d(
            inpt, self.image_height,
            self.image_width, self.image_depth,
            self.n_channel, self.n_hiddens_conv,
            self.filter_shapes, self.pool_shapes,
            self.n_hiddens_full, self.hidden_transfers_conv,
            self.hidden_transfers_full, self.n_output,
            self.out_transfer,
            declare=parameters.declare,
            implementation=self.implementation,
            pool=self.pool
        )

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        self.loss_layer = SupervisedLoss(
            target, self.lenet.output, loss=self.loss_ident,
            imp_weight=imp_weight, declare=parameters.declare
        )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=self.lenet.output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        self.exprs['imp_weight'] = imp_weight
Example #14
    def _init_exprs(self):
        inpt = T.matrix('inpt')
        target = T.matrix('target')
        parameters = ParameterSet()

        if theano.config.compute_test_value:
            inpt.tag.test_value = np.empty((2, self.n_inpt))
            target.tag.test_value = np.empty((2, self.n_output))

        self.predict_layer = AffineNonlinear(
            inpt, self.n_inpt, self.n_output, self.out_transfer_ident,
            use_bias=True, declare=parameters.declare)

        self.loss_layer = SupervisedLoss(
            target, self.predict_layer.output, loss=self.loss_ident,
            declare=parameters.declare,
        )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=self.predict_layer.output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)
Example #15
    def _init_exprs(self):
        inpt, target, imp_weight = self._init_inpts()

        parameters = ParameterSet()

        self.rnn = neural.FastDropoutRnn(
            inpt,
            self.n_inpt,
            self.n_hiddens,
            self.n_output,
            self.hidden_transfers,
            self.out_transfer,
            p_dropout_inpt=self.p_dropout_inpt,
            p_dropout_hiddens=self.p_dropout_hiddens,
            p_dropout_hidden_to_out=self.p_dropout_hidden_to_out,
            pooling=self.pooling,
            declare=parameters.declare)

        f_loss = lookup(self.loss_ident, vp_loss)
        output = T.concatenate(self.rnn.outputs, 2)
        self.loss_layer = simple.SupervisedLoss(target,
                                                output,
                                                loss=f_loss,
                                                imp_weight=imp_weight,
                                                declare=parameters.declare,
                                                comp_dim=2)

        SupervisedModel.__init__(self,
                                 inpt=inpt,
                                 target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        if self.imp_weight:
            self.exprs['imp_weight'] = imp_weight
Example #16
    def _init_exprs(self):
        inpt = T.matrix('inpt')
        target = T.matrix('target')
        parameters = ParameterSet()

        if theano.config.compute_test_value:
            inpt.tag.test_value = np.empty((2, self.n_inpt))
            target.tag.test_value = np.empty((2, self.n_output))

        self.mlp = neural.FastDropoutMlp(
            inpt,
            self.n_inpt, self.n_hiddens, self.n_output,
            self.hidden_transfers, self.out_transfer,
            self.p_dropout_inpt, self.p_dropout_hiddens,
            declare=parameters.declare)

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
            if theano.config.compute_test_value:
                imp_weight.tag.test_value = np.empty((2, self.n_output))
        else:
            imp_weight = None

        output = T.concatenate(self.mlp.outputs, 1)

        self.loss_layer = SupervisedLoss(
            target, output, loss=lookup(self.loss_ident, vp_loss),
            imp_weight=imp_weight,
            declare=parameters.declare,
        )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)
        self.exprs['imp_weight'] = imp_weight