예제 #1
0
파일: test_util.py 프로젝트: Wiebke/breze
def test_model_function_mode():
    """Ensure that compiled functions pick up the requested theano mode."""
    ps = ParameterSet()
    w = ps.declare((2, 3))
    ps.alloc()
    x = T.matrix()
    y = T.dot(x, w)
    ps.data[...] = np.random.standard_normal(ps.data.shape)

    model = Model()
    model.exprs = {'inpt': x, 'output': y}
    model.parameters = ps

    mode = theano.Mode()

    # Passing the mode explicitly should attach exactly that object.
    func = model.function(['inpt'], 'output', mode=mode)
    got = func.theano_func.maker.mode
    assert got is mode, 'wrong mode: %s' % got

    # Setting it on the model should be picked up by later compilations.
    model.mode = theano.Mode()
    func = model.function(['inpt'], 'output')
    got = func.theano_func.maker.mode

    # Maybe a weird way to compare modes, but it seems to get the job done.
    same = got.__dict__ == mode.__dict__
    assert same, 'wrong mode: (%s != %s)' % (got, mode)
예제 #2
0
def test_model_function_mode():
    """Compiled theano functions should respect the chosen compilation mode."""
    params = ParameterSet()
    weight_var = params.declare((2, 3))
    params.alloc()
    matrix_in = T.matrix()
    expr_out = T.dot(matrix_in, weight_var)
    params.data[...] = np.random.standard_normal(params.data.shape)

    model = Model()
    model.parameters = params
    model.exprs = {'inpt': matrix_in, 'output': expr_out}

    mode = theano.Mode()

    fn = model.function(['inpt'], 'output', mode=mode)
    actual_mode = fn.theano_func.maker.mode
    assert actual_mode is mode, 'wrong mode: %s' % actual_mode

    model.mode = theano.Mode()
    fn = model.function(['inpt'], 'output')
    actual_mode = fn.theano_func.maker.mode

    # Maybe a weird way to compare modes, but it seems to get the job done.
    equal = actual_mode.__dict__ == mode.__dict__
    assert equal, 'wrong mode: (%s != %s)' % (actual_mode, mode)
예제 #3
0
파일: test_util.py 프로젝트: Wiebke/breze
def test_parameter_set_init_overwrite():
    """Assigning to a declared slot must write through to the flat array."""
    ps = ParameterSet()
    mat = ps.declare((10, 10))
    ps.alloc()

    ps[mat] = np.eye(10)
    assert np.allclose(ps.data.reshape(ps[mat].shape), ps[mat])
예제 #4
0
def test_parameter_set_init_overwrite():
    """Writes via __setitem__ should be visible in the flat data vector."""
    params = ParameterSet()
    m = params.declare((10, 10))
    params.alloc()
    identity = np.eye(10)
    params[m] = identity
    assert np.allclose(params.data.reshape(params[m].shape), params[m])
예제 #5
0
파일: test_util.py 프로젝트: Wiebke/breze
def test_parameter_set_init_declare():
    """declare() followed by alloc() yields correctly sized views."""
    ps = ParameterSet()
    mat = ps.declare((10, 10))
    vec = ps.declare((10,))
    ps.alloc()

    assert ps.data.shape == (110,), 'wrong size for flat pars allocated'
    assert ps[mat].shape == (10, 10), ('wrong size for 2d array in pars '
                                       'allocated')
    assert ps[vec].shape == (10,), ('wrong size for 1d array in pars '
                                    'allocated')
예제 #6
0
파일: rnn.py 프로젝트: gabobert/breze
 def _init_pars(self):
     """Create and randomly initialise this RNN's ParameterSet.

     Builds the parameter specification from the variance-propagating RNN
     layout and fills the flat parameter array with standard-normal noise.
     """
     spec = varprop_rnn.parameters(self.n_inpt, self.n_hiddens,
                                   self.n_output, self.skip_to_out,
                                   self.hidden_transfers, self.out_transfer)
     self.parameters = ParameterSet(**spec)
     # Standard-normal init; cast to theano's configured float type.
     self.parameters.data[:] = np.random.standard_normal(
         self.parameters.data.shape).astype(theano.config.floatX)
예제 #7
0
    def _init_exprs(self):
        """Build the symbolic graph of the 3d convolutional network.

        Creates input/target variables (with test values for theano's
        shape checking), instantiates the ConvNet3d, attaches a supervised
        loss and initialises the parent ``SupervisedModel``.
        """
        inpt = tensor5('inpt')
        # Test value so compute_test_value can verify shapes at build time.
        inpt.tag.test_value = np.zeros((
            2, self.image_depth, self.n_channel,
            self.image_height, self.image_width
        ))

        target = T.matrix('target')
        target.tag.test_value = np.zeros((
            2, self.n_output
        ))

        parameters = ParameterSet()

        if self.dropout:
            # Fixed dropout rates: 20% on the input, 50% per fully
            # connected hidden layer.
            self.p_dropout_inpt = .2
            self.p_dropout_hiddens = [.5] * len(self.n_hiddens_full)
        else:
            self.p_dropout_inpt = None
            self.p_dropout_hiddens = None

        self.conv_net = cnn3d.ConvNet3d(
            inpt=inpt, image_height=self.image_height,
            image_width=self.image_width, image_depth=self.image_depth,
            n_channel=self.n_channel, n_hiddens_conv=self.n_hiddens_conv,
            filter_shapes=self.filter_shapes, pool_shapes=self.pool_shapes,
            n_hiddens_full=self.n_hiddens_full,
            hidden_transfers_conv=self.hidden_transfers_conv,
            hidden_transfers_full=self.hidden_transfers_full, n_output=self.n_output,
            out_transfer=self.out_transfer,
            border_modes=self.border_modes,
            declare=parameters.declare,
            implementation=self.implementation,
            dropout=self.dropout, p_dropout_inpt=self.p_dropout_inpt,
            p_dropout_hiddens=self.p_dropout_hiddens
        )

        output = self.conv_net.output

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        if not self.dropout:
            loss_id = self.loss_ident
        else:
            # With dropout, resolve the loss in the variance-propagation
            # loss module instead of using the identifier directly.
            loss_id = lookup(self.loss_ident, vp_loss)

        self.loss_layer = SupervisedLoss(
            target, output, loss=loss_id,
            imp_weight=imp_weight, declare=parameters.declare
        )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        self.exprs['imp_weight'] = imp_weight
예제 #8
0
파일: test_util.py 프로젝트: gabobert/breze
def test_model_function():
    """Implicit- and explicit-parameter functions must agree, as must all
    accepted ways of naming inputs and outputs."""
    params = ParameterSet(weights=(2, 3))
    x = T.matrix()
    y = T.dot(x, params.weights)
    params.data[...] = np.random.standard_normal(params.data.shape)

    model = Model()
    model.exprs = {'inpt': x, 'output': y}
    model.parameters = params

    f = model.function(['inpt'], 'output')
    fx = model.function(['inpt'], 'output', explicit_pars=True)

    np.random.seed(1010)
    test_inpt = np.random.random((10, 2)).astype(theano.config.floatX)
    r1 = f(test_inpt)
    r2 = fx(params.data, test_inpt)
    print(r1)
    print(r2)

    assert np.allclose(r1, r2), \
        'implicit pars and explicit pars have different output'

    # Inputs and outputs may be given by name or by symbolic expression.
    f1 = model.function(['inpt'], ['output'])
    f2 = model.function([x], ['output'])
    f3 = model.function([x], [y])
    f4 = model.function(['inpt'], [y])

    assert np.allclose(f1(test_inpt), f2(test_inpt)), "f1 and f2 don't agree"
    assert np.allclose(f1(test_inpt), f3(test_inpt)), "f1 and f3 don't agree"
    assert np.allclose(f1(test_inpt), f4(test_inpt)), "f1 and f4 don't agree"
    assert np.allclose(f2(test_inpt), f3(test_inpt)), "f2 and f3 don't agree"
    assert np.allclose(f2(test_inpt), f4(test_inpt)), "f2 and f4 don't agree"
    assert np.allclose(f3(test_inpt), f4(test_inpt)), "f3 and f4 don't agree"
예제 #9
0
파일: rnn.py 프로젝트: zhezhe123/breze
    def _init_exprs(self):
        """Wire up the RNN, its supervised loss and the parent model."""
        inpt, target, imp_weight = self._init_inpts()

        if self.pooling:
            # Pooled output collapses the time axis, so compare along axis 1.
            comp_dim = 1
        else:
            comp_dim = 2

        parameters = ParameterSet()

        self.rnn = neural.Rnn(inpt,
                              self.n_inpt,
                              self.n_hiddens,
                              self.n_output,
                              self.hidden_transfers,
                              self.out_transfer,
                              pooling=self.pooling,
                              declare=parameters.declare)

        self.loss_layer = simple.SupervisedLoss(target,
                                                self.rnn.output,
                                                loss=self.loss_ident,
                                                imp_weight=imp_weight,
                                                declare=parameters.declare,
                                                comp_dim=comp_dim)

        SupervisedModel.__init__(self,
                                 inpt=inpt,
                                 target=target,
                                 output=self.rnn.output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        # Only expose the importance weight when it is actually in use.
        if self.imp_weight:
            self.exprs['imp_weight'] = imp_weight
예제 #10
0
 def _init_pars(self):
     """Allocate and randomly initialise the autoencoder's parameters.

     The spec depends on whether encoder and decoder share (tied) weights.
     """
     spec = autoencoder.parameters(self.n_inpt,
                                   self.n_hiddens,
                                   tied_weights=self.tied_weights)
     self.parameters = ParameterSet(**spec)
     # Standard-normal init; cast to theano's configured float type.
     self.parameters.data[:] = np.random.standard_normal(
         self.parameters.data.shape).astype(theano.config.floatX)
예제 #11
0
def test_transfer_insize_outsize():
    """Transfer functions with in_size/out_size attributes should scale the
    recurrent weight and initial-state shapes accordingly."""
    inpt = T.tensor3('inpt')

    def identity(x):
        return x
    identity.in_size = 2
    identity.out_size = 3

    params = ParameterSet()
    rec = Recurrent(inpt, 4, identity, params.declare)
    params.alloc()

    shape = params[rec.weights].shape
    assert shape == (12, 8), 'Shape is %s' % str(shape)

    shape = params[rec.initial].shape
    assert shape == (12,), 'Shape is %s' % str(shape)
예제 #12
0
파일: test_util.py 프로젝트: gabobert/breze
def test_pickling_models():
    """A model holding a compiled function must survive pickling."""
    mat = T.matrix()
    model = Model()
    model.parameters = ParameterSet(bla=2)
    model.exprs = {'m_sqrd': model.parameters.bla.sum() * mat.sum()}
    model.f = model.function([mat], 'm_sqrd', explicit_pars=False)

    cPickle.dumps(model)
예제 #13
0
파일: test_util.py 프로젝트: Wiebke/breze
def test_parameter_set_data_change():
    """In-place updates through item access must be reflected in .data."""
    ps = ParameterSet()
    mat = ps.declare((10, 10))
    vec = ps.declare((10,))
    ps.alloc()

    ps[mat] = 0
    ps[vec] = 0
    assert (ps.data == 0).all(), repr(ps.data)

    ps[mat] += 1
    assert ps.data.sum() == 100

    ps[vec] += 2
    assert ps.data.sum() == 120

    ps.data *= 0.5
    assert ps.data.sum() == 60
예제 #14
0
def test_transfer_stateful():
    """Stateful transfer functions should expose their state expression."""
    sequences = T.tensor3('inpt')

    def transfer(state, x):
        return T.zeros_like(state) + 1, x
    transfer.stateful = True

    params = ParameterSet()
    layer = Recurrent(sequences, 4, transfer, params.declare)
    params.alloc()

    assert hasattr(layer, 'state')

    f = theano.function([params.flat, sequences], layer.state)

    state = f(params.data, np.zeros((3, 1, 4)))
    assert (state == 1).all(), 'hidden state has wrong value'
예제 #15
0
    def _init_pars(self):
        """Create this CNN's ParameterSet from its architecture spec."""
        last_image_shape = self.image_shapes[-1]
        # Flattened size of the final feature maps feeding the dense layers.
        resulting_image_size = last_image_shape[-1] * last_image_shape[-2]

        spec = cnn.parameters(
            self.n_inpt, self.n_hidden_conv, self.n_hidden_full, self.n_output,
            resulting_image_size, self.filter_shapes)

        self.parameters = ParameterSet(**spec)
예제 #16
0
def test_transfer_insize_outsize():
    """Recurrent must size weights and initial state by the transfer's
    declared in_size and out_size."""
    inpt = T.tensor3('inpt')

    def t(x):
        return x

    t.in_size = 2
    t.out_size = 3

    pars = ParameterSet()
    recurrent = Recurrent(inpt, 4, t, pars.declare)
    pars.alloc()

    weight_shape = pars[recurrent.weights].shape
    assert weight_shape == (12, 8), 'Shape is %s' % str(weight_shape)

    initial_shape = pars[recurrent.initial].shape
    assert initial_shape == (12, ), 'Shape is %s' % str(initial_shape)
예제 #17
0
def test_transfer_stateful():
    """A transfer marked stateful must give the Recurrent layer a state."""
    inpt = T.tensor3('inpt')

    def t(s, x):
        # Constant-one state regardless of the previous state.
        return T.zeros_like(s) + 1, x

    t.stateful = True

    pars = ParameterSet()
    layer = Recurrent(inpt, 4, t, pars.declare)
    pars.alloc()

    assert hasattr(layer, 'state')

    state_fn = theano.function([pars.flat, inpt], layer.state)
    result = state_fn(pars.data, np.zeros((3, 1, 4)))
    assert (result == 1).all(), 'hidden state has wrong value'
예제 #18
0
파일: test_util.py 프로젝트: gabobert/breze
def test_parameter_set_init():
    """Keyword construction allocates views of the right shapes."""
    ps = ParameterSet(matrix=(10, 10), vector=10)
    assert ps.data.shape == (110, ), 'wrong size for flat pars allocated'
    assert ps['matrix'].shape == (10, 10), ('wrong size for 2d array in pars '
                                            'allocated')
    assert ps['vector'].shape == (10, ), ('wrong size for 1d array in pars '
                                          'allocated')
예제 #19
0
파일: test_util.py 프로젝트: gabobert/breze
def test_nested_parameter_set():
    """Nested specs are reachable both by item and by attribute access."""
    spec = make_dictlist()
    params = ParameterSet(**spec)

    assert params['bar'].shape == (2, 2)
    assert params.bar.ndim == 2

    assert params.fank.fenk[0].ndim == 1
    assert params['fank']['funk'].shape == (2, 1)
예제 #20
0
파일: base.py 프로젝트: zhezhe123/breze
    def __init__(self, declare=None, name=None):
        """Create the layer.

        Parameters
        ----------
        declare : callable, optional
            Function used to declare parameters. If ``None``, a fresh
            ``ParameterSet`` is created and its ``declare`` is used.
        name : str, optional
            Name for this object, forwarded to ``make_name``.
        """
        self.make_name(name)

        if declare is None:
            # Own a private parameter set when none is shared with us.
            self.parameters = ParameterSet()
            self.declare = self.parameters.declare
        else:
            self.declare = declare

        # Build the symbolic forward pass immediately.
        self._forward()
예제 #21
0
파일: cnn.py 프로젝트: osdf/breze
    def _init_pars(self):
        """Create and randomly initialise the CNN's parameters."""
        last_image_shape = self.image_shapes[-1]
        # Flattened size of the final feature maps feeding the dense layers.
        resulting_image_size = last_image_shape[-1] * last_image_shape[-2]

        spec = cnn.parameters(self.n_inpt, self.n_hidden_conv,
                              self.n_hidden_full, self.n_output,
                              resulting_image_size, self.filter_shapes)

        self.parameters = ParameterSet(**spec)
        # Standard-normal init; cast to theano's configured float type.
        self.parameters.data[:] = np.random.standard_normal(
            self.parameters.data.shape).astype(theano.config.floatX)
예제 #22
0
파일: base.py 프로젝트: zhezhe123/breze
    def _init_exprs(self):
        """Assemble the variational auto-encoder's symbolic graph.

        Builds the recognition/prior/generative parts, the reconstruction
        loss and the KL divergence, then initialises ``UnsupervisedModel``
        with their sum as the total loss.
        """
        inpt, self.imp_weight = self._make_start_exprs()
        self.parameters = ParameterSet()

        n_dim = inpt.ndim

        self.vae = _VariationalAutoEncoder(inpt,
                                           self.n_inpt,
                                           self.n_latent,
                                           self.n_output,
                                           self.make_recog,
                                           self.make_prior,
                                           self.make_gen,
                                           getattr(self, 'make_cond', None),
                                           declare=self.parameters.declare)

        self.recog_sample = self.vae.recog_sample

        if self.use_imp_weight:
            # Make the trailing axis broadcastable so the weight can be
            # multiplied onto coordinate-wise losses.
            imp_weight = T.addbroadcast(self.imp_weight, n_dim - 1)
        else:
            imp_weight = False

        # Reconstruction loss: negative log likelihood of the input under
        # the generative model, summed over the trailing axis per sample.
        rec_loss = self.vae.gen.nll(inpt)
        self.rec_loss_sample_wise = rec_loss.sum(axis=n_dim - 1)
        self.rec_loss = self.rec_loss_sample_wise.mean()

        output = self.vae.gen.stt

        # Create the KL divergence part of the loss.
        n_dim = inpt.ndim
        self.kl_coord_wise = kl_div(self.vae.recog, self.vae.prior)

        if self.use_imp_weight:
            self.kl_coord_wise *= imp_weight
        self.kl_sample_wise = self.kl_coord_wise.sum(axis=n_dim - 1)
        self.kl = self.kl_sample_wise.mean()

        # FIXME: this does not work with convolutional aes
        # self.loss_sample_wise = self.kl_sample_wise + self.rec_loss_sample_wise
        loss = self.kl + self.rec_loss

        UnsupervisedModel.__init__(self,
                                   inpt=inpt,
                                   output=output,
                                   loss=loss,
                                   parameters=self.parameters,
                                   imp_weight=self.imp_weight)

        # TODO: this has to become transform_expr or sth like that
        # TODO: convert distribution parameters to latent stt
        #self.transform_expr_name = self.vae.latent
        self.transform_expr_name = None
예제 #23
0
    def _init_exprs(self):
        """Build the fully convolutional network's symbolic graph."""
        inpt = tensor5('inpt')
        #inpt.tag.test_value = np.zeros((
        #    2, self.image_depth, self.n_channel,
        #    self.image_height, self.image_width
        #))

        target = T.tensor3('target')
        #target.tag.test_value = np.zeros((
        #    2,self.image_depth*self.image_width*self.image_height, self.n_output
        #))

        parameters = ParameterSet()

        # Encoder/decoder FCN: a down-sampling conv stack followed by an
        # up-convolution stack.
        self.conv_net = cnn3d.FCN(
            inpt=inpt, image_height=self.image_height,
            image_width=self.image_width, image_depth=self.image_depth,
            n_channel=self.n_channel, n_hiddens_conv=self.n_hiddens_conv,
            hidden_transfers_conv=self.hidden_transfers_conv,
            n_hiddens_upconv=self.n_hiddens_upconv,
            hidden_transfers_upconv=self.hidden_transfers_upconv,
            d_filter_shapes=self.down_filter_shapes,
            u_filter_shapes=self.up_filter_shapes,
            down_pools=self.down_pools,
            up_pools=self.up_pools,
            out_transfer=self.out_transfer,
            b_modes_down=self.bm_down,
            b_modes_up=self.bm_up,
            implementation=self.implementation,
            strides_down=self.strides_d,
            up_factors=self.up_factors,
            declare=parameters.declare
        )

        output = self.conv_net.output

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        self.loss_layer = SupervisedLoss(
            target, output, loss=self.loss_ident,
            imp_weight=imp_weight, declare=parameters.declare
        )

        # Note: uses the mean of the sample-wise loss, not .total.
        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=output,
                                 loss=self.loss_layer.sample_wise.mean(),
                                 parameters=parameters)

        self.exprs['imp_weight'] = imp_weight
예제 #24
0
파일: test_util.py 프로젝트: gabobert/breze
def test_parameter_set_data_change():
    """Changes made through named views must be reflected in the flat
    ``.data`` array and vice versa."""
    pars = ParameterSet(matrix=(10, 10), vector=10)
    pars['matrix'][...] = 0
    pars['vector'][...] = 0
    assert (pars.data == 0).all(), repr(pars.data)

    pars['matrix'] += 1
    assert pars.data.sum() == 100

    pars['vector'] += 2
    assert pars.data.sum() == 120

    pars.data *= 0.5
    # BUG FIX: this line was a bare comparison expression and never
    # actually checked anything; it must be asserted.
    assert pars.data.sum() == 60
예제 #25
0
파일: test_util.py 프로젝트: Wiebke/breze
def test_model_function():
    """Functions with implicit and explicit parameters must coincide, and
    inputs/outputs may be addressed by name or by expression."""
    pars = ParameterSet()
    w = pars.declare((2, 3))
    pars.alloc()
    x = T.matrix()
    out = T.dot(x, w)
    pars.data[...] = np.random.standard_normal(pars.data.shape)

    model = Model()
    model.exprs = {'inpt': x, 'output': out}
    model.parameters = pars

    f = model.function(['inpt'], 'output')
    fx = model.function(['inpt'], 'output', explicit_pars=True)

    np.random.seed(1010)
    test_inpt = np.random.random((10, 2)).astype(theano.config.floatX)
    r1 = f(test_inpt)
    r2 = fx(pars.data, test_inpt)
    print(r1)
    print(r2)

    assert np.allclose(r1, r2), \
        'implicit pars and explicit pars have different output'

    # All four addressing styles must compile to equivalent functions.
    f1 = model.function(['inpt'], ['output'])
    f2 = model.function([x], ['output'])
    f3 = model.function([x], [out])
    f4 = model.function(['inpt'], [out])

    assert np.allclose(f1(test_inpt), f2(test_inpt)), "f1 and f2 don't agree"
    assert np.allclose(f1(test_inpt), f3(test_inpt)), "f1 and f3 don't agree"
    assert np.allclose(f1(test_inpt), f4(test_inpt)), "f1 and f4 don't agree"
    assert np.allclose(f2(test_inpt), f3(test_inpt)), "f2 and f3 don't agree"
    assert np.allclose(f2(test_inpt), f4(test_inpt)), "f2 and f4 don't agree"
    assert np.allclose(f3(test_inpt), f4(test_inpt)), "f3 and f4 don't agree"
예제 #26
0
    def _init_exprs(self):
        """Build the sequential 3d conv model, its loss and regularisers."""
        inpt = tensor5('inpt')
        target = T.tensor3('target')

        parameters = ParameterSet()

        self.conv_net = cnn3d.SequentialModel(
            inpt=inpt, image_height=self.image_height,
            image_width=self.image_width, image_depth=self.image_depth,
            n_channels=self.n_channels, out_transfer=self.out_transfer,
            layer_vars=self.layer_vars, using_bn=self.using_bn,
            declare=parameters.declare
        )

        output = self.conv_net.output

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        if self.loss_id is not None:
            # Simple case: a plain supervised loss on the output.
            self.loss_layer = SupervisedLoss(
                target, output, loss=self.loss_id,
                imp_weight=imp_weight, declare=parameters.declare
            )
        else:
            # Custom loss layer described by a layer-variable spec.
            self._make_loss_layer(
                lv=self.loss_layer_def, target=target,
                imp_weight=imp_weight, declare=parameters.declare
            )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        self.exprs['imp_weight'] = imp_weight
        if self.regularize:
            # Keep the unregularised loss around, then add L2/L1 penalties
            # computed on the flat parameter vector.
            self.exprs['true_loss'] = self.exprs['loss'].copy()
            if self.l2 is not None:
                l2_reg = T.sum(T.sqr(self.parameters.flat)) * self.l2 / 2
                self.exprs['loss'] += l2_reg
            if self.l1 is not None:
                l1_reg = T.sum(T.abs_(self.parameters.flat)) * self.l1
                self.exprs['loss'] += l1_reg
예제 #27
0
    def _init_exprs(self):
        """Set up sparse filtering: a bias-free linear layer plus its loss."""
        inpt = T.matrix('inpt')
        if theano.config.compute_test_value:
            inpt.tag.test_value = np.empty((2, self.n_inpt))

        P = self.parameters = ParameterSet()

        # Plain linear projection: identity transfer and no bias.
        self.layer = AffineNonlinear(inpt, self.n_inpt, self.n_output,
                                     'identity', declare=P.declare,
                                     use_bias=False)

        self.loss_layer = sparsefiltering.SparseFilteringLoss(
            self.layer.output, self.feature_transfer)

        super(SparseFiltering, self).__init__(
            inpt=inpt, output=self.layer.output, loss=self.loss_layer.total,
            parameters=P)
예제 #28
0
파일: cnn.py 프로젝트: zhezhe123/breze
    def _init_exprs(self):
        """Build the Lenet-style CNN, its supervised loss and the model."""
        inpt = T.tensor4('inpt')
        # Test values so compute_test_value can verify shapes at build time.
        inpt.tag.test_value = np.zeros(
            (2, self.n_channel, self.image_height, self.image_width))
        target = T.matrix('target')
        target.tag.test_value = np.zeros((2, self.n_output))
        parameters = ParameterSet()

        self.lenet = neural.Lenet(
            inpt,
            self.image_height,
            self.image_width,
            self.n_channel,
            self.n_hiddens_conv,
            self.filter_shapes,
            self.pool_shapes,
            self.n_hiddens_full,
            self.hidden_transfers_conv,
            self.hidden_transfers_full,
            self.n_output,
            self.out_transfer,
            declare=parameters.declare,
        )

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
        else:
            imp_weight = None

        self.loss_layer = SupervisedLoss(
            target,
            self.lenet.output,
            loss=self.loss_ident,
            imp_weight=imp_weight,
            declare=parameters.declare,
        )

        SupervisedModel.__init__(self,
                                 inpt=inpt,
                                 target=target,
                                 output=self.lenet.output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)
        self.exprs['imp_weight'] = imp_weight
예제 #29
0
def test_parameter_set_init_declare():
    """After alloc(), declared entries expose views of declared shapes."""
    pars = ParameterSet()

    matrix = pars.declare((10, 10))
    vector = pars.declare((10, ))
    pars.alloc()

    assert pars.data.shape == (110, ), 'wrong size for flat pars allocated'
    mat_shape = pars[matrix].shape
    assert mat_shape == (10, 10), ('wrong size for 2d array in pars '
                                   'allocated')
    vec_shape = pars[vector].shape
    assert vec_shape == (10, ), ('wrong size for 1d array in pars '
                                 'allocated')
예제 #30
0
파일: mlp.py 프로젝트: zhezhe123/breze
    def _init_exprs(self):
        """Build the fast-dropout MLP graph and initialise the model."""
        inpt = T.matrix('inpt')
        target = T.matrix('target')
        parameters = ParameterSet()

        if theano.config.compute_test_value:
            inpt.tag.test_value = np.empty((2, self.n_inpt))
            target.tag.test_value = np.empty((2, self.n_output))

        self.mlp = neural.FastDropoutMlp(inpt,
                                         self.n_inpt,
                                         self.n_hiddens,
                                         self.n_output,
                                         self.hidden_transfers,
                                         self.out_transfer,
                                         self.p_dropout_inpt,
                                         self.p_dropout_hiddens,
                                         declare=parameters.declare)

        if self.imp_weight:
            imp_weight = T.matrix('imp_weight')
            if theano.config.compute_test_value:
                imp_weight.tag.test_value = np.empty((2, self.n_output))
        else:
            imp_weight = None

        # NOTE(review): mlp.outputs appears to hold several output parts
        # joined along the feature axis (axis 1) — confirm against neural.
        output = T.concatenate(self.mlp.outputs, 1)

        # Loss is resolved in the variance-propagation loss module.
        self.loss_layer = SupervisedLoss(
            target,
            output,
            loss=lookup(self.loss_ident, vp_loss),
            imp_weight=imp_weight,
            declare=parameters.declare,
        )

        SupervisedModel.__init__(self,
                                 inpt=inpt,
                                 target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)
        self.exprs['imp_weight'] = imp_weight
예제 #31
0
    def _init_exprs(self):
        """Set up RIM: a softmax layer plus the regularised info-max loss."""
        inpt = T.matrix('inpt')
        if theano.config.compute_test_value:
            inpt.tag.test_value = np.empty((2, self.n_inpt))

        P = self.parameters = ParameterSet()

        self.layer = AffineNonlinear(inpt,
                                     self.n_inpt,
                                     self.n_cluster,
                                     'softmax',
                                     declare=P.declare)

        # The loss regularises the layer's weights with coefficient c_rim.
        self.loss_layer = RimLoss(self.layer.output, [self.layer.weights],
                                  self.c_rim)

        super(Rim, self).__init__(inpt=inpt,
                                  output=self.layer.output,
                                  loss=self.loss_layer.total,
                                  parameters=P)
예제 #32
0
def test_nested_exprs():
    """Model.function must resolve expressions addressed by nested keys."""
    ma = T.matrix()
    model = Model()
    model.parameters = ParameterSet()
    bla = model.parameters.declare(2)
    model.parameters.alloc()
    model.parameters[bla] = 1, 2
    model.exprs = {
        'norms': {
            'l1': abs(ma).sum(),
            'l2': T.sqrt((ma ** 2).sum()),
        },
        'ma_multiplied': [ma, 2 * ma],
        'bla': bla,
        'blubb': 1,
    }

    f = model.function([], 'bla', explicit_pars=False,
                       on_unused_input='ignore')
    assert np.allclose(f(), [1, 2])

    def compile_(key):
        # Address an expression through a (dict-key, sub-key/index) pair.
        return model.function([ma], key, explicit_pars=False,
                              on_unused_input='ignore')

    assert compile_(('norms', 'l1'))([[-1, 1]]) == 2
    assert np.allclose(compile_(('norms', 'l2'))([[-1, 1]]), np.sqrt(2.))
    assert np.allclose(compile_(('ma_multiplied', 0))([[-1, 1]]), [-1, 1])
    assert np.allclose(compile_(('ma_multiplied', 1))([[-1, 1]]), [-2, 2])
예제 #33
0
    def _init_exprs(self):
        """Build a single affine prediction layer with a supervised loss."""
        inpt = T.matrix('inpt')
        target = T.matrix('target')
        parameters = ParameterSet()

        if theano.config.compute_test_value:
            inpt.tag.test_value = np.empty((2, self.n_inpt))
            target.tag.test_value = np.empty((2, self.n_output))

        self.predict_layer = AffineNonlinear(
            inpt, self.n_inpt, self.n_output, self.out_transfer_ident,
            use_bias=True, declare=parameters.declare)

        self.loss_layer = SupervisedLoss(
            target, self.predict_layer.output, loss=self.loss_ident,
            declare=parameters.declare,
        )

        SupervisedModel.__init__(self, inpt=inpt, target=target,
                                 output=self.predict_layer.output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)
예제 #34
0
def test_parameter_set_data_change():
    """View updates and flat-array updates must stay in sync."""
    pars = ParameterSet()
    views = [pars.declare((10, 10)), pars.declare((10, ))]
    pars.alloc()
    for view in views:
        pars[view] = 0
    assert (pars.data == 0).all(), repr(pars.data)

    matrix, vector = views
    pars[matrix] += 1
    assert pars.data.sum() == 100

    pars[vector] += 2
    assert pars.data.sum() == 120

    pars.data *= 0.5
    assert pars.data.sum() == 60
예제 #35
0
파일: rnn.py 프로젝트: zhezhe123/breze
    def _init_exprs(self):
        """Build the fast-dropout RNN graph and initialise the model."""
        inpt, target, imp_weight = self._init_inpts()

        parameters = ParameterSet()

        self.rnn = neural.FastDropoutRnn(
            inpt,
            self.n_inpt,
            self.n_hiddens,
            self.n_output,
            self.hidden_transfers,
            self.out_transfer,
            p_dropout_inpt=self.p_dropout_inpt,
            p_dropout_hiddens=self.p_dropout_hiddens,
            p_dropout_hidden_to_out=self.p_dropout_hidden_to_out,
            pooling=self.pooling,
            declare=parameters.declare)

        # Loss is resolved in the variance-propagation loss module.
        f_loss = lookup(self.loss_ident, vp_loss)
        # NOTE(review): rnn.outputs appears to hold several output parts
        # joined along the feature axis (axis 2) — confirm against neural.
        output = T.concatenate(self.rnn.outputs, 2)
        self.loss_layer = simple.SupervisedLoss(target,
                                                output,
                                                loss=f_loss,
                                                imp_weight=imp_weight,
                                                declare=parameters.declare,
                                                comp_dim=2)

        SupervisedModel.__init__(self,
                                 inpt=inpt,
                                 target=target,
                                 output=output,
                                 loss=self.loss_layer.total,
                                 parameters=parameters)

        # Only expose the importance weight when it is actually in use.
        if self.imp_weight:
            self.exprs['imp_weight'] = imp_weight
예제 #36
0
파일: test_util.py 프로젝트: gabobert/breze
def test_nested_pars():
    """A nested spec's total size is the sum of all leaf sizes."""
    spec = {'a': [2, 3], 'b': {'a': (10, 10), 'b': (2, )}}
    parameter_set = ParameterSet(**spec)
    assert parameter_set.data.size == 2 + 3 + 10 * 10 + 2
예제 #37
0
파일: rnn.py 프로젝트: gabobert/breze
 def _init_pars(self):
     """Create and randomly initialise the LSTM network's parameters."""
     spec = lstm.parameters(self.n_inpt, self.n_hiddens, self.n_output)
     self.parameters = ParameterSet(**spec)
     # Standard-normal init; cast to theano's configured float type.
     self.parameters.data[:] = np.random.standard_normal(
         self.parameters.data.shape).astype(theano.config.floatX)