Example #1
class NoiseExtension(SimpleExtension, RNGMixin):
    def __init__(self, noise_parameters=None, **kwargs):
        kwargs.setdefault("before_training", True)
        kwargs.setdefault("after_training", True)
        self.noise_parameters = noise_parameters
        std = 1.0
        self.noise_init = IsotropicGaussian(std=std)
        theano_seed = self.rng.randint(np.iinfo(np.int32).max)
        self.theano_generator = UnitNoiseGenerator(
                std=std, theano_seed=theano_seed)
        self.noise_updates = OrderedDict(
            [(param, self.theano_generator.apply(param))
                for param in self.noise_parameters])
        super(NoiseExtension, self).__init__(**kwargs)

    def do(self, callback_name, *args):
        self.parse_args(callback_name, args)
        if callback_name == 'before_training':
            # Before training, initialize noise
            for p in self.noise_parameters:
                self.noise_init.initialize(p, self.rng)
            # And set up update to change noise on every update
            self.main_loop.algorithm.add_updates(self.noise_updates)
        if callback_name == 'after_training':
            # After training, zero noise again.
            for p in self.noise_parameters:
                v = p.get_value()
                p.set_value(np.zeros(v.shape, dtype=v.dtype))
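
Usage sketch (illustrative, not from the original project): cost, model_params, noise_params and stream are assumed placeholder names. The extension receives the shared variables that carry the noise and is attached to the Blocks MainLoop, whose algorithm it patches in before_training:

from blocks.algorithms import GradientDescent, Scale
from blocks.main_loop import MainLoop

# Assumed to exist already: cost (Theano scalar), model_params and
# noise_params (lists of shared variables), stream (a Fuel data stream).
algorithm = GradientDescent(cost=cost, parameters=model_params,
                            step_rule=Scale(learning_rate=0.01))
main_loop = MainLoop(
    algorithm=algorithm,
    data_stream=stream,
    extensions=[NoiseExtension(noise_parameters=noise_params)])
main_loop.run()
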
Example #2
 def __init__(self, noise_parameters=None, **kwargs):
     kwargs.setdefault("before_training", True)
     kwargs.setdefault("after_training", True)
     self.noise_parameters = noise_parameters
     std = 1.0
     self.noise_init = IsotropicGaussian(std=std)
     theano_seed = self.rng.randint(np.iinfo(np.int32).max)
     self.theano_generator = UnitNoiseGenerator(
             std=std, theano_seed=theano_seed)
     self.noise_updates = OrderedDict(
         [(param, self.theano_generator.apply(param))
             for param in self.noise_parameters])
     super(NoiseExtension, self).__init__(**kwargs)
Example #3
class CustomLSTMWeights(NdarrayInitialization):
    # Identity in the diagonal blocks and IsotropicGaussian everywhere else.
    def __init__(self, std=1, mean=0):
        self.gaussian_init = IsotropicGaussian(std=std, mean=mean)
        self.identity = Identity()

    def generate(self, rng, shape):
        if len(shape) != 2:
            raise ValueError("expected a 2D shape")
        assert shape[0] == shape[1]
        # Split the square matrix into a 4x4 grid of equal blocks
        # (one row/column of blocks per LSTM gate).
        size = shape[0] // 4
        assert size * 4 == shape[0]

        rows = []
        for i in range(4):
            blocks = []
            for j in range(4):
                if i == j:
                    square = self.identity.generate(rng, (size, size))
                else:
                    square = self.gaussian_init.generate(rng, (size, size))
                blocks.append(square)
            rows.append(numpy.hstack(blocks))
        result = numpy.vstack(rows)
        return result.astype(theano.config.floatX)
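
Sanity check for the initializer above (illustrative sketch; the 8x8 shape and the seed are arbitrary). Each diagonal block of the generated matrix should be an identity of size shape[0]/4, with small Gaussian values elsewhere:

import numpy
import theano

rng = numpy.random.RandomState(1)
W = CustomLSTMWeights(std=0.01).generate(rng, (8, 8))
assert W.shape == (8, 8)
assert W.dtype == theano.config.floatX
# The first two diagonal blocks are 2x2 identities.
assert numpy.allclose(W[:2, :2], numpy.eye(2))
assert numpy.allclose(W[2:4, 2:4], numpy.eye(2))
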
Example #4
 def __init__(self, std=1, mean=0):
     self.gaussian_init = IsotropicGaussian(std=std, mean=mean)
     self.identity = Identity()
Example #5
 def check_gaussian(rng, mean, std, shape):
     weights = IsotropicGaussian(std, mean).generate(rng, shape)
     assert weights.shape == shape
     assert weights.dtype == theano.config.floatX
     assert_allclose(weights.mean(), mean, atol=1e-2)
     assert_allclose(weights.std(), std, atol=1e-2)
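
An illustrative call of the helper above (the seed and shape are arbitrary choices, not from the original test). With 500x600 samples the empirical mean and standard deviation land well inside the 1e-2 tolerances:

import numpy

rng = numpy.random.RandomState(1234)
check_gaussian(rng, mean=0.0, std=1.0, shape=(500, 600))
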
Example #6
File: GruOp.py    Project: caomw/MLFun
import theano
from theano import tensor
from blocks.utils import shared_floatx_zeros

# PyCheckFactorGruOp is defined earlier in GruOp.py (not shown in this excerpt).
gruFac = PyCheckFactorGruOp()


if __name__ == "__main__":
    # Disable graph optimizations to make the custom op easier to debug.
    theano.config.optimizer = 'None'
    import numpy as np
    from blocks.initialization import IsotropicGaussian, Constant
    x = tensor.tensor3("inp_variable")
    n_hid = 512
    n_in = 512

    np.random.seed(1)
    rng = np.random
    init = IsotropicGaussian(0.02)
    #init = Constant(0.00)

    inp_to_state = shared_floatx_zeros((n_in, n_hid))
    init.initialize(inp_to_state, rng)
    inp_to_update = shared_floatx_zeros((n_in, n_hid))
    init.initialize(inp_to_update, rng)
    inp_to_reset = shared_floatx_zeros((n_in, n_hid))
    init.initialize(inp_to_reset, rng)

    inp_to_state_b = shared_floatx_zeros((n_hid,))
    init.initialize(inp_to_state_b, rng)
    inp_to_update_b = shared_floatx_zeros((n_hid,))
    init.initialize(inp_to_update_b, rng)
    inp_to_reset_b = shared_floatx_zeros((n_hid,))
    init.initialize(inp_to_reset_b, rng)
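
A short continuation of the __main__ block above (illustrative addition, not part of GruOp.py): initialize fills each shared variable in place with samples from a zero-mean Gaussian of std 0.02, which can be confirmed from the stored values:

    # Illustrative check, not in the original file: the empirical std
    # of every initialized weight matrix and bias should be near 0.02.
    print("inp_to_state std:", inp_to_state.get_value().std())
    print("inp_to_state_b std:", inp_to_state_b.get_value().std())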