Example #1
def test_random_index_generator():

    igen = RandomIndexGenerator(size = 5, n_indices=3, seed = get_theano_rng(seed = 1234)).compile()
    ixs1, = igen()
    assert np.all(ixs1 < 5)
    ixs2, = igen()
    assert np.all(ixs2 < 5) and not np.array_equal(ixs1, ixs2)  # Seed ensures that this is the case.
Example #2
    def __init__(self,
                 w,
                 b,
                 w_rev,
                 b_rev,
                 backward_activation='tanh',
                 forward_activation='tanh',
                 rng=None,
                 noise=1,
                 optimizer_constructor=lambda: SimpleGradientDescent(0.01),
                 cost_function=mean_squared_error,
                 use_bias=True):

        self.noise = noise
        self.rng = get_theano_rng(rng)
        self.w = theano.shared(w, name='w')
        self.b = theano.shared(b, name='b')
        self.w_rev = theano.shared(w_rev, name='w_rev')
        self.b_rev = theano.shared(b_rev, name='b_rev')
        self.backward_activation = get_named_activation_function(
            backward_activation) if backward_activation is not None else None
        self.forward_activation = get_named_activation_function(
            forward_activation)
        self.forward_optimizer = optimizer_constructor()
        self.backward_optimizer = optimizer_constructor()
        self.cost_function = cost_function
        self.use_bias = use_bias
Example #3
 def __init__(self, pq_pair, optimizer=AdaMax(alpha=0.01), rng=None):
     """
     :param pq_pair: An IVeriationalPair object
     :param optimizer: An IGradientOptimizer object
     :param rng: A random number generator, or seed.
     """
     self.rng = get_theano_rng(rng)
     self.pq_pair = pq_pair
     self.optimizer = optimizer
Example #4
 def __init__(self, pq_pair, optimizer = AdaMax(alpha = 0.01), rng = None):
     """
     :param pq_pair: An IVeriationalPair object
     :param optimizer: An IGradientOptimizer object
     :param rng: A random number generator, or seed.
     """
     self.rng = get_theano_rng(rng)
     self.pq_pair = pq_pair
     self.optimizer = optimizer
Example #5
def test_matrix_indices():

    igen = RandomIndexGenerator(size = (5, 2), n_indices=3, seed = get_theano_rng(seed = 1234)).compile()
    ixs1 = igen()
    assert len(ixs1) == 2
    rows, cols = ixs1
    assert np.all(rows < 5)
    assert np.all(cols < 2)
    ixs2 = igen()
    rows, cols = ixs2
    assert np.all(rows < 5)
    assert np.all(cols < 2)
Example #6
    def __init__(self, dropout_rate, rng = None, shape=None):
        """
        :param dropout_rate: The fraction of units to dropout (0, 1)
        :param rng: Random number generator
        :param shape: Optionally, the shape.
        """
        self.dropout_rate = dropout_rate
        self.rng = get_theano_rng(rng)
        self.shape = shape
Example #7
    def __init__(self, w, b, w_rev, b_rev, backward_activation = 'tanh', forward_activation = 'tanh', rng = None, noise = 1,
                 optimizer_constructor = lambda: SimpleGradientDescent(0.01), cost_function = mean_squared_error):

        self.noise = noise
        self.rng = get_theano_rng(rng)
        self.w = theano.shared(w, name = 'w')
        self.b = theano.shared(b, name = 'b')
        self.w_rev = theano.shared(w_rev, name = 'w_rev')
        self.b_rev = theano.shared(b_rev, name = 'b_rev')
        self.backward_activation = get_named_activation_function(backward_activation) if backward_activation is not None else None
        self.forward_activation = get_named_activation_function(forward_activation)
        self.forward_optimizer = optimizer_constructor()
        self.backward_optimizer = optimizer_constructor()
        self.cost_function = cost_function
Example #8
 def get_all_signals(self, input_, corruption_type = 'round', rng = None):
     scale = self.get_scale()
     scaled_input = input_*scale
     if corruption_type == 'round':
         epsilon = tt.round(scaled_input) - scaled_input
     elif corruption_type == 'randround':
         rng = get_theano_rng(rng)
         epsilon = tt.where(rng.uniform(scaled_input.shape)>(scaled_input % 1), tt.floor(scaled_input), tt.ceil(scaled_input))-scaled_input
         print('STOCH ROUNDING')
     elif corruption_type == 'rand':
         rng = get_theano_rng(1234)
         epsilon = rng.uniform(scaled_input.shape)-.5
     else:
         raise Exception('Unknown corruption_type: {}'.format(corruption_type))
     spikes = scaled_input + epsilon
     output = spikes / scale
     signals = dict(
         input=input_,
         scaled_input=scaled_input,
         spikes=spikes,
         epsilon=epsilon,
         output=output,
         )
     return signals
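The 'randround' branch above implements stochastic rounding: a scaled value is rounded up with probability equal to its fractional part, so the rounded result is an unbiased estimate of the original value. A quick NumPy check of that property (an illustration only, not part of the snippet above):

import numpy as np

rng = np.random.RandomState(0)
x = 2.3
# Same rule as the Theano code: floor when a uniform draw exceeds the fractional part, else ceil.
samples = np.where(rng.uniform(size=100000) > (x % 1), np.floor(x), np.ceil(x))
print(samples.mean())  # ~2.3, i.e. unbiased in expectation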
Example #9
 def __init__(self,
              generator,
              discriminator,
              noise_dim,
              optimizer,
              rng=None):
     """
     :param generator: Takes a (n_samples, noise_dim) array of Gaussian random noise, and creates a
         (n_samples, sample_dim) array of sample points.
     :param discriminator: Takes a (n_samples, sample_dim) array of sample points, and outputs a scalar probability of
         the sample being from the data, as opposed to the generator.
     """
     self.generator = generator
     self.discriminator = discriminator
     self.noise_dim = noise_dim
     self.optimizer = optimizer
     self.rng = get_theano_rng(rng)
Example #10
 def __init__(self,
              ws,
              bs=None,
              comp_weight=1e-6,
              optimizer=None,
              layerwise_scales=False,
              parametrization='log',
              hidden_activations='relu',
              output_activation='softmax',
              rng=None):
     """
     Learns how to rescale the units to be an optimal rounding network.
     :param ws: A list of (n_in, n_out) weight matrices
     :param bs: A list of bias vectors (same length as ws)
     :param comp_weight: The weight (lambda in the paper) given to computation
     :param optimizer: The optimizer (an IGradientOptimizer object)
     :param layerwise_scales: Make scales layerwise (as opposed to unitwise)
     :param parametrization: What space to parametrize in ('log', 'direct', or 'softplus')
     :param hidden_activations: Hidden activation functions (as a string, eg 'relu')
     :param output_activation: Output activation function
     :param rng: Random number generator or seed.
     """
     if optimizer is None:
         optimizer = get_named_optimizer('sgd', 0.01)
     if bs is None:
         bs = [np.zeros(w.shape[1]) for w in ws]
     self.ws = [create_shared_variable(w) for w in ws]
     self.bs = [create_shared_variable(b) for b in bs]
     self.comp_weight = tt.constant(comp_weight, dtype=theano.config.floatX)
     self.optimizer = optimizer
     self.hidden_activations = hidden_activations
     self.output_activation = output_activation
     scale_dims = [()] * len(ws) if layerwise_scales else [
         ws[0].shape[0]
     ] + [w.shape[1] for w in ws[:-1]]
     self.k_params = \
         [create_shared_variable(np.ones(d)) for d in scale_dims] if parametrization=='direct' else \
         [create_shared_variable(np.zeros(d)) for d in scale_dims] if parametrization=='log' else \
         [create_shared_variable(np.zeros(d)+np.exp(1)-1) for d in scale_dims] if parametrization=='softplus' else \
         bad_value(parametrization)
     self.parametrization = parametrization
     self.rng = get_theano_rng(rng)
Example #11
 def __init__(self, eta, rng=None):
     """
     :param eta: The learning rate
     """
     self._eta = eta
     self._rng = get_theano_rng(rng)
Example #12
 def __init__(self, size, n_indices, seed=None):
     BaseIndexGenerator.__init__(self, size)
     self._n_indices = n_indices
     self._rng = get_theano_rng(seed)
Example #13
 def __init__(self, size, n_indices, seed = None):
     BaseIndexGenerator.__init__(self, size)
     self._n_indices = n_indices
     self._rng = get_theano_rng(seed)
Example #14
    def get_generation_function(self,
                                maintain_state=True,
                                stochastic=True,
                                rng=None):
        """
        Return a symbolic function that generates a sequence (and updates its internal state).
        :param stochastic: True to sample a one-hot vector from the output.  False to simply reinsert the
            distribution vector.
        :param rng: A seed, numpy or theano random number generator
        :return: A symbolic function of the form:
            (outputs, updates) = generate(primer, n_steps)
        """
        h_init, c_init = self.lstm.get_initial_state()
        x_init = create_shared_variable(0, shape=self.lstm.n_inputs)
        rng = get_theano_rng(rng)

        @symbolic_multi
        def generate(primer, n_steps):
            """
            Generate a sequence of outputs, and update the internal state.

            primer: A sequence to prime on.  This will overwrite the OUTPUT at
                each time step.  Note: this means the first iteration will run
                off the last output from the previous call to generate.
            n_steps: Number of steps (after the primer) to run.
            return: A sequence of length n_steps.
            """
            n_primer_steps = primer.shape[0]
            n_total_steps = n_primer_steps + n_steps

            def do_step(i, x_, h_, c_):
                """
                i: The step number (int)
                x_: An input vector
                h_: A hiddens state vector
                c_: A memory cell vector
                """
                y_prob, h, c = self.step(x_, h_, c_)
                y_candidate = ifelse(
                    int(stochastic),
                    rng.multinomial(n=1, pvals=y_prob[None, :])[0].astype(
                        theano.config.floatX), y_prob)
                # y_candidate = ifelse(int(stochastic), rng.multinomial(n=1, pvals=y_prob.dimshuffle('x', 1))[0].astype(theano.config.floatX), y_prob)
                y = ifelse(
                    i < n_primer_steps, primer[i], y_candidate
                )  # Note: If you get error here, you just need to prime with something on first call.
                return y, h, c

            (x_gen, h_gen, c_gen), updates = theano.scan(
                do_step,
                sequences=[tt.arange(n_total_steps)],
                outputs_info=[x_init, h_init, c_init],
            )

            if maintain_state:
                updates += [(x_init, x_gen[-1]), (h_init, h_gen[-1]),
                            (c_init, c_gen[-1])]

            for var, val in updates.items():
                add_update(var, val)

            return x_gen[n_primer_steps:],

        return generate
Example #15
    def __init__(self,
                 x_dim,
                 z_dim,
                 encoder_hidden_sizes=[100],
                 decoder_hidden_sizes=[100],
                 hidden_activation='tanh',
                 w_init_mag=0.01,
                 binary_data=False,
                 optimizer=AdaMax(alpha=0.01),
                 rng=None,
                 gaussian_min_var=None):
        """
        :param x_dim: Dimensionality of the data
        :param z_dim: Dimensionality of the latent space
        :param encoder_hidden_sizes: A list of sizes of each hidden layer in the encoder (from X to Z)
        :param decoder_hidden_sizes: A list of sizes of each hidden layer in the decoder (from Z to X)
        :param hidden_activation: Activation function for all hidden layers
        :param w_init_mag: Magnitude of initial weights
        :param binary_data: Choose this if the data is binary.  You can also use this if the data is bounded in [0, 1] - then we can think
            of it as being the expected value.
        :param optimizer: An IGradientOptimizer object for doing parameter updates
            ... see plato.tools.optimization.optimizers
        :param rng: A random number generator or random seed.
        """
        np_rng = get_rng(rng)

        encoder_layer_sizes = [x_dim] + encoder_hidden_sizes
        self.encoder_hidden_layers = [
            Layer(w_init_mag * np_rng.randn(n_in, n_out),
                  nonlinearity=hidden_activation) for n_in, n_out in zip(
                      encoder_layer_sizes[:-1], encoder_layer_sizes[1:])
        ]
        self.encoder_mean_layer = Layer(
            w_init_mag * np_rng.randn(encoder_layer_sizes[-1], z_dim),
            nonlinearity='linear')
        self.encoder_log_var_layer = Layer(
            w_init_mag * np_rng.randn(encoder_layer_sizes[-1], z_dim),
            nonlinearity='linear')

        decoder_layer_sizes = [z_dim] + decoder_hidden_sizes
        self.decoder_hidden_layers = [
            Layer(w_init_mag * np_rng.randn(n_in, n_out),
                  nonlinearity=hidden_activation) for n_in, n_out in zip(
                      decoder_layer_sizes[:-1], decoder_layer_sizes[1:])
        ]
        if binary_data:
            self.decoder_mean_layer = Layer(
                w_init_mag * np_rng.randn(decoder_layer_sizes[-1], x_dim),
                nonlinearity='sigm')
        else:
            self.decoder_mean_layer = Layer(
                w_init_mag * np_rng.randn(decoder_layer_sizes[-1], x_dim),
                nonlinearity='linear')
            self.decoder_log_var_layer = Layer(
                w_init_mag * np_rng.randn(decoder_layer_sizes[-1], x_dim),
                nonlinearity='linear')

        self.rng = get_theano_rng(np_rng)
        self.binary_data = binary_data
        self.x_size = x_dim
        self.z_size = z_dim
        self.optimizer = optimizer
        self.gaussian_min_var = gaussian_min_var
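The separate mean and log-variance encoder layers above are what the standard VAE reparameterization trick calls for, where a latent sample is drawn as z = mean + exp(log_var / 2) * noise. A minimal sketch of that sampling step, using a Theano RNG such as self.rng above (the function and variable names here are hypothetical, not taken from this class):

import theano.tensor as tt

def sample_latent(rng, z_mean, z_log_var):
    # Reparameterization trick: draw standard Gaussian noise and shift/scale it
    # by the encoder's mean and standard deviation (exp of half the log-variance).
    noise = rng.normal(size=z_mean.shape)
    return z_mean + tt.exp(0.5 * z_log_var) * noise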
Example #16
 def __init__(self, w, b_vis, b_hid, rng):
     self.rng = get_theano_rng(rng)
     self.w = create_shared_variable(w)
     self.b_vis = create_shared_variable(b_vis)
     self.b_hid = create_shared_variable(b_hid)
Example #17
 def __init__(self, eta, rng = None):
     """
     :param eta: The learning rate
     """
     self._eta = eta
     self._rng = get_theano_rng(rng)
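Every example above funnels its rng or seed argument through get_theano_rng, which evidently accepts None, an integer seed, an existing Theano random stream (Example #1), or a NumPy RandomState (the variational autoencoder example). A rough sketch of what such a helper might look like (an assumption for illustration, not the library's actual implementation):

import numpy as np
from theano.tensor.shared_randomstreams import RandomStreams

def get_theano_rng_sketch(seed_or_rng=None):
    # Hypothetical re-implementation, for illustration only.
    if isinstance(seed_or_rng, RandomStreams):
        return seed_or_rng  # already a Theano RNG: pass it through
    if isinstance(seed_or_rng, np.random.RandomState):
        return RandomStreams(seed_or_rng.randint(2 ** 30))  # derive a seed from the NumPy RNG
    if seed_or_rng is None:
        seed_or_rng = np.random.randint(2 ** 30)  # no seed given: pick one arbitrarily
    return RandomStreams(seed_or_rng)  # integer seed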