Example #1
 def __init__(self, theano_rng=None, seed=None, input_space=None):
     super(SampleBernoulli, self).__init__()
     assert theano_rng is None or seed is None
     theano_rng = make_theano_rng(theano_rng if theano_rng is not None else seed,
                                  2012+11+22, which_method='binomial')
     self.__dict__.update(locals())
     del self.self
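Most of the examples on this page share the same convention: the constructor accepts either a ready-made Theano RNG or an integer seed, asserts that the two are mutually exclusive, and lets make_theano_rng resolve whichever was given (falling back to a hard-coded default seed). A minimal sketch of that resolution logic, assuming the behaviour exercised by the test_theano_rng examples further down; this is an illustration, not the pylearn2 implementation itself:

from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams

def make_theano_rng_sketch(rng_or_seed=None, default_seed=None,
                           which_method=None):
    # Fall back to the default seed when neither an RNG nor a seed is given.
    if rng_or_seed is None:
        rng_or_seed = default_seed if default_seed is not None else 42
    # An integer is treated as a seed for a fresh stream; anything else is
    # assumed to already be a RandomStreams-like object and passed through.
    rng = (RandomStreams(rng_or_seed)
           if isinstance(rng_or_seed, int) else rng_or_seed)
    # which_method names the sampling methods the caller intends to use
    # (e.g. 'binomial'); check that the RNG actually provides them.
    methods = ([which_method] if isinstance(which_method, str)
               else (which_method or []))
    for method in methods:
        assert hasattr(rng, method)
    return rng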
Example #2
    def get_monitoring_channels(self, data):
        """
        .. todo::

            WRITEME
        """
        V = data
        theano_rng = make_theano_rng(None, 42, which_method="binomial")

        #TODO: re-enable this in the case where self.transformer
        #is a matrix multiply
        #norms = theano_norms(self.weights)

        H = self.mean_h_given_v(V)

        h = H.mean(axis=0)

        return { 'bias_hid_min' : T.min(self.bias_hid),
                 'bias_hid_mean' : T.mean(self.bias_hid),
                 'bias_hid_max' : T.max(self.bias_hid),
                 'bias_vis_min' : T.min(self.bias_vis),
                 'bias_vis_mean' : T.mean(self.bias_vis),
                 'bias_vis_max': T.max(self.bias_vis),
                 'h_min' : T.min(h),
                 'h_mean': T.mean(h),
                 'h_max' : T.max(h),
                 #'W_min' : T.min(self.weights),
                 #'W_max' : T.max(self.weights),
                 #'W_norms_min' : T.min(norms),
                 #'W_norms_max' : T.max(norms),
                 #'W_norms_mean' : T.mean(norms),
                 'reconstruction_error' : self.reconstruction_error(
                     V, theano_rng) }
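The dictionary returned above maps channel names to symbolic Theano expressions rather than numbers; whoever consumes the channels compiles them first. A hedged sketch of that step, with `model` standing in for any object exposing get_monitoring_channels (a hypothetical name, not part of the source):

import theano
import theano.tensor as T

V = T.matrix('V')
channels = model.get_monitoring_channels(V)  # `model` is hypothetical
names = sorted(channels.keys())
# One compiled function evaluating every channel on a batch of data.
monitor_fn = theano.function([V], [channels[n] for n in names])
values = dict(zip(names, monitor_fn(batch)))  # `batch`: a numpy array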
Example #3
    def __init__(self, sigma, mu, seed=42):
        """
        .. todo::

            WRITEME properly
        
        Parameters
        ----------
        sigma : numpy ndarray of shape (n, n)
        mu : numpy ndarray of shape (n,)
        seed : int
            Seed for the theano random number generator used to sample
            from this distribution.
        """
        self.sigma = sigma
        self.mu = mu
        if not (len(mu.shape) == 1):
            raise Exception('mu has shape ' + str(mu.shape) +
                            ' (it should be a vector)')

        self.sigma_inv = solve(self.sigma,
                               N.identity(mu.shape[0]),
                               sym_pos=True)
        self.L = cholesky(self.sigma)

        self.s_rng = make_theano_rng(seed, which_method='normal')

        #Compute logZ
        #log Z = log 1/( (2pi)^(-k/2) |sigma|^-1/2 )
        # = log 1 - log (2pi)^(-k/2) |sigma|^-1/2
        # = 0 - log (2pi)^(-k/2) - log |sigma|^-1/2
        # = (k/2) * log(2pi) + (1/2) * log |sigma|
        k = float(self.mu.shape[0])
        self.logZ = 0.5 * (k * N.log(2. * N.pi) + N.log(det(sigma)))
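Since the density of a Gaussian at its mean is exactly 1/Z, the normalizer computed above can be sanity-checked against scipy; a small self-contained check (scipy assumed available):

import numpy as np
from scipy.stats import multivariate_normal

k = 3
sigma = 2.0 * np.identity(k)
mu = np.zeros(k)
logZ = 0.5 * (k * np.log(2. * np.pi) + np.log(np.linalg.det(sigma)))
# At the mean the quadratic term vanishes, so log p(mu) = -log Z.
assert np.allclose(multivariate_normal(mu, sigma).logpdf(mu), -logZ)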
Example #4
 def __init__(self, num_arms, mean_std=1.0, std_std=1.0):
     self.rng = make_np_rng(None, [2013, 11, 12], which_method="randn")
     self.means = sharedX(self.rng.randn(num_arms) * mean_std)
     self.stds = sharedX(np.abs(self.rng.randn(num_arms) * std_std))
     self.theano_rng = make_theano_rng(None,
                                       self.rng.randint(2**16),
                                       which_method="normal")
Example #5
    def __init__(self, sigma, mu, seed=42):
        """
        .. todo::

            WRITEME properly
        
        Parameters
        ----------
        sigma : numpy ndarray of shape (n, n)
        mu : numpy ndarray of shape (n,)
        seed : int
            Seed for the theano random number generator used to sample
            from this distribution.
        """
        self.sigma = sigma
        self.mu = mu
        if not (len(mu.shape) == 1):
            raise Exception('mu has shape ' + str(mu.shape) +
                            ' (it should be a vector)')

        self.sigma_inv = solve(self.sigma, N.identity(mu.shape[0]),
                               sym_pos=True)
        self.L = cholesky(self.sigma)

        self.s_rng = make_theano_rng(seed, which_method='normal')

        #Compute logZ
        #log Z = log 1/( (2pi)^(-k/2) |sigma|^-1/2 )
        # = log 1 - log (2pi)^(-k/2) |sigma|^-1/2
        # = 0 - log (2pi)^(-k/2) - log |sigma|^-1/2
        # = (k/2) * log(2pi) + (1/2) * log |sigma|
        k = float(self.mu.shape[0])
        self.logZ = 0.5 * (k * N.log(2. * N.pi) + N.log(det(sigma)))
Example #6
 def __init__(self, num_chains, num_gibbs_steps, supervised=False,
              toronto_neg=False, theano_rng=None):
     self.__dict__.update(locals())
     del self.self
     self.theano_rng = make_theano_rng(theano_rng, 2012+10+14,
             which_method="binomial")
     assert supervised in [True, False]
Example #7
 def __init__(self, dbm):
     super(DBMSampler, self).__init__()
     self.theano_rng = make_theano_rng(None,
                                       2012 + 10 + 14,
                                       which_method="binomial")
     self.dbm = dbm
     assert len(self.dbm.hidden_layers) == 1
Example #8
    def __init__(self, init_beta, nvis):
        self.__dict__.update(locals())
        del self.self

        self.beta = sharedX(np.ones((nvis,))*init_beta)
        assert self.beta.ndim == 1

        self.s_rng = make_theano_rng(None, 17, which_method='normal')
Example #9
    def __init__(self, init_beta, nvis):
        self.__dict__.update(locals())
        del self.self

        self.beta = sharedX(np.ones((nvis, )) * init_beta)
        assert self.beta.ndim == 1

        self.s_rng = make_theano_rng(None, 17, which_method='normal')
Example #10
def rbm_ais_gibbs_for_v(rbmA_params, rbmB_params, beta, v_sample, seed=23098):
    """
    .. todo::

        WRITEME

    Parameters
    ----------
    rbmA_params : list
        Parameters of the baserate model (usually infinite temperature).
        List should be of length 3 and contain numpy.ndarrays
        corresponding to model parameters (weights, visbias, hidbias).

    rbmB_params : list
        Similar to `rbmA_params`, but for model at temperature 1.

    beta : theano.shared
        Scalar, represents inverse temperature at which we wish to sample from.

    v_sample : theano.shared
        Matrix of shape (n_runs, nvis), state of current particles.

    seed : int, optional
        Optional seed parameter for sampling from binomial units.
    """

    (weights_a, visbias_a, hidbias_a) = rbmA_params
    (weights_b, visbias_b, hidbias_b) = rbmB_params

    theano_rng = make_theano_rng(seed, which_method='binomial')

    # equation 15 (Salakhutdinov & Murray 2008)
    ph_a = nnet.sigmoid(
        (1 - beta) * (tensor.dot(v_sample, weights_a) + hidbias_a))
    ha_sample = theano_rng.binomial(size=(v_sample.shape[0], len(hidbias_a)),
                                    n=1,
                                    p=ph_a,
                                    dtype=config.floatX)

    # equation 16 (Salakhutdinov & Murray 2008)
    ph_b = nnet.sigmoid(beta * (tensor.dot(v_sample, weights_b) + hidbias_b))
    hb_sample = theano_rng.binomial(size=(v_sample.shape[0], len(hidbias_b)),
                                    n=1,
                                    p=ph_b,
                                    dtype=config.floatX)

    # equation 17 (Salakhutdinov & Murray 2008)
    pv_act = (1 - beta) * (tensor.dot(ha_sample, weights_a.T) + visbias_a) + \
             beta * (tensor.dot(hb_sample, weights_b.T) + visbias_b)
    pv = nnet.sigmoid(pv_act)
    new_v_sample = theano_rng.binomial(size=(v_sample.shape[0],
                                             len(visbias_b)),
                                       n=1,
                                       p=pv,
                                       dtype=config.floatX)

    return new_v_sample
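A hedged sketch of driving the function above: the RBM parameters are plain numpy arrays while `beta` and `v_sample` are Theano shared variables, so one Gibbs sweep compiles to a single update function. The shapes and values here are arbitrary, and the function's own module imports are assumed to be in scope:

import numpy as np
import theano
from theano import config

rng = np.random.RandomState(0)
nvis, nhid_a, nhid_b, n_runs = 5, 4, 6, 10
floatX = config.floatX
rbmA_params = [rng.randn(nvis, nhid_a).astype(floatX),  # weights
               np.zeros(nvis, dtype=floatX),            # visbias
               np.zeros(nhid_a, dtype=floatX)]          # hidbias
rbmB_params = [rng.randn(nvis, nhid_b).astype(floatX),
               np.zeros(nvis, dtype=floatX),
               np.zeros(nhid_b, dtype=floatX)]
beta = theano.shared(np.asarray(0.5, dtype=floatX))
v_sample = theano.shared(
    rng.binomial(1, 0.5, (n_runs, nvis)).astype(floatX))
new_v = rbm_ais_gibbs_for_v(rbmA_params, rbmB_params, beta, v_sample)
gibbs_step = theano.function([], [], updates=[(v_sample, new_v)])
gibbs_step()  # one in-place update of the persistent particles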
Example #11
 def __init__(self, theano_rng=None, seed=None,
              input_space=None, corruption_prob=0.1):
     super(RandomizeSNPs, self).__init__()
     assert theano_rng is None or seed is None
     theano_rng = make_theano_rng(theano_rng if theano_rng is not None else seed,
                                  2012+11+22, which_method='binomial')
     self.__dict__.update(locals())
     del self.self
     self.set_fn()
Example #12
 def __init__(self, layers, batch_size=None, input_space=None,
              input_source='features', nvis=None, seed=None,
              layer_name=None, **kwargs):
     input_source = self.add_mask_source(input_space, input_source)
     super(RNN, self).__init__(layers, batch_size, input_space,
                               input_source, nvis, seed, layer_name,
                               **kwargs)
     self.theano_rng = make_theano_rng(int(self.rng.randint(2 ** 30)),
                                       which_method=["normal", "uniform"])
Example #13
 def __init__(self, dim, radius):
     self.dim = dim
     self.radius = radius
     self.s_rng = make_theano_rng(None, 42, which_method='normal')
     log_C = ((float(self.dim) / 2.) * N.log(N.pi) -
              gammaln(1. + float(self.dim) / 2.))
     self.logZ = N.log(self.dim) + log_C + (self.dim - 1) * N.log(radius)
     assert not N.isnan(self.logZ)
     assert not N.isinf(self.logZ)
Example #14
 def __init__(self, dim, radius):
     self.dim = dim
     self.radius = radius
     self.s_rng = make_theano_rng(None, 42, which_method='normal')
     log_C = ((float(self.dim) / 2.) * N.log(N.pi) -
              gammaln(1. + float(self.dim) / 2.))
     self.logZ = N.log(self.dim) + log_C + (self.dim - 1) * N.log(radius)
     assert not N.isnan(self.logZ)
     assert not N.isinf(self.logZ)
Example #15
 def __init__(self, theano_rng=None, seed=None, input_space=None):
     super(SampleBernoulli, self).__init__()
     assert theano_rng is None or seed is None
     theano_rng = make_theano_rng(
         theano_rng if theano_rng is not None else seed,
         2012 + 11 + 22,
         which_method='binomial')
     self.__dict__.update(locals())
     del self.self
Example #16
    def __init__(self, nvis, nhid, act_enc, act_dec,
                 tied_weights=False, irange=1e-3, rng=9001):
        super(Autoencoder, self).__init__()
        assert nvis >= 0, "Number of visible units must be non-negative"
        assert nhid > 0, "Number of hidden units must be positive"

        self.input_space = VectorSpace(nvis)
        self.output_space = VectorSpace(nhid)

        # Save a few parameters needed for resizing
        self.nhid = nhid
        self.irange = irange
        self.tied_weights = tied_weights
        self.rng = make_np_rng(rng, which_method="randn")
        self._initialize_hidbias()
        if nvis > 0:
            self._initialize_visbias(nvis)
            self._initialize_weights(nvis)
        else:
            self.visbias = None
            self.weights = None

        seed = int(self.rng.randint(2 ** 30))

        # why a theano rng? should we remove it?
        self.s_rng = make_theano_rng(seed, which_method="uniform")

        if tied_weights and self.weights is not None:
            self.w_prime = self.weights.T
        else:
            self._initialize_w_prime(nvis)

        def _resolve_callable(conf, conf_attr):
            """
            .. todo::

                WRITEME
            """
            if conf[conf_attr] is None or conf[conf_attr] == "linear":
                return None
            # If it's a callable, use it directly.
            if hasattr(conf[conf_attr], "__call__"):
                return conf[conf_attr]
            elif (conf[conf_attr] in globals() and
                  hasattr(globals()[conf[conf_attr]], "__call__")):
                return globals()[conf[conf_attr]]
            elif hasattr(tensor.nnet, conf[conf_attr]):
                return getattr(tensor.nnet, conf[conf_attr])
            elif hasattr(tensor, conf[conf_attr]):
                return getattr(tensor, conf[conf_attr])
            else:
                raise ValueError("Couldn't interpret %s value: '%s'" %
                                 (conf_attr, conf[conf_attr]))

        self.act_enc = _resolve_callable(locals(), "act_enc")
        self.act_dec = _resolve_callable(locals(), "act_dec")
        self._params = [self.visbias, self.hidbias, self.weights]
        if not self.tied_weights:
            self._params.append(self.w_prime)
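The `seed = int(self.rng.randint(2 ** 30))` line above is another recurring pattern on this page: the Theano RNG's seed is drawn from the model's NumPy RNG, so the single `rng` constructor argument makes both generators reproducible. The pattern in isolation, as a sketch using Theano's MRG stream directly:

import numpy as np
from theano.sandbox.rng_mrg import MRG_RandomStreams

np_rng = np.random.RandomState(9001)
# Same numpy seed -> same derived Theano seed -> same sampled streams.
theano_rng = MRG_RandomStreams(int(np_rng.randint(2 ** 30)))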
Example #17
 def __init__(self, layers, batch_size=None, input_space=None,
              input_source='features', nvis=None, seed=None,
              layer_name=None, **kwargs):
     input_source = self.add_mask_source(input_space, input_source)
     self.use_monitoring_channels = kwargs.pop('use_monitoring_channels', 0)
     super(RNN, self).__init__(layers, batch_size, input_space,
                               input_source, nvis, seed, layer_name,
                               **kwargs)
     self.theano_rng = make_theano_rng(int(self.rng.randint(2 ** 30)),
                                       which_method=["normal", "uniform"])
Example #18
    def __init__(self, dbm):
        """
        .. todo::

            WRITEME
        """
        super(DBMSampler, self).__init__()
        self.theano_rng = make_theano_rng(None, 2012+10+14, which_method="binomial")
        self.dbm = dbm
        assert len(self.dbm.hidden_layers) == 1
Example #19
def test_gibbs_step_for_v():
    # Just tests that gibbs_step_for_v can be called
    # without crashing

    model = RBM(nvis=2, nhid=3)

    theano_rng = make_theano_rng(17, which_method='binomial')

    X = T.matrix()

    Y = model.gibbs_step_for_v(X, theano_rng)
Example #20
def test_gibbs_step_for_v():
    #Just tests that gibbs_step_for_v can be called
    #without crashing (protection against refactoring
    #damage, aren't interpreted languages great?)

    model = RBM(nvis = 2, nhid = 3)

    theano_rng = make_theano_rng(17, which_method='binomial')

    X = T.matrix()

    Y = model.gibbs_step_for_v(X, theano_rng)
Example #21
def rbm_ais_gibbs_for_v(rbmA_params, rbmB_params, beta, v_sample, seed=23098):
    """
    .. todo::

        WRITEME

    Parameters
    ----------
    rbmA_params : list
        Parameters of the baserate model (usually infinite temperature). List \
        should be of length 3 and contain numpy.ndarrays corresponding to \
        model parameters (weights, visbias, hidbias).

    rbmB_params : list
        Similar to `rbmA_params`, but for model at temperature 1.

    beta : theano.shared
        Scalar, represents inverse temperature at which we wish to sample from.

    v_sample : theano.shared
        Matrix of shape (n_runs, nvis), state of current particles.

    seed : int
        Optional seed parameter for sampling from binomial units.
    """

    (weights_a, visbias_a, hidbias_a) = rbmA_params
    (weights_b, visbias_b, hidbias_b) = rbmB_params

    theano_rng = make_theano_rng(seed, which_method='binomial')

    # equation 15 (Salakhutdinov & Murray 2008)
    ph_a = nnet.sigmoid((1 - beta) * (tensor.dot(v_sample, weights_a) +
                        hidbias_a))
    ha_sample = theano_rng.binomial(size=(v_sample.shape[0], len(hidbias_a)),
                                    n=1, p=ph_a, dtype=config.floatX)

    # equation 16 (Salakhutdinov & Murray 2008)
    ph_b = nnet.sigmoid(beta * (tensor.dot(v_sample, weights_b) + hidbias_b))
    hb_sample = theano_rng.binomial(size=(v_sample.shape[0], len(hidbias_b)),
                                    n=1, p=ph_b, dtype=config.floatX)

    # equation 17 (Salakhutdinov & Murray 2008)
    pv_act = (1 - beta) * (tensor.dot(ha_sample, weights_a.T) + visbias_a) + \
             beta * (tensor.dot(hb_sample, weights_b.T) + visbias_b)
    pv = nnet.sigmoid(pv_act)
    new_v_sample = theano_rng.binomial(
        size=(v_sample.shape[0], len(visbias_b)),
        n=1, p=pv, dtype=config.floatX
    )

    return new_v_sample
Example #22
    def __init__(self, nsteps, seed=42):
        """
            Parameters
            ----------
            nsteps: int
                number of Markov chain steps for the negative sample
            seed: int
                seed for the random number generator
        """

        super(CDk, self).__init__()
        self.nsteps = nsteps
        self.rng = make_theano_rng(seed, which_method='binomial')
Example #23
    def set_rng(self, rng):
        """
        Set up the theano random generator for this class.

        Parameters
        ----------
        rng : np.random.RandomState
            Random generator from which to generate the seed of
            the theano random generator
        """
        self.rng = rng
        self.theano_rng = make_theano_rng(int(self.rng.randint(2 ** 30)),
                                          which_method=["normal", "uniform"])
Example #24
    def __init__(self, nsteps, seed=42):
        """
            Parameters
            ----------
            nsteps: int
                number of Markov chain steps for the negative sample
            seed: int
                seed for the random number generator
        """

        super(CDk, self).__init__()
        self.nsteps = nsteps
        self.rng = make_theano_rng(seed, which_method='binomial')
Example #25
    def __init__(self, num_chains, num_gibbs_steps, supervised=False,
                 toronto_neg=False, theano_rng=None):
        """
        .. todo::

            WRITEME properly

            toronto_neg: If True, use a bit of mean field in the negative phase
                        Ruslan Salakhutdinov's matlab code does this.
        """
        self.__dict__.update(locals())
        del self.self
        self.theano_rng = make_theano_rng(theano_rng, 2012 + 10 + 14,
                                          which_method="binomial")
        assert supervised in [True, False]
Example #26
def test_theano_rng():
    """
        Tests that the four possible ways of creating
        a theano RNG give the same results with the same seed
    """

    rngs = [make_theano_rng(rng_or_seed=42, which_method='uniform'),
            make_theano_rng(rng_or_seed=RandomStreams(42),
                            which_method='uniform'),
            make_theano_rng(default_seed=42),
            make_theano_rng()]

    functions = [theano.function([], rng.uniform(size=(100,)))
                 for rng in rngs]

    random_numbers = functions[0]()
    equals = numpy.ones((100,))
    for function in functions[1:]:
        equal = random_numbers == function()
        equals *= equal

    assert equals.all()
Example #27
    def set_rng(self, rng):
        """
        Set up the theano random generator for this class.

        Parameters
        ----------
        rng : np.random.RandomState
            Random generator from which to generate the seed of
            the theano random generator
        """
        self.rng = rng
        self.theano_rng = make_theano_rng(int(self.rng.randint(2 ** 30)),
                                          which_method=["normal", "uniform"])
Example #28
 def __init__(self,
              theano_rng=None,
              seed=None,
              input_space=None,
              corruption_prob=0.1):
     super(RandomizeSNPs, self).__init__()
     assert theano_rng is None or seed is None
     theano_rng = make_theano_rng(
         theano_rng if theano_rng is not None else seed,
         2012 + 11 + 22,
         which_method='binomial')
     self.__dict__.update(locals())
     del self.self
     self.set_fn()
Example #29
def test_theano_rng():
    """
        Tests that the four possible ways of creating
        a theano RNG give the same results with the same seed
    """

    rngs = [
        make_theano_rng(rng_or_seed=42, which_method='uniform'),
        make_theano_rng(rng_or_seed=RandomStreams(42), which_method='uniform'),
        make_theano_rng(default_seed=42),
        make_theano_rng()
    ]

    functions = [
        theano.function([], rng.uniform(size=(100, ))) for rng in rngs
    ]

    random_numbers = functions[0]()
    equals = numpy.ones((100, ))
    for function in functions[1:]:
        equal = random_numbers == function()
        equals *= equal

    assert equals.all()
Example #30
    def set_vae(self, vae):
        """
        Assigns this `Prior` instance to a VAE.

        Parameters
        ----------
        vae : pylearn2.models.vae.VAE
            VAE to assign to
        """
        if self.get_vae() is not None:
            raise RuntimeError("this `Prior` instance already belongs to "
                               "another VAE")
        self.vae = vae
        self.rng = self.vae.rng
        self.theano_rng = make_theano_rng(int(self.rng.randint(2 ** 30)),
                                          which_method=["normal", "uniform"])
        self.batch_size = vae.batch_size
Example #31
    def set_vae(self, vae):
        """
        Assigns this `Prior` instance to a VAE.

        Parameters
        ----------
        vae : pylearn2.models.vae.VAE
            VAE to assign to
        """
        if self.get_vae() is not None:
            raise RuntimeError("this `Prior` instance already belongs to "
                               "another VAE")
        self.vae = vae
        self.rng = self.vae.rng
        self.theano_rng = make_theano_rng(int(self.rng.randint(2**30)),
                                          which_method=["normal", "uniform"])
        self.batch_size = vae.batch_size
Example #32
    def __init__(self, init_beta, nvis):
        """
        .. todo::

            WRITEME properly

        A conditional distribution that adds
        gaussian noise with diagonal precision
        matrix beta to another variable that it
        conditions on
        """

        self.__dict__.update(locals())
        del self.self

        self.beta = sharedX(np.ones((nvis, )) * init_beta)
        assert self.beta.ndim == 1

        self.s_rng = make_theano_rng(None, 17, which_method='normal')
Example #33
    def __init__(self, init_beta, nvis):
        """
        .. todo::

            WRITEME properly

        A conditional distribution that adds
        gaussian noise with diagonal precision
        matrix beta to another variable that it
        conditions on
        """

        self.__dict__.update(locals())
        del self.self

        self.beta = sharedX(np.ones((nvis,))*init_beta)
        assert self.beta.ndim == 1

        self.s_rng = make_theano_rng(None, 17, which_method='normal')
Example #34
    def __init__(self,
                 num_chains,
                 num_gibbs_steps,
                 supervised=False,
                 toronto_neg=False,
                 theano_rng=None):
        """
        .. todo::

            WRITEME properly

            toronto_neg: If True, use a bit of mean field in the negative phase
                        Ruslan Salakhutdinov's matlab code does this.
        """
        self.__dict__.update(locals())
        del self.self
        self.theano_rng = make_theano_rng(theano_rng,
                                          2012 + 10 + 14,
                                          which_method="binomial")
        assert supervised in [True, False]
Example #35
    def __init__(self, sigma, mu, seed=42):
        self.sigma = sigma
        self.mu = mu
        if not (len(mu.shape) == 1):
            raise Exception('mu has shape ' + str(mu.shape) +
                            ' (it should be a vector)')

        self.sigma_inv = solve(self.sigma, N.identity(mu.shape[0]),
                               sym_pos=True)
        self.L = cholesky(self.sigma)

        self.s_rng = make_theano_rng(seed, which_method='normal')

        #Compute logZ
        #log Z = log 1/( (2pi)^(-k/2) |sigma|^-1/2 )
        # = log 1 - log (2pi)^(-k/2) |sigma|^-1/2
        # = 0 - log (2pi)^(-k/2) - log |sigma|^-1/2
        # = (k/2) * log(2pi) + (1/2) * log |sigma|
        k = float(self.mu.shape[0])
        self.logZ = 0.5 * (k * N.log(2. * N.pi) + N.log(det(sigma)))
Example #36
    def __init__(self, sigma, mu, seed=42):
        self.sigma = sigma
        self.mu = mu
        if not (len(mu.shape) == 1):
            raise Exception('mu has shape ' + str(mu.shape) +
                            ' (it should be a vector)')

        self.sigma_inv = solve(self.sigma,
                               N.identity(mu.shape[0]),
                               sym_pos=True)
        self.L = cholesky(self.sigma)

        self.s_rng = make_theano_rng(seed, which_method='normal')

        #Compute logZ
        #log Z = log 1/( (2pi)^(-k/2) |sigma|^-1/2 )
        # = log 1 - log (2pi)^(-k/2) |sigma|^-1/2
        # = 0 - log (2pi)^(-k/2) - log |sigma|^-1/2
        # = (k/2) * log(2pi) + (1/2) * log |sigma|
        k = float(self.mu.shape[0])
        self.logZ = 0.5 * (k * N.log(2. * N.pi) + N.log(det(sigma)))
Example #37
    def get_decide_func(self):
        """
        Returns a theano function that takes a minibatch
        (num_examples, num_features) of contexts and returns
        a minibatch (num_examples, num_classes) of one-hot codes
        for actions.
        """

        X = T.matrix()
        y_hat = self.mlp.fprop(X)

        theano_rng = make_theano_rng(None,
                                     2013 + 11 + 20,
                                     which_method="multinomial")
        if self.stochastic:
            a = theano_rng.multinomial(pvals=y_hat, dtype='float32')
        else:
            mx = T.max(y_hat, axis=1).dimshuffle(0, 'x')
            a = T.eq(y_hat, mx)

        if self.epsilon is not None:
            a = theano_rng.multinomial(
                pvals=(1. - self.epsilon) * a +
                self.epsilon * T.ones_like(y_hat) / y_hat.shape[1],
                dtype='float32')

        if self.epsilon_stochastic is not None:
            a = theano_rng.multinomial(
                pvals=(1. - self.epsilon_stochastic) * a +
                self.epsilon_stochastic * y_hat,
                dtype='float32')

        logger.info("Compiling classifier agent learning function")
        t1 = time.time()
        f = function([X], a)
        t2 = time.time()

        logger.info("...done, took {0}".format(t2 - t1))

        return f
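The epsilon terms above mix the (near-)deterministic action `a` with a uniform or model-given distribution before the final multinomial draw. A small numeric illustration of the mixing, in plain numpy with arbitrary values:

import numpy as np

y_hat = np.array([[0.7, 0.2, 0.1]])
a = np.array([[1., 0., 0.]])  # greedy one-hot action
epsilon = 0.1
pvals = (1. - epsilon) * a + epsilon * np.ones_like(y_hat) / y_hat.shape[1]
# Rows stay valid multinomial parameters: [0.933..., 0.033..., 0.033...]
assert np.allclose(pvals.sum(axis=1), 1.)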
Example #38
    def __init__(self, rbm, particles, rng):
        """
        Construct a Sampler.

        Parameters
        ----------
        rbm : object
            An instance of `RBM` or a derived class, or one implementing \
            the `gibbs_step_for_v` interface.
        particles : numpy.ndarray
            An initial state for the set of persistent Markov chain particles \
            that will be updated at every step of learning.
        rng : RandomState object
            NumPy random number generator object used to initialize a \
            RandomStreams object used in training.
        """
        self.__dict__.update(rbm=rbm)

        rng = make_np_rng(rng, which_method="randn")
        seed = int(rng.randint(2 ** 30))
        self.s_rng = make_theano_rng(seed, which_method="binomial")
        self.particles = sharedX(particles, name='particles')
Example #39
    def redo_everything(self):
        """
        .. todo::

            WRITEME properly

        compiles learn_func if necessary
        makes new negative chains
        does not reset weights or biases
        TODO: figure out how to make the semantics of this cleaner / more in
        line with other models
        """

        #compile learn_func if necessary
        if self.autonomous:
            self.redo_theano()

        #make the negative chains
        if not self.use_cd:
            self.V_chains = self.make_chains(self.bias_vis)
            self.V_chains.name = 'dbm_V_chains'

            self.H_chains = [self.make_chains(bias_hid)
                             for bias_hid in self.bias_hid]
            for i, H_chain in enumerate(self.H_chains):
                H_chain.name = 'dbm_H[%d]_chain' % i

            if self.num_classes > 0:
                P = np.zeros((self.negative_chains, self.num_classes)) \
                    + T.nnet.softmax(self.bias_class)
                temp_theano_rng = make_theano_rng(87,
                                                  which_method='multinomial')
                sample_from = Sampler(temp_theano_rng, 'multinomial')
                values = function([], sample_from(P))()
                self.Y_chains = sharedX(values, 'Y_chains')
            else:
                self.Y_chains = None

        if hasattr(self, 'init_beta') and self.init_beta is not None:
            self.beta = sharedX(np.zeros(self.bias_vis.get_value().shape) +
                                self.init_beta, name='beta')
Example #40
    def get_monitoring_channels(self, data):
        """
        .. todo::

            WRITEME
        """
        V = data
        theano_rng = make_theano_rng(None, 42, which_method="binomial")

        H = self.mean_h_given_v(V)

        h = H.mean(axis=0)

        return { 'bias_hid_min' : T.min(self.bias_hid),
                 'bias_hid_mean' : T.mean(self.bias_hid),
                 'bias_hid_max' : T.max(self.bias_hid),
                 'bias_vis_min' : T.min(self.bias_vis),
                 'bias_vis_mean' : T.mean(self.bias_vis),
                 'bias_vis_max': T.max(self.bias_vis),
                 'h_min' : T.min(h),
                 'h_mean': T.mean(h),
                 'h_max' : T.max(h),
                 'reconstruction_error' : self.reconstruction_error(
                     V, theano_rng) }
Example #41
    def get_decide_func(self):
        """
        Returns a theano function that takes a minibatch
        (num_examples, num_features) of contexts and returns
        a minibatch (num_examples, num_classes) of one-hot codes
        for actions.
        """

        X = T.matrix()
        y_hat = self.mlp.fprop(X)

        theano_rng = make_theano_rng(None, 2013+11+20, which_method="multinomial")
        if self.stochastic:
            a = theano_rng.multinomial(pvals=y_hat, dtype='float32')
        else:
            mx = T.max(y_hat, axis=1).dimshuffle(0, 'x')
            a = T.eq(y_hat, mx)

        if self.epsilon is not None:
            a = theano_rng.multinomial(pvals=(1. - self.epsilon) * a +
                    self.epsilon * T.ones_like(y_hat) / y_hat.shape[1],
                    dtype='float32')

        if self.epsilon_stochastic is not None:
            a = theano_rng.multinomial(
                pvals=(1. - self.epsilon_stochastic) * a +
                self.epsilon_stochastic * y_hat,
                dtype='float32')

        print "Compiling classifier agent learning function"
        t1 = time.time()
        f = function([X], a)
        t2 = time.time()

        print "...done, took", t2 - t1

        return f
Example #42
    def __call__(self, X, Y=None, X_space=None):
        """
        .. todo::

            WRITEME

        Note that calling this repeatedly will yield the same random numbers each time.
        """
        assert X_space is not None
        self.called = True
        assert X.dtype == config.floatX
        theano_rng = make_theano_rng(getattr(self, 'seed', None),
                                     default_seed,
                                     which_method="binomial")

        if X.ndim == 2 and self.sync_channels:
            raise NotImplementedError()

        p = self.drop_prob

        if not hasattr(self, 'drop_prob_y') or self.drop_prob_y is None:
            yp = p
        else:
            yp = self.drop_prob_y

        batch_size = X_space.batch_size(X)

        if self.balance:
            flip = theano_rng.binomial(size=(batch_size, ),
                                       p=0.5,
                                       n=1,
                                       dtype=X.dtype)

            yp = flip * (1 - p) + (1 - flip) * p

            dimshuffle_args = ['x'] * X.ndim

            if X.ndim == 2:
                dimshuffle_args[0] = 0
                assert not self.sync_channels
            else:
                dimshuffle_args[X_space.axes.index('b')] = 0
                if self.sync_channels:
                    del dimshuffle_args[X_space.axes.index('c')]

            flip = flip.dimshuffle(*dimshuffle_args)

            p = flip * (1 - p) + (1 - flip) * p

        # size needs to have a fixed length at compile time or the
        # theano random number generator will be angry
        size = [X.shape[i] for i in xrange(X.ndim)]
        if self.sync_channels:
            del size[X_space.axes.index('c')]
        size = tuple(size)

        drop_mask = theano_rng.binomial(size=size, p=p, n=1, dtype=X.dtype)

        X_name = make_name(X, 'anon_X')
        drop_mask.name = 'drop_mask(%s)' % X_name

        if Y is not None:
            assert isinstance(yp, float) or yp.ndim < 2
            drop_mask_Y = theano_rng.binomial(size=(batch_size, ),
                                              p=yp,
                                              n=1,
                                              dtype=X.dtype)
            assert drop_mask_Y.ndim == 1
            Y_name = make_name(Y, 'anon_Y')
            drop_mask_Y.name = 'drop_mask_Y(%s)' % Y_name
            #drop_mask = Print('drop_mask',attrs=['sum'])(drop_mask)
            #drop_mask_Y = Print('drop_mask_Y',attrs=['sum'])(drop_mask_Y)
            return drop_mask, drop_mask_Y

        return drop_mask
Example #43
    def get_fixed_var_descr(self, model, data):
        """
        .. todo::

            WRITEME
        """

        X, Y = data

        assert Y is not None

        batch_size = model.batch_size

        drop_mask_X = sharedX(
            model.get_input_space().get_origin_batch(batch_size))
        drop_mask_X.name = 'drop_mask'

        X_space = model.get_input_space()

        updates = OrderedDict()
        rval = FixedVarDescr()
        inputs = [X, Y]

        if not self.supervised:
            update_X = self.mask_gen(X, X_space=X_space)
        else:
            drop_mask_Y = sharedX(np.ones(batch_size, ))
            drop_mask_Y.name = 'drop_mask_Y'
            update_X, update_Y = self.mask_gen(X, Y, X_space)
            updates[drop_mask_Y] = update_Y
            rval.fixed_vars['drop_mask_Y'] = drop_mask_Y
        if self.mask_gen.sync_channels:
            n = update_X.ndim
            assert n == drop_mask_X.ndim - 1
            update_X.name = 'raw_update_X'
            zeros_like_X = T.zeros_like(X)
            zeros_like_X.name = 'zeros_like_X'
            update_X = zeros_like_X + update_X.dimshuffle(0, 1, 2, 'x')
            update_X.name = 'update_X'
        updates[drop_mask_X] = update_X

        rval.fixed_vars['drop_mask'] = drop_mask_X

        if hasattr(model.inference_procedure, 'V_dropout'):
            include_prob = model.inference_procedure.include_prob
            include_prob_V = model.inference_procedure.include_prob_V
            include_prob_Y = model.inference_procedure.include_prob_Y

            theano_rng = make_theano_rng(None,
                                         2012 + 10 + 20,
                                         which_method="binomial")
            for elem in flatten([model.inference_procedure.V_dropout]):
                updates[elem] = theano_rng.binomial(
                    p=include_prob_V, size=elem.shape, dtype=elem.dtype,
                    n=1) / include_prob_V
            if "Softmax" in str(type(model.hidden_layers[-1])):
                hid = model.inference_procedure.H_dropout[:-1]
                y = model.inference_procedure.H_dropout[-1]
                updates[y] = theano_rng.binomial(
                    p=include_prob_Y, size=y.shape, dtype=y.dtype,
                    n=1) / include_prob_Y
            else:
                hid = model.inference_procedure.H_dropout
            for elem in flatten(hid):
                updates[elem] = theano_rng.binomial(
                    p=include_prob, size=elem.shape, dtype=elem.dtype,
                    n=1) / include_prob

        rval.on_load_batch = [utils.function(inputs, updates=updates)]

        return rval
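Each dropout mask above is divided by its inclusion probability, which keeps the expectation of a masked unit equal to its unmasked value (the usual inverted-dropout rescaling). A quick numpy check of that property:

import numpy as np

rng = np.random.RandomState(0)
include_prob = 0.5
mask = rng.binomial(n=1, p=include_prob, size=1000000) / include_prob
# E[mask] = include_prob * (1 / include_prob) = 1
assert abs(mask.mean() - 1.0) < 1e-2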
Example #44
    def __init__(self,
                 nvis,
                 nhid,
                 act_enc,
                 act_dec,
                 tied_weights=False,
                 irange=1e-3,
                 rng=9001):
        super(Autoencoder, self).__init__()
        assert nvis >= 0, "Number of visible units must be non-negative"
        assert nhid > 0, "Number of hidden units must be positive"

        self.input_space = VectorSpace(nvis)
        self.output_space = VectorSpace(nhid)

        # Save a few parameters needed for resizing
        self.nhid = nhid
        self.irange = irange
        self.tied_weights = tied_weights
        self.rng = make_np_rng(rng, which_method="randn")
        self._initialize_hidbias()
        if nvis > 0:
            self._initialize_visbias(nvis)
            self._initialize_weights(nvis)
        else:
            self.visbias = None
            self.weights = None

        seed = int(self.rng.randint(2**30))

        # why a theano rng? should we remove it?
        self.s_rng = make_theano_rng(seed, which_method="uniform")

        if tied_weights and self.weights is not None:
            self.w_prime = self.weights.T
        else:
            self._initialize_w_prime(nvis)

        def _resolve_callable(conf, conf_attr):
            """
            .. todo::

                WRITEME
            """
            if conf[conf_attr] is None or conf[conf_attr] == "linear":
                return None
            # If it's a callable, use it directly.
            if hasattr(conf[conf_attr], '__call__'):
                return conf[conf_attr]
            elif (conf[conf_attr] in globals()
                  and hasattr(globals()[conf[conf_attr]], '__call__')):
                return globals()[conf[conf_attr]]
            elif hasattr(tensor.nnet, conf[conf_attr]):
                return getattr(tensor.nnet, conf[conf_attr])
            elif hasattr(tensor, conf[conf_attr]):
                return getattr(tensor, conf[conf_attr])
            else:
                raise ValueError("Couldn't interpret %s value: '%s'" %
                                 (conf_attr, conf[conf_attr]))

        self.act_enc = _resolve_callable(locals(), 'act_enc')
        self.act_dec = _resolve_callable(locals(), 'act_dec')
        self._params = [
            self.visbias,
            self.hidbias,
            self.weights,
        ]
        if not self.tied_weights:
            self._params.append(self.w_prime)
Example #45
    def __init__(
        self,
        nmap,
        input_space=None,
        nvisx=None,
        nvisy=None,
        input_source=("featuresX", "featuresY"),
        act_enc=None,
        act_dec=None,
        irange=1e-3,
        rng=9001,
    ):
        Block.__init__(self)
        Model.__init__(self)
        assert nmap > 0, "Number of mapping units must be positive"

        if ((nvisx is not None and nvisy is not None)
                or input_space is not None):
            if nvisx is not None and nvisy is not None:
                assert nvisx > 0, "Number of visx units must be positive"
                assert nvisy > 0, "Number of visy units must be positive"
                input_space = CompositeSpace([VectorSpace(nvisx), VectorSpace(nvisy)])
                self.nvisx = nvisx
                self.nvisy = nvisy
            elif isinstance(input_space.components[0], Conv2DSpace):
                rx, cx = input_space.components[0].shape
                chx = input_space.components[0].num_channels
                ry, cy = input_space.components[1].shape
                chy = input_space.components[1].num_channels
                self.nvisx = rx * cx * chx
                self.nvisy = ry * cy * chy
            else:
                raise NotImplementedError(str(type(self)) + " does not support that input_space.")
        # Check whether the input_space and input_source structures match
        try:
            DataSpecsMapping((input_space, input_source))
        except ValueError:
            raise ValueError(
                "The structures of `input_space`, %s, and "
                "`input_source`, %s do not match. If you "
                "specified a CompositeSpace as an input, "
                "be sure to specify the data sources as well." % (input_space, input_source)
            )

        self.input_space = input_space
        self.input_source = input_source
        self.nmap = nmap
        self.output_space = VectorSpace(self.nmap)
        self._initialize_visbiasX(self.nvisx)  # self.visbiasX
        self._initialize_visbiasY(self.nvisy)  # self.visbiasY
        self._initialize_mapbias()  # self.mapbias
        self.irange = irange
        self.rng = make_np_rng(rng, which_method="randn")
        seed = int(self.rng.randint(2 ** 30))
        self.s_rng = make_theano_rng(seed, which_method="uniform")

        def _resolve_callable(conf, conf_attr):
            if conf[conf_attr] is None or conf[conf_attr] == "linear":
                return None
            # If it's a callable, use it directly.
            if hasattr(conf[conf_attr], "__call__"):
                return conf[conf_attr]
            elif conf[conf_attr] in globals() and hasattr(globals()[conf[conf_attr]], "__call__"):
                return globals()[conf[conf_attr]]
            elif hasattr(tensor.nnet, conf[conf_attr]):
                return getattr(tensor.nnet, conf[conf_attr])
            elif hasattr(tensor, conf[conf_attr]):
                return getattr(tensor, conf[conf_attr])
            else:
                raise ValueError("Couldn't interpret %s value: '%s'" % (conf_attr, conf[conf_attr]))

        self.act_enc = _resolve_callable(locals(), "act_enc")
        self.act_dec = _resolve_callable(locals(), "act_dec")
Example #46
 def __init__(self, nsteps, seed=42):
     super(CDk, self).__init__()
     self.nsteps = nsteps
     self.rng = make_theano_rng(seed, which_method='binomial')
Example #47
    def __init__(self, nvis, nhid, act_enc, act_dec,
                 tied_weights=False, irange=1e-3, rng=9001):
        """
        Allocate an autoencoder object.

        Parameters
        ----------
        nvis : int
            Number of visible units (input dimensions) in this model. \
            A value of 0 indicates that this block will be left partially \
            initialized until later (e.g., when the dataset is loaded and \
            its dimensionality is known).  Note: There is currently a bug \
            when nvis is set to 0. For now, you should not set nvis to 0.
        nhid : int
            Number of hidden units in this model.
        act_enc : callable or string
            Activation function (elementwise nonlinearity) to use for the \
            encoder. Strings (e.g. 'tanh' or 'sigmoid') will be looked up as \
            functions in `theano.tensor.nnet` and `theano.tensor`. Use `None` \
            for linear units.
        act_dec : callable or string
            Activation function (elementwise nonlinearity) to use for the \
            decoder. Strings (e.g. 'tanh' or 'sigmoid') will be looked up as \
            functions in `theano.tensor.nnet` and `theano.tensor`. Use `None` \
            for linear units.
        tied_weights : bool, optional
            If `False` (default), a separate set of weights will be allocated \
            (and learned) for the encoder and the decoder function. If \
            `True`, the decoder weight matrix will be constrained to be equal \
            to the transpose of the encoder weight matrix.
        irange : float, optional
            Width of the initial range around 0 from which to sample initial \
            values for the weights.
        rng : RandomState object or seed
            NumPy random number generator object (or seed to create one) used \
            to initialize the model parameters.
        """
        super(Autoencoder, self).__init__()
        assert nvis >= 0, "Number of visible units must be non-negative"
        assert nhid > 0, "Number of hidden units must be positive"

        self.input_space = VectorSpace(nvis)
        self.output_space = VectorSpace(nhid)

        # Save a few parameters needed for resizing
        self.nhid = nhid
        self.irange = irange
        self.tied_weights = tied_weights
        self.rng = make_np_rng(rng, which_method="randn")
        self._initialize_hidbias()
        if nvis > 0:
            self._initialize_visbias(nvis)
            self._initialize_weights(nvis)
        else:
            self.visbias = None
            self.weights = None

        seed = int(self.rng.randint(2 ** 30))

        # why a theano rng? should we remove it?
        self.s_rng = make_theano_rng(seed, which_method="uniform")

        if tied_weights and self.weights is not None:
            self.w_prime = self.weights.T
        else:
            self._initialize_w_prime(nvis)

        def _resolve_callable(conf, conf_attr):
            """
            .. todo::

                WRITEME
            """
            if conf[conf_attr] is None or conf[conf_attr] == "linear":
                return None
            # If it's a callable, use it directly.
            if hasattr(conf[conf_attr], '__call__'):
                return conf[conf_attr]
            elif (conf[conf_attr] in globals()
                  and hasattr(globals()[conf[conf_attr]], '__call__')):
                return globals()[conf[conf_attr]]
            elif hasattr(tensor.nnet, conf[conf_attr]):
                return getattr(tensor.nnet, conf[conf_attr])
            elif hasattr(tensor, conf[conf_attr]):
                return getattr(tensor, conf[conf_attr])
            else:
                raise ValueError("Couldn't interpret %s value: '%s'" %
                                 (conf_attr, conf[conf_attr]))

        self.act_enc = _resolve_callable(locals(), 'act_enc')
        self.act_dec = _resolve_callable(locals(), 'act_dec')
        self._params = [
            self.visbias,
            self.hidbias,
            self.weights,
        ]
        if not self.tied_weights:
            self._params.append(self.w_prime)
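For reference, a hedged usage sketch of the constructor documented above: string activations like 'sigmoid' are looked up by _resolve_callable in theano.tensor.nnet, and the integer `rng` is turned into a NumPy RandomState by make_np_rng. The sizes here are arbitrary:

ae = Autoencoder(nvis=784, nhid=100, act_enc='sigmoid', act_dec='sigmoid',
                 tied_weights=True, irange=1e-3, rng=9001)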
Example #48
    def get_fixed_var_descr(self, model, data):
        """
        .. todo::

            WRITEME
        """

        X, Y = data

        assert Y is not None

        batch_size = model.batch_size

        drop_mask_X = sharedX(
                model.get_input_space().get_origin_batch(batch_size))
        drop_mask_X.name = 'drop_mask'

        X_space = model.get_input_space()

        updates = OrderedDict()
        rval = FixedVarDescr()
        inputs = [X, Y]

        if not self.supervised:
            update_X = self.mask_gen(X, X_space=X_space)
        else:
            drop_mask_Y = sharedX(np.ones(batch_size,))
            drop_mask_Y.name = 'drop_mask_Y'
            update_X, update_Y = self.mask_gen(X, Y, X_space)
            updates[drop_mask_Y] = update_Y
            rval.fixed_vars['drop_mask_Y'] = drop_mask_Y
        if self.mask_gen.sync_channels:
            n = update_X.ndim
            assert n == drop_mask_X.ndim - 1
            update_X.name = 'raw_update_X'
            zeros_like_X = T.zeros_like(X)
            zeros_like_X.name = 'zeros_like_X'
            update_X = zeros_like_X + update_X.dimshuffle(0, 1, 2, 'x')
            update_X.name = 'update_X'
        updates[drop_mask_X] = update_X

        rval.fixed_vars['drop_mask'] = drop_mask_X

        if hasattr(model.inference_procedure, 'V_dropout'):
            include_prob = model.inference_procedure.include_prob
            include_prob_V = model.inference_procedure.include_prob_V
            include_prob_Y = model.inference_procedure.include_prob_Y

            theano_rng = make_theano_rng(None, 2012+10+20,
                    which_method="binomial")
            for elem in flatten([model.inference_procedure.V_dropout]):
                updates[elem] = theano_rng.binomial(p=include_prob_V,
                        size=elem.shape, dtype=elem.dtype, n=1) / \
                                include_prob_V
            if "Softmax" in str(type(model.hidden_layers[-1])):
                hid = model.inference_procedure.H_dropout[:-1]
                y = model.inference_procedure.H_dropout[-1]
                updates[y] = theano_rng.binomial(p=include_prob_Y,
                        size=y.shape, dtype=y.dtype, n=1) / include_prob_Y
            else:
                hid = model.inference_procedure.H_dropout
            for elem in flatten(hid):
                updates[elem] = theano_rng.binomial(p=include_prob,
                        size=elem.shape, dtype=elem.dtype, n=1) / include_prob

        rval.on_load_batch = [utils.function(inputs, updates=updates)]

        return rval
Example #49
 def __init__(self, nsteps, seed=42):
     super(CDk, self).__init__()
      self.nsteps = nsteps
     self.rng = make_theano_rng(seed, which_method='binomial')
Example #50
def stochastic_max_pool_bc01(bc01, pool_shape, pool_stride, image_shape,
                             rng=None):
    """
    .. todo::

        WRITEME properly

    Stochastic max pooling for training as defined in:

    Stochastic Pooling for Regularization of Deep Convolutional Neural Networks
    Matthew D. Zeiler, Rob Fergus

    Parameters
    ----------
    bc01 : theano 4-tensor
        in format (batch size, channels, rows, cols),
        IMPORTANT: All values should be positive
    pool_shape : tuple
        shape of the pool region (rows, cols)
    pool_stride : tuple
        strides between pooling regions (row stride, col stride)
    image_shape : tuple
        avoid doing some of the arithmetic in theano
    rng : theano random stream
    """
    r, c = image_shape
    pr, pc = pool_shape
    rs, cs = pool_stride

    batch = bc01.shape[0]
    channel = bc01.shape[1]

    rng = make_theano_rng(rng, 2022, which_method='multinomial')

    # Compute index in pooled space of last needed pool
    # (needed = each input pixel must appear in at least one pool)
    def last_pool(im_shp, p_shp, p_strd):
        rval = int(numpy.ceil(float(im_shp - p_shp) / p_strd))
        assert p_strd * rval + p_shp >= im_shp
        assert p_strd * (rval - 1) + p_shp < im_shp
        return rval
    # Compute starting row of the last pool
    last_pool_r = last_pool(image_shape[0], pool_shape[0],
                            pool_stride[0]) * pool_stride[0]
    # Compute number of rows needed in image for all indexes to work out
    required_r = last_pool_r + pr

    last_pool_c = last_pool(image_shape[1], pool_shape[1],
                            pool_stride[1]) * pool_stride[1]
    required_c = last_pool_c + pc

    # final result shape
    res_r = int(numpy.floor(last_pool_r/rs)) + 1
    res_c = int(numpy.floor(last_pool_c/cs)) + 1

    for bc01v in get_debug_values(bc01):
        assert not contains_inf(bc01v)
        assert bc01v.shape[2] == image_shape[0]
        assert bc01v.shape[3] == image_shape[1]

    # padding
    padded = tensor.alloc(0.0, batch, channel, required_r, required_c)
    name = bc01.name
    if name is None:
        name = 'anon_bc01'
    bc01 = tensor.set_subtensor(padded[:, :, 0:r, 0:c], bc01)
    bc01.name = 'zero_padded_' + name

    # unraveling
    window = tensor.alloc(0.0, batch, channel, res_r, res_c, pr, pc)
    window.name = 'unravelled_windows_' + name

    for row_within_pool in xrange(pool_shape[0]):
        row_stop = last_pool_r + row_within_pool + 1
        for col_within_pool in xrange(pool_shape[1]):
            col_stop = last_pool_c + col_within_pool + 1
            win_cell = bc01[:, :, row_within_pool:row_stop:rs,
                            col_within_pool:col_stop:cs]
            window = tensor.set_subtensor(window[:, :, :, :, row_within_pool,
                                                 col_within_pool], win_cell)

    # find the norm
    norm = window.sum(axis=[4, 5])
    norm = tensor.switch(tensor.eq(norm, 0.0), 1.0, norm)
    norm = window / norm.dimshuffle(0, 1, 2, 3, 'x', 'x')
    # get prob
    prob = rng.multinomial(pvals=norm.reshape((batch * channel * res_r *
                                               res_c, pr * pc)),
                           dtype='float32')
    # select
    res = (window * prob.reshape((batch, channel, res_r, res_c,
                                  pr, pc))).max(axis=5).max(axis=4)
    res.name = 'pooled_' + name

    return tensor.cast(res, theano.config.floatX)
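A hedged sketch of calling the pooling function above on a small positive-valued batch; it assumes the module-level imports the function itself uses (theano, tensor, numpy, make_theano_rng) are in scope:

import numpy as np
import theano
import theano.tensor as T

bc01 = T.tensor4('bc01')
pooled = stochastic_max_pool_bc01(bc01, pool_shape=(2, 2),
                                  pool_stride=(2, 2), image_shape=(4, 4))
f = theano.function([bc01], pooled)
x = np.random.RandomState(0).rand(3, 2, 4, 4).astype(theano.config.floatX)
out = f(x)  # shape (3, 2, 2, 2); rand() keeps all inputs positive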
Example #51
 def __init__(self, num_arms, mean_std=1.0, std_std=1.0):
     self.rng = make_np_rng(None, [2013, 11, 12], which_method="randn")
     self.means = sharedX(self.rng.randn(num_arms) * mean_std)
     self.stds = sharedX(np.abs(self.rng.randn(num_arms) * std_std))
     self.theano_rng = make_theano_rng(None, self.rng.randint(2 ** 16),
                                       which_method="normal")
Example #52
    def __call__(self, X, Y=None, X_space=None):
        """
        Provides the mask for multi-prediction training. A 1 in the mask
        corresponds to a variable that should be used as an input to the
        inference process. A 0 corresponds to a variable that should be
        used as a prediction target of the multi-prediction training
        criterion.

        Parameters
        ----------
        X : Variable
            A batch of input features to mask for multi-prediction training
        Y : Variable
            A batch of input class labels to mask for multi-prediction
            Training

        Returns
        -------
        drop_mask : Variable
            A Theano expression for a random binary mask in the same shape as
            `X`
        drop_mask_Y : Variable, only returned if `Y` is not None
            A Theano expression for a random binary mask in the same shape as
            `Y`

        Notes
        -----
        Calling this repeatedly will yield the same random numbers each time.
        """
        assert X_space is not None
        self.called = True
        assert X.dtype == config.floatX
        theano_rng = make_theano_rng(getattr(self, 'seed', None), default_seed,
                                     which_method="binomial")

        if X.ndim == 2 and self.sync_channels:
            raise NotImplementedError()

        p = self.drop_prob

        if not hasattr(self, 'drop_prob_y') or self.drop_prob_y is None:
            yp = p
        else:
            yp = self.drop_prob_y

        batch_size = X_space.batch_size(X)

        if self.balance:
            flip = theano_rng.binomial(size=(batch_size,), p=0.5, n=1,
                                       dtype=X.dtype)

            yp = flip * (1 - p) + (1 - flip) * p

            dimshuffle_args = ['x'] * X.ndim

            if X.ndim == 2:
                dimshuffle_args[0] = 0
                assert not self.sync_channels
            else:
                dimshuffle_args[X_space.axes.index('b')] = 0
                if self.sync_channels:
                    del dimshuffle_args[X_space.axes.index('c')]

            flip = flip.dimshuffle(*dimshuffle_args)

            p = flip * (1 - p) + (1 - flip) * p

        # size needs to have a fixed length at compile time or the
        # theano random number generator will be angry
        size = [X.shape[i] for i in xrange(X.ndim)]
        if self.sync_channels:
            del size[X_space.axes.index('c')]
        size = tuple(size)

        drop_mask = theano_rng.binomial(size=size, p=p, n=1, dtype=X.dtype)

        X_name = make_name(X, 'anon_X')
        drop_mask.name = 'drop_mask(%s)' % X_name

        if Y is not None:
            assert isinstance(yp, float) or yp.ndim < 2
            drop_mask_Y = theano_rng.binomial(size=(batch_size,), p=yp, n=1,
                                              dtype=X.dtype)
            assert drop_mask_Y.ndim == 1
            Y_name = make_name(Y, 'anon_Y')
            drop_mask_Y.name = 'drop_mask_Y(%s)' % Y_name
            return drop_mask, drop_mask_Y

        return drop_mask
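A hedged sketch of compiling the mask generator documented above; `mask_gen` stands for a configured instance of this class and `space` for the model's input space (both hypothetical names, not from the source):

import theano
import theano.tensor as T
from theano import config

X = T.matrix(dtype=config.floatX)
Y = T.matrix(dtype=config.floatX)
# 1 in a mask -> variable is an inference input; 0 -> prediction target.
drop_mask, drop_mask_Y = mask_gen(X, Y, X_space=space)
f = theano.function([X, Y], [drop_mask, drop_mask_Y])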
Example #53
    def test_rejective_sample(self, components=10, batch_size=1000):
        if self.mri is None:
            self.mri = self.test_build(components=components)
        mri = self.mri

        A = mri.A
        S = mri.S

        # Get the data info from the mri_gen class
        data_path, label_path = mri.resolve_dataset(mri.which_set,
                                                    mri.dataset_name)

        # Balance the classes
        y = np.atleast_2d(np.load(label_path)).T
        num_classes = np.amax(y) + 1
        class_counts = [(y == i).sum() for i in range(num_classes)]
        min_count = min(class_counts)
        balanced_idx = []
        for i in range(num_classes):
            idx = [idx for idx, j in enumerate(y) if j == i][:min_count]
            balanced_idx += idx
        balanced_idx.sort()
        assert len(balanced_idx) / min_count == num_classes
        assert len(balanced_idx) % min_count == 0

        y = y[balanced_idx]
        for i in range(num_classes):
            assert (y == i).sum() == min_count

        idx0 = [i for i, j in enumerate(y) if j == 0]
        idx1 = [i for i, j in enumerate(y) if j == 1]
        print idx0
        print idx1

        model0 = [np.histogram(a, density=True) for a in A[idx0].T]
        model1 = [np.histogram(a, density=True) for a in A[idx1].T]
        print model0
        model_complete = [np.histogram(a, density=True) for a in A.T]

        tr1 = make_theano_rng(100, which_method="uniform")
        tr2 = make_theano_rng(100, which_method="uniform")

        generator = MRI_generation.MRI_Generator(A,
                                                 S,
                                                 y,
                                                 np.zeros((mri.X.shape[1], )),
                                                 theano_rng=tr1)

        X = mri.X[:batch_size]

        assert np.all(generator.y == y), exp_act_str(y, generator.y)

        assert np.all(generator.hist_set0.eval() == [h for h, e in model0]), (
            exp_act_str(np.array([h for h, e in model0]),
                        np.array(generator.hist_set0.eval())))
        assert np.all(generator.hist_set1.eval() == [h for h, e in model1]), (
            exp_act_str(np.array([h for h, e in model1]),
                        np.array(generator.hist_set1.eval())))
        assert np.all(generator.edge_set0.eval() == [e for h, e in model0]), (
            exp_act_str(np.array([e for h, e in model0]),
                        np.array(generator.edge_set0.eval())))
        assert np.all(generator.edge_set1.eval() == [e for h, e in model1]), (
            exp_act_str(np.array([e for h, e in model1]),
                        np.array(generator.edge_set1.eval())))

        edges = [e for h, e in model0][0]
        h = [h for h, e in model0][0]

        tr1 = make_theano_rng(100, which_method="uniform")
        tr2 = make_theano_rng(100, which_method="uniform")

        es_act = tr1.uniform((batch_size, ),
                             low=generator.edge_set0[0][0],
                             high=generator.edge_set0[0][-1])
        es_exp = tr2.uniform(low=edges[0], high=edges[-1],
                             size=(batch_size, )).eval()

        ec_act, updates = theano.scan(generator.edge_compare,
                                      outputs_info=None,
                                      sequences=[es_act],
                                      non_sequences=[generator.edge_set0[0]])

        ec_exp = [(e > edges[1:]).argmin() for e in es_exp]
        ec_act_e = ec_act.eval()

        #print np.array(ec_exp), ec_act_e
        assert np.all(ec_act_e == ec_exp), exp_act_str(np.array(ec_exp),
                                                       ec_act_e)

        tr1 = make_theano_rng(100, which_method="uniform")
        tr2 = make_theano_rng(100, which_method="uniform")

        es_act = tr1.uniform((batch_size * 20, ),
                             low=generator.edge_set0[0][0],
                             high=generator.edge_set0[0][-1])
        es_exp = tr2.uniform(low=edges[0],
                             high=edges[-1],
                             size=(batch_size * 20, )).eval()
        ec_exp = [(e > edges[1:]).argmin() for e in es_exp]

        us_act = tr1.uniform((batch_size * 20, ))
        us_exp = tr2.uniform(size=(batch_size * 20, )).eval()

        tests, updates = theano.scan(
            generator.hist_compare,
            outputs_info=None,
            sequences=[sharedX(us_exp), generator.hist_set0[0][ec_exp]])
        hc_act = es_exp[tests.nonzero()[0].eval()]
        hc_exp = [
            e for e, u in zip(es_exp, us_exp)
            if u <= h[(e > edges[1:]).argmin()]
        ]
        hc_act_e = hc_act  # already a numpy array, so no .eval() needed
        assert np.all(hc_act_e == hc_exp), exp_act_str(np.array(hc_exp),
                                                       np.array(hc_act_e))

        def get_column(h, edges, samples, rng):
            # Rejection-sample one column: draw candidates uniformly over
            # the histogram support and accept each with probability equal
            # to the mass of the bin it falls in.
            h = h * np.diff(edges)
            # es ~ U(min, max)
            es = rng.uniform(low=edges[0], high=edges[-1],
                             size=(samples, )).eval()
            # us ~ U(0, 1)
            us = rng.uniform(size=(samples, )).eval()
            # Keep accepted samples
            column = [
                e for e, u in zip(es, us) if u <= h[(e > edges[1:]).argmin()]
            ]
            return np.array(column)

        def rejectiveSample(model, target_size, samples, rng):
            # Build the new mixing matrix column by column, truncating
            # each column's accepted samples to target_size.
            newA = []
            for ii in range(len(model)):
                h, edges = model[ii]
                column = get_column(h, edges, samples, rng)
                newA.append(column)
            A2 = np.array([a[:target_size] for a in newA]).T
            return A2

        tr1 = make_theano_rng(100, which_method="uniform")
        tr2 = make_theano_rng(100, which_method="uniform")
        generator.rng = tr1

        out = generator.get_column(generator.hist_set0[0],
                                   generator.edge_set0[0], batch_size * 20,
                                   batch_size // 2)

        column_act = out[0][0].eval()
        column_exp = get_column(h, edges, batch_size * 20,
                                tr2)[:batch_size // 2]

        #print column_exp, column_act
        assert np.all(column_exp == column_act), exp_act_str(
            column_exp, column_act)

        tr1 = make_theano_rng(100, which_method="uniform")
        tr2 = make_theano_rng(100, which_method="uniform")
        generator.rng = tr1

        A0_exp = rejectiveSample(model0, batch_size, batch_size * 20, tr2)

        [A0_act, br, es, us, indices], updates = theano.scan(
            generator.get_column,
            outputs_info=[None, None, None, None, None],
            sequences=[generator.hist_set0, generator.edge_set0],
            non_sequences=[batch_size * 20, batch_size])

        A0_act = A0_act.T.eval()

        A_comp = rejectiveSample(model_complete, batch_size, batch_size * 20,
                                 tr2)
        print plt.hist(A_comp[:, 0])

        f = plt.figure()
        plt.subplot(2, 1, 1)
        plt.hist(A0_act[:, 0])
        plt.subplot(2, 1, 2)
        plt.hist(A0_exp[:, 0])
        print A0_exp.shape
        print plt.hist(A0_exp[:, 0])
        f.savefig(self.hist_plot)
        for i, (a_act, a_exp) in enumerate(zip(A0_act.T, A0_exp.T)):
            print i
            assert np.all(a_act == a_exp), exp_act_str(a_exp, a_act)
        # A1_exp is referenced below but was never built in the original;
        # mirror A0_exp using the class-1 histogram model.
        A1_exp = rejectiveSample(model1, batch_size, batch_size * 20, tr2)
        actual = generator.perform(X)
        expected = np.concatenate([A0_exp.dot(S), A1_exp.dot(S)])

        assert np.all(actual == expected), exp_act_str(expected, actual)
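
# A self-contained numpy sketch (an illustration, not from the original
# source) of the histogram rejection-sampling rule the test above
# verifies: draw e ~ U(min_edge, max_edge) and u ~ U(0, 1), and accept e
# when u is at most the mass of the bin containing e.
import numpy as np

rng = np.random.RandomState(100)
h, edges = np.histogram(rng.randn(1000), density=True)
h = h * np.diff(edges)                        # densities -> bin masses

es = rng.uniform(edges[0], edges[-1], 5000)   # candidate samples
us = rng.uniform(size=5000)                   # acceptance draws
bins = np.array([(e > edges[1:]).argmin() for e in es])
accepted = es[us <= h[bins]]                  # ~ follows the histogram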
Example #54
0
def stochastic_max_pool_x(x,
                          image_shape,
                          pool_shape=(2, 2),
                          pool_stride=(1, 1),
                          rng=None):
    """
    Parameters
    ----------
    x : theano 4-tensor
        in format (batch size, channels, rows, cols)
    image_shape : tuple
        known image shape (rows, cols), passed in to avoid doing some of
        the arithmetic in theano
    pool_shape : tuple
        shape of the pool region (rows, cols)
    pool_stride : tuple
        strides between pooling regions (row stride, col stride)
    rng : theano random stream, optional
        if None, a default generator is created with make_theano_rng
    """

    r, c = image_shape
    pr, pc = pool_shape
    rs, cs = pool_stride
    global pool_size
    pool_size = pool_shape
    global stride_size
    stride_size = pool_stride
    batch = x.shape[0]
    channel = x.shape[1]
    rng = make_theano_rng(rng, 2022, which_method='multinomial')

    # Compute starting row of the last pool (last_pool is the helper
    # defined inside stochastic_max_pool_bc01 below, assumed here to be
    # available at module level)
    last_pool_r = last_pool(r, pr, rs) * rs
    # Compute number of rows needed in image for all indexes to work out
    required_r = last_pool_r + pr

    last_pool_c = last_pool(c, pc, cs) * cs
    required_c = last_pool_c + pc

    # final result shape
    res_r = int(numpy.floor(last_pool_r / rs)) + 1
    res_c = int(numpy.floor(last_pool_c / cs)) + 1

    # padding
    padded = tensor.alloc(0.0, batch, channel, required_r, required_c)
    # tensor.alloc(value, *shape) allocates a new tensor of the given
    # shape, filled with `value`

    x = tensor.set_subtensor(padded[:, :, 0:r, 0:c], x)
    # tensor.set_subtensor(subtensor, value) returns a copy of the whole
    # tensor with the given subtensor replaced by `value`

    # unraveling
    window = tensor.alloc(0.0, batch, channel, res_r, res_c, pr, pc)

    # initializing window with proper values
    for row_within_pool in xrange(pr):
        row_stop = last_pool_r + row_within_pool + 1
        for col_within_pool in xrange(pc):
            col_stop = last_pool_c + col_within_pool + 1
            win_cell = x[:, :, row_within_pool:row_stop:rs,
                         col_within_pool:col_stop:cs]
            window = tensor.set_subtensor(
                window[:, :, :, :, row_within_pool, col_within_pool], win_cell)

    # find the norm
    norm = window.sum(axis=[4, 5])
    # x.sum(axis=[4, 5]) sums over the two within-pool dimensions

    norm = tensor.switch(tensor.eq(norm, 0.0), 1.0, norm)
    # tensor.eq(a, b) is elementwise equality (a == b);
    # tensor.switch(cond, ift, iff) picks ift where cond holds and iff
    # elsewhere. Together they turn any zero norm into 1.0 so the
    # division below never divides by zero.

    norm = window / norm.dimshuffle(0, 1, 2, 3, 'x', 'x')
    # convert activations to probabilities: p_i = a_i / sum_j(a_j)

    # get prob: one multinomial draw per pool yields a one-hot vector
    # over the pr * pc positions in that pool
    prob = rng.multinomial(pvals=norm.reshape(
        (batch * channel * res_r * res_c, pr * pc)),
                           dtype='float32')
    # select: zero out everything but the sampled cell, then max-reduce
    # the pool dimensions to read off its activation
    res = (window * prob.reshape(
        (batch, channel, res_r, res_c, pr, pc))).max(axis=5).max(axis=4)

    return res
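
# A tiny numpy illustration (an assumption-labeled sketch, not from the
# original source) of the stochastic pooling rule above, for one
# flattened 2x2 pool: activations are normalized into probabilities and
# a single cell is sampled.
import numpy
_rng = numpy.random.RandomState(2022)
_pool = numpy.array([1.0, 3.0, 0.0, 4.0])  # activations in one pool
_p = _pool / _pool.sum()                   # p_i = a_i / sum_j(a_j)
_one_hot = _rng.multinomial(1, _p)         # one-hot pick, like rng.multinomial above
_sampled = (_pool * _one_hot).max()        # 4.0 is likeliest, but not certain
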
def stochastic_max_pool_bc01(bc01, pool_shape, pool_stride, image_shape,
                             rng=None):
    """
    .. todo::
        WRITEME properly
    Stochastic max pooling for training as defined in:
    Stochastic Pooling for Regularization of Deep Convolutional Neural Networks
    Matthew D. Zeiler, Rob Fergus
    Parameters
    ----------
    bc01 : theano 4-tensor
        in format (batch size, channels, rows, cols),
        IMPORTANT: All values should be positive
    pool_shape : tuple
        shape of the pool region (rows, cols)
    pool_stride : tuple
        strides between pooling regions (row stride, col stride)
    image_shape : tuple
        known image shape (rows, cols), passed in to avoid doing some of
        the arithmetic in theano
    rng : theano random stream, optional
        if None, a default generator is created with make_theano_rng
    """
    r, c = image_shape
    pr, pc = pool_shape
    rs, cs = pool_stride

    batch = bc01.shape[0]
    channel = bc01.shape[1]

    rng = make_theano_rng(rng, 2022, which_method='multinomial')

    # Compute index in pooled space of last needed pool
    # (needed = each input pixel must appear in at least one pool)
    def last_pool(im_shp, p_shp, p_strd):
        rval = int(numpy.ceil(float(im_shp - p_shp) / p_strd))
        assert p_strd * rval + p_shp >= im_shp
        assert p_strd * (rval - 1) + p_shp < im_shp
        return rval
    # Compute starting row of the last pool
    last_pool_r = last_pool(image_shape[0], pool_shape[0],
                            pool_stride[0]) * pool_stride[0]
    # Compute number of rows needed in image for all indexes to work out
    required_r = last_pool_r + pr

    last_pool_c = last_pool(image_shape[1], pool_shape[1],
                            pool_stride[1]) * pool_stride[1]
    required_c = last_pool_c + pc

    # final result shape
    res_r = int(numpy.floor(last_pool_r/rs)) + 1
    res_c = int(numpy.floor(last_pool_c/cs)) + 1

    for bc01v in get_debug_values(bc01):
        assert not contains_inf(bc01v)
        assert bc01v.shape[2] == image_shape[0]
        assert bc01v.shape[3] == image_shape[1]

    # padding
    padded = tensor.alloc(0.0, batch, channel, required_r, required_c)
    name = bc01.name
    if name is None:
        name = 'anon_bc01'
    bc01 = tensor.set_subtensor(padded[:, :, 0:r, 0:c], bc01)
    bc01.name = 'zero_padded_' + name

    # unraveling
    window = tensor.alloc(0.0, batch, channel, res_r, res_c, pr, pc)
    window.name = 'unravelled_windows_' + name

    for row_within_pool in xrange(pool_shape[0]):
        row_stop = last_pool_r + row_within_pool + 1
        for col_within_pool in xrange(pool_shape[1]):
            col_stop = last_pool_c + col_within_pool + 1
            win_cell = bc01[:, :, row_within_pool:row_stop:rs,
                            col_within_pool:col_stop:cs]
            window = tensor.set_subtensor(
                window[:, :, :, :, row_within_pool, col_within_pool],
                win_cell)

    # find the norm
    norm = window.sum(axis=[4, 5])
    norm = tensor.switch(tensor.eq(norm, 0.0), 1.0, norm)
    norm = window / norm.dimshuffle(0, 1, 2, 3, 'x', 'x')
    # get prob
    prob = rng.multinomial(pvals=norm.reshape(
        (batch * channel * res_r * res_c, pr * pc)), dtype='float32')
    # select
    res = (window * prob.reshape(
        (batch, channel, res_r, res_c, pr, pc))).max(axis=5).max(axis=4)
    res.name = 'pooled_' + name

    return tensor.cast(res, theano.config.floatX)
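
# A minimal usage sketch (an illustration, not from the original
# source): stochastically pooling 8x8 feature maps with 2x2 pools and
# stride 2, which halves each spatial dimension.
import numpy
import theano
import theano.tensor as tensor

bc01_in = tensor.tensor4('bc01')
pooled = stochastic_max_pool_bc01(bc01_in, pool_shape=(2, 2),
                                  pool_stride=(2, 2), image_shape=(8, 8))
f = theano.function([bc01_in], pooled)
out = f(numpy.random.rand(5, 3, 8, 8).astype(theano.config.floatX))
# out.shape == (5, 3, 4, 4)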
Example #56
0
    def __init__(self,
                 nmap,
                 input_space=None,
                 nvisx=None,
                 nvisy=None,
                 input_source=("featuresX", "featuresY"),
                 act_enc=None,
                 act_dec=None,
                 irange=1e-3,
                 rng=9001):
        Block.__init__(self)
        Model.__init__(self)
        assert nmap > 0, "Number of mapping units must be positive"

        if ((nvisx is not None and nvisy is not None)
                or input_space is not None):
            if nvisx is not None and nvisy is not None:
                assert nvisx > 0, "Number of visx units must be positive"
                assert nvisy > 0, "Number of visy units must be positive"
                input_space = CompositeSpace(
                    [VectorSpace(nvisx),
                     VectorSpace(nvisy)])
                self.nvisx = nvisx
                self.nvisy = nvisy
            elif isinstance(input_space.components[0], Conv2DSpace):
                rx, cx = input_space.components[0].shape
                chx = input_space.components[0].num_channels
                ry, cy = input_space.components[1].shape
                chy = input_space.components[1].num_channels
                self.nvisx = rx * cx * chx
                self.nvisy = ry * cy * chy
            else:
                raise NotImplementedError(
                    str(type(self)) + " does not support that input_space.")
        # Check whether the input_space and input_source structures match
        try:
            DataSpecsMapping((input_space, input_source))
        except ValueError:
            raise ValueError("The structures of `input_space`, %s, and "
                             "`input_source`, %s do not match. If you "
                             "specified a CompositeSpace as an input, "
                             "be sure to specify the data sources as well." %
                             (input_space, input_source))

        self.input_space = input_space
        self.input_source = input_source
        self.nmap = nmap
        self.output_space = VectorSpace(self.nmap)
        self._initialize_visbiasX(self.nvisx)  # self.visbiasX
        self._initialize_visbiasY(self.nvisy)  # self.visbiasY
        self._initialize_mapbias()  # self.mapbias
        self.irange = irange
        self.rng = make_np_rng(rng, which_method="randn")
        seed = int(self.rng.randint(2**30))
        self.s_rng = make_theano_rng(seed, which_method="uniform")

        def _resolve_callable(conf, conf_attr):
            if conf[conf_attr] is None or conf[conf_attr] == "linear":
                return None
            # If it's a callable, use it directly.
            if hasattr(conf[conf_attr], '__call__'):
                return conf[conf_attr]
            elif (conf[conf_attr] in globals()
                  and hasattr(globals()[conf[conf_attr]], '__call__')):
                return globals()[conf[conf_attr]]
            elif hasattr(tensor.nnet, conf[conf_attr]):
                return getattr(tensor.nnet, conf[conf_attr])
            elif hasattr(tensor, conf[conf_attr]):
                return getattr(tensor, conf[conf_attr])
            else:
                raise ValueError("Couldn't interpret %s value: '%s'" %
                                 (conf_attr, conf[conf_attr]))

        self.act_enc = _resolve_callable(locals(), 'act_enc')
        self.act_dec = _resolve_callable(locals(), 'act_dec')
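
        # A hypothetical illustration (not from the original source) of
        # how _resolve_callable resolves act_enc / act_dec:
        #   'sigmoid'        -> tensor.nnet.sigmoid  (found on tensor.nnet)
        #   'tanh'           -> tensor.tanh          (found on tensor)
        #   None or 'linear' -> None                 (treated as linear)
        #   lambda x: x ** 2 -> used directly        (already callable)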