Example #1
def loop(row):
    # indices of the nonzero (positive) and zero (negative) entries of the row
    one_indices = T.nonzero(row)[0]
    zero_indices = T.eq(row, 0).nonzero()[0]
    random = shared_randomstreams.RandomStreams(5)
    # sample 1 positive index and 50 negative indices uniformly at random
    ind1 = random.random_integers(size=(1,), low=0, high=one_indices.shape[0] - 1, ndim=None)
    ind2 = random.random_integers(size=(50,), low=0, high=zero_indices.shape[0] - 1, ndim=None)
    return one_indices[ind1], zero_indices[ind2]
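A hedged usage sketch, not part of the original snippet: the step function above can be driven with theano.scan to sample one positive and fifty negative indices per row of a binary matrix. The names matrix and sample_fn are assumptions, and the imports below are what the snippet presumes.

import theano
import theano.tensor as T
from theano.tensor import shared_randomstreams

matrix = T.matrix('matrix')  # one binary row per training example
(pos_idx, neg_idx), updates = theano.scan(fn=loop, sequences=matrix)
# the random-stream updates collected by scan must be passed to the compiled function
sample_fn = theano.function([matrix], [pos_idx, neg_idx], updates=updates)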
Example #2
    def __init__(self,
                 incoming,
                 assume_normalized=False,
                 seed=1234,
                 action_dtype='int32',
                 name='ProbabilisticResolver'):
        """
        :param incoming: a lasagne layer that outputs action probability vectors
                WARNING! We assume that incoming probabilities are all nonnegative even if assume_normalized=False.
        :type incoming: lasagne.layers.Layer
        
        :param assume_normalized: if set to True, the incoming layer is assumed to 
            return outputs that add up to 1 along the last axis (e.g. softmax output)
        :type assume_normalized: bool   
        
        :param seed: random seed
        :type seed: int

        :param action_dtype: dtype of the action (usually (u)int32 or (u)int64)
        :type action_dtype: string or dtype
        
        :param name: layer name (using lasagne conventions)
        :type name: string
        """

        # probas float[2] - probability of random and optimal action respectively

        self.assume_normalized = assume_normalized
        self.action_dtype = action_dtype

        self.rng = random_streams.RandomStreams(seed)

        super(ProbabilisticResolver, self).__init__(incoming, name=name)
Example #3
    def __init__(self, init='glorot_uniform', activation='linear',
                 input_dim=None, vocab_size=None, n_noise=25, Pn=[0.5, 0.5],
                 weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim = input_dim
        self.vocab_size = vocab_size
        self.n_noise = n_noise
        self.Pn = theano.shared(numpy.array(Pn).astype(config.floatX))
        self.rng = RS.RandomStreams(seed=SEED)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)

        super(NCE, self).__init__(**kwargs)
Example #4
def dropout_layer(layer, p_dropout):
    # draw a random integer below 999999 to seed the random stream
    srng = shared_randomstreams.RandomStreams(
        np.random.RandomState(0).randint(999999))
    # build the binary dropout mask
    mask = srng.binomial(n=1, p=1 - p_dropout, size=layer.shape)
    return layer * T.cast(mask, theano.config.floatX)
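A minimal usage sketch for the function above (an assumption, not from the original example): compile a Theano function that applies the dropout mask to a matrix.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor import shared_randomstreams

x = T.matrix('x')
y = dropout_layer(x, p_dropout=0.5)      # build the symbolic dropout graph
apply_dropout = theano.function([x], y)  # RandomStreams state updates are applied automatically
print(apply_dropout(np.ones((2, 4), dtype=theano.config.floatX)))  # roughly half the entries zeroed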
Example #5
def dropout_layer(layer, p_dropout):
    '''
    Drop out functionality using theano
    '''
    srng = shared_randomstreams.RandomStreams(
        np.random.RandomState(0).randint(999999))
    mask = srng.binomial(n=1, p=1 - p_dropout, size=layer.shape)
    return layer * Tensor.cast(mask, theano.config.floatX)
Example #6
    def __init__(self, inpt, in_dim, out_dim, param_names=["W", "b"]):
        self.inpt = inpt
        self.out_dim = out_dim
        self.in_dim = in_dim
        self.param_names = param_names
        # standard normal noise with shape (batch_size, out_dim)
        self.eps = t_random.RandomStreams().normal((
            inpt.shape[0],
            out_dim,
        ))
        self.initialise()
Example #7
    def __init__(
        self,
        init='glorot_uniform',
        activation='linear',
        input_dim=None,
        vocab_size=None,
        n_noise=25,
        Pn=[0.5, 0.5],
        weights=None,
        W_regularizer=None,
        b_regularizer=None,
        activity_regularizer=None,
        W_constraint=None,
        b_constraint=None,
        bias=True,
        **kwargs
    ):  #kwargs allows passing a variable number of keyword arguments to the function
        self.init = initializations.get(
            init
        )  #use Glorot uniform distribution between +- sqrt(6 / (fan_in + fan_out))
        self.activation = activations.get(
            activation)  #use linear activation function
        self.input_dim = input_dim
        self.vocab_size = vocab_size
        self.n_noise = n_noise
        self.Pn = theano.shared(
            numpy.array(Pn).astype(config.floatX)
        )  #self.Pn is a shared variable now (used by different functions)
        self.rng = RS.RandomStreams(
            seed=SEED
        )  #create a stream of random variables with various distributions

        self.W_regularizer = regularizers.get(
            W_regularizer)  #apply penalties on weight during optimization
        self.b_regularizer = regularizers.get(
            b_regularizer)  #apply penalties on bias during optimization
        self.activity_regularizer = regularizers.get(
            activity_regularizer
        )  #apply penalties on activity during optimization

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights

        if self.input_dim:
            kwargs['input_shape'] = (
                self.input_dim,
            )  #add another argument named input_shape with a value of self.input_dim

        super(NCE, self).__init__(**kwargs)
Example #8
    def _get_train_output(self, layer_input):
        """Return layer's output used for training.

        When training, a fraction p_dropout of the layer inputs is randomly dropped out.

        :param layer_input: Layer input.
        :return: layer output.
        """
        random = shared_randomstreams.RandomStreams()
        mask = random.binomial(n=1,
                               p=1. - self.p_dropout,
                               size=layer_input.shape)
        return layer_input * T.cast(mask, theano.config.floatX)
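Note that this variant does not rescale the surviving activations; with this convention the test-time output is typically multiplied by (1 - p_dropout) instead, as the feed-forward network in Example #14 below does with z = (1 - p_dropout) * T.dot(a, w.T) + b.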
Example #9
    def __init__(self,
                 incoming,
                 assume_normalized=False,
                 seed=1234,
                 output_dtype='int32',
                 name='MultiProbabilisticResolver'):

        self.assume_normalized = assume_normalized

        self.rng = random_streams.RandomStreams(seed)

        super(MultiProbabilisticResolver,
              self).__init__(incoming, name=name, output_dtype=output_dtype)
Example #10
def dropout_layer(layer, p_dropout):
    """Return layer has been dropout.

    Args:
        layer: The layer.
        p_dropout: The probability of dropout.

    Returns:
        The layer with dropout applied, rescaled by 1 / (1 - p_dropout).
    """
    srs = shared_randomstreams.RandomStreams(np.random.RandomState(
        0).randint(999999))
    mask = srs.binomial(n=1, p=1 - p_dropout, size=layer.shape)
    return layer * T.cast(mask, theano.config.floatX) / (1 - p_dropout)
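The division by (1 - p_dropout) makes this "inverted dropout": the expected activation is preserved during training (with p_dropout = 0.5 the surviving entries are doubled), so no rescaling is needed at test time.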
Example #11
def Dropout_Func(rng, value, p):
    '''
	>>>type rng: numpy.random.RandomState
	>>>para rng: random state used to seed the dropout mask generator

	>>>type value: theano.tensor.TensorType
	>>>para value: input data

	>>>type p: float
	>>>para p: dropout rate
	'''
    srng = shared_randomstreams.RandomStreams(rng.randint(2011010539))
    mask = srng.binomial(n=1, p=1 - p, size=value.shape)
    return value * T.cast(mask, theano.config.floatX)
Example #12
def dropout_layer(layer, p_dropout):
    """
    Dropout some components of a layer according to p_dropout
    :param layer: given layer
    :param p_dropout: percent dropout
    :return: pruned layer
    """
    random_generator = randomgen.RandomGenerator()
    shared_random_number_generator = shared_randomstreams.RandomStreams(
        random_generator.randint(0, high=999999))
    mask = shared_random_number_generator.binomial(n=1,
                                                   p=1 - p_dropout,
                                                   size=layer.shape)
    return layer * tensor.cast(mask, theano.config.floatX)
Example #13
    def __init__(self, incoming, epsilon=None, seed=1234, name='EpsilonGreedyResolver',**kwargs):
        """
            epsilon (float scalar): probability of a random choice instead of the optimal one
            seed (int): random seed
        """
        if epsilon is None:
            epsilon = theano.shared(np.float32(0.1), "e-greedy.epsilon")

        self.epsilon = epsilon

        # probas float[2] - probability of random and optimal action respectively

        self.probas = T.stack([epsilon, 1 - epsilon])

        self.rng = random_streams.RandomStreams(seed)

        super(EpsilonGreedyResolver, self).__init__(incoming, name=name,**kwargs)
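A hedged sketch of the epsilon-greedy sampling such a resolver typically performs; the class body is not shown above, and qvalues, greedy, and actions are illustrative names only.

import theano.tensor as T
from theano.tensor import shared_randomstreams

rng = shared_randomstreams.RandomStreams(1234)
qvalues = T.matrix('qvalues')        # (batch, n_actions) action values
epsilon = 0.1
greedy = T.argmax(qvalues, axis=1)   # optimal action per row
random_actions = rng.random_integers(size=(qvalues.shape[0],), low=0,
                                     high=qvalues.shape[1] - 1)
explore = rng.binomial(size=(qvalues.shape[0],), n=1, p=epsilon)
actions = T.switch(explore, random_actions, greedy)  # random action with probability epsilon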
Example #14
	def __init__(self, sizes, p_dropout=0):
		self.num_layers = len(sizes)
		self.sizes = sizes
		self.weights = [theano.shared(np.asarray(np.random.normal(loc=0.0,scale=np.sqrt(1.0/y),size=(y, x)),dtype=theano.config.floatX),
						name='w',
						borrow=True)
						for x, y in zip(self.sizes[:-1], self.sizes[1:])]
		self.biases = [theano.shared(np.asarray(np.random.normal(loc=0.0,scale=2.0,size=(1,y)),dtype = theano.config.floatX),
						name='b',
						broadcastable = (True,False))
						for y in self.sizes[1:]]
		self.params = []
		for weight,bias in zip (self.weights,self.biases):
			self.params.append(weight)
			self.params.append(bias)

		x = T.matrix('x')
		y = T.matrix('y')
		a = x
		a_dropout = x
		for i in xrange(len(self.weights)-1):
			srgn = shared_randomstreams.RandomStreams(np.random.RandomState(0).randint(99999))
			mask = srgn.binomial(n=1,p = 1-p_dropout,size=self.weights[i].shape)
			z_dropout = T.dot(a_dropout,self.weights[i].transpose()*T.cast(mask.transpose(),theano.config.floatX))+self.biases[i]
			z = (1-p_dropout)*T.dot(a,self.weights[i].transpose())+self.biases[i]
			a = ReLU(z)
			a_dropout = ReLU(z_dropout)

		z = (1-p_dropout)*T.dot(a,self.weights[-1].transpose())+self.biases[-1]
		mask = srgn.binomial(n=1,p = 1-p_dropout,size=self.weights[-1].shape)
		z_dropout = T.dot(a_dropout,self.weights[-1].transpose()*T.cast(mask.transpose(),theano.config.floatX))+self.biases[-1]
		a = softmax(z)
		epsilon = 10e-16
		a_dropout = softmax(z_dropout)
		self.cost = T.mean(-T.log(a_dropout[T.arange(a.shape[0]),T.argmax(y,axis=1)]+ epsilon))
		lmbda = T.scalar('lmbda')
		for w in self.weights:
			self.cost += 0.5*(lmbda)*T.mean(T.log(w**2+ epsilon))
		self.grads = [T.grad(self.cost,param) for param in self.params]
		eta = T.scalar('eta')
		self.updates = [(param,param-eta*grad) for param,grad in zip(self.params,self.grads)]
		self.feedforward = theano.function([x],a)
		self.train = theano.function([x,y,eta,lmbda],self.cost,updates = self.updates)
Example #15
    def __init__(self, optimizers=sgd):
        ''' model hyperparameters '''
        self.random_seed = 999
        self.batch_size = 100
        self.split = 0.7
        self.theano_rng = rs.RandomStreams(self.random_seed)
        self.optimizer = optimizers
        ''' Theano Tensor variables '''
        self.index = T.lscalar()  # minibatch index tensor
        self.visibles = []  # list of tensor variables to visible units
        self.W_params = []  # list of weight params tensors
        self.vbias = []  # list of vbias params tensors
        self.hbias = []  # list of hbias params tensors
        self.masks = []  # list of tensor gradient masks
        self.params = []  # list of params for gradient calculation
        ''' Theano Shared variables '''
        self.train_visibles = []  # list of shared variables to inputs
        self.valid_visibles = []  # list of shared variables to inputs

        self.num_samples = 0  # total number of samples used
        self.num_hidden = 0  # number of hidden units
        self.shapes = []  # list of number of features
        self.types = []  # list of type for each feature
Example #16
    def __init__(self, mu, logvar, samples=10, **kwargs):
        self.rng = shared_randomstreams.RandomStreams(1692)
        self.samples = samples
        super(GaussianSamplerLayer, self).__init__([mu, logvar], **kwargs)
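A hedged sketch of the reparameterization step such a sampler layer usually implements; the layer's output method is not shown above, and mu, logvar, and z here are illustrative symbolic variables.

import theano.tensor as T
from theano.tensor import shared_randomstreams

rng = shared_randomstreams.RandomStreams(1692)
mu = T.matrix('mu')
logvar = T.matrix('logvar')
eps = rng.normal(size=mu.shape)     # standard normal noise
z = mu + T.exp(0.5 * logvar) * eps  # a sample from N(mu, exp(logvar))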
Example #17
def dropout_layer(layer, p_dropout):
    srng = shared_randomstreams.RandomStreams(
        np.random.RandomState(0).randint(999999))
    mask = srng.binomial(n=1, p=1 - p_dropout, size=layer.shape)
    return layer * T.cast(mask, 'float32')
Example #18
def dropoutLayer(layer, pDropout):
    srng = shared_randomstreams.RandomStreams(np.random.RandomState(0).randint(999999))
    mask = srng.binomial(n=1, p=1-pDropout, size=layer.shape)
    return layer*T.cast(mask, theano.config.floatX)
Example #19
def dropout(X, p):
    srng = t_random.RandomStreams()
    retain_prob = 1 - p
    X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
    X /= retain_prob
    return X.astype(theano.config.floatX)
Example #20
np.random.seed(0)
print(np.random.permutation(10))
np.random.seed(1)
print(np.random.permutation(10))
np.random.seed(2)
print(np.random.permutation(10))

print("---------")

np.random.seed(42)
print(np.random.randn(4))

print(np.random.randn(4))

print(np.random.randn(4))

np.random.seed(
    42
)  # reseeding with 42 reproduces exactly the same sequence as before; an np.random.RandomState with an identical seed behaves the same way
print(np.random.randn(4))

print("--------------")

import theano
import theano.tensor as T
from theano.tensor import shared_randomstreams

srng = shared_randomstreams.RandomStreams(
    np.random.RandomState(0).randint(999999))
print(srng)
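A small check, added here as an aside: np.random.seed(s) and an explicitly constructed np.random.RandomState(s) yield identical sequences, which is why reseeding with 42 above reproduces the first draw.

import numpy as np

np.random.seed(42)
a = np.random.randn(4)
b = np.random.RandomState(42).randn(4)
assert np.allclose(a, b)  # identically seeded generators produce identical draws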
Example #21
    def __dropout(self, layer):
        srng = shared_randomstreams.RandomStreams(
            np.random.RandomState(0).randint(999999))
        mask = srng.binomial(n=1, p=1.0 - Layer.dropout, size=layer.shape)
        return layer * T.cast(mask, theano.config.floatX)
Example #22
def dropout_layer(layer, p_dropout):  # generate random 0/1 weights to simulate dropout
    srng = shared_randomstreams.RandomStreams(
        np.random.RandomState(0).randint(999999))
    mask = srng.binomial(n=1, p=1 - p_dropout, size=layer.shape)
    return layer * T.cast(mask, theano.config.floatX)
Example #23
#!/usr/bin/env python
from theano import function
import theano.tensor as T
from theano.tensor import shared_randomstreams
import numpy as np
import numpy.random
from scipy.special import gammaincinv
from numpy.linalg import norm

# tensor stand-in for np.random.RandomState
rngT = shared_randomstreams.RandomStreams()
rng = numpy.random.RandomState()

# {{{ Fastfood Params }}}
n, d = T.dscalars('n', 'd')
# transform dimensions to be a power of 2
d0, n0 = d, n
l = T.ceil(T.log2(d))  # TODO cast to int
d = 2**l
k = T.ceil(n / d)  # TODO cast to int
n = d * k
# generate parameter 'matrices'
B = rng.choice([-1, 1], size=(k, d))
G = rng.normal(size=(k, d))  # numpy's normal() takes no dtype argument; the result is float64 by default
PI = np.array([rng.permutation(d) for _ in xrange(k)]).T
S = np.empty((k * d, 1), dtype=np.float64)
# generate scaling matrix, S
for i in xrange(k):
    for j in xrange(d):
        p1 = rng.uniform(size=d)
        p2 = d / 2