def test_gen_cloning_with_shape_change(self):
    # A generator op yields minibatches of 50 rows; cloning the graph with a
    # full-size shared variable should change the leading shape to 1000.
    data = floatX(np.random.uniform(size=(1000, 10)))
    minibatches = DataSampler(data, batchsize=50)
    gen = generator(minibatches)
    gen_r = tt_rng().normal(size=gen.shape).T
    X = gen.dot(gen_r)
    res, _ = theano.scan(lambda x: x.sum(), X, n_steps=X.shape[0])
    assert res.eval().shape == (50,)
    shared = theano.shared(data)
    res2 = theano.clone(res, {gen: shared**2})
    assert res2.eval().shape == (1000,)
def randidx(self, size=None):
    if size is None:
        size = ()
    elif isinstance(size, tt.TensorVariable):
        if size.ndim < 1:
            size = size[None]
        elif size.ndim > 1:
            raise ValueError('size ndim should be no more than 1d')
    else:
        size = tuple(np.atleast_1d(size))
    # Sample uniformly in [0, n_rows) and truncate to int64 row indices; the
    # 1e-16 offset keeps the upper bound exclusive after the cast.
    return (tt_rng()
            .uniform(size=size,
                     low=0.0,
                     high=self.histogram.shape[0] - 1e-16)
            .astype('int64'))
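# Usage sketch (illustrative, not from the original source; assumes this
# method lives on an empirical-histogram approximation that stores samples
# row-wise in `self.histogram`, as the body above implies):
#
#     idx = approx.randidx(size=128)     # symbolic int64 vector, shape (128,)
#     batch = approx.histogram[idx]      # 128 rows drawn with replacement
#
# A symbolic 0-d `size` is also accepted; it is promoted to 1-d above.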
def __init__(self, vars=None, batch_size=None, total_size=None,
             step_size=1.0, model=None, random_seed=None,
             minibatches=None, minibatch_tensors=None, **kwargs):
    warnings.warn(EXPERIMENTAL_WARNING)
    model = modelcontext(model)

    if vars is None:
        vars = model.vars
    vars = inputvars(vars)

    self.model = model
    self.vars = vars
    self.batch_size = batch_size
    self.total_size = total_size
    # Both sizes are needed to scale minibatch gradients up to the full
    # dataset, so fail early if either is missing.
    _value_error(
        total_size is not None and batch_size is not None,
        "total_size and batch_size of training data have to be specified",
    )
    self.expected_iter = int(total_size / batch_size)

    # Set the random stream
    if random_seed is None:
        self.random = tt_rng()
    else:
        self.random = tt_rng(random_seed)

    self.step_size = step_size
    shared = make_shared_replacements(vars, model)
    self.updates = OrderedDict()
    self.q_size = int(sum(v.dsize for v in self.vars))

    flat_view = model.flatten(vars)
    self.inarray = [flat_view.input]

    self.dlog_prior = prior_dlogp(vars, model, flat_view)
    self.dlogp_elemwise = elemwise_dlogL(vars, model, flat_view)

    if minibatch_tensors is not None:
        _check_minibatches(minibatch_tensors, minibatches)
        self.minibatches = minibatches

        # Replace input shared variables with tensors
        def is_shared(t):
            return isinstance(t, theano.compile.sharedvalue.SharedVariable)

        tensors = [(t.type() if is_shared(t) else t) for t in minibatch_tensors]
        updates = OrderedDict(
            (t, t_) for t, t_ in zip(minibatch_tensors, tensors) if is_shared(t)
        )
        self.minibatch_tensors = tensors
        self.inarray += self.minibatch_tensors
        self.updates.update(updates)

    self._initialize_values()
    super().__init__(vars, shared)
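# Construction sketch (hedged: the model, data names, and the concrete
# subclass name are assumptions for illustration; only the batch_size,
# total_size, and minibatch keywords come from the __init__ above):
#
#     with pm.Model():
#         mu = pm.Normal('mu', 0., 10.)
#         pm.Normal('obs', mu, 1., observed=data_tensor)
#         step = SomeSGMCMCSubclass(batch_size=50, total_size=1000,
#                                   minibatches=minibatches,
#                                   minibatch_tensors=[data_tensor])
#
# total_size and batch_size are both required so minibatch gradients can be
# rescaled by total_size / batch_size into full-data gradient estimates.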
def setup_method(self):
    nr.seed(self.random_seed)
    # Save the current global Theano random stream so it can be restored
    # after the test, then install a freshly seeded one.
    self.old_tt_rng = tt_rng()
    set_tt_rng(RandomStream(self.random_seed))
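def teardown_method(self):
    # Counterpart sketch to setup_method above (an assumption, not original
    # code): restore the saved global stream so the seeded RandomStream does
    # not leak into unrelated tests.
    set_tt_rng(self.old_tt_rng)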