Example #1
    def __init__(self):
        x, y, z = T.scalars("xyz")
        e = x * y
        op = th.OpFromGraph([x, y], [e])
        e2 = op(x, y) + z
        op2 = th.OpFromGraph([x, y, z], [e2])
        e3 = op2(x, y, z) + z

        self.inputs = [x, y, z]
        self.outputs = [e3]
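
The class above only builds the symbolic graph. A minimal, hedged sketch (not part of the original test) of how such nested ops could be compiled and evaluated:

import theano as th
import theano.tensor as T

x, y, z = T.scalars('x', 'y', 'z')
e = x * y
op = th.OpFromGraph([x, y], [e])          # wrap x*y as a reusable op
e2 = op(x, y) + z                         # use the op inside a larger expression
op2 = th.OpFromGraph([x, y, z], [e2])     # nest the first op inside a second one
e3 = op2(x, y, z) + z

f = th.function([x, y, z], e3)
print(f(2.0, 3.0, 4.0))                   # 2*3 + 4 + 4 = 14.0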
Example #2
def gradSort(met, X):
    # -----------Start Batch Loop------------
    m = T.fvector()
    x = T.fmatrix()

    # Sort the input on the metric
    z = T.argsort(m, axis=0)
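    # the "+ 0 * T.sum(m)" term below keeps the metric m symbolically
    # connected to the output, so the wrapped op exposes a gradient slot for it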
    out = x[z] + 0 * T.sum(m)

    sort = theano.function([m, x], [out])

    # Fix the gradient of the sort operation to be the sum of
    #   the gradients with respect to the input features
    def grad_edit(inps, grads):
        m, x = inps
        g, = grads

        z = T.argsort(m, axis=0)
        s = T.sum(g, axis=-1)

        am = T.max(abs(s), axis=-1)

        s = 10 * (s - T.clip(s, -.90 * am, .90 * am))

        out = s
        return out, g

    op = theano.OpFromGraph([m, x], [out])
    op.grad = grad_edit

    results, updates = theano.map(fn=op, sequences=[met, X], name='batch_sort')
    # ---------END Batch Loop-----------------

    r = results
    return r
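
A hedged usage sketch for gradSort; the batched variable names and shapes below are assumptions, not part of the original code:

import theano
import theano.tensor as T

met = T.fmatrix('met')   # batch of metric vectors, assumed shape (batch, n)
X = T.ftensor3('X')      # batch of feature matrices, assumed shape (batch, n, d)

sorted_X = gradSort(met, X)
sort_fn = theano.function([met, X], sorted_X)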
Example #3
    def test_opfromgraph(self):
        # as with the scan tests above, insert foreign inputs into the
        # inner graph.
        outer = tensor.scalar("outer")
        shared = theano.shared(
            numpy.array(1., dtype=theano.config.floatX),
            name="shared")
        constant = tensor.constant(1., name="constant")
        z = outer * (shared + constant)

        # construct the inner graph
        a = tensor.scalar()
        b = tensor.scalar()
        r = a + b
        r.tag.replacement = z * (a - b)

        # construct the outer graph
        c = tensor.scalar()
        d = tensor.scalar()
        u = theano.OpFromGraph([a, b], [r])(c, d)
        t = z * u
        v, = map_variables(self.replacer, [t])
        t2 = z * v

        f = theano.function([c, d, outer], [t, t2])
        for m, n in itertools.combinations(list(range(10)), 2):
            assert f(m, n, outer=0.5) == [m + n, m - n]

        # test that the unsupported case of replacement with a shared
        # variable with updates crashes
        shared.update = shared + 1
        self.assertRaises(NotImplementedError,
                          map_variables, self.replacer, [t])
Example #4
 def __call__(self, x):
     # OpFromGraph is opaque to Theano optimizations, so we need to move
     # things to GPU ourselves if needed.
     if theano.gpuarray.pygpu_activated:
         maybe_to_gpu = lambda x: theano.gpuarray.as_gpuarray_variable(x, None)
     else:
         maybe_to_gpu = lambda x: x
     # We move the input to GPU if needed.
     x = maybe_to_gpu(x)
     # We note the tensor type of the input variable to the nonlinearity
     # (mainly dimensionality and dtype); we need to create a fitting Op.
     tensor_type = x.type
     # If we did not create a suitable Op yet, this is the time to do so.
     if tensor_type not in self.ops:
         # For the graph, we create an input variable of the correct type:
         inp = tensor_type()
         # We pass it through the nonlinearity (and move to GPU if needed).
         outp = maybe_to_gpu(self.nonlinearity(inp))
         # Then we fix the forward expression...
         op = theano.OpFromGraph([inp], [outp])
         # ...and replace the gradient with our own (defined in a subclass).
         op.grad = self.grad
         # Finally, we memoize the new Op
         self.ops[tensor_type] = op
     # And apply the memoized Op to the input we got.
     return self.ops[tensor_type](x)
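
The method above typically lives on a small wrapper class whose subclasses supply the nonlinearity and the replacement gradient. A hedged, CPU-only sketch of that pattern in the spirit of guided backpropagation (class names and the choice of nonlinearity are assumptions, not from the original):

import theano
import theano.tensor as T

class ModifiedBackprop(object):
    def __init__(self, nonlinearity):
        self.nonlinearity = nonlinearity
        self.ops = {}            # memoizes one OpFromGraph per input tensor type

    def __call__(self, x):       # CPU-only variant of the method shown above
        tensor_type = x.type
        if tensor_type not in self.ops:
            inp = tensor_type()
            outp = self.nonlinearity(inp)
            op = theano.OpFromGraph([inp], [outp])
            op.grad = self.grad  # subclasses define grad
            self.ops[tensor_type] = op
        return self.ops[tensor_type](x)

class GuidedBackprop(ModifiedBackprop):
    def grad(self, inputs, out_grads):
        (inp,) = inputs
        (grd,) = out_grads
        dtype = inp.dtype
        # only propagate gradients where both the input and the incoming
        # gradient are positive
        return (grd * (inp > 0).astype(dtype) * (grd > 0).astype(dtype),)

# e.g.: guided_relu = GuidedBackprop(T.nnet.relu); y = guided_relu(some_tensor)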
Example #5
def create_model_op(interp_data):
    input_data_T = interp_data.interpolator.tg.input_parameters_list()
    input_data_P = interp_data.get_input_data()
    return theano.OpFromGraph(
        input_data_T,
        [interp_data.interpolator.tg.whole_block_model(interp_data.n_faults)],
        on_unused_input='ignore')
Example #6
    def __call__(self, x):
        """
        #OpFromGraph is opaque to Theano optimizations, so we need to move things
        #to gpu ourselves if needed
        if theano.sandbox.cuda.cuda_enabled:
            maybe_to_gpu = theano.sandbox.cuda.as_cuda_ndarray_variable
        else:
            maybe_to_gpu = lambda x: x
        #we move the input to gpu if needed
        x = maybe_to_gpu(x)
        """

        #we note the tensor type of the input variable to the nonlinearity
        #(mainly dimensionality and dtype); we need to create a fitting Op.
        tensor_type = x.type
        #if we did not create a suitable Op yet, this is the time to do so.
        if tensor_type not in self.ops:
            #For the graph, we create an input variable of the correct type:
            inp = tensor_type()
            #we pass it through the nonlinearity (and move to gpu if needed)
            outp = self.nonlinearity(inp)
            #then we fix the forward expression
            op = theano.OpFromGraph([inp], [outp])
            #and replace the gradient with our own (defined in a subclass)
            op.grad = self.grad
            #finally, we memoize the new Op
            self.ops[tensor_type] = op
        #and apply the memoized Op to the input we got
        return self.ops[tensor_type](x)
Example #7
def lasagne_net_block(net, outputName):
  import lasagne
  import theano
  x = lasagne.layers.get_output(net['input'])
  feature = lasagne.layers.get_output(net[outputName])
  op = theano.OpFromGraph([x], [feature])
  return op
Example #8
    def __init__(self):
        x, y, z = T.scalars("xyz")
        e = T.nnet.sigmoid((x + y + z)**2)
        op = th.OpFromGraph([x, y, z], [e])
        e2 = op(x, y, z)

        self.inputs = [x, y, z]
        self.outputs = [e2]
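
When no gradient override is supplied, theano.grad simply differentiates through the wrapped inner graph. A minimal, hedged sketch continuing the example above:

import theano as th
import theano.tensor as T

x, y, z = T.scalars('x', 'y', 'z')
e = T.nnet.sigmoid((x + y + z) ** 2)
op = th.OpFromGraph([x, y, z], [e])
e2 = op(x, y, z)

gx, gy, gz = th.grad(e2, [x, y, z])       # gradients come from the inner graph
fn = th.function([x, y, z], [e2, gx, gy, gz])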
Example #9
	def buildDotOp(self):
		a = TT.fmatrix("a")
		W = TT.fmatrix("W")
		V = TT.fmatrix("V")
		b = TT.dot(a, W)
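		# the first override below replaces the gradient w.r.t. a with dot(g, V)
		# (instead of the default dot(g, W.T)); W and V keep their default grads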
		self.dot = T.OpFromGraph([a, W, V], [b], inline=True,
		                         grad_overrides=[
		    lambda inps, grads: TT.dot(grads[0], inps[2]),
		    "default",
		    "default"
		])
Example #10
	def buildGOp(self):
		#
		#          Dynamical System function ds/dt = f(\theta, v, s)
		#
		# The extended version is ds/dt = g(\theta, v, s, \beta) with
		#
		#     g(\theta, v, s, \beta) = f(\theta, v, s) - \beta dC(v,s)/ds
		#
		
		ins   = {}
		ins.update({t.name:t.newTheanoVar for t in self.getThetaIter()})
		ins.update({s.name:s.newTheanoVar for s in self.getStateIter()})
		ins.update({"beta": TT.fscalar("beta")})
		ins.update({"y":    TT.fmatrix("y")})
		outs  = {}
		
		
		beta  = ins["beta"]
		y     = ins["y"]
		for i in xrange(len(self.arch)):
			if   i == 0:
				dh = TT.zeros_like(ins["h_"+str(i)])
			elif i == len(self.arch)-1:
				bbelow = ins["b_"+str(i-1)]
				Wbelow = ins["W_"+str(i-1)+"_"+str(i)]
				Vbelow = ins["V_"+str(i)  +"_"+str(i-1)]
				
				hbelow = ins["h_"+str(i-1)]
				hhere  = ins["h_"+str(i)]
				
				dh = self.dot(self.rho(hbelow+bbelow), Wbelow, Vbelow) - \
				     beta*(hhere-y)
			else:
				bbelow = ins["b_"+str(i-1)]
				Wbelow = ins["W_"+str(i-1)+"_"+str(i)]
				Vbelow = ins["V_"+str(i)  +"_"+str(i-1)]
				babove = ins["b_"+str(i+1)]
				Wabove = ins["W_"+str(i)  +"_"+str(i+1)]
				Vabove = ins["V_"+str(i+1)+"_"+str(i)]
				
				hbelow = ins["h_"+str(i-1)]
				habove = ins["h_"+str(i+1)]
				
				dh = self.dot(self.rho(habove+babove), Wabove, Vabove) + \
				     self.dot(self.rho(hbelow+bbelow), Wbelow, Vbelow)
			outs["h_"+str(i)] = dh
		
		inputs  = [ins [k] for k in self.getGOpArgNames()]
		outputs = [outs[k] for k in self.getGOpRetNames()]
		self.g  = T.OpFromGraph(inputs, outputs, name="gOp")
Example #11
 def __call__(self, x):
     if theano.sandbox.cuda.cuda_enabled:
         maybe_to_gpu = theano.sandbox.cuda.as_cuda_ndarray_variable
     else:
         maybe_to_gpu = lambda x: x
     x = maybe_to_gpu(x)
     tensor_type = x.type
     if tensor_type not in self.ops:
         inp = tensor_type()
         outp = maybe_to_gpu(self.nonlinearity(inp))
         op = theano.OpFromGraph([inp], [outp])
         op.grad = self.grad
         self.ops[tensor_type] = op
     return self.ops[tensor_type](x)
Example #12
 def __call__(self, x):
     if theano.gpuarray.pygpu_activated:
         ctx = theano.gpuarray.basic_ops.infer_context_name()
         maybe_to_gpu = lambda x: theano.gpuarray.basic_ops.as_gpuarray_variable(
             x, ctx)
     else:
         maybe_to_gpu = lambda x: x
     x = maybe_to_gpu(x)
     tensor_type = x.type
     if tensor_type not in self.ops:
         inp = tensor_type()
         outp = maybe_to_gpu(self.nonlinearity(inp))
         op = theano.OpFromGraph([inp], [outp])
         op.L_op = self.L_op
         self.ops[tensor_type] = op
     return self.ops[tensor_type](x)
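
A hedged sketch of an L_op override that could be hooked in as in the example above; the straight-through behaviour and the class name are illustrative assumptions only:

class StraightThrough(object):
    """Hypothetical wrapper; __call__ would be the method shown above."""
    def __init__(self, nonlinearity):
        self.nonlinearity = nonlinearity
        self.ops = {}

    def L_op(self, inputs, outputs, output_grads):
        # L_op receives the op's inputs, its outputs and the output gradients,
        # and returns one gradient term per input
        (inp,) = inputs
        (outp,) = outputs
        (grd,) = output_grads
        return [grd]             # pass the incoming gradient through unchanged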
Example #13
    def __call__(self, *args):
        # constants needs to be manually converted to tensors
        def try_convert_tensor(arg):
            if treeano.utils.is_variable(arg):
                return arg
            else:
                return T.constant(arg, dtype=fX)

        args = map(try_convert_tensor, args)
        # OpFromGraph is opaque to Theano optimizations, so we need to move
        # things to GPU ourselves if needed.
        if theano.sandbox.cuda.cuda_enabled:
            maybe_to_gpu = theano.sandbox.cuda.as_cuda_ndarray_variable
        else:
            maybe_to_gpu = lambda x: x
        # move the input to GPU if needed.
        args = map(maybe_to_gpu, args)
        # note the tensor type of the input variable to the fn
        # (mainly dimensionality and dtype); we need to create a fitting Op.
        tensor_types = tuple([arg.type for arg in args])
        # create a suitable Op if not yet done
        if tensor_types not in self.ops:
            # create an input variable of the correct type
            inps = [tensor_type() for tensor_type in tensor_types]
            # pass it through the fn (and move to GPU if needed)
            outp = maybe_to_gpu(self.fn(*inps))
            # fix the forward expression
            op = theano.OpFromGraph(inps, [outp])
            # keep a reference to previous gradient
            op.overwritten_grad = op.grad
            # replace the gradient with our own
            op.grad = self.grad
            # Finally, we memoize the new Op
            self.ops[tensor_types] = op
        # apply the memoized Op to the input we got
        return self.ops[tensor_types](*args)
Example #14
def MvNormalLogp():
    """Compute the log pdf of a multivariate normal distribution.

    This should be used in MvNormal.logp once Theano#5908 is released.

    Parameters
    ----------
    cov : tt.matrix
        The covariance matrix.
    delta : tt.matrix
        Array of deviations from the mean.
    """
    cov = tt.matrix('cov')
    cov.tag.test_value = floatX(np.eye(3))
    delta = tt.matrix('delta')
    delta.tag.test_value = floatX(np.zeros((2, 3)))

    solve_lower = tt.slinalg.Solve(A_structure='lower_triangular')
    solve_upper = tt.slinalg.Solve(A_structure='upper_triangular')
    cholesky = Cholesky(lower=True, on_error='nan')

    n, k = delta.shape
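    # `f` casts to floatX; it is a helper defined elsewhere in the original module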
    n, k = f(n), f(k)
    chol_cov = cholesky(cov)
    diag = tt.nlinalg.diag(chol_cov)
    ok = tt.all(diag > 0)

    chol_cov = tt.switch(ok, chol_cov, tt.fill(chol_cov, 1))
    delta_trans = solve_lower(chol_cov, delta.T).T

    result = n * k * tt.log(f(2) * np.pi)
    result += f(2) * n * tt.sum(tt.log(diag))
    result += (delta_trans**f(2)).sum()
    result = f(-.5) * result
    logp = tt.switch(ok, result, -np.inf)

    def dlogp(inputs, gradients):
        g_logp, = gradients
        cov, delta = inputs

        g_logp.tag.test_value = floatX(1.)
        n, k = delta.shape

        chol_cov = cholesky(cov)
        diag = tt.nlinalg.diag(chol_cov)
        ok = tt.all(diag > 0)

        chol_cov = tt.switch(ok, chol_cov, tt.fill(chol_cov, 1))
        delta_trans = solve_lower(chol_cov, delta.T).T

        inner = n * tt.eye(k) - tt.dot(delta_trans.T, delta_trans)
        g_cov = solve_upper(chol_cov.T, inner)
        g_cov = solve_upper(chol_cov.T, g_cov.T)

        tau_delta = solve_upper(chol_cov.T, delta_trans.T)
        g_delta = tau_delta.T

        g_cov = tt.switch(ok, g_cov, -np.nan)
        g_delta = tt.switch(ok, g_delta, -np.nan)

        return [-0.5 * g_cov * g_logp, -g_delta * g_logp]

    return theano.OpFromGraph([cov, delta], [logp],
                              grad_overrides=dlogp,
                              inline=True)
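
A hedged usage sketch for the op returned above; the outer variable names are assumptions, not from the original module:

import theano
import theano.tensor as tt

logp_op = MvNormalLogp()
cov_in = tt.matrix('cov_in')
delta_in = tt.matrix('delta_in')
logp = logp_op(cov_in, delta_in)                           # scalar log-density
dcov, ddelta = theano.grad(logp, [cov_in, delta_in])       # picks up the dlogp override
f = theano.function([cov_in, delta_in], [logp, dcov, ddelta])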
Example #15
# In[13]:

data_interp = GeMpy.set_interpolator(geo_data, u_grade=0)

# These are the shared parameters and the compilation of the function. This will be hidden as well at some point.
input_data_T = data_interp.interpolator.tg.input_parameters_list()
# This prepares the user data for the theano function
input_data_P = data_interp.interpolator.data_prep()

# In[14]:

# We create the op. Because it is an op, we cannot call it with plain Python variables anymore;
# that is why we have to make them shared variables beforehand.
op2 = theano.OpFromGraph(input_data_T,
                         [data_interp.interpolator.tg.whole_block_model()],
                         on_unused_input='ignore')

# In[15]:

import pymc3 as pm

theano.config.compute_test_value = 'ignore'
model = pm.Model()
with model:
    # Stochastic value
    foliation = pm.Normal('foliation', 40, sd=10)

    # We convert a python variable to theano.shared
    dips = theano.shared(input_data_P[1])
Example #16
# %% 
geo_model._interpolator.theano_graph.compute_type


# %%
# Test fw model gradient
# ^^^^^^^^^^^^^^^^^^^^^^
# 

# %% 
import theano
import theano.tensor as tt
theano.config.compute_test_value = 'ignore'
geo_model_T = theano.OpFromGraph(geo_model.interpolator.theano_graph.input_parameters_loop,
                                [theano.grad(geo_model.interpolator.theano_graph.theano_output()[12][0],
                                             geo_model.interpolator.theano_graph.input_parameters_loop[4])],
                                 inline=True,
                                 on_unused_input='ignore',
                                 name='forw_grav')

# %% 
i = geo_model.interpolator.get_python_input_block()
th_f = theano.function([], geo_model_T(*i), on_unused_input='warn')

# %% 
geo_model.interpolator.theano_graph.sig_slope.set_value(20)

# %% 
th_f()


# %%
Example #17
def bulid_cnnBlock(net):
  import lasagne
  import theano
  x = lasagne.layers.get_output(net['input'])
  feature = lasagne.layers.get_output(net['conv8/3x3_s1'])
  op = theano.OpFromGraph([x], [feature])

  return op
Example #18
    def __init__(self,
                 nvis,
                 nfac,
                 nmap,
                 name,
                 input_type='real',
                 corruption_type='zeromask',
                 corruption_level=.0,
                 numpy_rng=None,
                 theano_rng=None,
                 act_fn=T.nnet.sigmoid,
                 wxf_init=None,
                 wyf_init=None):
        # call base class' constructor
        super(GAE_Layer, self).__init__(name=name)
        self.nvis = nvis
        self.nfac = nfac
        self.nmap = nmap
        self.input_type = input_type
        self.corruption_type = corruption_type
        self._corruption_level = corruption_level
        self.act_fn = act_fn

        if not numpy_rng:
            self.numpy_rng = np.random.RandomState(1)
        else:
            self.numpy_rng = numpy_rng
        if not theano_rng:
            self.theano_rng = RandomStreams(1)
        else:
            self.theano_rng = theano_rng

        if wxf_init is None:
            wxf_init = self.numpy_rng.uniform(low=-.005,
                                              high=.005,
                                              size=(nvis, nfac)).astype(
                                                  theano.config.floatX)
        if wyf_init is None:
            wyf_init = self.numpy_rng.uniform(low=-.005,
                                              high=.005,
                                              size=(nvis, nfac)).astype(
                                                  theano.config.floatX)

        self.wxf = theano.shared(wxf_init, name='wxf_{0}'.format(self.name))
        self.wyf = theano.shared(wyf_init, name='wyf_{0}'.format(self.name))

        self.wmf_init = self.numpy_rng.uniform(low=-.01,
                                               high=.01,
                                               size=(nmap, nfac)).astype(
                                                   theano.config.floatX)
        self.wfm_init = self.numpy_rng.uniform(low=-.01,
                                               high=.01,
                                               size=(nfac, nmap)).astype(
                                                   theano.config.floatX)
        self.wmf = theano.shared(value=self.wmf_init,
                                 name='wmf_{0}'.format(self.name))
        self.wfm = theano.shared(value=self.wfm_init,
                                 name='wfm_{0}'.format(self.name))

        self.bx = theano.shared(np.zeros(nvis, dtype=theano.config.floatX),
                                name='bx_{0}'.format(self.name))
        self.by = theano.shared(np.zeros(nvis, dtype=theano.config.floatX),
                                name='by_{0}'.format(self.name))
        self.bm = theano.shared(np.zeros(nmap, dtype=theano.config.floatX) -
                                2.,
                                name='bm_{0}'.format(self.name))

        self.params = [
            self.wxf, self.wyf, self.wmf, self.wfm, self.bx, self.by, self.bm
        ]

        self._corruption_level = theano.shared(corruption_level)

        self.inputs = T.matrix(name='inputs_{0}'.format(self.name))
        self.inputs.tag.test_value = np.random.randn(100, nvis + nvis).astype(
            theano.config.floatX)

        self._inputsX = self.inputs[:, :nvis]
        self._inputsY = self.inputs[:, nvis:]

        if self.corruption_type == 'zeromask':
            self._corruptedX = self.theano_rng.binomial(
                size=self._inputsX.shape,
                n=1,
                p=1.0 - self._corruption_level,
                dtype=theano.config.floatX) * self._inputsX
            self._corruptedY = self.theano_rng.binomial(
                size=self._inputsY.shape,
                n=1,
                p=1.0 - self._corruption_level,
                dtype=theano.config.floatX) * self._inputsY
        elif self.corruption_type is None:
            self._corruptedX = self._inputsX
            self._corruptedY = self._inputsY
        else:
            raise ValueError('unsupported noise type')

        self._factorsX = T.dot(self._corruptedX, self.wxf)
        self._factorsY = T.dot(self._corruptedY, self.wyf)
        self._preactMappings = T.dot(self._factorsX * self._factorsY,
                                     self.wfm) + self.bm
        self._mappings = self.act_fn(self._preactMappings)

        self._factorsM = T.dot(self._mappings, self.wmf)

        self._preactReconsX = T.dot(self._factorsY * self._factorsM,
                                    self.wxf.T) + self.bx
        self._preactReconsY = T.dot(self._factorsX * self._factorsM,
                                    self.wyf.T) + self.by

        if self.input_type == 'real':
            self._reconsX = self._preactReconsX
            self._reconsY = self._preactReconsY
            self._costpercase = T.sum(
                0.5 * ((self._inputsX - self._reconsX)**2) + 0.5 *
                ((self._inputsY - self._reconsY)**2),
                axis=1)
        elif self.input_type == 'binary':
            self._reconsX = T.nnet.sigmoid(self._preactReconsX)
            self._reconsY = T.nnet.sigmoid(self._preactReconsY)
            self._costpercase = -T.sum(
                0.5 *
                (self._inputsY * T.log(self._reconsY) +
                 (1.0 - self._inputsY) * T.log(1.0 - self._reconsY)) + 0.5 *
                (self._inputsX * T.log(self._reconsX) +
                 (1.0 - self._inputsX) * T.log(1.0 - self._reconsX)),
                axis=1)
        else:
            raise ValueError('unsupported output type')

        self._cost = T.mean(self._costpercase)
        self._grads = T.grad(self._cost, self.params)

        self.pMGivenXY = theano.OpFromGraph([self._inputsX, self._inputsY],
                                            [self._preactMappings])

        self.pXGivenMY = theano.OpFromGraph([self._mappings, self._inputsY],
                                            [self._preactReconsX])

        self.pYGivenMX = theano.OpFromGraph([self._mappings, self._inputsX],
                                            [self._preactReconsY])

        self.mappings = theano.function(
            [self.inputs],
            self.act_fn(self.pMGivenXY(self._inputsX, self._inputsY)))