def make_node(self, y_true, y_score):
    """Build an Apply node scoring predictions against labels.

    Both arguments are coerced to tensor variables; the single output is
    a scalar of dtype ``config.floatX`` named after this Op.
    """
    y_true = T.as_tensor_variable(y_true)
    y_score = T.as_tensor_variable(y_score)
    score = T.scalar(name=self.name, dtype=config.floatX)
    return gof.Apply(self, [y_true, y_score], [score])
def make_node(self, n, p):
    """Build an Apply node; `p` must be a CSR/CSC sparse variable.

    The output shares `p`'s sparse type.
    """
    n_var = tensor.as_tensor_variable(n)
    p_var = as_sparse_variable(p)
    assert p_var.format in ("csr", "csc")
    return gof.Apply(self, [n_var, p_var], [p_var.type()])
def make_node(self, x):
    """Identity-typed node: the single output shares `x`'s exact type."""
    return gof.Apply(self, [x], [x.type()])
def make_node(self, x):
    """Build a node whose output is an int64 vector (the shape of `x`).

    Must work for any type that has a ``shape`` attribute; unsuitable
    inputs only fail at execution time, not here.
    """
    x = x if isinstance(x, theano.Variable) else theano.tensor.as_tensor_variable(x)
    return gof.Apply(self, [x], [theano.tensor.lvector()])
def make_node(self, mean_anom, eccen):
    """Build an Apply node with two outputs typed after the mean anomaly."""
    mean_anom = tt.as_tensor_variable(mean_anom)
    eccen = tt.as_tensor_variable(eccen)
    out_type = mean_anom.type
    return gof.Apply(self, [mean_anom, eccen], [out_type(), out_type()])
def make_node(self, data):
    """Two outputs: an opaque Generic variable plus one typed like `data`."""
    generic_out = theano.Variable(Generic())
    return gof.Apply(self, [data], [generic_out, data.type()])
def make_node(self, path):
    """Build a node loading a tensor from `path`.

    A plain string path is wrapped into a Generic Constant; the output
    tensor uses this Op's configured dtype and broadcastable pattern.
    """
    if isinstance(path, str):
        path = Constant(Generic(), path)
    out = tensor(self.dtype, broadcastable=self.broadcastable)
    return gof.Apply(self, [path], [out])
def make_node(self, mean_anom, eccen):
    """Build an Apply node whose output upcasts the two input dtypes.

    The output's rank (all non-broadcastable dims) follows `mean_anom`.
    """
    out_dtype = theano.scalar.upcast(mean_anom.dtype, eccen.dtype)
    out_bcast = [False] * mean_anom.ndim
    out_var = tt.TensorType(dtype=out_dtype, broadcastable=out_bcast)()
    return gof.Apply(self, [mean_anom, eccen], [out_var])
def make_node(self, a):
    """Coerce `a` to a tensor variable; the output shares its type."""
    a_var = theano.tensor.as_tensor_variable(a)
    return gof.Apply(self, [a_var], [a_var.type()])
def make_node(self, x, *inputs):
    """GPU subtensor node: reuse the host Subtensor's node construction.

    The host-side ``make_node`` validates/canonicalizes the index inputs
    and infers the output dtype and broadcastable pattern; we then mirror
    that type on the GPU.
    """
    rval = tensor.Subtensor.make_node(self, x, *inputs)
    otype = GpuArrayType(dtype=rval.outputs[0].type.dtype,
                         broadcastable=rval.outputs[0].type.broadcastable)
    # Only `x` moves to the GPU; the index inputs from the host node are
    # reused as-is.
    x = as_gpuarray_variable(x)
    return gof.Apply(self, [x] + rval.inputs[1:], [otype()])
def make_node(self, bc):
    """Coerce `bc` to a tensor variable and type the output after it.

    Bug fix: the original converted ``bc`` only inside the inputs list
    while calling ``bc.type()`` on the raw argument, which fails (or
    yields a mismatched output type) when a non-Variable such as a list
    or ndarray is passed.
    """
    bc = tt.as_tensor_variable(bc)
    return gof.Apply(self, [bc], [bc.type()])
def make_node(self, x):
    """Build an Apply node with two outputs typed like the 4-D input.

    Raises:
        TypeError: if `x` is not 4-dimensional.
    """
    # Convert first so the ndim check also covers raw ndarrays/lists; the
    # original read ``x.type.ndim`` before conversion, raising
    # AttributeError instead of TypeError for non-Variable inputs.
    x = tensor.as_tensor_variable(x)
    if x.type.ndim != 4:
        raise TypeError()
    return gof.Apply(self, [x], [x.type(), x.type()])
def make_node(self, x, LrnOut, scale, gz):
    """LRN gradient node: all four inputs must be 4-D Variables.

    The single output shares `x`'s type.
    """
    for var in (x, LrnOut, scale, gz):
        assert isinstance(var, Variable) and var.ndim == 4
    return gof.Apply(self, [x, LrnOut, scale, gz], [x.type()])
def make_node(self, g_y, coding_dist, true_one_of_n):
    """Gradient node: the output is typed after `coding_dist`."""
    return gof.Apply(self, [g_y, coding_dist, true_one_of_n],
                     [coding_dist.type()])
def make_node(self):
    """Fixed-signature node: one matrix input, two scalar outputs."""
    return gof.Apply(
        self,
        [theano.tensor.matrix()],
        [theano.tensor.scalar(), theano.tensor.scalar()],
    )
def make_node(self, a, b):
    """Single output typed after `b`; `a` is carried along unchanged."""
    return gof.Apply(self, [a, b], [b.type()])
def make_node(self, request, data):
    """Output mirrors `data`'s dtype and broadcastable pattern."""
    out_var = tensor(data.dtype, broadcastable=data.broadcastable)
    return gof.Apply(self, [request, data], [out_var])
def make_node(self, a, b):
    """Two outputs, both typed after `a`."""
    return gof.Apply(self, [a, b], [a.type(), a.type()])
def make_node(self, request, data):
    """Single opaque (Generic-typed) output."""
    generic_out = theano.Variable(Generic())
    return gof.Apply(self, [request, data], [generic_out])
def make_node(self, membuffer, index):
    """Build an Apply node; the output is typed like `membuffer`."""
    # index has to be a scalar
    assert index.ndim == 0
    # we need at least one dimension
    assert membuffer.ndim > 0
    return gof.Apply(self, [membuffer, index], [membuffer.type()])
def make_node(self, *args):
    """Coerce all args to tensor variables; output typed after the 4th.

    NOTE(review): ``in_args[3]`` assumes at least four arguments are
    always passed — confirm against callers.
    """
    in_args = [tt.as_tensor_variable(a) for a in args]
    out_args = [in_args[3].type()]
    return gof.Apply(self, in_args, out_args)
def make_node(self, x):
    """Build an Apply node with one output typed like the 4-D input `x`.

    Raises:
        TypeError: if `x` is not 4-dimensional.
    """
    if x.type.ndim != 4:
        raise TypeError()
    # TODO: consider restricting the dtype?
    return gof.Apply(self, [x], [x.type()])
def make_node(self, *args):
    """Build an Apply node with a single int64 scalar output.

    HERE `args` must already be THEANO VARIABLES — no coercion is done.
    """
    return gof.Apply(op=self, inputs=args, outputs=[tensor.lscalar()])
def make_node(self, *inputs):
    """Output is a matrix with the first input's dtype."""
    tensor_inputs = list(map(tt.as_tensor_variable, inputs))
    out = tt.TensorType(tensor_inputs[0].dtype, (False, False))()
    return gof.Apply(self, tensor_inputs, [out])
def make_node(self, x):
    """Coerce `x` to a sparse variable; the output shares its type."""
    sp_x = as_sparse_variable(x)
    return gof.Apply(self, [sp_x], [sp_x.type()])
def make_node(self, *inputs):
    """One output per input except the last, each typed like its input."""
    converted = [tt.as_tensor_variable(v) for v in inputs]
    outs = [v.type() for v in converted[:-1]]
    return gof.Apply(self, converted, outs)
def make_node(self, x):
    """Build an Apply node with one output typed like the 4-D input.

    Raises:
        TypeError: if `x` is not 4-dimensional.
    """
    # Convert first so the ndim check also covers raw ndarrays/lists; the
    # original read ``x.type.ndim`` before conversion, raising
    # AttributeError instead of TypeError for non-Variable inputs.
    x = tensor.as_tensor_variable(x)
    if x.type.ndim != 4:
        raise TypeError()
    # TODO: consider restricting the dtype?
    return gof.Apply(self, [x], [x.type()])
def make_node(self, *inputs):
    """Build an Apply node with a single vector output.

    NOTE(review): inputs are passed through unconverted — presumably
    callers supply Theano variables already; confirm.
    """
    outputs = [theano.tensor.vector()]
    return gof.Apply(self, inputs, outputs)
def make_node(self, arg):
    """Coerce `arg` to a tensor variable and type the output after it.

    Bug fix: the original converted ``arg`` only inside the inputs list
    while calling ``arg.type()`` on the raw argument, which fails (or
    yields a mismatched output type) when a non-Variable such as a list
    or ndarray is passed.
    """
    arg = tt.as_tensor_variable(arg)
    return gof.Apply(self, [arg], [arg.type()])
def make_node(self, *inputs):
    """All inputs coerced to tensors; single int8 scalar output."""
    tensor_ins = [tt.as_tensor_variable(v) for v in inputs]
    return gof.Apply(self, tensor_ins, [tt.bscalar()])