def make_node(self, x):
    """Build the Apply node for unique-with-axis.

    When ``self.axis`` is None the unique values are a flat 1-D vector;
    otherwise the output keeps the input's broadcast pattern except along
    the (normalized) axis, which becomes non-broadcastable. Optional 1-D
    int64 outputs are appended for index/inverse/counts as configured.
    """
    x = basic.as_tensor_variable(x)
    ndim = len(x.broadcastable)
    axis = self.axis
    if axis is None:
        # Flattened unique: result is always a 1-D vector.
        broadcastable = [False]
    else:
        if axis < 0:
            # Normalize a negative axis against the input rank.
            axis += ndim
        if not (0 <= axis < ndim):
            raise RuntimeError(
                "Unique axis `{}` is outside of input ndim = "
                "{}.".format(self.axis, ndim))
        broadcastable = [False if i == axis else b
                         for i, b in enumerate(x.broadcastable)]
    outputs = [basic.TensorType(broadcastable=broadcastable, dtype=x.dtype)()]
    int_vector = basic.TensorType(broadcastable=[False], dtype="int64")
    for wanted in (self.return_index, self.return_inverse, self.return_counts):
        if wanted:
            outputs.append(int_vector())
    return theano.Apply(self, [x], outputs)
def make_node(self, x):
    """Build the Apply node for flattened unique.

    The unique values and every optional output (index / inverse /
    counts) are 1-D vectors; the optional outputs are int64.
    """
    x = basic.as_tensor_variable(x)
    outputs = [basic.TensorType(broadcastable=[False], dtype=x.dtype)()]
    index_type = basic.TensorType(broadcastable=[False], dtype='int64')
    for wanted in (self.return_index, self.return_inverse, self.return_counts):
        if wanted:
            outputs.append(index_type())
    return theano.Apply(self, [x], outputs)
def make_node(self, V, W, b, d):
    """Build the Apply node for the 3-D convolution.

    Parameters
    ----------
    V
        Visible unit, input(batch,row,column,time,in channel)
    W
        Weights, filter(out channel,row,column,time,in channel)
    b
        bias, shape == (W.shape[0],)
    d
        strides when moving the filter over the input(dx,dy,dt)
    """
    v = T.as_tensor_variable(V)
    w = T.as_tensor_variable(W)
    bias = T.as_tensor_variable(b)
    strides = T.as_tensor_variable(d)
    # Batch and out-channel broadcastability carry over from the inputs;
    # the three spatial/time dims of the result are unknown at graph time.
    out_bcast = (v.broadcastable[0], False, False, False, w.broadcastable[0])
    out_type = T.TensorType(v.dtype, out_bcast)
    return theano.Apply(self,
                        inputs=[v, w, bias, strides],
                        outputs=[out_type()])
def make_node(self, *inp):
    """Build the Apply node for ravel_multi_index.

    Parameters
    ----------
    *inp
        All arguments but the last are integer index arrays
        (the multi-index); the last argument is ``dims``, a 1-D
        integer shape.

    Returns
    -------
    Apply
        A node with one int64 output whose ndim matches the first
        index array.

    Raises
    ------
    TypeError
        If fewer than two arguments are given, if any index array or
        ``dims`` has a non-integer dtype, or if ``dims`` is not 1-D.
    """
    # Fix: previously a missing multi-index fell through to an opaque
    # IndexError on `inp[-1]` / `multi_index[0]`; fail loudly instead.
    if len(inp) < 2:
        raise TypeError("ravel_multi_index requires at least one "
                        "multi_index array and a dims argument")
    multi_index = [basic.as_tensor_variable(i) for i in inp[:-1]]
    dims = basic.as_tensor_variable(inp[-1])
    for i in multi_index:
        if i.dtype not in basic.int_dtypes:
            raise TypeError(
                "'%s' object cannot be interpreted as an index" % str(i.dtype)
            )
    if dims.dtype not in basic.int_dtypes:
        raise TypeError(
            "'%s' object cannot be interpreted as an index" % str(dims.dtype)
        )
    if dims.ndim != 1:
        raise TypeError("dims must be a 1D array")
    return gof.Apply(
        self,
        multi_index + [dims],
        [
            # Output shape mirrors the first index array; NumPy requires
            # all index arrays to be broadcast-compatible anyway.
            basic.TensorType(
                dtype="int64", broadcastable=(False,) * multi_index[0].ndim
            )()
        ],
    )
def make_node(self, V, d, WShape, dCdH):
    """Build the Apply node.

    NOTE(review): presumably V is the conv input, d the strides, WShape
    the filter shape and dCdH the gradient w.r.t. the conv output —
    confirm against the op's perform/grad implementation.
    The single output is a 5-D tensor (no broadcastable dims) with V's
    dtype.
    """
    inputs = [T.as_tensor_variable(a) for a in (V, d, WShape, dCdH)]
    out_type = T.TensorType(inputs[0].dtype, (False,) * 5)
    return theano.Apply(self, inputs=inputs, outputs=[out_type()])
def make_node(self, W, b, d, H, RShape=None):
    """Build the Apply node for the transposed 3-D convolution.

    Parameters
    ----------
    W
        Weights, filter
    b
        Bias, shape == (W.shape[0],).
    d
        Strides when moving the filter over the input.
    H
        The output of Conv3D.
    RShape
        Optional reconstructed-output shape; when omitted, (-1, -1, -1)
        is used as a sentinel meaning "infer the shape".

    Returns
    -------
    Apply
        A node with one 5-D output of H's dtype.
    """
    W_ = T.as_tensor_variable(W)
    b_ = T.as_tensor_variable(b)
    d_ = T.as_tensor_variable(d)
    H_ = T.as_tensor_variable(H)
    # Fix: use an explicit None check instead of `if RShape:` —
    # truthiness raises on multi-element numpy arrays ("ambiguous truth
    # value") and would silently discard a falsy-but-provided value.
    if RShape is not None:
        RShape_ = T.as_tensor_variable(RShape)
    else:
        RShape_ = T.as_tensor_variable([-1, -1, -1])
    return theano.Apply(
        self,
        inputs=[W_, b_, d_, H_, RShape_],
        outputs=[
            T.TensorType(H_.dtype, (False, False, False, False, False))()
        ])
def make_node(self, indices, dims):
    """Build the Apply node for unravel_index.

    Parameters
    ----------
    indices
        Integer array of flat indices.
    dims
        1-D integer array: the shape to unravel into.

    Returns
    -------
    Apply
        A node with ``self.ndim`` int64 outputs, each with the same
        ndim as ``indices``.

    Raises
    ------
    TypeError
        If ``indices`` or ``dims`` has a non-integer dtype, or ``dims``
        is not 1-D.
    """
    indices = basic.as_tensor_variable(indices)
    dims = basic.as_tensor_variable(dims)
    if indices.dtype not in basic.int_dtypes:
        raise TypeError("'%s' object cannot be interpreted as an index"
                        % str(indices.dtype))
    if dims.dtype not in basic.int_dtypes:
        raise TypeError("'%s' object cannot be interpreted as an index"
                        % str(dims.dtype))
    if dims.ndim != 1:
        raise TypeError("dims must be a 1D array")
    # Fix: `range` instead of Python-2-only `xrange` — the file already
    # uses Python-3 syntax (f-strings elsewhere), where xrange is a
    # NameError. Iteration behavior is identical.
    return gof.Apply(
        self,
        [indices, dims],
        [basic.TensorType(dtype='int64',
                          broadcastable=(False,) * indices.ndim)()
         for i in range(self.ndim)])
def make_node(self, indices, dims):
    """Build the Apply node for unravel_index.

    Produces one int64 output per entry of ``dims`` (its symbolic
    vector length), each shaped like ``indices``. Rejects non-integer
    ``indices``/``dims`` dtypes and non-1-D ``dims``.
    """
    indices = basic.as_tensor_variable(indices)
    dims = basic.as_tensor_variable(dims)
    if indices.dtype not in basic.int_dtypes:
        raise TypeError(
            f"'{indices.dtype}' object cannot be interpreted as an index")
    if dims.dtype not in basic.int_dtypes:
        raise TypeError(
            f"'{dims.dtype}' object cannot be interpreted as an index")
    if dims.ndim != 1:
        raise TypeError("dims must be a 1D array")
    n_outputs = basic.get_vector_length(dims)
    out_type = basic.TensorType(
        dtype="int64", broadcastable=(False,) * indices.ndim)
    return Apply(self,
                 [indices, dims],
                 [out_type() for _ in range(n_outputs)])