def make_node(self, images, top_down):
    """
    .. todo::

        WRITEME
    """
    images = as_cuda_ndarray_variable(images)
    top_down = as_cuda_ndarray_variable(top_down)

    assert images.ndim == 4
    assert top_down.ndim == 4

    # Channel and batch axes inherit broadcastability from the input.
    # The spatial extent of the output is only known at runtime, so the
    # row/column axes are never broadcastable.  Both outputs share the
    # same layout, so one type object serves for both.
    out_broadcastable = (images.type.broadcastable[0],
                         False,
                         False,
                         images.type.broadcastable[3])
    out_type = CudaNdarrayType(broadcastable=out_broadcastable)
    houtput = out_type()
    poutput = out_type()

    return Apply(self, [images, top_down], [houtput, poutput])
def make_node(self, images, filters):
    """
    .. todo::

        WRITEME
    """
    # Both inputs must already be CUDA variables.
    for name, var in (("images", images), ("filters", filters)):
        if not isinstance(var.type, CudaNdarrayType):
            raise TypeError(
                "FilterActs: expected %s.type to be CudaNdarrayType, "
                "got %s" % (name, str(var.type)))

    assert images.ndim == 4
    assert filters.ndim == 4

    # Output channels follow the filters; the batch axis follows the
    # images.  Whether rows/cols could broadcast depends on runtime
    # shapes (image vs. kernel size), so they are never broadcastable.
    targets_broadcastable = (filters.type.broadcastable[3],
                             False,
                             False,
                             images.type.broadcastable[3])
    targets = CudaNdarrayType(broadcastable=targets_broadcastable)()

    return Apply(self, [images, filters], [targets])
def make_node(self, images, acts, denoms, dout):
    """
    .. todo::

        WRITEME
    """
    # Validate every input, not just `images`.  The previous version
    # only ran the loop when images.type was already wrong, omitted the
    # '%' operator (so the raise itself crashed with "'str' object is
    # not callable"), and always reported images' type instead of the
    # offending variable's.
    inputs = images, acts, denoms, dout
    names = "images", "acts", "denoms", "dout"
    for name, var in zip(names, inputs):
        if not isinstance(var.type, CudaNdarrayType):
            raise TypeError("CrossMapNormUndo: expected %s.type "
                            "to be CudaNdarrayType, "
                            "got %s" % (name, str(var.type)))
    assert images.ndim == 4
    assert acts.ndim == 4
    assert denoms.ndim == 4
    assert dout.ndim == 4
    # Not strictly necessary I don't think
    assert images.type.broadcastable == acts.type.broadcastable
    assert images.type.broadcastable == denoms.type.broadcastable
    assert images.type.broadcastable == dout.type.broadcastable
    # Both outputs share the input's broadcastable pattern.
    targets_broadcastable = tuple(images.type.broadcastable)
    targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
    targets = targets_type()
    out_acts = targets_type()
    return Apply(self, [images, acts, denoms, dout], [targets, out_acts])
def test_optimization():
    """The grad graph should contain exactly one in-place CrossMapNormUndo."""
    op = CrossMapNorm(16, 15. / 16., 1, True)
    x_ = theano.tensor.TensorVariable(CudaNdarrayType([False] * 4))
    grad_fn = theano.function([x_], theano.grad(op(x_)[0].sum(), x_))
    undo_nodes = [node
                  for node in grad_fn.maker.fgraph.apply_nodes
                  if type(node.op) == CrossMapNormUndo]
    assert len(undo_nodes) == 1
    assert undo_nodes[0].op.inplace
def make_node(self, hid_acts, filters, output_shape=None):
    """
    .. todo::

        WRITEME

    Parameters
    ----------
    hid_acts : WRITEME
    filters : WRITEME
    output_shape : 2-element TensorVariable, optional
        The spatial shape of the image
    """
    # Both inputs must already be CUDA variables.
    for name, var in (("hid_acts", hid_acts), ("filters", filters)):
        if not isinstance(var.type, CudaNdarrayType):
            raise TypeError(
                "ImageActs: expected %s.type to be CudaNdarrayType, "
                "got %s" % (name, str(var.type)))

    if output_shape is None:
        # With a stride > 1 the image size cannot be recovered from the
        # hidden activation size alone, so the caller must supply it.
        if self.stride != 1:
            raise ValueError(
                "You must specify an output_shape for ImageActs if the stride is not 1."
            )
        hid_shape = hid_acts.shape[1:3]
        kernel_shape = filters.shape[1:3]
        output_shape = hid_shape + kernel_shape - 2 * self.pad - 1

    assert hid_acts.ndim == 4
    assert filters.ndim == 4

    # Output channels follow the filters; the batch axis follows
    # hid_acts.  The spatial extent depends on runtime shapes, so
    # rows/cols are never broadcastable.
    targets_broadcastable = (filters.type.broadcastable[3],
                             False,
                             False,
                             hid_acts.type.broadcastable[3])
    targets = CudaNdarrayType(broadcastable=targets_broadcastable)()

    return Apply(self, [hid_acts, filters, output_shape], [targets])
def make_node(self, images, hid_grads, output_shape):
    """
    .. todo::

        WRITEME
    """
    # Both inputs must already be CUDA variables.  (The second message
    # deliberately says "hid_acts", matching the original wording.)
    for name, var in (("images", images), ("hid_acts", hid_grads)):
        if not isinstance(var.type, CudaNdarrayType):
            raise TypeError("WeightActs: expected %s.type "
                            "to be CudaNdarrayType, "
                            "got %s" % (name, str(var.type)))

    assert images.ndim == 4
    assert hid_grads.ndim == 4

    # We don't know anything about filter_rows or filter_cols at compile
    # time, so we assume they're not broadcastable.
    weights_grads_type = CudaNdarrayType(
        (images.type.broadcastable[0],       # input channels
         False,                              # filter rows
         False,                              # filter cols
         hid_grads.type.broadcastable[0]))   # output channels
    partial_sums_type = CudaNdarrayType((False, ) * 5)

    weights_grads = weights_grads_type()
    partial_sums = partial_sums_type()
    return Apply(self, [images, hid_grads, output_shape],
                 [weights_grads, partial_sums])
def make_node(self, images):
    """
    .. todo::

        WRITEME
    """
    if not isinstance(images.type, CudaNdarrayType):
        raise TypeError("CrossMapNorm: expected images.type to be CudaNdarrayType, "
                        "got " + str(images.type))
    assert images.ndim == 4
    # Both outputs have exactly the input's layout.
    out_type = CudaNdarrayType(broadcastable=images.type.broadcastable)
    denoms = out_type()
    targets = out_type()
    return Apply(self, [images], [targets, denoms])
def make_node(self, images, filters):
    """
    .. todo::

        WRITEME
    """
    # Type-check first: the original unpacked the broadcastable
    # patterns before checking the types, so a non-CudaNdarray input
    # of the wrong rank raised a confusing unpacking ValueError
    # instead of the intended TypeError.
    if not isinstance(images.type, CudaNdarrayType):
        raise TypeError('gpu_filter_acts requires CudaNdarray images',
                        images)
    if not isinstance(filters.type, CudaNdarrayType):
        raise TypeError('gpu_filter_acts requires CudaNdarray filters',
                        filters)

    # images: (groups, colors_per_group, rows, cols, count)
    # filters: (modulesR, modulesC, colors, rows, cols, groups,
    #           filters_per_group) — presumably; inferred from the
    #           unpacking below, TODO confirm against the op docs.
    ibcast = images.broadcastable
    fbcast = filters.broadcastable
    igroups, icolors_per_group, irows, icols, icount = ibcast
    fmodulesR, fmodulesC, fcolors, frows, fcols = fbcast[:-2]
    fgroups, filters_per_group = fbcast[-2:]
    hbcast = (fgroups, filters_per_group, fmodulesR, fmodulesC, icount)

    htype = CudaNdarrayType(broadcastable=hbcast)
    return theano.gof.Apply(self,
                            [images, filters],
                            [htype()])
def make_node(self, images):
    """
    .. todo::

        WRITEME
    """
    images = as_cuda_ndarray_variable(images)
    assert images.ndim == 4

    # Channel and batch axes inherit broadcastability from the input;
    # the spatial extent depends on runtime shapes, so rows/cols are
    # never broadcastable.
    bcast = (images.type.broadcastable[0],
             False,
             False,
             images.type.broadcastable[3])
    targets = CudaNdarrayType(broadcastable=bcast)()

    return Apply(self, [images], [targets])
def test_cross_map_norm_simple():
    """Normalizing an all-ones input yields 1/16 everywhere."""
    op = CrossMapNorm(16, 15. / 16., 1., True)
    data = CudaNdarray(numpy.ones((16, 2, 2, 2), dtype='float32'))
    sym_in = theano.tensor.TensorVariable(CudaNdarrayType([False] * 4))
    norm_fn = theano.function([sym_in], op(sym_in)[0])
    numpy.testing.assert_allclose(norm_fn(data), 0.0625)