def max_pool(images, imgshp, maxpoolshp):
    """Implements a max pooling layer

    Takes as input a 2D tensor of shape batch_size x img_size and
    performs max pooling.  Max pooling downsamples by taking the max
    value in a given area, here defined by maxpoolshp. Outputs a 2D
    tensor of shape batch_size x output_size.

    :param images: 2D tensor containing images on which to apply max
                   pooling.  Assumed to be of shape batch_size x img_size
    :param imgshp: tuple containing image dimensions
    :param maxpoolshp: tuple containing shape of area to max pool over

    :return: out1, symbolic result (2D tensor)
    :return: out2, logical shape of the output
    """
    poolsize = np.int64(np.prod(maxpoolshp))

    # imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
    # in the first case, default nfeatures to 1
    if np.size(imgshp) == 2:
        imgshp = (1,) + imgshp

    # construct indices and index pointers for sparse matrix, which,
    # when multiplied with input images will generate a stack of image
    # patches
    indices, indptr, spmat_shape, sptype, outshp = convolution_indices.conv_eval(
        imgshp, maxpoolshp, maxpoolshp, mode="valid"
    )

    # build sparse matrix, then generate stack of image patches
    csc = aesara.sparse.CSM(sptype)(
        np.ones(indices.size), indices, indptr, spmat_shape
    )
    patches = sparse.structured_dot(csc, images.T).T

    pshape = aet.stack(
        [
            images.shape[0] * aet.as_tensor(np.prod(outshp)),
            aet.as_tensor(imgshp[0]),
            aet.as_tensor(poolsize),
        ]
    )
    patch_stack = reshape(patches, pshape, ndim=3)

    out1 = tt_max(patch_stack, axis=2)

    pshape = aet.stack(
        [
            images.shape[0],
            aet.as_tensor(np.prod(outshp)),
            aet.as_tensor(imgshp[0]),
        ]
    )
    out2 = reshape(out1, pshape, ndim=3)

    out3 = DimShuffle(out2.broadcastable, (0, 2, 1))(out2)

    return aet.flatten(out3, 2), outshp

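
# A minimal usage sketch for ``max_pool`` above, assuming it is importable
# from ``aesara.sparse.sandbox.sp`` (adjust the import to wherever the
# function lives in your tree).  It pools 4x4 single-channel images with a
# non-overlapping 2x2 window.
import numpy as np
import aesara
from aesara.tensor.type import matrix
from aesara.sparse.sandbox.sp import max_pool  # assumed import path

images = matrix("images")  # batch_size x img_size, images in raster order
pooled, outshp = max_pool(images, imgshp=(4, 4), maxpoolshp=(2, 2))
f = aesara.function([images], pooled)
print(f(np.arange(16, dtype=aesara.config.floatX).reshape(1, 16)))
print(outshp)  # logical output shape of each pooled feature map
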
def test_jax_Reshape():
    a = vector("a")
    x = reshape(a, (2, 2))
    x_fg = FunctionGraph([a], [x])
    compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])

    # Test breaking "omnistaging" changes in JAX.
    # See https://github.com/tensorflow/probability/commit/782d0c64eb774b9aac54a1c8488e4f1f96fbbc68
    x = reshape(a, (a.shape[0] // 2, a.shape[0] // 2))
    x_fg = FunctionGraph([a], [x])
    compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])

def test_jax_Reshape_nonconcrete():
    a = vector("a")
    b = iscalar("b")
    x = reshape(a, (b, b))
    x_fg = FunctionGraph([a, b], [x])
    compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX), 2])

def test_subtensor_multiple_slices(self):
    r"""
    This addresses a bug that happens when you have multiple subtensors
    on the output of `Scan`.  The bug requires the reshape to be produced,
    and it has something to do with how the `Subtensor`\s overlap.
    """

    def f_pow2(x_tm1):
        return 2 * x_tm1

    state = vector("state")
    n_steps = iscalar("nsteps")
    output, updates = scan(
        f_pow2,
        [],
        state,
        [],
        n_steps=n_steps,
        truncate_gradient=-1,
        go_backwards=False,
    )
    nw_shape = ivector("nw_shape")
    # Note that the output is reshaped to a 3-dimensional tensor, and
    # then sliced by overlapping subtensors.
    my_f = function(
        [state, n_steps, nw_shape],
        [reshape(output, nw_shape, ndim=3)[:-2], output[:-4]],
        updates=updates,
        allow_input_downcast=True,
    )

    nodes = [x for x in my_f.maker.fgraph.toposort() if isinstance(x.op, Scan)]
    # This assertion fails if the savemem optimization failed on the `Scan`
    if config.mode != "FAST_COMPILE":
        assert nodes[0].op._scan_savemem_visited

    rng = np.random.default_rng(utt.fetch_seed())
    my_f(rng.uniform(size=(3,)), 4, np.int64([2, 2, 3]))

def local_reshape_dimshuffle(fgraph, node):
    """
    If a dimshuffle is inside a reshape and does not change the order
    of dimensions, remove it.

    Reshape(Dimshuffle(x), shp) -> Reshape(x, shp)
    """
    if isinstance(node.op, Reshape):
        input_ = node.inputs[0]
        if input_.owner and isinstance(input_.owner.op, DimShuffle):
            new_order = input_.owner.op.new_order
            offset = 0
            for dim in new_order:
                if dim == "x":
                    continue
                elif dim != offset:
                    return False
                else:
                    offset += 1

            return [
                reshape(
                    input_.owner.inputs[0], node.inputs[1], ndim=node.outputs[0].ndim
                )
            ]

    return False

def test_local_reshape_dimshuffle():
    reshape_dimshuffle = out2in(local_reshape_dimshuffle)

    x = matrix("x")
    y = x.dimshuffle("x", 0, "x", 1)
    out = reshape(y, (1, x.shape[0] * x.shape[1], 1))

    g = FunctionGraph([x], [out])
    reshape_dimshuffle(g)

    # the rewrite should have removed the `DimShuffle` from the graph
    topo = g.toposort()
    assert not any(isinstance(node.op, DimShuffle) for node in topo)

def normal(
    self,
    size,
    avg=0.0,
    std=1.0,
    ndim=None,
    dtype=None,
    nstreams=None,
    truncate=False,
    **kwargs,
):
    """
    Sample a tensor of values from a normal distribution.

    Parameters
    ----------
    size : int_vector_like
        Array dimensions for the output tensor.
    avg : float_like, optional
        The mean value for the truncated normal to sample from (defaults to 0.0).
    std : float_like, optional
        The standard deviation for the truncated normal to sample from
        (defaults to 1.0).
    truncate : bool, optional
        Truncates the normal distribution at 2 standard deviations if True
        (defaults to False).  When this flag is set, the standard deviation
        of the result will be less than the one specified.
    ndim : int, optional
        The number of dimensions for the output tensor (defaults to None).
        This argument is necessary if the size argument is ambiguous on the
        number of dimensions.
    dtype : str, optional
        The data-type for the output tensor.  If not specified, the dtype is
        inferred from avg and std, but it is at least as precise as floatX.
    kwargs
        Other keyword arguments for random number generation (see uniform).

    Returns
    -------
    samples : TensorVariable
        An Aesara tensor of samples randomly drawn from a normal distribution.

    """
    size = _check_size(size)
    avg = undefined_grad(as_tensor_variable(avg))
    std = undefined_grad(as_tensor_variable(std))

    if dtype is None:
        dtype = aes.upcast(config.floatX, avg.dtype, std.dtype)

    avg = at.cast(avg, dtype=dtype)
    std = at.cast(std, dtype=dtype)

    # generate even number of uniform samples
    # Do manual constant folding to lower the optimizer's work.
    if isinstance(size, Constant):
        n_odd_samples = size.prod(dtype="int64")
    else:
        n_odd_samples = prod(size, dtype="int64")
    n_even_samples = n_odd_samples + n_odd_samples % 2
    uniform = self.uniform(
        (n_even_samples,),
        low=0.0,
        high=1.0,
        ndim=1,
        dtype=dtype,
        nstreams=nstreams,
        **kwargs,
    )

    # box-muller transform
    u1 = uniform[: n_even_samples // 2]
    u2 = uniform[n_even_samples // 2 :]
    r = sqrt(-2.0 * log(u1))
    theta = np.array(2.0 * np.pi, dtype=dtype) * u2
    cos_theta, sin_theta = cos(theta), sin(theta)
    z0 = r * cos_theta
    z1 = r * sin_theta

    if truncate:
        # use valid samples
        to_fix0 = (z0 < -2.0) | (z0 > 2.0)
        to_fix1 = (z1 < -2.0) | (z1 > 2.0)
        z0_valid = z0[at.nonzero(~to_fix0)]
        z1_valid = z1[at.nonzero(~to_fix1)]

        # re-sample invalid samples
        to_fix0 = at.nonzero(to_fix0)[0]
        to_fix1 = at.nonzero(to_fix1)[0]
        n_fix_samples = to_fix0.size + to_fix1.size
        lower = at.constant(1.0 / np.e**2, dtype=dtype)
        u_fix = self.uniform(
            (n_fix_samples,),
            low=lower,
            high=1.0,
            ndim=1,
            dtype=dtype,
            nstreams=nstreams,
            **kwargs,
        )
        r_fix = sqrt(-2.0 * log(u_fix))
        z0_fixed = r_fix[: to_fix0.size] * cos_theta[to_fix0]
        z1_fixed = r_fix[to_fix0.size :] * sin_theta[to_fix1]

        # pack everything together to a useful result
        norm_samples = at.join(0, z0_valid, z0_fixed, z1_valid, z1_fixed)
    else:
        norm_samples = at.join(0, z0, z1)
    if isinstance(n_odd_samples, Variable):
        samples = norm_samples[:n_odd_samples]
    elif n_odd_samples % 2 == 1:
        samples = norm_samples[:-1]
    else:
        samples = norm_samples
    samples = reshape(samples, newshape=size, ndim=ndim)
    samples *= std
    samples += avg

    return samples

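
# The sampler above draws an even number of uniform samples and maps them to
# normals via the Box-Muller transform.  A minimal NumPy sketch of that
# transform (illustrative only, independent of the Aesara code above):
import numpy as np

def box_muller(u1, u2):
    # (r, theta) are the polar coordinates of a standard 2D Gaussian point
    r = np.sqrt(-2.0 * np.log(u1))
    theta = 2.0 * np.pi * u2
    return r * np.cos(theta), r * np.sin(theta)

rng = np.random.default_rng(0)
z0, z1 = box_muller(rng.uniform(size=1000), rng.uniform(size=1000))
z = np.concatenate([z0, z1])
# the samples should be approximately standard normal
assert abs(z.mean()) < 0.1
assert abs(z.std() - 1.0) < 0.1
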
def conv2d(
    input,
    filters,
    image_shape=None,
    filter_shape=None,
    border_mode="valid",
    subsample=(1, 1),
    **kargs,
):
    """
    signal.conv.conv2d performs a basic 2D convolution of the input with the
    given filters.  The input parameter can be a single 2D image or a 3D
    tensor, containing a set of images.  Similarly, filters can be a single
    2D filter or a 3D tensor, corresponding to a set of 2D filters.

    Shape parameters are optional and will result in faster execution.

    Parameters
    ----------
    input : Symbolic aesara tensor for images to be filtered.
        Dimensions: ([num_images], image height, image width)
    filters : Symbolic aesara tensor for convolution filter(s).
        Dimensions: ([num_filters], filter height, filter width)
    border_mode : {'valid', 'full'}
        See scipy.signal.convolve2d.
    subsample
        Factor by which to subsample output.
    image_shape : tuple of length 2 or 3
        ([num_images,] image height, image width).
    filter_shape : tuple of length 2 or 3
        ([num_filters,] filter height, filter width).
    kwargs
        See aesara.tensor.nnet.conv.conv2d.

    Returns
    -------
    symbolic 2D, 3D or 4D tensor
        Tensor of filtered images, with shape
        ([number images,] [number filters,] image height, image width).

    """
    assert input.ndim in (2, 3)
    assert filters.ndim in (2, 3)

    # use shape information if it is given to us ###
    if filter_shape and image_shape:
        if input.ndim == 3:
            bsize = image_shape[0]
        else:
            bsize = 1
        imshp = (1,) + tuple(image_shape[-2:])

        if filters.ndim == 3:
            nkern = filter_shape[0]
        else:
            nkern = 1
        kshp = filter_shape[-2:]
    else:
        nkern, kshp = None, None
        bsize, imshp = None, None

    # reshape tensors to 4D, for compatibility with ConvOp ###
    if input.ndim == 3:
        sym_bsize = input.shape[0]
    else:
        sym_bsize = 1

    if filters.ndim == 3:
        sym_nkern = filters.shape[0]
    else:
        sym_nkern = 1

    new_input_shape = aet.join(0, aet.stack([sym_bsize, 1]), input.shape[-2:])
    input4D = reshape(input, new_input_shape, ndim=4)

    new_filter_shape = aet.join(0, aet.stack([sym_nkern, 1]), filters.shape[-2:])
    filters4D = reshape(filters, new_filter_shape, ndim=4)

    # perform actual convolution ###
    op = conv.ConvOp(
        output_mode=border_mode,
        dx=subsample[0],
        dy=subsample[1],
        imshp=imshp,
        kshp=kshp,
        nkern=nkern,
        bsize=bsize,
        **kargs,
    )

    output = op(input4D, filters4D)

    # flatten to 3D tensor if convolving with single filter or single image
    if input.ndim == 2 and filters.ndim == 2:
        if config.warn__signal_conv2d_interface:
            warnings.warn(
                "aesara.tensor.signal.conv2d() now outputs a 2d tensor when both"
                " inputs are 2d. To disable this warning, set the Aesara flag"
                " warn__signal_conv2d_interface to False",
                stacklevel=3,
            )

        output = aet.flatten(output.T, ndim=2).T
    elif input.ndim == 2 or filters.ndim == 2:
        output = aet.flatten(output.T, ndim=3).T

    return output

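
# A minimal usage sketch for the conv2d above, assuming it is importable as
# ``aesara.tensor.signal.conv.conv2d`` (adjust to your tree).  The static
# shape arguments are optional but let ConvOp generate faster code.
import numpy as np
import aesara
from aesara.tensor.type import tensor3
from aesara.tensor.signal.conv import conv2d  # assumed import path

images = tensor3("images")    # (num_images, height, width)
filters = tensor3("filters")  # (num_filters, filter height, filter width)
out = conv2d(images, filters, image_shape=(2, 5, 5), filter_shape=(3, 3, 3))
f = aesara.function([images, filters], out)
res = f(
    np.ones((2, 5, 5), dtype=aesara.config.floatX),
    np.ones((3, 3, 3), dtype=aesara.config.floatX),
)
print(res.shape)  # (2, 3, 3, 3) in "valid" mode: images x filters x out_h x out_w
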
def convolve(
    kerns,
    kshp,
    nkern,
    images,
    imgshp,
    step=(1, 1),
    bias=None,
    mode="valid",
    flatten=True,
):
    """Convolution implementation by sparse matrix multiplication.

    :note: For best speed, put the matrix which you expect to be
           smaller as the 'kernel' argument

    "images" is assumed to be a matrix of shape batch_size x img_size,
    where the second dimension represents each image in raster order

    If flatten is "False", the output feature map will have shape:

    .. code-block:: python

        batch_size x number of kernels x output_size

    If flatten is "True", the output feature map will have shape:

    .. code-block:: python

        batch_size x number of kernels * output_size

    .. note::

        IMPORTANT: note that this means that each feature map (image
        generated by each kernel) is contiguous in memory.  The memory
        layout will therefore be:
        [ <feature_map_0> <feature_map_1> ... <feature_map_n>],
        where <feature_map> represents a "feature map" in raster order

    kerns is a 2D tensor of shape nkern x np.prod(kshp)

    :param kerns: 2D tensor containing kernels which are applied at every pixel
    :param kshp: tuple containing actual dimensions of kernel (not symbolic)
    :param nkern: number of kernels/filters to apply.
                  nkern=1 will apply one common filter to all input pixels
    :param images: tensor containing images on which to apply convolution
    :param imgshp: tuple containing image dimensions
    :param step: determines number of pixels between adjacent receptive fields
                 (tuple containing dx,dy values)
    :param bias: optional bias to add to the output of each feature map
    :param mode: 'full', 'valid' see CSM.evaluate function for details
    :param flatten: flatten the last 2 dimensions of the output.  By default,
                    instead of generating a batchsize x outsize x nkern tensor,
                    will flatten to batchsize x outsize*nkern

    :return: out1, symbolic result
    :return: out2, logical shape of the output img (nkern, height, width)

    :TODO: test for 1D and think of how to do n-d convolutions
    """
    # start by computing output dimensions, size, etc
    kern_size = np.int64(np.prod(kshp))

    # inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
    # in the first case, default nfeatures to 1
    if np.size(imgshp) == 2:
        imgshp = (1,) + imgshp

    # construct indices and index pointers for sparse matrix, which,
    # when multiplied with input images will generate a stack of image
    # patches
    indices, indptr, spmat_shape, sptype, outshp = convolution_indices.conv_eval(
        imgshp, kshp, step, mode
    )

    # build sparse matrix, then generate stack of image patches
    csc = aesara.sparse.CSM(sptype)(
        np.ones(indices.size), indices, indptr, spmat_shape
    )
    patches = (sparse.structured_dot(csc, images.T)).T

    # compute output of linear classifier
    pshape = aet.stack(
        [
            images.shape[0] * aet.as_tensor(np.prod(outshp)),
            aet.as_tensor(imgshp[0] * kern_size),
        ]
    )
    patch_stack = reshape(patches, pshape, ndim=2)

    # kern is of shape: nkern x ksize*number_of_input_features
    # output is thus of shape: bsize*outshp x nkern
    output = dot(patch_stack, kerns.T)

    # add bias across each feature map (more efficient to do it now)
    if bias is not None:
        output += bias

    # now to have feature maps in raster order ...
    # go from bsize*outshp x nkern to bsize x nkern*outshp
    newshp = aet.stack(
        [
            images.shape[0],
            aet.as_tensor(np.prod(outshp)),
            aet.as_tensor(nkern),
        ]
    )
    tensout = reshape(output, newshp, ndim=3)
    output = DimShuffle((False,) * tensout.ndim, (0, 2, 1))(tensout)
    if flatten:
        output = aet.flatten(output, 2)

    return output, np.hstack((nkern, outshp))

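
# A minimal usage sketch for ``convolve``, assuming the same
# ``aesara.sparse.sandbox.sp`` import path as ``max_pool`` above.
# It convolves 5x5 single-channel images with two flattened 3x3 kernels.
import numpy as np
import aesara
from aesara.tensor.type import matrix
from aesara.sparse.sandbox.sp import convolve  # assumed import path

kerns = matrix("kerns")    # nkern x prod(kshp), flattened kernels
images = matrix("images")  # batch_size x img_size, images in raster order
out, outshp = convolve(kerns, kshp=(3, 3), nkern=2, images=images, imgshp=(5, 5))
f = aesara.function([kerns, images], out)
res = f(
    np.ones((2, 9), dtype=aesara.config.floatX),
    np.ones((1, 25), dtype=aesara.config.floatX),
)
print(res.shape)  # (1, 18): batch_size x nkern*output_size, flatten=True default
print(outshp)     # logical output shape, e.g. [2 3 3]
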
def test_basics(self):
    a = dvector()
    b = dmatrix()
    d = dmatrix()

    # basic to 1 dim (without list)
    c = reshape(b, as_tensor_variable(6), ndim=1)
    f = self.function([b], c)

    b_val1 = np.asarray([[0, 1, 2], [3, 4, 5]])
    c_val1 = np.asarray([0, 1, 2, 3, 4, 5])
    b_val2 = b_val1.T
    c_val2 = np.asarray([0, 3, 1, 4, 2, 5])

    f_out1 = f(b_val1)
    f_out2 = f(b_val2)
    assert np.array_equal(f_out1, c_val1), (f_out1, c_val1)
    assert np.array_equal(f_out2, c_val2), (f_out2, c_val2)

    # basic to 1 dim (with list)
    c = reshape(b, (as_tensor_variable(6),), ndim=1)
    f = self.function([b], c)
    assert np.array_equal(
        f(np.asarray([[0, 1, 2], [3, 4, 5]])), np.asarray([0, 1, 2, 3, 4, 5])
    )

    # basic to shape object of same ndim
    c = reshape(b, d.shape)
    f = self.function([b, d], c)
    assert np.array_equal(
        f(np.asarray([[0, 1, 2], [3, 4, 5]]), [[0, 1], [2, 3], [4, 5]]),
        np.asarray([[0, 1], [2, 3], [4, 5]]),
    )

    # basic to 2 dims
    c = reshape(a, [2, 3])
    f = self.function([a], c)
    assert np.array_equal(
        f(np.asarray([0, 1, 2, 3, 4, 5])), np.asarray([[0, 1, 2], [3, 4, 5]])
    )

    # test that it works without inplace operations
    a_val = np.asarray([0, 1, 2, 3, 4, 5])
    a_val_copy = np.asarray([0, 1, 2, 3, 4, 5])
    b_val = np.asarray([[0, 1, 2], [3, 4, 5]])

    f_sub = self.function([a, b], c - b)
    assert np.array_equal(f_sub(a_val, b_val), np.zeros_like(b_val))
    assert np.array_equal(a_val, a_val_copy)

    # test that it works with inplace operations
    a_val = _asarray([0, 1, 2, 3, 4, 5], dtype="float64")
    a_val_copy = _asarray([0, 1, 2, 3, 4, 5], dtype="float64")
    b_val = _asarray([[0, 1, 2], [3, 4, 5]], dtype="float64")

    f_sub = self.function([a, b], c - b)
    assert np.array_equal(f_sub(a_val, b_val), np.zeros_like(b_val))
    assert np.array_equal(a_val, a_val_copy)

    # verify gradient
    def just_vals(v):
        return Reshape(2)(v, _asarray([2, 3], dtype="int32"))

    utt.verify_grad(just_vals, [a_val], mode=self.mode)

    # test infer_shape
    self._compile_and_check([a], [c], (a_val,), self.op)

    # test broadcast flag for constant value of 1
    c = reshape(b, (b.shape[0], b.shape[1], 1))
    # That reshape may get replaced with a dimshuffle, which is ignored,
    # so we pass "ignore_empty=True"
    f = self.function([b], c, ignore_empty=True)
    assert np.array_equal(
        f(np.asarray([[0, 1, 2], [3, 4, 5]])),
        np.asarray([[[0], [1], [2]], [[3], [4], [5]]]),
    )
    assert f.maker.fgraph.toposort()[-1].outputs[0].type.broadcastable == (
        False,
        False,
        True,
    )

    # test broadcast flag for constant value of 1 if it cannot be
    # replaced with dimshuffle
    c = reshape(b, (b.shape[1], b.shape[0], 1))
    f = self.function([b], c, ignore_empty=True)
    assert np.array_equal(
        f(np.asarray([[0, 1, 2], [3, 4, 5]])),
        np.asarray([[[0], [1]], [[2], [3]], [[4], [5]]]),
    )
    assert f.maker.fgraph.toposort()[-1].outputs[0].type.broadcastable == (
        False,
        False,
        True,
    )