def bincount(x, weights=None, minlength=None, assert_nonneg=False):
    """Count occurrences of each value in a vector of non-negative ints.

    The result has one bin per integer value in ``[0, x.max()]`` (so its
    length is ``x.max() + 1``), extended to at least ``minlength`` bins when
    that argument is given.  Without ``weights``, bin ``n`` holds the number
    of times ``n`` appears in ``x``; with ``weights``, each occurrence at
    position ``i`` contributes ``weights[i]`` instead of 1.

    Parameters
    ----------
    x : 1 dimension, nonnegative ints
    weights : array of the same shape as x with corresponding weights.
        Optional.
    minlength : A minimum number of bins for the output array.
        Optional.
    assert_nonneg : A flag that inserts an assert_op to check if
        every input x is nonnegative.
        Optional.

    .. versionadded:: 0.6

    """
    if x.ndim != 1:
        raise TypeError("Inputs must be of dimension 1.")

    if assert_nonneg:
        checked = Assert("Input to bincount has negative values!")
        x = checked(x, aet_all(x >= 0))

    n_bins = aet.cast(x.max() + 1, "int64")
    if minlength is not None:
        n_bins = maximum(n_bins, minlength)

    # We deliberately avoid inc_subtensor(out[x], ...) here: indexing with
    # out[x] raises an exception when the index array x is int8.
    if weights is None:
        increment, out_dtype = 1, x.dtype
    else:
        increment, out_dtype = weights, weights.dtype

    counts = aet.zeros([n_bins], dtype=out_dtype)
    return advanced_inc_subtensor1(counts, increment, x)
def to_one_hot(y, nb_class, dtype=None):
    """Build the one-hot encoding matrix of an integer vector.

    Parameters
    ----------
    y
        A vector of integer value between 0 and nb_class - 1.
    nb_class : int
        The number of class in y.
    dtype : data-type
        The dtype of the returned matrix. Default floatX.

    Returns
    -------
    object
        A matrix of shape (y.shape[0], nb_class), where each row ``i`` is
        the one hot encoding of the corresponding ``y[i]`` value.

    """
    n_rows = y.shape[0]
    # Start from an all-zero matrix and set a single 1 per row, at column y[i].
    blank = aet.zeros((n_rows, nb_class), dtype=dtype)
    return set_subtensor(blank[aet.arange(n_rows), y], 1)
def scan_checkpoints(
    fn,
    sequences=None,
    outputs_info=None,
    non_sequences=None,
    name="checkpointscan_fn",
    n_steps=None,
    save_every_N=10,
    padding=True,
):
    """Scan function that uses less memory, but is more restrictive.

    In :func:`~aesara.scan`, if you compute the gradient of the output
    with respect to the input, you will have to store the intermediate
    results at each time step, which can be prohibitively huge. This
    function allows to do ``save_every_N`` steps of forward computations
    without storing the intermediate results, and to recompute them during
    the gradient computation.

    Notes
    -----
    Current assumptions:

    * Every sequence has the same length.
    * If ``n_steps`` is specified, it has the same value as the length of
      any sequence.
    * The value of ``save_every_N`` divides the number of steps the scan
      will run without remainder.
    * Only singly-recurrent and non-recurrent outputs are used.
      No multiple recurrences.
    * Only the last timestep of any output will ever be used.

    Parameters
    ----------
    fn
        ``fn`` is a function that describes the operations involved in one
        step of ``scan``. See the documentation of :func:`~aesara.scan`
        for more information.
    sequences
        ``sequences`` is the list of Aesara variables or dictionaries
        describing the sequences ``scan`` has to iterate over. All
        sequences must be the same length in this version of ``scan``.
    outputs_info
        ``outputs_info`` is the list of Aesara variables or dictionaries
        describing the initial state of the outputs computed recurrently.
    non_sequences
        ``non_sequences`` is the list of arguments that are passed to
        ``fn`` at each steps. One can opt to exclude variable used in
        ``fn`` from this list as long as they are part of the
        computational graph, though for clarity we encourage not to do so.
    n_steps
        ``n_steps`` is the number of steps to iterate given as an int or
        Aesara scalar (> 0). If any of the input sequences do not have
        enough elements, scan will raise an error. If n_steps is not
        provided, ``scan`` will figure out the amount of steps it should
        run given its input sequences.
    save_every_N
        ``save_every_N`` is the number of steps to go without storing
        the computations of ``scan`` (ie they will have to be recomputed
        during the gradient computation).
    padding
        If the length of the sequences is not a multiple of
        ``save_every_N``, the sequences will be zero padded to make this
        version of ``scan`` work properly, but will also result in a
        memory copy. It can be avoided by setting ``padding`` to False,
        but you need to make sure the length of the sequences is a
        multiple of ``save_every_N``.

    Returns
    -------
    tuple
        Tuple of the form ``(outputs, updates)`` as in
        :func:`~aesara.scan`, but with a small change: It only contain the
        output at each ``save_every_N`` step. The time steps that are not
        returned by this function will be recomputed during the gradient
        computation (if any).

    See Also
    --------
    :func:`~aesara.scan`: Looping in Aesara.

    """
    # Standardize the format of input arguments
    if sequences is None:
        sequences = []
    elif not isinstance(sequences, list):
        sequences = [sequences]

    if not isinstance(outputs_info, list):
        outputs_info = [outputs_info]

    if non_sequences is None:
        non_sequences = []
    elif not isinstance(non_sequences, list):
        non_sequences = [non_sequences]

    # Check that outputs_info has no taps:
    for element in outputs_info:
        if isinstance(element, dict) and "taps" in element:
            raise RuntimeError("scan_checkpoints doesn't work with taps.")

    # Determine how many steps the original scan would run
    if n_steps is None:
        n_steps = sequences[0].shape[0]

    # Compute the number of steps of the outer scan
    o_n_steps = at.cast(ceil(n_steps / save_every_N), "int64")

    # Compute the number of steps of the inner scan: every outer step runs
    # save_every_N inner steps, except possibly the last one which runs
    # only the remainder (when n_steps is not a multiple of save_every_N).
    i_n_steps = save_every_N * at.ones((o_n_steps,), "int64")
    mod = n_steps % save_every_N
    last_n_steps = at.switch(eq(mod, 0), save_every_N, mod)
    i_n_steps = set_subtensor(i_n_steps[-1], last_n_steps)

    # Pad the sequences if needed
    if padding:
        # Since padding could be an empty tensor, Join returns a view of s.
        join = Join(view=0)
        for i, s in enumerate(sequences):
            # Pad by the COMPLEMENT of the remainder so the padded length
            # becomes a multiple of save_every_N.  (Padding by the
            # remainder itself, `s.shape[0] % save_every_N`, would give a
            # length L + L % N, which is still not a multiple of N.)
            overshoot = s.shape[0] % save_every_N
            n = (save_every_N - overshoot) % save_every_N
            z = at.zeros((n, s.shape[1:]), dtype=s.dtype)
            sequences[i] = join(0, [s, z])

    # Establish the input variables of the outer scan: reshape every
    # sequence from (steps, ...) to (outer_steps, save_every_N, ...).
    # Use floor division: `/` would produce a float shape entry.
    o_sequences = [
        s.reshape(
            [s.shape[0] // save_every_N, save_every_N]
            + [s.shape[i] for i in range(1, s.ndim)],
            s.ndim + 1,
        )
        for s in sequences
    ]
    o_sequences.append(i_n_steps)
    new_nitsots = [i for i in outputs_info if i is None]
    o_nonsequences = non_sequences

    def outer_step(*args):
        # Separate the received arguments into their respective (seq,
        # outputs from previous iterations, nonseqs) categories.  Slice
        # with explicit front offsets: `args[-0:]` would return the whole
        # tuple when there are no non-sequences.
        n_seqs = len(o_sequences)
        n_nonseqs = len(o_nonsequences)
        i_sequences = list(args[:n_seqs])
        i_prev_outputs = list(args[n_seqs : len(args) - n_nonseqs])
        i_non_sequences = list(args[len(args) - n_nonseqs :])
        i_outputs_infos = i_prev_outputs + [None] * len(new_nitsots)

        # Call the user-provided function with the proper arguments.  The
        # last "sequence" is really the per-chunk inner step count.
        results, updates = scan(
            fn=fn,
            sequences=i_sequences[:-1],
            outputs_info=i_outputs_infos,
            non_sequences=i_non_sequences,
            name=name + "_inner",
            n_steps=i_sequences[-1],
        )
        if not isinstance(results, list):
            results = [results]

        # Keep only the last timestep of every output but keep all the
        # updates.
        return [r[-1] for r in results], updates

    results, updates = scan(
        fn=outer_step,
        sequences=o_sequences,
        outputs_info=outputs_info,
        non_sequences=o_nonsequences,
        name=name + "_outer",
        n_steps=o_n_steps,
        allow_gc=True,
    )

    return results, updates
def neibs2images(neibs, neib_shape, original_shape, mode="valid"):
    """
    Function :func:`neibs2images <aesara.sandbox.neighbours.neibs2images>`
    performs the inverse operation of
    :func:`images2neibs <aesara.sandbox.neighbours.neibs2images>`. It inputs
    the output of :func:`images2neibs
    <aesara.sandbox.neighbours.neibs2images>` and reconstructs its input.

    Parameters
    ----------
    neibs : 2d tensor
        Like the one obtained by
        :func:`images2neibs <aesara.sandbox.neighbours.neibs2images>`.
    neib_shape
        `neib_shape` that was used in
        :func:`images2neibs <aesara.sandbox.neighbours.neibs2images>`.
    original_shape
        Original shape of the 4d tensor given to
        :func:`images2neibs <aesara.sandbox.neighbours.neibs2images>`

    Returns
    -------
    object
        Reconstructs the input of
        :func:`images2neibs <aesara.sandbox.neighbours.neibs2images>`,
        a 4d tensor of shape `original_shape`.

    Notes
    -----
    Currently, the function doesn't support tensors created with
    `neib_step` different from default value. This means that it may be
    impossible to compute the gradient of a variable gained by
    :func:`images2neibs <aesara.sandbox.neighbours.neibs2images>` w.r.t.
    its inputs in this case, because it uses
    :func:`images2neibs <aesara.sandbox.neighbours.neibs2images>` for
    gradient computation.

    Examples
    --------
    Example, which uses a tensor gained in example for
    :func:`images2neibs <aesara.sandbox.neighbours.neibs2images>`:

    .. code-block:: python

        im_new = neibs2images(neibs, (5, 5), im_val.shape)
        # Aesara function definition
        inv_window = aesara.function([neibs], im_new)
        # Function application
        im_new_val = inv_window(neibs_val)

    .. note:: The code will output the initial image array.

    """
    neibs = as_tensor_variable(neibs)
    neib_shape = as_tensor_variable(neib_shape)
    original_shape = as_tensor_variable(original_shape)

    # Re-apply images2neibs to the patch matrix (viewed as a 4d tensor)
    # to regroup the flattened patches into full-width image rows.
    regroup_shape = stack([original_shape[-1] // neib_shape[1], neib_shape[1]])
    rows_2d = images2neibs(
        neibs.dimshuffle("x", "x", 0, 1), regroup_shape, mode=mode
    )

    if mode == "valid":
        # TODO: we do not implement all mode with this code.
        # Add a check for the good cases.
        return rows_2d.reshape(original_shape, ndim=4)

    if mode == "ignore_borders":
        # We use set_subtensor to accept original_shape we can't infer
        # the shape and still raise error when it don't have the right
        # shape.  `cropped` is `original_shape` with the spatial dims
        # rounded down to whole multiples of the neighborhood shape.
        cropped = original_shape
        cropped = set_subtensor(
            cropped[2], (cropped[2] // neib_shape[0]) * neib_shape[0]
        )
        cropped = set_subtensor(
            cropped[3], (cropped[3] // neib_shape[1]) * neib_shape[1]
        )
        out_4d = rows_2d.reshape(cropped, ndim=4)

        # padding the borders with zeros
        for axis in (2, 3):
            pad_shape = list(out_4d.shape)
            pad_shape[axis] = original_shape[axis] - cropped[axis]
            out_4d = concatenate([out_4d, zeros(pad_shape)], axis=axis)
        return out_4d

    raise NotImplementedError(f"neibs2images do not support mode={mode}")
def grad(self, inp, grads):
    """Gradient of images2neibs w.r.t. its image input.

    ``inp`` is ``(x, neib_shape, neib_step)`` — the Op's inputs — and
    ``grads`` holds the single output gradient ``gz`` (the gradient
    flowing in through the 2d patch matrix).  The gradients w.r.t.
    ``neib_shape`` and ``neib_step`` are always undefined (they are
    integer shape parameters).
    """
    x, neib_shape, neib_step = inp
    (gz, ) = grads

    # Fast path: when the step equals the neighborhood shape, patches tile
    # the image without overlap, and neibs2images is the exact inverse.
    if self.mode in ("valid", "ignore_borders"):
        if (neib_shape is neib_step or
                neib_shape == neib_step or
                # Aesara Constant == do not compare the data
                # the equals function do that.
                (hasattr(neib_shape, "equals") and
                 neib_shape.equals(neib_step))):
            return [
                neibs2images(gz, neib_shape, x.shape, mode=self.mode),
                grad_undefined(self, 1, neib_shape),
                grad_undefined(self, 2, neib_step),
            ]

    # General "valid" case (possibly overlapping patches): accumulate the
    # gradient position-by-position over the neighborhood with a scan.
    if self.mode in ["valid"]:
        # Iterate over neighborhood positions, summing contributions.
        def pos2map(pidx, pgz, prior_result, neib_shape, neib_step):
            """
            Helper function that adds gradient contribution from a single
            neighborhood position i,j.

            pidx = Index of position within neighborhood.
            pgz = Gradient of shape (batch_size*num_channels*neibs)
            prior_result = Shape (batch_size, num_channnels, rows, cols)
            neib_shape = Number of rows, cols in a neighborhood.
            neib_step = Step sizes from image2neibs.
            """
            nrows, ncols = neib_shape
            rstep, cstep = neib_step
            batch_size, num_channels, rows, cols = prior_result.shape
            # Convert the flat position index into (row, col) within the
            # neighborhood.
            i = pidx // ncols
            j = pidx - (i * ncols)
            # This position does not touch some img pixels in valid mode.
            result_indices = prior_result[:, :,
                                          i:(rows - nrows + i + 1):rstep,
                                          j:(cols - ncols + j + 1):cstep,
                                          ]
            # One gradient entry per patch; reshape the per-position column
            # of gz back to the grid of patch origins.
            newshape = ((batch_size, num_channels) +
                        ((rows - nrows) // rstep + 1, ) +
                        ((cols - ncols) // cstep + 1, ))
            return inc_subtensor(result_indices, pgz.reshape(newshape))

        indices = arange(neib_shape[0] * neib_shape[1])
        # Transpose so the scan iterates over neighborhood positions
        # (columns of gz), one per step.
        pgzs = gz.dimshuffle((1, 0))
        result, _ = aesara.scan(
            fn=pos2map,
            sequences=[indices, pgzs],
            outputs_info=zeros(x.shape),
            non_sequences=[neib_shape, neib_step],
        )
        # The accumulator after the last step holds the full gradient.
        grad_input = result[-1]
        return [
            grad_input,
            grad_undefined(self, 1, neib_shape),
            grad_undefined(self, 2, neib_step),
        ]

    # Other modes (e.g. wrap_centered) have no gradient implemented.
    return [
        grad_not_implemented(self, 0, x),
        grad_undefined(self, 1, neib_shape),
        grad_undefined(self, 2, neib_step),
    ]