def make_node(self, activations, labels, input_lengths):
    """Build the Apply node computing the CTC cost (and, optionally, its gradient).

    Parameters
    ----------
    activations : tensor-like
        float32 tensor with 3 dimensions; made C-contiguous before use.
    labels : tensor-like
        int32 tensor with 2 dimensions.
    input_lengths : tensor-like
        int32 vector (1 dimension).

    Returns
    -------
    Apply
        Node whose outputs are the float32 cost vector ``ctc_cost`` and,
        when ``self.compute_grad`` is set, the float32 gradient tensor
        ``ctc_grad``.

    Raises
    ------
    TypeError
        If any input has the wrong dtype.
    ValueError
        If any input has the wrong number of dimensions.
    """
    # The underlying CTC implementation requires a C-contiguous
    # activations array, so force the layout here.
    t_activations = cpu_contiguous(at.as_tensor_variable(activations))
    t_labels = at.as_tensor_variable(labels)
    t_input_lengths = at.as_tensor_variable(input_lengths)

    # Validate dtype and rank of each input; the tuples preserve the
    # original check order and exact error messages.
    checks = (
        (t_activations, "float32",
         "activations must use the float32 type!",
         3, "activations must have 3 dimensions."),
        (t_labels, "int32",
         "labels must use the int32 type!",
         2, "labels must have 2 dimensions."),
        (t_input_lengths, "int32",
         "input_lengths must use the int32 type!",
         1, "input_lengths must have 1 dimension."),
    )
    for var, dtype, dtype_msg, ndim, ndim_msg in checks:
        if var.type.dtype != dtype:
            raise TypeError(dtype_msg)
        if var.ndim != ndim:
            raise ValueError(ndim_msg)

    # One cost per sequence; the gradient output is appended only when
    # this Op instance was configured to compute it.
    outputs = [fvector(name="ctc_cost")]
    if self.compute_grad:
        outputs.append(ftensor3(name="ctc_grad"))

    return Apply(
        self,
        inputs=[t_activations, t_labels, t_input_lengths],
        outputs=outputs,
    )
def test_cpu_contiguous():
    """Check that ``cpu_contiguous`` survives compilation and yields
    C-contiguous results for several strided slices, and that its
    gradient is correct.
    """
    a = fmatrix("a")
    i = iscalar("i")
    a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
    # A reshape followed by a symbolic-stride slice produces a
    # (potentially) non-contiguous intermediate for cpu_contiguous to fix.
    f = aesara.function([a, i], cpu_contiguous(a.reshape((5, 4))[::i]))
    topo = f.maker.fgraph.toposort()
    # The op must not be optimized away from the compiled graph.
    # (Generator expression instead of a throwaway list inside any().)
    assert any(isinstance(node.op, CpuContiguous) for node in topo)
    # Whatever the slice stride, the output must be C-contiguous.
    assert f(a_val, 1).flags["C_CONTIGUOUS"]
    assert f(a_val, 2).flags["C_CONTIGUOUS"]
    assert f(a_val, 3).flags["C_CONTIGUOUS"]
    # Test the grad:
    utt.verify_grad(cpu_contiguous, [np.random.rand(5, 7, 2)])