def _convert_repeat_vector(converter: KerasConverter, k_op: "keras.layers.RepeatVector"):
    x = converter.get_variable(converter.get_input_tensor(k_op)[0])

    # RepeatVector takes a 2-D input (N, D) and repeats it n times along a new middle axis,
    # producing (N, n, D): insert a size-1 axis, then tile it k_op.n times.
    new_axis = Axis()
    multiplier = AxisKeyDict(x.order.axes, [1, 1])
    multiplier[new_axis] = k_op.n

    x = x.reshape(shape=(x.shape[0], 1, x.shape[1]),
                  order=Order([x.order.axes[0], new_axis, x.order.axes[1]]))
    y, = Tile(None, multiplier=multiplier)(x)

    converter.set_variable(converter.get_output_tensor(k_op)[0], y)
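# Illustrative sketch (not part of the converter): assuming a 2-D input of shape (N, D), the
# conversion above mirrors the following NumPy computation. `repeat_vector_reference` is a
# hypothetical helper used only for this example.
import numpy as np

def repeat_vector_reference(vx: np.ndarray, n: int) -> np.ndarray:
    """Reference semantics of keras.layers.RepeatVector: (N, D) -> (N, n, D)."""
    assert vx.ndim == 2
    return np.tile(vx[:, np.newaxis, :], (1, n, 1))  # insert a size-1 axis, then tile it n times

# e.g. repeat_vector_reference(np.random.rand(2, 3), n=4).shape == (2, 4, 3)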
def template(x_order=OrderNHWC, x_shape=[2, 3, 4, 5],
             multiplier=AxisKeyDict(OrderNHWC.axes, [3, 4, 5, 6]),
             y_order=OrderNHWC, description: str = ""):
    vx = np.random.rand(*x_shape)
    vy = np.tile(vx, [multiplier[a] for a in x_order.axes])

    x = Variable(vx.shape, order=x_order)
    y, = Tile(None, multiplier=multiplier)(x)

    y.change_order(y_order)

    generate_kernel_test_case(
        description=f"Tile {description}",
        graph=Graph([x], [y]),
        backend=["webgpu", "webassembly", "webgl"],
        inputs={x: vx},
        # `vy` is already an ndarray; transpose it into the output order for comparison
        expected={y: np.transpose(vy, [x_order.axes_dict[a] for a in y.order.axes])},
        EPS=1e-2
    )
def _convert_tile(converter: ChainerConverter, c_op: "chainer.functions.Tile"):
    x = converter.get_variable(c_op.inputs[0])

    # Align the ranks of the input and `reps`, following NumPy's tile rule:
    # if the input has more dimensions than `reps`, pad `reps` with 1s on the left;
    # otherwise, prepend size-1 axes to the input until the ranks match.
    reps = c_op.reps
    if x.ndim > len(reps):
        reps = (1,) * (x.ndim - len(reps)) + reps
    else:
        while x.ndim < len(c_op.reps):
            x = x.expand_dims(Axis(), 0)

    y, = Tile(None, AxisKeyDict(x.order.axes, reps))(x)
    converter.set_variable(c_op.outputs[0](), y)
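# Illustrative sketch (not part of the converter): NumPy applies the same rank-alignment rule
# automatically, which is what the two branches above reproduce on WebDNN variables.
import numpy as np

vx = np.random.rand(2, 3, 4)

# reps shorter than ndim: implicitly padded with 1s on the left, i.e. (5,) acts as (1, 1, 5)
assert np.tile(vx, (5,)).shape == (2, 3, 20)

# reps longer than ndim: the input is implicitly promoted with leading size-1 axes
assert np.tile(vx, (2, 1, 1, 1)).shape == (2, 2, 3, 4)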
def tile_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    multiplier = converter.get_variable(tf_op.inputs[1])

    # The per-axis repeat counts must be known at conversion time.
    if not isinstance(multiplier, ConstantVariable):
        raise NotImplementedError(
            "[TensorFlowConverter] Operator 'Tile' with dynamic multiplier is not supported yet."
        )

    multiplier = AxisKeyDict(x.order.axes, multiplier.data.astype(int).flatten().tolist())
    y, = Tile(None, multiplier=multiplier)(x)
    converter.set_variable(tf_op.outputs[0], y)
def pad_v2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    x = converter.get_variable(tf_op.inputs[0])
    paddings = converter.get_variable(tf_op.inputs[1])
    if not isinstance(paddings, ConstantVariable):
        raise NotImplementedError(
            '[TensorFlowConverter] PadV2 with dynamic padding size is not supported'
        )
    paddings = paddings.data.astype(int).tolist()  # `np.int` is removed in recent NumPy; the builtin works
    constant_values = converter.get_variable(tf_op.inputs[2]).change_order(x.order)  # fill value

    # Pad one axis at a time: build a block of the fill value for each side by tiling it to the
    # required shape, then concatenate [begin block, x, end block] along that axis.
    for axis, (pad_begin, pad_end) in zip(x.order.axes, paddings):
        xs = []

        if pad_begin > 0:
            multiplier = AxisKeyDict(x.order.axes,
                                     [pad_begin if a == axis else x.shape_dict[a] for a in x.order.axes])
            xs.append(Tile(None, multiplier)(constant_values)[0])

        xs.append(x)

        if pad_end > 0:
            multiplier = AxisKeyDict(x.order.axes,
                                     [pad_end if a == axis else x.shape_dict[a] for a in x.order.axes])
            xs.append(Tile(None, multiplier)(constant_values)[0])

        if len(xs) > 1:
            x, = Concat(None, axis=axis)(*xs)

    converter.set_variable(tf_op.outputs[0], x)
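# Illustrative sketch (not part of the handler): the same tile-and-concat construction in plain
# NumPy, checked against np.pad. `pad_axis_with_constant` is a hypothetical helper for this example.
import numpy as np

def pad_axis_with_constant(vx: np.ndarray, axis: int, pad_begin: int, pad_end: int, value: float) -> np.ndarray:
    """Constant-pad a single axis by concatenating blocks of the fill value around the input."""
    blocks = []
    if pad_begin > 0:
        shape = [pad_begin if i == axis else s for i, s in enumerate(vx.shape)]
        blocks.append(np.full(shape, value))
    blocks.append(vx)
    if pad_end > 0:
        shape = [pad_end if i == axis else s for i, s in enumerate(vx.shape)]
        blocks.append(np.full(shape, value))
    return np.concatenate(blocks, axis=axis)

vx = np.random.rand(2, 3)
out = pad_axis_with_constant(vx, axis=1, pad_begin=1, pad_end=2, value=0.0)
assert np.allclose(out, np.pad(vx, [(0, 0), (1, 2)], mode="constant", constant_values=0.0))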
def _convert_pad(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])

    attrs = attribute_dict(onnx_op)
    pads = attrs["pads"].ints
    if len(pads) != 2 * x.ndim:
        raise ValueError(
            "[ONNXConverter] The length of parameter \"pads\" in \"Pad\" node must be "
            "twice the number of input tensor dimensions"
        )
    pads_begin = pads[:x.ndim]
    pads_end = pads[x.ndim:]

    mode = attrs["mode"].s if "mode" in attrs else b"constant"
    value = attrs["value"].f if "value" in attrs else 0
    constant_values = ConstantVariable(np.full([1] * x.ndim, value), x.order)

    # Pad one axis at a time. In "constant" mode the padding block is the fill value tiled to the
    # required shape; in "reflect" and "edge" modes it is a reversed slice of the input along `axis`.
    for pad_begin, pad_end, axis in zip(pads_begin, pads_end, x.order.axes):
        xs = []

        if pad_begin > 0:
            if mode == b"constant":
                multiplier = AxisKeyDict(x.order.axes,
                                         [pad_begin if a == axis else x.shape_dict[a] for a in x.order.axes])
                xs.append(Tile(None, multiplier)(constant_values)[0])

            elif mode == b"reflect":
                # mirror the leading elements, excluding the border element itself
                slices = [slice(pad_begin, 0, -1) if a == axis else slice(None) for a in x.order.axes]
                xs.append(x[slices])

            elif mode == b"edge":
                # reversed slice of the leading elements, including the border element
                slices = [slice(pad_begin - 1, None, -1) if a == axis else slice(None) for a in x.order.axes]
                xs.append(x[slices])

            else:
                raise NotImplementedError(f"[ONNXConverter] Unknown mode \"{mode}\"")

        xs.append(x)

        if pad_end > 0:
            if mode == b"constant":
                multiplier = AxisKeyDict(x.order.axes,
                                         [pad_end if a == axis else x.shape_dict[a] for a in x.order.axes])
                xs.append(Tile(None, multiplier)(constant_values)[0])

            elif mode == b"reflect":
                slices = [slice(-2, -2 - pad_end, -1) if a == axis else slice(None) for a in x.order.axes]
                xs.append(x[slices])

            elif mode == b"edge":
                slices = [slice(-1, -1 - pad_end, -1) if a == axis else slice(None) for a in x.order.axes]
                xs.append(x[slices])

            else:
                raise NotImplementedError(f"[ONNXConverter] Unknown mode \"{mode}\"")

        if len(xs) > 1:
            x, = Concat(None, axis=axis)(*xs)

    converter.set_variable(onnx_op.output[0], x)
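# Illustrative sketch (not part of the converter): the reversed slices used in the "reflect"
# branch above, checked against np.pad on a 1-D example. (The "edge" branch mirrors the leading
# and trailing elements including the border, which coincides with edge padding for pad width 1.)
import numpy as np

v = np.array([1, 2, 3, 4, 5])
pad_begin, pad_end = 2, 2

# reflect: mirror around the border without repeating the border element
reflect = np.concatenate([v[pad_begin:0:-1], v, v[-2:-2 - pad_end:-1]])
assert np.array_equal(reflect, np.pad(v, (pad_begin, pad_end), mode="reflect"))

# edge-style slices with pad width 1 repeat the border element once on each side
pb, pe = 1, 1
edge1 = np.concatenate([v[pb - 1::-1], v, v[-1:-1 - pe:-1]])
assert np.array_equal(edge1, np.pad(v, (pb, pe), mode="edge"))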