def template(shape=(2, 3, 4, 5), x0_order=OrderNHWC, x1_order=OrderNHWC, x2_order=OrderNHWC, y_order=OrderNHWC, description: str = ""):
    """Build and register a Select kernel test case with random data.

    x0 is a random 0/1 condition mask; x1/x2 are random values in [-0.5, 0.5).
    Each variable is re-ordered after graph construction so the generated
    kernel must handle arbitrary memory layouts.
    """
    cond_data = np.where(np.random.rand(*shape).astype(np.float32) > 0.5, 0, 1)
    true_data = np.random.rand(*shape).astype(np.float32) - 0.5
    false_data = np.random.rand(*shape).astype(np.float32) - 0.5
    expected_data = np.where(cond_data == 1, true_data, false_data)

    x0 = Variable(cond_data.shape, order=OrderNHWC)
    x1 = Variable(true_data.shape, order=OrderNHWC)
    x2 = Variable(false_data.shape, order=OrderNHWC)
    y, = Select(None)(x0, x1, x2)

    for var, order in ((x0, x0_order), (x1, x1_order), (x2, x2_order), (y, y_order)):
        var.change_order(order)

    def in_var_order(data, var):
        # Permute NHWC-laid-out data into the variable's current axis order.
        return np.transpose(data, [OrderNHWC.axes_dict[a] for a in var.order.axes])

    generate_kernel_test_case(
        description=f"Select {description}",
        graph=Graph([x0, x1, x2], [y]),
        inputs={
            x0: in_var_order(cond_data, x0),
            x1: in_var_order(true_data, x1),
            x2: in_var_order(false_data, x2),
        },
        expected={y: in_var_order(expected_data, y)},
    )
def _convert_min(converter: ONNXConverter, onnx_op: INodeProto):
    """Convert an ONNX clip-style operator: y = clamp(x, min, max).

    WebDNN has no native clip, so it is expressed as two Select operations:
    first clamp from above against "max", then from below against "min".

    Fix: the "min"/"max" attributes are optional in ONNX Clip; missing ones
    now fall back to the full float range (a no-op bound) instead of raising
    KeyError, matching the `if k in attrs` pattern used by the other
    attribute-reading handlers in this converter.
    """
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)

    # ONNX Clip defaults: lowest/highest representable single-precision float.
    FLT_MAX = 3.402823466e+38
    max_val = attrs["max"].f if "max" in attrs else +FLT_MAX
    min_val = attrs["min"].f if "min" in attrs else -FLT_MAX

    # Broadcastable constants shaped [1]*ndim in x's axis order.
    max_x = ConstantVariable(np.ones([1] * x.ndim), x.order) * max_val
    min_x = ConstantVariable(np.ones([1] * x.ndim), x.order) * min_val

    y, = Select(None)(x > max_x, max_x, x)   # clamp from above
    y, = Select(None)(y > min_x, y, min_x)   # clamp from below
    converter.set_variable(onnx_op.output[0], y)
def test_broadcast():
    """Select where condition/true/false inputs broadcast to a common shape.

    cond is (2, 5) over axes N,C; the true-branch input is (3, 4) over H,W;
    the false-branch input is the full (2, 3, 4, 5) NHWC tensor.
    """
    cond_data = np.where(np.random.rand(2, 5).astype(np.float32) > 0.5, 0, 1)
    true_data = np.random.rand(3, 4).astype(np.float32) - 0.5
    false_data = np.random.rand(2, 3, 4, 5).astype(np.float32) - 0.5

    # Reference result with explicit numpy broadcasting to NHWC.
    expected = np.where(
        cond_data[:, None, None, :] == 1,
        true_data[None, :, :, None],
        false_data,
    )

    x0 = Variable(cond_data.shape, order=OrderNC)
    x1 = Variable(true_data.shape, order=Order([Axis.H, Axis.W]))
    x2 = Variable(false_data.shape, order=OrderNHWC)

    y, = Select(None)(x0, x1, x2)
    y.change_order(OrderNHWC)

    generate_kernel_test_case(
        description="Select broadcast",
        graph=Graph([x0, x1, x2], [y]),
        inputs={x0: cond_data, x1: true_data, x2: false_data},
        expected={y: expected},
    )
def _convert_selu(converter: ONNXConverter, onnx_op: INodeProto):
    """Convert ONNX Selu: y = gamma * (x if x > 0 else alpha * (exp(x) - 1)).

    Fix: the fallback constants were truncated to 5 digits (1.6732 / 1.0507),
    which drifts from the ONNX reference implementation when the attributes
    are absent. They now use the spec-defined default values.
    """
    x = converter.get_variable(onnx_op.input[0])
    attrs = attribute_dict(onnx_op)

    # Defaults from the ONNX Selu operator specification.
    alpha = attrs["alpha"].f if "alpha" in attrs else 1.6732632423543772
    gamma = attrs["gamma"].f if "gamma" in attrs else 1.0507009873554805

    # Positive branch: gamma * x; negative branch: gamma * alpha * (exp(x) - 1).
    y, = Select(None)(x > 0, gamma * x, gamma * (alpha * Exp(None)(x)[0] - alpha))
    converter.set_variable(onnx_op.output[0], y)
def _convert_minimum(converter: KerasConverter, k_op: "keras.layers.Minimum"):
    """Convert keras.layers.Minimum: element-wise minimum over all inputs."""
    inputs = [converter.get_variable(tensor) for tensor in converter.get_input_tensor(k_op)]

    # Unify every operand's axis order with the first one's.
    first, *rest = inputs
    for other in rest:
        first.order.unify(other.order)

    # Fold pairwise: min(a, b) == b if a > b else a.
    result = first
    for other in rest:
        is_greater = result > other
        result, = Select(None)(is_greater, other, result)

    converter.set_variable(converter.get_output_tensor(k_op)[0], result)
def select_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Convert TensorFlow Select: y[i] = x1[i] if cond[i] else x2[i]."""
    cond = converter.get_variable(tf_op.inputs[0])
    x_true = converter.get_variable(tf_op.inputs[1])
    x_false = converter.get_variable(tf_op.inputs[2])

    # All three operands must be mutually broadcast-compatible.
    for lhs, rhs in ((cond, x_true), (cond, x_false), (x_true, x_false)):
        check_broadcast_constraints(lhs, rhs)

    y, = Select(None)(cond, x_true, x_false)
    converter.set_variable(tf_op.outputs[0], y)
def _convert_min(converter: ONNXConverter, onnx_op: INodeProto):
    """Convert ONNX Min over N inputs by pairwise reduction with Select."""
    pending = [converter.get_variable(name) for name in onnx_op.input]

    # Queue-style reduction: take the two oldest operands, push their
    # minimum to the back, until a single variable remains.
    while len(pending) > 1:
        a, b = pending[0], pending[1]
        pending = pending[2:]
        check_broadcast_constraints(a, b)
        # min(a, b): pick b where a > b, otherwise a.
        minimum, = Select(None)(a > b, b, a)
        pending.append(minimum)

    converter.set_variable(onnx_op.output[0], pending[0])