# Example #1
def convert_graph_layout(mod, desired_layout):
    """Alter the layout of the input graph.

    Parameters
    ----------
    mod : tvm.relay.Module
        The relay module to convert.
    desired_layout : str
        The layout to convert to (e.g. "NCHW" or "NHWC").

    Returns
    -------
    mod : tvm.relay.Module
        The converted module.

    Raises
    ------
    TVMCException
        If the layout conversion fails; the original error is chained
        as the cause.
    """

    # Assume for the time being that graphs only have
    # conv2d as heavily-sensitive operators.
    # "default" lets ConvertLayout pick the kernel layout itself.
    desired_layouts = {
        "nn.conv2d": [desired_layout, "default"],
        "qnn.conv2d": [desired_layout, "default"],
    }

    # Convert the layout of the graph where possible.
    seq = transform.Sequential([
        relay.transform.RemoveUnusedFunctions(),
        relay.transform.ConvertLayout(desired_layouts),
    ])

    # NOTE(review): opt_level=3 appears to be needed for ConvertLayout
    # to be enabled — confirm against the pass registration.
    with transform.PassContext(opt_level=3):
        try:
            return seq(mod)
        except Exception as err:
            # Chain the underlying error (PEP 3134) so the real cause
            # is preserved in the traceback instead of being hidden.
            raise TVMCException("Error converting layout to {0}: {1}".format(
                desired_layout, str(err))) from err
# Example #2
        return op

    def visit_var(self, var: relay.Var):
        """Rebuild *var* with its type annotation rewritten by the type mutator."""
        return relay.Var(
            name_hint=var.name_hint,
            type_annotation=self.ty_mut.visit(var.checked_type),
        )


class TensorDTypeMutator(relay.TypeMutator):
    """Type mutator that rewrites every visited TensorType to a fixed dtype."""

    def __init__(self, tgt_ty: str):
        super().__init__()
        # dtype string applied to every tensor type encountered.
        self.tgt_ty = tgt_ty

    def visit_tensor_type(self, tt: relay.TensorType):
        # Preserve the concrete shape; only the element dtype changes.
        shape = tt.concrete_shape
        return relay.TensorType(shape, dtype=self.tgt_ty)


# Build a small Keras graph: Conv2D -> BatchNorm -> GlobalAvgPool.
inputs = keras.Input(shape=(224, 224, 3), batch_size=4)
conv_out = layers.Conv2D(64, 3, use_bias=False, padding='same')(inputs)
bn_out = layers.BatchNormalization(epsilon=1e-5)(conv_out)
pooled = layers.GlobalAvgPool2D()(bn_out)
keras_model = keras.Model(inputs=inputs, outputs=pooled)
keras_model.summary()

# Import into Relay. The shape dict moves channels first
# (presumably NCHW for the frontend — verify against from_keras docs).
ir_mod, params = relay.frontend.from_keras(
    keras_model,
    shape={'input_1': (4, 3, 224, 224)},
)
print(ir_mod)

# Rewrite tensor dtypes to float16, then re-run type inference.
dtype_passes = transform.Sequential(passes=[
    AlterDType('float16'),
    relay.transform.InferType(),
])
ir_mod = dtype_passes(ir_mod)
print(ir_mod)