# Example #1
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        """Set up the lhs/rhs operand tensors for this benchmark.

        Any key present in `inputs` overrides the corresponding default
        below; everything else falls back to the defaults.
        """
        defaults = {
            "lhs": (32, 1024, 1024),
            "rhs": (32, 1024, 1000),
            "initializer": nd.normal,
            "transpose_a": False,
            "transpose_b": False,
            "forward_stype": None,
            "run_backward": True,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Materialize both operands; gradients are attached only when the
        # backward pass is benchmarked too.
        for operand in ("lhs", "rhs"):
            tensor = get_mx_ndarray(ctx=self.ctx,
                                    in_tensor=self.inputs[operand],
                                    dtype=self.inputs["dtype"],
                                    initializer=self.inputs["initializer"],
                                    attach_grad=self.inputs["run_backward"])
            setattr(self, operand, tensor)
# Example #2
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        """Prepare prediction/label tensors and an L2Loss block.

        The default (32, 1000, 1) shapes mimic a prediction of
        batch_size=32 over a 1000-class classification problem.
        """
        defaults = {
            "pred": (32, 1000, 1),
            "pred_initializer": nd.normal,
            "label": (32, 1000, 1),
            "label_initializer": nd.normal,
            "weight": 1.0,
            "run_backward": False,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        def _tensor(key, init_key):
            # Build one randomly-initialized input tensor.
            return get_mx_ndarray(ctx=self.ctx,
                                  in_tensor=self.inputs[key],
                                  dtype=self.inputs["dtype"],
                                  initializer=self.inputs[init_key],
                                  attach_grad=self.inputs["run_backward"])

        self.pred = _tensor("pred", "pred_initializer")
        self.label = _tensor("label", "label_initializer")

        # Block under benchmark.
        self.block = loss.L2Loss(weight=self.inputs["weight"], batch_axis=0)
        self.block.initialize(ctx=self.ctx)
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        """Prepare an input tensor and a BatchNorm block to benchmark.

        The default data shape is (32, 3, 256, 256).
        """
        defaults = {
            "data": (32, 3, 256, 256),
            "data_initializer": nd.normal,
            "run_backward": True,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Randomly-initialized input tensor; gradient attached only when
        # the backward pass is benchmarked as well.
        params = self.inputs
        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=params["data"],
                                   dtype=params["dtype"],
                                   initializer=params["data_initializer"],
                                   attach_grad=params["run_backward"])

        # Block under benchmark.
        self.block = nn.BatchNorm()
        self.block.initialize(ctx=self.ctx)
# Example #4
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        """Prepare an input tensor and a Flatten block to benchmark.

        The default (128, 512, 512) shape mimics a batch of 128 samples,
        each a 512*512 image.
        """
        defaults = {
            "data": (128, 512, 512),
            "data_initializer": nd.normal,
            "run_backward": True,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Randomly-initialized input tensor.
        params = self.inputs
        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=params["data"],
                                   dtype=params["dtype"],
                                   initializer=params["data_initializer"],
                                   attach_grad=params["run_backward"])

        # Block under benchmark.
        self.block = nn.Flatten()
        self.block.initialize(ctx=self.ctx)
# Example #5
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        """Prepare an input tensor and a batchifying Lambda block.

        The default data shape is (128, 512, 512).
        NOTE(review): the original comments described (3, 512, 512) /
        (3, 1024, 1024) inputs, which do not match this default — the
        shapes here reflect the actual code.
        """
        defaults = {
            "data": (128, 512, 512),
            "data_initializer": nd.normal,
            "run_backward": True,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Randomly-initialized input tensor.
        params = self.inputs
        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=params["data"],
                                   dtype=params["dtype"],
                                   initializer=params["data_initializer"],
                                   attach_grad=params["run_backward"])

        # The block prepends a batch axis: shape S => (1,) + S.
        self.block = nn.Lambda(lambda x: nd.expand_dims(data=x, axis=0))
        self.block.initialize(ctx=self.ctx)
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        """Prepare an input tensor and an LSTM block to benchmark.

        The default data shape (25, 32, 256) follows the "TNC" layout
        configured below.
        """
        defaults = {
            "data": (25, 32, 256),
            "data_initializer": nd.normal,
            "hidden_size": 100,
            "num_layers": 1,
            "layout": "TNC",
            "dropout": 0,
            "bidirectional": False,
            "run_backward": True,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Randomly-initialized input tensor.
        params = self.inputs
        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=params["data"],
                                   dtype=params["dtype"],
                                   initializer=params["data_initializer"],
                                   attach_grad=params["run_backward"])

        # Block under benchmark, configured from the resolved inputs.
        lstm_keys = ("hidden_size", "num_layers", "layout", "dropout",
                     "bidirectional", "dtype")
        self.block = rnn.LSTM(**{key: params[key] for key in lstm_keys})
        self.block.initialize(ctx=self.ctx)
# Example #7
    def __init__(self, ctx=mx.cpu(), warmup=5, runs=25, inputs=None):
        """Prepare an input tensor and a MaxPool2D block to benchmark.

        The default data shape (32, 3, 256, 256) follows the "NCHW"
        layout configured below.
        """
        defaults = {
            "data": (32, 3, 256, 256),
            "data_initializer": nd.normal,
            "pool_size": (2, 2),
            "strides": None,
            "padding": (0, 0),
            "layout": "NCHW",
            "run_backward": True,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Randomly-initialized input tensor.
        params = self.inputs
        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=params["data"],
                                   dtype=params["dtype"],
                                   initializer=params["data_initializer"],
                                   attach_grad=params["run_backward"])

        # Block under benchmark, configured from the resolved inputs.
        pool_keys = ("pool_size", "strides", "padding", "layout")
        self.block = nn.MaxPool2D(**{key: params[key] for key in pool_keys})
        self.block.initialize(ctx=self.ctx)
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        """Prepare the two operand tensors ("this" and "other") for this
        benchmark, each (1024, 1024) by default."""
        defaults = {
            "this": (1024, 1024),
            "other": (1024, 1024),
            "initializer": nd.normal,
            "run_backward": False,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Materialize both operands with the shared initializer/dtype.
        for operand in ("this", "other"):
            tensor = get_mx_ndarray(ctx=self.ctx,
                                    in_tensor=self.inputs[operand],
                                    dtype=self.inputs["dtype"],
                                    initializer=self.inputs["initializer"],
                                    attach_grad=self.inputs["run_backward"])
            setattr(self, operand, tensor)
    def __init__(self, ctx=mx.cpu(), warmup=5, runs=25, inputs=None):
        """Prepare an input tensor and a Conv2DTranspose block to benchmark.

        The default data shape (32, 3, 256, 256) follows the "NCHW"
        layout configured below.
        """
        defaults = {
            "data": (32, 3, 256, 256),
            "data_initializer": nd.normal,
            "channels": 64,
            "kernel_size": (3, 3),
            "strides": (1, 1),
            "padding": (0, 0),
            "output_padding": (0, 0),
            "dilation": (1, 1),
            "layout": "NCHW",
            "activation": None,
            "run_backward": True,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Randomly-initialized input tensor.
        params = self.inputs
        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=params["data"],
                                   dtype=params["dtype"],
                                   initializer=params["data_initializer"],
                                   attach_grad=params["run_backward"])

        # Block under benchmark, configured from the resolved inputs.
        conv_keys = ("channels", "kernel_size", "strides", "padding",
                     "output_padding", "dilation", "layout", "activation")
        self.block = nn.Conv2DTranspose(
            **{key: params[key] for key in conv_keys})
        self.block.initialize(ctx=self.ctx)
# Example #10
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        """Prepare an input tensor and a Dense block to benchmark.

        Default units=256 follows the commonly used Dense layer size in
        the ResNet architecture; activation defaults to None so only the
        dense operation itself is benchmarked.
        NOTE(review): the original comment described a (1, 1024) input,
        which does not match the actual (512, 512) default below.
        """
        defaults = {
            "data": (512, 512),
            "units": 256,
            "activation": None,
            "use_bias": True,
            "flatten": True,
            "data_initializer": nd.normal,
            "weight_initializer": "Xavier",
            "bias_initializer": "Zeros",
            "run_backward": True,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Randomly-initialized input tensor.
        params = self.inputs
        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=params["data"],
                                   dtype=params["dtype"],
                                   initializer=params["data_initializer"],
                                   attach_grad=params["run_backward"])

        # Block under benchmark, configured from the resolved inputs.
        dense_keys = ("units", "activation", "use_bias", "flatten", "dtype",
                      "weight_initializer", "bias_initializer")
        self.block = nn.Dense(**{key: params[key] for key in dense_keys})
        self.block.initialize(ctx=self.ctx)
# Example #11
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        """Prepare the input tensor and top-k parameters for this benchmark.

        Defaults: a (1024, 1000) tensor, taking the indices of the 10
        smallest entries along the last axis.
        """
        defaults = {
            "data": (1024, 1000),
            "initializer": nd.normal,
            "axis": -1,
            "k": 10,
            "ret_typ": "indices",
            "is_ascend": True,
            "run_backward": False,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Randomly-initialized input tensor.
        params = self.inputs
        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=params["data"],
                                   dtype=params["dtype"],
                                   initializer=params["initializer"],
                                   attach_grad=params["run_backward"])
# Example #12
    def __init__(self, ctx=mx.cpu(), warmup=5, runs=25, inputs=None):
        """Prepare an input tensor and a GlobalAvgPool1D block to benchmark.

        The default data shape (32, 3, 256) follows the "NCW" layout
        configured below.
        """
        defaults = {
            "data": (32, 3, 256),
            "data_initializer": nd.normal,
            "layout": "NCW",
            "run_backward": True,
            "dtype": "float32",
        }

        super().__init__(ctx=ctx, warmup=warmup, runs=runs,
                         default_parameters=defaults,
                         custom_parameters=inputs)

        # Randomly-initialized input tensor.
        params = self.inputs
        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=params["data"],
                                   dtype=params["dtype"],
                                   initializer=params["data_initializer"],
                                   attach_grad=params["run_backward"])

        # Block under benchmark.
        self.block = nn.GlobalAvgPool1D(layout=params["layout"])
        self.block.initialize(ctx=self.ctx)