Example #1
    def Reshape(self, tf_node, inputs):
        """
        Reshapes a tensor.

        Arguments:
            tf_node: NodeDef object, the tensorflow node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the tensorflow node.

        Inputs to tf_node:
            tensor, shape, name
        """
        # TODO: currently only support constants and flatten to 1d and 2d
        # get inputs
        tensor, shape = inputs

        def get_flatten_idx(shape_i, shape_o):
            """
            Check whether flattening shape_i into shape_o is valid.

            Args:
                shape_i: input tensor shape
                shape_o: output flattened tensor shape

            Returns:
                None if the flatten is not valid, otherwise the flatten_at index
            """
            # NOTE: currently unused; the 2-d branch below computes the
            # flatten index inline from cumulative products of the input shape.
            return None

        # get input and output shape
        shape_i = tensor.shape.lengths
        shape_o = tuple(shape.const.astype(int))
        if np.prod(shape_i) != np.prod(shape_o):
            raise ValueError("Total size of input and output dimension "
                             "mismatch.")

        if tensor.const is not None:
            # reshape const
            np_val = np.reshape(tensor.const, shape_o)
            return ng.constant(np_val,
                               shape_to_axes(np_val.shape)).named(tf_node.name)
        else:
            ndims_o = len(shape_o)
            if ndims_o != 1 and ndims_o != 2:
                raise NotImplementedError("Reshape can only support flatten"
                                          "to 1d or 2d.")
            if ndims_o == 1:
                tensor = ng.flatten(tensor)
            else:
                cumprods = list(np.cumprod(shape_i))
                flatten_at_idx = cumprods.index(shape_o[0]) + 1
                tensor = ng.flatten_at(tensor, flatten_at_idx)
            res = ng.cast_axes(tensor, shape_to_axes(shape_o))
            return res.named(tf_node.name)
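The 2-d branch above locates the split point by matching the leading output dimension against cumulative products of the input shape. A minimal numpy sketch of that lookup, with hypothetical shapes:

    import numpy as np

    shape_i = (2, 3, 4)    # hypothetical input shape
    shape_o = (6, 4)       # hypothetical 2-d target shape
    assert np.prod(shape_i) == np.prod(shape_o)

    cumprods = list(np.cumprod(shape_i))             # [2, 6, 24]
    flatten_at_idx = cumprods.index(shape_o[0]) + 1
    print(flatten_at_idx)                            # 2: axes [0:2] collapse into dim 6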
Example #2
    def Fill(self, tf_node, inputs):
        """
        Creates a tensor filled with a scalar value.

        Arguments:
            tf_node: NodeDef object, the tensorflow node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the tensorflow node.

        Inputs to tf_node:
            dims, value, name
        """
        # get inputs
        shape_op, const_val_op = inputs

        # get shape, const_val
        shape = tuple(shape_op.const.astype(int))
        const_val = const_val_op.const

        # convert to numpy value
        np_val = np.zeros(shape)
        np_val.fill(const_val)

        # create op
        ng_op = ng.constant(np_val,
                            shape_to_axes(np_val.shape)).named(tf_node.name)
        return ng_op
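Fill materializes the result eagerly as a numpy array before wrapping it in an ngraph constant; the zeros-then-fill pattern is equivalent to a single np.full call. A small sketch with hypothetical values:

    import numpy as np

    shape = (2, 3)             # hypothetical dims input
    const_val = 7.0            # hypothetical scalar value
    np_val = np.zeros(shape)
    np_val.fill(const_val)     # in-place fill; same result as np.full(shape, const_val)
    assert np.array_equal(np_val, np.full(shape, const_val))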
Example #3
    def Range(self, tf_node, inputs):
        """
        Creates a sequence of integers.

        Arguments:
            tf_node: NodeDef object, the tensorflow node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the tensorflow node.

        Inputs to tf_node:
            start, limit, delta, name
        """
        # get inputs
        start, limit, delta = inputs

        # get range
        try:
            range_val = np.arange(start.const, limit.const, delta.const)
        except Exception:
            raise NotImplementedError("[NON-NATIVE] Inputs to `Range` must all "
                                      "be integers; dynamic allocation is not "
                                      "supported.")

        # return
        return ng.constant(range_val,
                           shape_to_axes(range_val.shape)).named(tf_node.name)
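Since Range only handles compile-time constants, the conversion reduces to a plain np.arange call whose result is baked into a constant. A quick sketch with hypothetical endpoints:

    import numpy as np

    start, limit, delta = 0, 10, 2            # hypothetical constant inputs
    range_val = np.arange(start, limit, delta)
    print(range_val)                          # [0 2 4 6 8]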
Example #4
    def Tile(self, tf_node, inputs):
        """
        Constructs a tensor by tiling a given tensor.

        Arguments:
            tf_node: NodeDef object, the tensorflow node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the tensorflow node.

        Inputs to tf_node:
            tensor, multiples, name
        """
        tensor, multiples = inputs

        # get inputs
        try:
            input_val = tensor.const
            multiples_val = multiples.const
        except Exception:
            raise NotImplementedError(
                "Tile is not natively supported in ngraph; "
                "currently only constant tensors are supported.")

        # check shapes
        input_shape = input_val.shape
        input_ndims = len(input_shape)
        assert input_ndims >= 1 and input_ndims == len(multiples_val)

        output_val = np.tile(input_val, multiples_val.astype(int))

        # make new constants
        return ng.constant(output_val,
                           shape_to_axes(output_val.shape)).named(tf_node.name)
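The tiling itself is delegated to np.tile on the constant value; each entry of multiples says how many times the corresponding axis is repeated. A small sketch with hypothetical inputs:

    import numpy as np

    input_val = np.array([[1, 2], [3, 4]])     # hypothetical const tensor
    multiples_val = np.array([2, 3])           # repeat rows 2x, columns 3x
    output_val = np.tile(input_val, multiples_val.astype(int))
    print(output_val.shape)                    # (4, 6)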
Example #5
    def Const(self, tf_node, inputs):
        """
        Creates a constant tensor.

        Arguments:
            tf_node: NodeDef object, the tensorflow node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the tensorflow node.

        Inputs to tf_node:
            value, dtype, shape, name
        """
        # convert to numpy value
        np_val = tensor_util.MakeNdarray(tf_node.attr['value'].tensor)
        ng_op = ng.constant(np_val,
                            shape_to_axes(np_val.shape)).named(tf_node.name)
        return ng_op
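Const reads the value straight from the node's `value` attribute rather than from its inputs. A minimal sketch of the round trip through TensorFlow's tensor_util, building the TensorProto by hand for illustration:

    import numpy as np
    from tensorflow.python.framework import tensor_util

    # build a TensorProto the way a Const node in a graph_def would carry it
    proto = tensor_util.make_tensor_proto(
        np.arange(6, dtype=np.float32).reshape(2, 3))
    np_val = tensor_util.MakeNdarray(proto)    # back to a numpy ndarray
    print(np_val.shape)                        # (2, 3)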
Example #6
    def ZerosLike(self, tf_node, inputs):
        """
        Creates a tensor with all elements set to zero.

        Arguments:
            tf_node: NodeDef object, the tensorflow node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the tensorflow node.

        Inputs to tf_node:
            tensor, dtype, name
        """
        shape = inputs[0].axes.lengths
        np_val = np.zeros(shape)
        ng_op = ng.constant(np_val,
                            shape_to_axes(np_val.shape)).named(tf_node.name)
        return ng_op
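One caveat worth noting: np.zeros defaults to float64, so the converter above does not propagate the node's dtype attribute. A tiny illustration:

    import numpy as np

    shape = (2, 3)          # hypothetical inputs[0].axes.lengths
    np_val = np.zeros(shape)
    print(np_val.dtype)     # float64, regardless of the TF node's dtype attr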
Example #7
    def TruncatedNormal(self, tf_node, inputs):
        """
        Outputs random values from a truncated normal distribution.
        A `tf.truncated_normal()` call generates several ops; the
        `TruncatedNormal` op is what we implement here.

        shape --> TruncatedNormal
                       |
                       V
        stddev -----> Mul
                       |
                       V
        mean -------> Add
                       |
                       V
                    (output)

        Arguments:
            tf_node: NodeDef object, the tensorflow node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the tensorflow node.

        Inputs to tf_node:
            shape, mean, dtype, seed, name
        """
        # get inputs
        shape = tuple(inputs[0].const.astype(int))

        # generate truncated standard normal
        mu, sigma, lo, up = 0., 1., -2., 2.
        generator = scipy.stats.truncnorm((lo - mu) / sigma, (up - mu) / sigma,
                                          loc=mu,
                                          scale=sigma)
        np_val = generator.rvs(shape)
        ng_op = ng.constant(np_val,
                            shape_to_axes(np_val.shape)).named(tf_node.name)
        return ng_op
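scipy's truncnorm expects its bounds in standard-deviation units of the underlying normal, which is why the converter rescales lo and up by (x - mu) / sigma. A minimal sketch with a hypothetical shape:

    import numpy as np
    import scipy.stats

    mu, sigma, lo, up = 0., 1., -2., 2.
    generator = scipy.stats.truncnorm((lo - mu) / sigma, (up - mu) / sigma,
                                      loc=mu, scale=sigma)
    np_val = generator.rvs((2, 3))                     # hypothetical shape
    assert np.all(np_val >= lo) and np.all(np_val <= up)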
Example #8
    def RandomStandardNormal(self, tf_node, inputs):
        """
        Outputs random values from a normal distribution. A `tf.random_normal()`
        call generates several ops; the `RandomStandardNormal` op is what we
        implement here.

        Arguments:
            tf_node: NodeDef object, the tensorflow node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the tensorflow node.

        Inputs to tf_node:
            shape, mean, dtype, seed, name
        """
        # get inputs
        shape = tuple(inputs[0].const.astype(int))

        # generate standard normal
        np_val = np.random.standard_normal(size=shape)
        ng_op = ng.constant(np_val,
                            shape_to_axes(np_val.shape)).named(tf_node.name)
        return ng_op
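As with TruncatedNormal, the op itself only draws from the standard normal; the graph's separate Mul and Add nodes apply stddev and mean afterwards. The numpy equivalent of that composition, with hypothetical parameters:

    import numpy as np

    shape, mean, stddev = (2, 3), 5.0, 0.1              # hypothetical parameters
    std_val = np.random.standard_normal(size=shape)     # what this op emits
    np_val = std_val * stddev + mean                     # what the Mul/Add nodes add
    print(np_val.shape)                                  # (2, 3)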
Example #9
    def BroadcastGradientArgs(self, tf_node, inputs):
        """
        Given the shapes of two tensors, computes the reduction indices for
        the gradient computation.

        Reference:
        - BCastGradArgsOp https://goo.gl/5vx4QN
        - BCast::BCast https://goo.gl/gzOiA2

        TODO: Untested in real models, dangerous. Currently, our implementation
        only imports the forward graph from tensorflow and does the gradient
        computation in ngraph.

        Arguments:
            tf_node: NodeDef object, the tensorflow node to convert.
            inputs: List of ngraph Ops as inputs to this node.

        Returns:
            A ngraph Op corresponding to the tensorflow node.

        Inputs to tf_node:
            sx, sy
        """
        # get inputs
        sx, sy = list(to_int(inputs[0].const)), list(to_int(inputs[1].const))

        # fast path for common case of identical shapes for sx and sy
        if np.array_equal(sx, sy):
            return None, None

        # reverse the shape of x and y for convenience.
        x = list(reversed(sx))
        y = list(reversed(sy))

        # 1-extend and align x and y so that they are the same size
        if len(x) > len(y):
            y += [1] * (len(x) - len(y))
        else:
            x += [1] * (len(y) - len(x))

        # go through each dimension, starting from the innermost one, and
        # compare the dimensions of x and y; they are compatible if they
        # are equal or either is 1
        grad_x_reduce_idx_ = []
        grad_y_reduce_idx_ = []
        n = len(x)
        for i in range(n):
            if x[i] == y[i]:
                continue
            elif x[i] == 1:
                grad_x_reduce_idx_.append(n - 1 - i)
            elif y[i] == 1:
                grad_y_reduce_idx_.append(n - 1 - i)
            else:
                raise ValueError("Shape %s and %s not numpy-compatible" %
                                 (sx, sy))

        # reverse all vectors since x and y were reversed at the very beginning
        grad_x_reduce_idx_ = list(reversed(grad_x_reduce_idx_))
        grad_y_reduce_idx_ = list(reversed(grad_y_reduce_idx_))

        # make ng constant array
        if grad_x_reduce_idx_:
            x_array = np.array(grad_x_reduce_idx_)
            ng_x_array = ng.constant(x_array, shape_to_axes(
                x_array.shape)).named(tf_node.name)
        else:
            ng_x_array = None

        if grad_y_reduce_idx_:
            y_array = np.array(grad_y_reduce_idx_)
            ng_y_array = ng.constant(y_array, shape_to_axes(
                y_array.shape)).named(tf_node.name)
        else:
            ng_y_array = None

        return ng_x_array, ng_y_array
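A quick worked check of the index logic in plain Python, with hypothetical shapes: broadcasting sx = (3, 1, 5) against sy = (3, 4, 5) should require the gradient w.r.t. x to be summed over axis 1 and no reduction for y.

    sx, sy = [3, 1, 5], [3, 4, 5]          # hypothetical input shapes
    x, y = list(reversed(sx)), list(reversed(sy))
    grad_x_reduce_idx_, grad_y_reduce_idx_ = [], []
    n = len(x)
    for i in range(n):
        if x[i] == y[i]:
            continue
        elif x[i] == 1:
            grad_x_reduce_idx_.append(n - 1 - i)
        elif y[i] == 1:
            grad_y_reduce_idx_.append(n - 1 - i)
    print(list(reversed(grad_x_reduce_idx_)))   # [1]  -> reduce dL/dx over axis 1
    print(list(reversed(grad_y_reduce_idx_)))   # []   -> no reduction for dL/dy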