Example #1
    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.1,
        affine=True,
        track_running_stats=True,
    ):
        super().__init__(num_features, eps, momentum, affine,
                         track_running_stats)
        self._training_op = (
            flow.builtin_op("normalization")
            .Input("x")
            .Input("moving_mean")
            .Input("moving_variance")
            .Input("gamma")
            .Input("beta")
            .Attr("axis", 1)
            .Attr("epsilon", eps)
            .Attr("momentum", momentum)
            .Output("y")
            .Output("mean")
            .Output("inv_variance")
            .Attr("training", True)
            .Build()
        )
        self._testing_op = (
            flow.builtin_op("normalization")
            .Input("x")
            .Input("moving_mean")
            .Input("moving_variance")
            .Input("gamma")
            .Input("beta")
            .Attr("axis", 1)
            .Attr("epsilon", eps)
            .Attr("momentum", momentum)
            .Output("y")
            .Attr("training", False)
            .Build()
        )
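
A built op is callable: inputs are passed positionally in the order their Input(...) slots were declared, and the call returns an indexable sequence of outputs (the same convention the test code in Examples #12 and #13 relies on). A minimal, hypothetical call sketch for the ops above; the buffer and parameter names are assumptions, not part of the snippet:

    # The training op declares three outputs: y, mean, inv_variance.
    y, mean, inv_variance = self._training_op(
        x, running_mean, running_var, gamma, beta
    )
    # The testing op declares a single output, taken at index 0.
    y = self._testing_op(x, running_mean, running_var, gamma, beta)[0]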
Example #2
    def __init__(self, min_value=None, max_value=None) -> None:
        super().__init__()
        if min_value is not None:
            floating_min_value = float(min_value)
            integral_min_value = int(min_value)
        if max_value is not None:
            floating_max_value = float(max_value)
            integral_max_value = int(max_value)

        if min_value is not None and max_value is not None:
            self._op = (
                flow.builtin_op("clip_by_scalar")
                .Input("x")
                .Output("y")
                .Attr("floating_min", floating_min_value)
                .Attr("integral_min", integral_min_value)
                .Attr("floating_max", floating_max_value)
                .Attr("integral_max", integral_max_value)
                .Build()
            )
        elif min_value is not None:
            self._op = (
                flow.builtin_op("clip_by_scalar_min")
                .Input("x")
                .Output("y")
                .Attr("floating_min", floating_min_value)
                .Attr("integral_min", integral_min_value)
                .Build()
            )
        elif max_value is not None:
            self._op = (
                flow.builtin_op("clip_by_scalar_max")
                .Input("x")
                .Output("y")
                .Attr("floating_max", floating_max_value)
                .Attr("integral_max", integral_max_value)
                .Build()
            )
        else:
            raise ValueError(
                "min_value and max_value cannot be None at the same time")
Example #3
    def __init__(
        self,
        normalized_shape: _shape_t,
        eps: float = 1e-5,
        elementwise_affine: bool = True,
    ) -> None:
        super(LayerNorm, self).__init__()
        if isinstance(normalized_shape, int):
            # mypy error: incompatible types in assignment
            normalized_shape = (normalized_shape, )  # type: ignore[assignment]
        self.normalized_shape = tuple(normalized_shape)  # type: ignore[arg-type]

        self.epsilon = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = flow.nn.Parameter(
                flow.Tensor(*self.normalized_shape))
            self.bias = flow.nn.Parameter(flow.Tensor(*self.normalized_shape))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        self.reset_parameters()
        # The first axis to normalize over; defaults to 1.
        self.begin_norm_axis = 1
        # The axis at which the affine params (gamma/beta) start; defaults to 1 in "NCHW" format.
        self.begin_params_axis = 1

        self._op = (flow.builtin_op("layer_norm").Input("x").Input(
            "gamma").Input("beta").Output("y").Output("mean").Output(
                "inv_variance").Output("normalized").Build())

        self._op2 = (flow.builtin_op("layer_norm").Input("x").Output(
            "y").Output("mean").Output("inv_variance").Build())
Example #4
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = "zeros",  # TODO: refine this type
    ):
        super().__init__()

        assert padding_mode == "zeros"
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        self.groups = groups
        self.weight = flow.nn.Parameter(
            flow.Tensor(out_channels, in_channels // groups, *kernel_size))
        self.bias = None
        self._bias_add_op = None
        if bias:
            self.bias = flow.nn.Parameter(flow.Tensor(out_channels))
            self._bias_add_op = (
                flow.builtin_op("bias_add")
                .Input("a")
                .Input("b")
                .Output("out")
                .Attr("axis", 1)
                .Build()
            )

        self._op = (
            flow.builtin_op("conv2d")
            .Input("in")
            .Input("weight")
            .Attr("filters", out_channels)
            .Attr("padding_before", padding)
            .Attr("strides", stride)
            .Attr("kernel_size", kernel_size)
            .Attr("dilation_rate", dilation)
            .Attr("groups", groups)
            .Attr("data_format", "channels_first")
            .Output("out")
            .Build()
        )
        self.reset_parameters()
Example #5
    def __init__(self, copy):
        super().__init__()
        self._copy_op = flow.builtin_op("copy").Input("in").Output("out").Build()
        self._cast_op = flow.builtin_op("cast").Input("in").Output("out").Build()
        self.copy = copy
Example #6
    def __init__(self, p: float = 0.5, inplace: bool = False):
        _DropoutNd.__init__(self, p, inplace)

        # Precompute the inverted-dropout scale; the p == 1.0 branch avoids
        # a division by zero (every element is dropped anyway).
        if self.p == 1.0:
            scale = 1.0
        else:
            scale = float(1.0 / (1.0 - self.p))

        seed = random.randint(-sys.maxsize, sys.maxsize)
        self._op = (
            flow.builtin_op("dropout")
            .Input("in")
            .Input("mask")
            .Output("out")
            .Attr("scale", scale)
            .Build()
        )
        self._mask_op = (
            flow.builtin_op("random_mask_like")
            .Input("like")
            .Output("out")
            .Attr("rate", self.p)
            .Attr("seed", seed)
            .Build()
        )
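
A plausible forward pass chaining the two ops, generating the random mask from the input and then applying it with the precomputed scale; the body below is an assumed sketch, not code from the snippet:

    def forward(self, x):
        if not self.training:  # assumed eval-mode shortcut
            return x
        mask = self._mask_op(x)[0]   # 0/1 mask shaped like x, keep rate 1 - p
        return self._op(x, mask)[0]  # mask and rescale by scale = 1 / (1 - p)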
Example #7
    def __init__(
        self,
        color_space: str = "BGR",
        output_layout: str = "NCHW",
        crop_h: int = 0,
        crop_w: int = 0,
        crop_pos_y: float = 0.5,
        crop_pos_x: float = 0.5,
        mean: Sequence[float] = [0.0],
        std: Sequence[float] = [1.0],
        output_dtype: flow.dtype = flow.float,
    ):
        super().__init__()
        self._op = (flow.builtin_op("crop_mirror_normalize_from_uint8").Input(
            "in").Input("mirror").Output("out").Attr(
                "color_space",
                color_space).Attr("output_layout", output_layout).Attr(
                    "mean", mean).Attr("std", std).Attr("crop_h", crop_h).Attr(
                        "crop_w", crop_w).Attr("crop_pos_y", crop_pos_y).Attr(
                            "crop_pos_x",
                            crop_pos_x).Attr("output_dtype",
                                             output_dtype).Build())

        self._val_op = (
            flow.builtin_op("crop_mirror_normalize_from_tensorbuffer").Input(
                "in").Output("out").Attr("color_space", color_space).Attr(
                    "output_layout",
                    output_layout).Attr("mean", mean).Attr("std", std).Attr(
                        "crop_h", crop_h).Attr("crop_w", crop_w).Attr(
                            "crop_pos_y",
                            crop_pos_y).Attr("crop_pos_x", crop_pos_x).Attr(
                                "output_dtype", output_dtype).Build())
Example #8
    def __init__(self, dim: Optional[int] = None):
        super().__init__()
        self.axis = -1 if dim is None else dim
        self._op = flow.builtin_op("softmax").Input("in").Output("out").Build()
        self._transpose_op = (
            flow.builtin_op("transpose")
            .Input("input")
            .Output("output")
            .Attr("perm", [])
            .Build()
        )
Example #9
    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
        super().__init__()

        self.use_bias = bias
        self.weight = flow.nn.Parameter(flow.Tensor(out_features, in_features))
        self.bias = None

        if bias:
            self.bias = flow.nn.Parameter(flow.Tensor(out_features))

        self._matmul_op = (
            flow.builtin_op("matmul")
            .Input("a")
            .Input("b")
            .Output("out")
            .Attr("transpose_a", False)
            .Attr("transpose_b", True)
            .Attr("alpha", 1.0)
            .Build()
        )

        self._broadcast_matmul_op = (
            flow.builtin_op("broadcast_matmul")
            .Input("a")
            .Input("b")
            .Output("out")
            .Attr("transpose_a", False)
            .Attr("transpose_b", True)
            .Attr("alpha", 1.0)
            .Build()
        )

        self.reset_parameters()
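
A sketch of a matching forward pass, assumed rather than shown in the snippet: 2-D inputs go through the plain matmul op, higher-rank inputs through broadcast_matmul, and transpose_b=True matches the (out_features, in_features) weight layout:

    def forward(self, x):
        op = self._matmul_op if len(x.shape) == 2 else self._broadcast_matmul_op
        res = op(x, self.weight)[0]
        if self.use_bias:
            res += self.bias  # assumed broadcast add of the bias vector
        return res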
Example #10
    def __init__(self) -> None:
        super().__init__()
        self._matmul_op = (
            flow.builtin_op("matmul")
            .Input("a")
            .Input("b")
            .Output("out")
            .Attr("transpose_a", False)
            .Attr("transpose_b", False)
            .Attr("alpha", 1.0)
            .Build()
        )

        self._batch_matmul_op = (
            flow.builtin_op("batch_matmul")
            .Input("a")
            .Input("b")
            .Output("out")
            .Attr("transpose_a", False)
            .Attr("transpose_b", False)
            .Attr("alpha", 1.0)
            .Build()
        )

        self._broadcast_matmul_op = (
            flow.builtin_op("broadcast_matmul")
            .Input("a")
            .Input("b")
            .Output("out")
            .Attr("transpose_a", False)
            .Attr("transpose_b", False)
            .Attr("alpha", 1.0)
            .Build()
        )
Example #11
    def __init__(
        self,
        parameters: Union[Iterator[Parameter], List[Dict]],
        lr: float = 1e-3,
        alpha: float = 0.99,
        eps: float = 1e-8,
        weight_decay: float = 0,
        momentum: float = 0.0,
        centered: bool = False,
        scale: float = 1.0,
    ):
        super().__init__()
        assert lr >= 0.0, f"Invalid learning rate: {lr}"
        assert alpha >= 0.0, f"Invalid alpha value: {alpha}"
        assert eps >= 0.0, f"Invalid epsilon value: {eps}"
        assert weight_decay >= 0.0, f"Invalid weight_decay value: {weight_decay}"
        assert scale > 0.0, f"Invalid scale factor: {scale}"
        assert momentum == 0.0, "momentum > 0 is not supported yet!"

        self._default_options["lr"] = lr
        self._default_options["alpha"] = alpha
        self._default_options["eps"] = eps
        self._default_options["weight_decay"] = weight_decay
        self._default_options["centered"] = centered
        self._default_options["scale"] = scale

        # Add parameters
        if isinstance(parameters, GeneratorType):
            self._param_groups.append(ParamGroup(parameters, self._default_options))
        else:  # List[Dict]
            for param in parameters:
                self._param_groups.append(ParamGroup(param, self._default_options))

        for param_group in self._param_groups:
            for param in param_group.parameters:
                assert param.is_leaf, "parameters must be leaf tensor"
                self._state[param] = dict()
                self._state[param]["square_avg"] = flow.experimental.zeros_like(param)
                if param_group.options["centered"]:
                    self._state[param]["grad_avg"] = flow.experimental.zeros_like(param)

        self._centered_rmsprop = (
            flow.builtin_op("rmsprop_update")
            .Input("model")
            .Input("model_diff")
            .Input("learning_rate")
            .Input("mean_square")
            .Input("mean_gradient")
            .Attr("centered", True)
            .Attr("l1", 0.0)
            .Attr("l2", 0.0)
            .Build()
        )
        self._rmsprop = (
            flow.builtin_op("rmsprop_update")
            .Input("model")
            .Input("model_diff")
            .Input("learning_rate")
            .Input("mean_square")
            .Attr("centered", False)
            .Attr("l1", 0.0)
            .Attr("l2", 0.0)
            .Build()
        )
Example #12
    def test_dynamic_attrs(test_case):
        x = (
            flow.builtin_op("constant")
            .Output("out")
            .Attr("is_floating_value", True)
            .Attr("floating_value", 3.0)
            .Attr("dtype", flow.float32)
            .Attr("shape", [2, 3])
            .Build()
        )()[0]
        op = flow.builtin_op("expand_dims").Input("in").Output("out").Build()
        y = op(x, axis=1)[0]
        test_case.assertEqual(y.shape, flow.Size((2, 1, 3)))
        y = op(x, axis=2)[0]
        test_case.assertEqual(y.shape, flow.Size((2, 3, 1)))
Example #13
    def test_stateful_local_kernel(test_case):
        op1 = (
            flow.builtin_op("constant")
            .Output("out")
            .Attr("is_floating_value", True)
            .Attr("floating_value", 3.0)
            .Attr("dtype", flow.float32)
            .Attr("shape", [1, 1])
            .Build()
        )
        op2 = (
            flow.builtin_op("matmul")
            .Input("a")
            .Input("b")
            .Attr("transpose_a", False)
            .Attr("transpose_b", False)
            .Attr("alpha", float(1.0))
            .Output("out")
            .Build()
        )
        x = op1()[0]
        x = op2(x, x)[0]
Example #14
    def __init__(self, dim: Optional[int] = None, keepdim: bool = False) -> None:
        super().__init__()
        self._op_softmax_last_dim = (
            flow.builtin_op("argmax").Input("in").Output("out").Build()
        )
        self._expand_op = (
            flow.builtin_op("expand_dims")
            .Input("in")
            .Output("out")
            .Attr("axis", -1)
            .Build()
        )
        self._flatten = (
            flow.builtin_op("flatten")
            .Input("in")
            .Output("out")
            .Attr("start_dim", 0)
            .Attr("end_dim", -1)
            .Build()
        )

        self.dim = dim
        self.keepdim = keepdim
Example #15
    def __init__(
        self,
        parameters: Union[Iterator[Parameter], List[Dict]],
        lr: float,
        momentum: float = 0.0,
        scale: float = 1.0,
    ):
        super().__init__()
        assert lr >= 0.0, f"Invalid learning rate: {lr}"
        assert momentum >= 0.0, f"Invalid momentum: {momentum}"
        assert scale >= 0.0, f"Invalid scale factor: {scale}"

        self._default_options["lr"] = lr
        self._default_options["scale"] = scale
        if momentum != 0.0:
            self._default_options["momentum"] = momentum

        # Add parameters
        if isinstance(parameters, GeneratorType):
            self._param_groups.append(ParamGroup(parameters, self._default_options))
        else:  # List[Dict]
            for param in parameters:
                self._param_groups.append(ParamGroup(param, self._default_options))

        for param_group in self._param_groups:
            for param in param_group.parameters:
                assert param.is_leaf, "parameters must be leaf tensor"
                self._state[param] = dict()
                if "momentum" in self._default_options:
                    self._state[param]["momentum_buf"] = flow.tmp.zeros(
                        # TODO: zeros module support flow.Size parameter
                        tuple(param.shape))

        if "momentum" in self._default_options.keys():
            self._op = (flow.builtin_op("momentum_update").Input(
                "model").Input("model_diff").Input(
                    "learning_rate").Input("momentum").Attr(
                        "scale",
                        self._default_options["scale"]).Attr("l1", 0.0).Attr(
                            "l2",
                            0.0).Attr("beta",
                                      self._default_options["momentum"]).Attr(
                                          "weight_decay", 0.0).Build())
        else:
            self._op = (flow.builtin_op("sgd_update").Input("model").Input(
                "model_diff").Input("learning_rate").Attr(
                    "scale", self._default_options["scale"]).Attr(
                        "weight_decay", 0.0).Attr("l1", 0.0).Attr("l2",
                                                                  0.0).Build())
Example #16
    def __init__(
        self,
        parameters: Union[Iterator[Parameter], List[Dict]],
        lr: float = 1e-3,
        momentum: float = 0.0,
        scale: float = 1.0,
    ):
        super().__init__()
        assert lr >= 0.0, f"Invalid learning rate: {lr}"
        assert momentum >= 0.0, f"Invalid momentum: {momentum}"
        assert scale >= 0.0, f"Invalid scale factor: {scale}"

        self._default_options["lr"] = lr
        self._default_options["scale"] = scale
        self._default_options["momentum"] = momentum

        # Add parameters
        if isinstance(parameters, GeneratorType):
            self.param_groups.append(ParamGroup(parameters, self._default_options))
        else:  # List[Dict]
            for param in parameters:
                self.param_groups.append(ParamGroup(param, self._default_options))

        for param_group in self.param_groups:
            for param in param_group.parameters:
                assert param.is_leaf, "parameters must be leaf tensor"
                self._state[param] = dict()
                if param_group["momentum"] != 0.0:
                    self._state[param]["momentum_buf"] = flow.experimental.zeros_like(
                        param
                    )

        self._momentum_sgd = (
            flow.builtin_op("momentum_update")
            .Input("model")
            .Input("model_diff")
            .Input("momentum")
            .Attr("l1", 0.0)
            .Attr("l2", 0.0)
            .Attr("weight_decay", 0.0)
            .Build()
        )
        self._sgd = (
            flow.builtin_op("sgd_update")
            .Input("model")
            .Input("model_diff")
            .Attr("weight_decay", 0.0)
            .Attr("l1", 0.0)
            .Attr("l2", 0.0)
            .Build()
        )
Example #17
    def __init__(
        self,
        ofrecord_dir: str,
        batch_size: int = 1,
        data_part_num: int = 1,
        part_name_prefix: str = "part-",
        part_name_suffix_length: int = -1,
        random_shuffle: bool = False,
        shuffle_buffer_size: int = 1024,
        shuffle_after_epoch: bool = False,
        random_seed: int = -1,
        name: Optional[str] = None,
    ):
        super().__init__()
        seed, has_seed = mirrored_gen_random_seed(random_seed)
        self._op = (
            flow.builtin_op("OFRecordReader", name)
            .Output("out")
            .Attr("data_dir", ofrecord_dir)
            .Attr("data_part_num", data_part_num)
            .Attr("batch_size", batch_size)
            .Attr("part_name_prefix", part_name_prefix)
            .Attr("random_shuffle", random_shuffle)
            .Attr("shuffle_buffer_size", shuffle_buffer_size)
            .Attr("shuffle_after_epoch", shuffle_after_epoch)
            .Attr("part_name_suffix_length", part_name_suffix_length)
            .Attr("seed", seed)
            .Build()
        )
Example #18
    def forward(self, x):
        _, _, h, w = x.shape

        if (self.padding[2] < h and self.padding[3] < h
                and self.padding[0] < w and self.padding[1] < w):
            if x.dtype in [flow.float32, flow.float16, flow.float64]:
                floating_value = float(self.value)
                integral_value = int(0)
            else:
                floating_value = float(0)
                integral_value = int(self.value)

            self._op = (
                flow.builtin_op("constant_pad2d")
                .Input("x")
                .Output("y")
                .Attr("padding", self.padding)
                .Attr("floating_value", floating_value)
                .Attr("integral_value", integral_value)
                .Build()
            )

            res = self._op(x)[0]
            return res

        else:
            raise AssertionError(
                "Padding size should be less than the corresponding input dimension. Please check."
            )
Example #19
File: permute.py Project: xmyqsh/oneflow
    def __init__(self, *dims) -> None:
        super().__init__()
        self.perm = list(*dims)

        self._op = (
            flow.builtin_op("transpose")
            .Input("input")
            .Output("output")
            .Attr("perm", [])
            .Build()
        )
Example #20
    def __init__(self, num_parameters: int = 1, init: float = 0.25) -> None:
        super().__init__()
        self.num_parameters = num_parameters
        self.weight = flow.nn.Parameter(
            flow.Tensor(num_parameters, 1, 1).fill_(init)
        )
        self.op = (
            flow.builtin_op("prelu").Input("x").Input("alpha").Output("y").Build()
        )
Example #21
    def __init__(
        self,
        min_val: float = -1,
        max_val: float = 1,
        inplace: bool = False,
        min_value: Optional[float] = None,
        max_value: Optional[float] = None,
    ):
        super().__init__()
        if min_value is not None:
            warnings.warn(
                "keyword argument min_value is deprecated, use min_val instead"
            )
            min_val = min_value
        if max_value is not None:
            warnings.warn(
                "keyword argument max_value is deprecated, use max_val instead"
            )
            max_val = max_value
        self._op = (
            flow.builtin_op("hardtanh")
            .Input("in")
            .Attr("min_val", min_val)
            .Attr("max_val", max_val)
            .Output("out")
            .Build()
        )
Example #22
def _build_reduce_op(op_type_name, keepdims):
    return (
        flow.builtin_op(op_type_name)
        .Input("input_tensor")
        .Output("output_tensor")
        .Attr("keepdims", keepdims)
        .Build()
    )
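
Usage sketch: the helper builds an op once so it can be reused across calls. The op name is real (it appears in Example #29); passing the reduction axis per call is an assumption, modeled on the dynamic-attr calls in Example #12:

    reduce_sum_op = _build_reduce_op("reduce_sum", keepdims=False)
    total = reduce_sum_op(x, axis=[0])[0]  # axis assumed to be a dynamic attr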
Example #23
    def __init__(
        self,
        start: Tuple[int, ...],
        stop: Tuple[int, ...],
        step: Tuple[int, ...],
    ) -> None:
        super().__init__()
        self._op = (
            flow.builtin_op("slice_update")
            .Input("x")
            .Input("update")
            .Output("y")
            .Attr("start", start)
            .Attr("stop", stop)
            .Attr("step", step)
            .Build()
        )
Example #24
def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
    scores_inds = flow_exp.argsort(scores, dim=0, descending=True)
    boxes = flow._C.gather(boxes, scores_inds, axis=0)
    _nms_op = (
        flow_exp.builtin_op("nms")
        .Input("in")
        .Output("out")
        .Attr("iou_threshold", iou_threshold)
        .Attr("keep_n", -1)
        .Build()
    )
    keep = _nms_op(boxes)[0]
    index = flow_exp.squeeze(flow_exp.argwhere(keep), dim=[1])
    return flow._C.gather(scores_inds, index, axis=0)
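
Usage sketch with hypothetical tensors, boxes of shape (N, 4) and scores of shape (N,):

    keep_inds = nms(boxes, scores, iou_threshold=0.5)
    # indices into the original boxes, ordered by descending score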
Example #25
    def __init__(self) -> None:
        super().__init__()
        self._op = (
            flow.builtin_op("scalar_mul_by_tensor")
            .Input("x")
            .Input("scalar")
            .Output("y")
            .Build()
        )
Example #26
    def __init__(self, dtype: flow.dtype) -> None:
        super().__init__()
        self._op = (
            flow.builtin_op("cast")
            .Input("in")
            .Output("out")
            .Attr("dtype", dtype)
            .Build()
        )
Example #27
    def __init__(
        self,
        dim: Optional[int] = 1,
    ):
        super().__init__()
        self.dim = dim
        self._op = (
            flow.builtin_op("transpose")
            .Input("input")
            .Output("output")
            .Attr("perm", [])
            .Build()
        )
Example #28
    def __init__(
        self,
        size: Union[_size_any_t, flow.Size],
        value: Union[float, int],
        dtype: Optional[flow.dtype],
        device: Union[flow.device, str] = None,
        requires_grad: bool = False,
    ) -> None:
        super().__init__()
        assert size is not None, "size must not be None!"
        assert isinstance(
            size, (int, tuple, flow.Size)
        ), "size should be an int or a tuple of ints!"
        size = _single(size)
        if dtype is None:
            dtype = flow.float32

        if device is None:
            self.device = flow.device("cpu")
        else:
            self.device = device
        self.requires_grad = requires_grad

        if dtype in [
            flow.int,
            flow.int64,
            flow.int32,
            flow.char,
            flow.int8,
            flow.long,
            flow.uint8,
        ]:
            floating_value = float(0)
            integer_value = int(value)
            is_floating_value = False
        elif dtype in [
            flow.float32,
            flow.float,
            flow.double,
            flow.float64,
            flow.float16,
            flow.half,
        ]:
            floating_value = float(value)
            integer_value = int(0)
            is_floating_value = True
        else:
            raise NotImplementedError("Unsupported data type")

        self._op = (
            flow.builtin_op("constant")
            .Output("out")
            .Attr("is_floating_value", is_floating_value)
            .Attr("floating_value", floating_value)
            .Attr("integer_value", integer_value)
            .Attr("dtype", dtype)
            .Attr("shape", size)
            .Build()
        )
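
The constant op declares no inputs, so invoking it takes no arguments, exactly as op1()[0] does in Example #13; a likely forward body:

    def forward(self):
        return self._op()[0]  # the filled tensor is the single output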
Example #29
    def __init__(self,
                 axis: Optional[Union[int, Sequence[int]]] = None,
                 keepdims: bool = False) -> None:
        super().__init__()

        self.axis = axis
        self.keepdims = keepdims
        self._op = (flow.builtin_op("reduce_sum").Input("input_tensor").Output(
            "output_tensor").Attr("keepdims", keepdims).Build())
Example #30
    def __init__(self) -> None:
        super().__init__()
        self._op = (
            flow.builtin_op("broadcast_greater")
            .Input("x")
            .Input("y")
            .Output("z")
            .Build()
        )