Example #1
    def __init__(
        self,
        params: Union[Iterator[Parameter], List[Dict]],
        lr: float = 0.001,
        alpha: float = 0.99,
        eps: float = 1e-08,
        weight_decay: float = 0,
        momentum: float = 0.0,
        centered: bool = False,
    ):
        assert lr >= 0.0, f"Invalid learning rate: {lr}"
        assert alpha >= 0.0, f"Invalid alpha value: {alpha}"
        assert eps >= 0.0, f"Invalid epsilon value: {eps}"
        assert weight_decay >= 0.0, f"Invalid weight_decay value: {weight_decay}"
        assert momentum == 0.0, "momentum > 0 is not supported yet"
        options = dict()
        options["lr"] = lr
        options["alpha"] = alpha
        options["eps"] = eps
        options["weight_decay"] = weight_decay
        options["centered"] = centered
        super().__init__(params, options)

        for param_group in self.param_groups:
            for param in param_group.parameters:
                assert param.is_leaf, "parameters must be leaf tensor"
                self._state[param] = dict()

        self._centered_rmsprop = (
            flow.stateful_op("rmsprop_update")
            .Input("model").Input("model_diff")
            .Input("mean_square").Input("mean_gradient")
            .Build()
        )
        self._rmsprop = (
            flow.stateful_op("rmsprop_update")
            .Input("model").Input("model_diff").Input("mean_square")
            .Build()
        )
Example #2
    def __init__(
        self,
        parameters: Union[Iterator[Parameter], List[Dict]],
        lr: float = 0.001,
        momentum: float = 0.0,
        weight_decay: float = 0.0,
    ):
        assert lr >= 0.0, f"Invalid learning rate: {lr}"
        assert momentum >= 0.0, f"Invalid momentum: {momentum}"
        assert weight_decay >= 0.0, f"Invalid weight_decay: {weight_decay}"
        options = dict()
        options["lr"] = lr
        options["momentum"] = momentum
        options["weight_decay"] = weight_decay
        super().__init__(parameters, options)

        for param_group in self.param_groups:
            for param in param_group.parameters:
                assert param.is_leaf, "parameters must be leaf tensor"
                self._state[param] = dict()

        self._momentum_sgd = (
            flow.stateful_op("momentum_update")
            .Input("model").Input("model_diff").Input("momentum").Build()
        )
        self._sgd = (
            flow.stateful_op("sgd_update").Input("model").Input("model_diff").Build()
        )
Example #3
    def __init__(
        self,
        params: Union[Iterator[Parameter], List[Dict]],
        lr: float = 0.001,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-08,
        weight_decay: float = 0,
        amsgrad: bool = False,
        do_bias_correction: bool = True,
    ):
        assert lr >= 0.0, f"Invalid learning rate: {lr}"
        assert eps >= 0.0, f"Invalid epsilon value: {eps}"
        assert (
            betas[0] >= 0.0 and betas[0] < 1.0
        ), f"Invalid beta parameter at index 0: {betas[0]}"
        assert (
            betas[1] >= 0.0 and betas[1] < 1.0
        ), f"Invalid beta parameter at index 1: {betas[1]}"
        assert weight_decay >= 0.0, f"Invalid weight_decay value: {weight_decay}"
        options = dict()
        options["lr"] = lr
        options["eps"] = eps
        options["betas"] = betas
        options["weight_decay"] = weight_decay
        options["amsgrad"] = amsgrad
        options["bias_correction1"] = 1.0
        options["bias_correction2"] = 1.0
        options["do_bias_correction"] = do_bias_correction
        super().__init__(params, options)

        for param_group in self.param_groups:
            for param in param_group.parameters:
                assert param.is_leaf, "parameters must be leaf tensor"
                self._state[param] = dict()

        self._op_with_amsgrad = (
            flow.stateful_op("adam_update")
            .Input("model")
            .Input("model_diff")
            .Input("m")
            .Input("v")
            .Input("max_v")
            .Build()
        )

        self._op_without_amsgrad = (
            flow.stateful_op("adam_update")
            .Input("model")
            .Input("model_diff")
            .Input("m")
            .Input("v")
            .Build()
        )
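
Usage note: a minimal sketch assuming this is the constructor of oneflow.optim.Adam; which of the two ops built above is used would depend on the amsgrad flag:

import oneflow as flow

model = flow.nn.Linear(4, 2)
# amsgrad=True should select the op that also tracks "max_v"; the default uses the plain adam_update op.
optimizer = flow.optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-4, amsgrad=False)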
Example #4
 def __init__(self, blob_name: str, name: Optional[str] = None):
     super().__init__()
     if name is not None:
         print("WARNING: name has been deprecated and has NO effect.\n")
     self._op = (flow.stateful_op("ofrecord_bytes_decoder").Input(
         "in").Output("out").Build())
     self.blob_name = blob_name
Example #5
    def __init__(
        self,
        annotation_file: str,
        image_dir: str,
        batch_size: int,
        shuffle: bool = True,
        random_seed: Optional[int] = None,
        group_by_aspect_ratio: bool = True,
        remove_images_without_annotations: bool = True,
        stride_partition: bool = True,
        device: Union[flow.device, str] = None,
        placement: flow.placement = None,
        sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
    ):
        super().__init__()

        _handle_shuffle_args(self, shuffle, random_seed)
        _handle_distributed_args(self, device, placement, sbp)

        self.annotation_file = annotation_file
        self.image_dir = image_dir
        self.batch_size = batch_size
        self.group_by_aspect_ratio = group_by_aspect_ratio
        self.remove_images_without_annotations = remove_images_without_annotations
        self.stride_partition = stride_partition

        self._op = (flow.stateful_op("COCOReader").Output("image").Output(
            "image_id").Output("image_size").Output("gt_bbox").Output(
                "gt_label").Output("gt_segm").Output("gt_segm_index").Build())
Example #6
 def __init__(
     self,
     blob_name: str,
     shape: Sequence[int],
     dtype: flow.dtype,
     dim1_varying_length: bool = False,
     truncate: bool = False,
     auto_zero_padding: bool = False,
     name: Optional[str] = None,
 ):
     super().__init__()
     if auto_zero_padding:
         print(
             "WARNING: auto_zero_padding has been deprecated, Please use truncate instead.\n"
         )
     if name is not None:
         print("WARNING: name has been deprecated and has NO effect.\n")
     self.blob_name = blob_name
     self.shape = shape
     self.dtype = dtype
     self.dim1_varying_length = dim1_varying_length
     self.truncate = truncate
     self.auto_zero_padding = auto_zero_padding
     self._op = (flow.stateful_op("ofrecord_raw_decoder").Input(
         "in").Output("out").Build())
Example #7
    def __init__(
        self,
        files: List[str],
        batch_size: int,
        shuffle: bool,
        shuffle_mode: str,
        random_seed: Optional[int] = None,
        shuffle_buffer_size: int = 1024,
        shuffle_after_epoch: bool = False,
        verify_example: bool = True,
        placement: flow.placement = None,
        sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
    ):

        super().__init__()

        _handle_shuffle_args(self, shuffle, random_seed)
        _handle_distributed_args(self, None, placement, sbp)

        if shuffle_mode not in ["batch", "instance"]:
            raise ValueError("shuffle_mode should be 'batch' or 'instance'")

        self.files = files
        self.batch_size = batch_size
        self.shuffle_mode = shuffle_mode
        self.shuffle_buffer_size = shuffle_buffer_size
        self.shuffle_after_epoch = shuffle_after_epoch
        self.verify_example = verify_example

        self.op = flow.stateful_op("OneRecReader").Output("out").Build()
Example #8
 def __init__(self, shape: Sequence[int], dtype: flow.dtype,
              alignment: int):
     super().__init__()
     self._op = (flow.stateful_op("image_batch_align").Input("in").Output(
         "out").Build())
     self.shape = shape
     self.dtype = dtype
     self.alignment = alignment
Example #9
 def __init__(self,
              dtype: flow.dtype = flow.uint8,
              color_space: str = "BGR"):
     super().__init__()
     self.color_space = color_space
     self.dtype = dtype
     self._op = flow.stateful_op("image_decode").Input("in").Output(
         "out").Build()
Example #10
    def __init__(
        self,
        color_space: str = "BGR",
        output_layout: str = "NCHW",
        crop_h: int = 0,
        crop_w: int = 0,
        crop_pos_y: float = 0.5,
        crop_pos_x: float = 0.5,
        mean: Sequence[float] = [0.0],
        std: Sequence[float] = [1.0],
        output_dtype: flow.dtype = flow.float,
    ):
        super().__init__()
        if output_layout != "NCHW":
            print(
                "WARNING: output_layout has been deprecated. Please use Environment Variable ONEFLOW_ENABLE_NHWC, and make it equals 1."
            )
        if os.getenv("ONEFLOW_ENABLE_NHWC") == "1":
            output_layout = "NHWC"
        else:
            output_layout = "NCHW"

        self.color_space = color_space
        self.output_layout = output_layout
        self.mean = mean
        self.std = std
        self.crop_h = crop_h
        self.crop_w = crop_w
        self.crop_pos_y = crop_pos_y
        self.crop_pos_x = crop_pos_x
        self.output_dtype = output_dtype

        self._op_uint8_with_mirror = (
            flow.stateful_op("crop_mirror_normalize_from_uint8")
            .Input("in").Input("mirror").Output("out").Build()
        )
        self._op_uint8_no_mirror = (
            flow.stateful_op("crop_mirror_normalize_from_uint8")
            .Input("in").Output("out").Build()
        )
        self._op_buffer_with_mirror = (
            flow.stateful_op("crop_mirror_normalize_from_tensorbuffer")
            .Input("in").Input("mirror").Output("out").Build()
        )

        self._op_buffer_no_mirror = (
            flow.stateful_op("crop_mirror_normalize_from_tensorbuffer")
            .Input("in").Output("out").Build()
        )
Example #11
 def __init__(self,
              out_shapes,
              out_dtypes,
              out_num: int = 1,
              dynamic_out: bool = False):
     super().__init__()
     self._op = (
         flow.stateful_op("tensor_buffer_to_list_of_tensors_v2")
         .Input("in").Output("out", out_num).Build()
     )
     self.out_shapes = out_shapes
     self.out_dtypes = out_dtypes
     self.dynamic_out = dynamic_out
Example #12
 def __init__(self, num_sample):
     super().__init__()
     self.num_sample = num_sample
     self._op = (
         flow.stateful_op("distributed_partial_fc_sample")
         .Input("weight")
         .Input("label")
         .Output("mapped_label")
         .Output("sampled_label")
         .Output("sampled_weight")
         .Build()
     )
Example #13
    def __init__(
        self,
        ofrecord_dir: str,
        batch_size: int = 1,
        data_part_num: int = 1,
        part_name_prefix: str = "part-",
        part_name_suffix_length: int = -1,
        random_shuffle: bool = False,
        shuffle_buffer_size: int = 1024,
        shuffle_after_epoch: bool = False,
        random_seed: int = -1,
        device: Union[flow.device, str] = None,
        placement: flow.placement = None,
        sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
        name: Optional[str] = None,
    ):
        super().__init__()

        if name is not None:
            print("WARNING: name has been deprecated and has NO effect.\n")
        self.ofrecord_dir = ofrecord_dir
        self.batch_size = batch_size
        self.data_part_num = data_part_num
        self.part_name_prefix = part_name_prefix
        self.part_name_suffix_length = part_name_suffix_length
        self.random_shuffle = random_shuffle
        self.shuffle_buffer_size = shuffle_buffer_size
        self.shuffle_after_epoch = shuffle_after_epoch

        self.placement = placement
        if placement is None:
            self.device = device or flow.device("cpu")
        else:
            assert device is None

        if placement is not None:
            assert isinstance(sbp, (flow.sbp.sbp, tuple, list)), "sbp: %s" % sbp
            if isinstance(sbp, flow.sbp.sbp):
                sbp = (sbp, )
            else:
                for elem in sbp:
                    assert isinstance(elem, flow.sbp.sbp), "sbp: %s" % sbp
            assert len(sbp) == len(placement.ranks.shape)
        else:
            assert sbp is None, "sbp: %s" % sbp

        self.sbp = sbp

        (self.seed, self.has_seed) = mirrored_gen_random_seed(random_seed)
        self._op = flow.stateful_op("OFRecordReader").Output("out").Build()
Example #14
 def __init__(
     self,
     blob_name: str,
     color_space: str = "BGR",
     num_attempts: int = 10,
     random_seed: Optional[int] = None,
     random_area: Sequence[float] = [0.08, 1.0],
     random_aspect_ratio: Sequence[float] = [0.75, 1.333333],
 ):
     super().__init__()
     self.blob_name = blob_name
     self.color_space = color_space
     self.num_attempts = num_attempts
     self.random_area = random_area
     self.random_aspect_ratio = random_aspect_ratio
     (self.seed, self.has_seed) = mirrored_gen_random_seed(random_seed)
     self._op = (flow.stateful_op("ofrecord_image_decoder_random_crop").
                 Input("in").Output("out").Build())
Example #15
    def __init__(
        self,
        data_file_prefix: str,
        seq_length: int,
        num_samples: int,
        batch_size: int,
        dtype: flow.dtype = flow.int64,
        shuffle: bool = True,
        random_seed: Optional[int] = None,
        split_sizes: Optional[Sequence[str]] = None,
        split_index: Optional[int] = None,
        device: Union[flow.device, str] = None,
        placement: flow.placement = None,
        sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
    ):
        super().__init__()

        _handle_shuffle_args(self, shuffle, random_seed)
        _handle_distributed_args(self, device, placement, sbp)

        self.data_file_prefix = data_file_prefix
        self.batch_size = batch_size
        self.num_samples = num_samples
        self.seq_length = seq_length
        self.dtype = dtype

        if split_index is None:
            split_index = 0
        self.split_index = split_index

        if split_sizes is None:
            split_sizes = (1, )
        self.split_sizes = split_sizes

        if split_index >= len(split_sizes):
            raise ValueError(
                f"split index {split_index} is out of range, split_sizes {split_sizes}"
            )

        self.op_ = (flow.stateful_op("megatron_gpt_mmap_data_loader").Output(
            "out").Build())
Example #16
    def __init__(
        self,
        batch_size: int = 1,
        random_seed: Optional[int] = None,
        probability: float = 0.5,
        device: Union[flow.device, str] = None,
        placement: flow.placement = None,
        sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
    ):
        super().__init__()
        self.batch_size = batch_size
        self.probability = probability

        self.placement = placement
        if placement is None:
            self.device = device or flow.device("cpu")
            assert (
                self.device == "cpu" or self.device == flow.device("cpu")
            ), "coin flip only supports cpu currently."
        else:
            assert device is None

        if placement is not None:
            assert isinstance(sbp, (flow.sbp.sbp, tuple, list)), "sbp: %s" % sbp
            if isinstance(sbp, flow.sbp.sbp):
                sbp = (sbp, )
            else:
                for elem in sbp:
                    assert isinstance(elem, flow.sbp.sbp), "sbp: %s" % sbp
            assert len(sbp) == len(placement.ranks.shape)
            assert self.placement.type == "cpu", "coin flip only supports cpu currently."
        else:
            assert sbp is None, "sbp: %s" % sbp

        self.sbp = sbp

        (self.seed, self.has_seed) = mirrored_gen_random_seed(random_seed)

        self._op = flow.stateful_op("coin_flip").Output("out").Build()
Example #17
    def __init__(
        self,
        params: Union[Iterator[Parameter], List[Dict]],
        lr: float = 0.001,
        lr_decay: float = 0.0,
        weight_decay: float = 0,
        initial_accumulator_value: float = 0.0,
        eps: float = 1e-10,
    ):
        assert lr >= 0.0, f"Invalid learning rate: {lr}"
        assert weight_decay >= 0.0, f"Invalid weight_decay value: {weight_decay}"
        assert (
            initial_accumulator_value >= 0.0
        ), f"Invalid initial_accumulator_value value: {initial_accumulator_value}"
        assert eps >= 0.0, f"Invalid epsilon value: {eps}"

        options = dict()
        options["lr"] = lr
        options["initial_accumulator_value"] = initial_accumulator_value
        options["lr_decay"] = lr_decay
        options["weight_decay"] = weight_decay
        options["eps"] = eps
        super().__init__(params, options)

        for param_group in self.param_groups:
            for param in param_group.parameters:
                assert param.is_leaf, "parameters must be leaf tensor"
                self._state[param] = dict()
                self._state[param]["sum"] = flow.zeros_like(param).fill_(
                    initial_accumulator_value
                )

        self._op = (
            flow.stateful_op("adagrad_update")
            .Input("model")
            .Input("model_diff")
            .Input("sum")
            .Build()
        )
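
Usage note: a minimal sketch assuming this constructor backs oneflow.optim.Adagrad (the model and hyperparameters are illustrative):

import oneflow as flow

model = flow.nn.Linear(4, 2)
# Each parameter's "sum" state starts at initial_accumulator_value, as set up in the loop above.
optimizer = flow.optim.Adagrad(model.parameters(), lr=0.01, initial_accumulator_value=0.1)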
Example #18
    def __init__(
        self,
        target_size: Union[int, Sequence[int]] = None,
        min_size: Optional[int] = None,
        max_size: Optional[int] = None,
        keep_aspect_ratio: bool = False,
        resize_side: str = "shorter",
        channels: int = 3,
        dtype: Optional[flow.dtype] = None,
        interpolation_type: str = "auto",
        name: Optional[str] = None,
        color_space: Optional[str] = None,
        interp_type: Optional[str] = None,
        resize_shorter: int = 0,
        resize_x: int = 0,
        resize_y: int = 0,
    ):
        super().__init__()
        if name is not None:
            print("WARNING: name has been deprecated and has NO effect.\n")
        deprecated_param_used = False
        if color_space is not None:
            print(
                "WARNING: color_space has been deprecated. Please use channels instead."
            )
            print(traceback.format_stack()[-2])
            deprecated_param_used = True
            assert isinstance(color_space, str)
            if color_space.upper() == "RGB" or color_space.upper() == "BGR":
                channels = 3
            elif color_space.upper() == "GRAY":
                channels = 1
            else:
                raise ValueError("invalid color_space")
        self.channels = channels
        if interp_type is not None:
            print(
                "WARNING: interp_type has been deprecated. Please use interpolation_type instead."
            )
            print(traceback.format_stack()[-2])
            deprecated_param_used = True
            assert isinstance(interp_type, str)
            if interp_type == "Linear":
                interpolation_type = "bilinear"
            elif interp_type == "NN":
                interpolation_type = "nearest_neighbor"
            elif interp_type == "Cubic":
                interpolation_type = "bicubic"
            else:
                raise ValueError("invalid interp_type")
        self.interpolation_type = interpolation_type

        if resize_x > 0 and resize_y > 0:
            print(
                "WARNING: resize_x and resize_y has been deprecated. Please use target_size instead."
            )
            print(traceback.format_stack()[-2])
            deprecated_param_used = True
            target_size = (resize_x, resize_y)
            keep_aspect_ratio = False
        if resize_shorter > 0:
            print(
                "WARNING: resize_shorter has been deprecated. Please use target_size instead."
            )
            print(traceback.format_stack()[-2])
            deprecated_param_used = True
            target_size = resize_shorter
            keep_aspect_ratio = True
            resize_side = "shorter"
        self.keep_aspect_ratio = keep_aspect_ratio
        if self.keep_aspect_ratio:
            if not isinstance(target_size, int):
                raise ValueError(
                    "target_size must be an int when keep_aspect_ratio is True"
                )
            if min_size is None:
                min_size = 0
            if max_size is None:
                max_size = 0
            if resize_side == "shorter":
                resize_longer = False
            elif resize_side == "longer":
                resize_longer = True
            else:
                raise ValueError('resize_side must be "shorter" or "longer"')
            self.target_size = target_size
            self.min_size = min_size
            self.max_size = max_size
            self.resize_longer = resize_longer
            self._op = (
                flow.stateful_op("image_resize_keep_aspect_ratio")
                .Input("in").Output("out").Output("size").Output("scale").Build()
            )
        else:
            if (
                not isinstance(target_size, (list, tuple))
                or len(target_size) != 2
                or not all(isinstance(size, int) for size in target_size)
            ):
                raise ValueError(
                    "target_size must be a form like (width, height) when keep_aspect_ratio is False"
                )
            if dtype is None:
                dtype = flow.uint8
            self.dtype = dtype
            (self.target_w, self.target_h) = target_size
            self._op = (flow.stateful_op("image_resize_to_fixed").Input(
                "in").Output("out").Output("scale").Build())
Example #19
 def __init__(self, parallel_conf_str: str):
     super().__init__()
     self._op = (flow.stateful_op("eager_nccl_all_reduce").Input(
         "in").Output("out").Build())
     self.parallel_conf = parallel_conf_str
Example #20
 def __init__(self, blob_name: str, color_space: str = "BGR"):
     super().__init__()
     self._op = (flow.stateful_op("ofrecord_image_decoder").Input(
         "in").Output("out").Build())
     self.blob_name = blob_name
     self.color_space = color_space
Example #21
 def __init__(self, std: Sequence[float], mean: Sequence[float]):
     super().__init__()
     self.std = std
     self.mean = mean
     self._op = flow.stateful_op("image_normalize").Input("in").Output(
         "out").Build()