def __init__(
    self,
    in_channels: int,
    out_channels: int,
    kernel_size: _size_1_t,
    stride: _size_1_t = 1,
    padding: _size_1_t = 0,
    output_padding: _size_1_t = 0,
    groups: int = 1,
    bias: bool = True,
    dilation: _size_1_t = 1,
    padding_mode: str = "zeros",
) -> None:
    super().__init__()
    assert (
        padding_mode == "zeros"
    ), "Only `zeros` padding mode is supported for ConvTranspose1d"
    self.kernel_size = _single(kernel_size)
    self.stride = _single(stride)
    self.padding = _single(padding)
    self.dilation = _single(dilation)
    self.output_padding = _single(output_padding)
    self.groups = groups
    assert in_channels % groups == 0
    assert out_channels % groups == 0
    self.weight = flow.nn.Parameter(
        flow.Tensor(in_channels, out_channels // groups, *self.kernel_size)
    )
    self.filters = out_channels
    self.bias = None
    self._bias_add_op = None
    if bias:
        self.bias = flow.nn.Parameter(flow.Tensor(out_channels))
    self.reset_parameters()
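# Hedged usage sketch: the assert message and the (in, out // groups, k) weight
# layout above match a ConvTranspose1d module; flow.nn.ConvTranspose1d is assumed
# as the public name, which this snippet alone does not confirm.
m = flow.nn.ConvTranspose1d(16, 32, kernel_size=3, stride=2)
x = flow.randn(8, 16, 50)  # (batch, in_channels, length)
y = m(x)  # length_out = (50 - 1) * 2 + (3 - 1) + 1 = 101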
def __init__(
    self,
    in_channels: int,
    out_channels: int,
    kernel_size: _size_1_t,
    stride: _size_1_t = 1,
    padding: _size_1_t = 0,
    dilation: _size_1_t = 1,
    groups: int = 1,
    bias: bool = True,
    padding_mode: str = "zeros",
):
    super().__init__()
    assert padding_mode == "zeros", "Only `zeros` padding mode is supported"
    self.padding_mode = padding_mode
    self.kernel_size = _single(kernel_size)
    self.stride = _single(stride)
    self.padding = _single(padding)
    self.dilation = _single(dilation)
    self.groups = groups
    self.channel_pos = "channels_first"
    assert in_channels % groups == 0
    assert out_channels % groups == 0
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.weight = flow.nn.Parameter(
        flow.Tensor(out_channels, in_channels // groups, *self.kernel_size)
    )
    self.out_channel_groups = out_channels // groups
    self.bias = None
    if bias:
        self.bias = flow.nn.Parameter(flow.Tensor(out_channels))
    self.reset_parameters()
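# Hedged usage sketch for the forward-convolution counterpart above (assumed to
# be flow.nn.Conv1d, given the (out, in // groups, k) weight layout):
m = flow.nn.Conv1d(16, 32, kernel_size=3)
x = flow.randn(8, 16, 50)
y = m(x)  # shape (8, 32, 48): length_out = 50 - (3 - 1) = 48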
def new_empty_op(
    x, size, dtype=None, device=None, placement=None, sbp=None, requires_grad=False
):
    new_size = _single(_handle_size_arg(size))
    new_dtype = dtype
    new_device = device
    new_placement = placement
    new_sbp = sbp
    if dtype is None:
        new_dtype = x.dtype
    if device is None:
        new_device = x.device if x.is_local else None
    if placement is None:
        new_placement = x.placement if x.is_global else None
    if sbp is None:
        new_sbp = x.sbp if x.is_global else None
    return empty_op(
        new_size,
        dtype=new_dtype,
        device=new_device,
        placement=new_placement,
        sbp=new_sbp,
        requires_grad=requires_grad,
    )
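# Hedged example, assuming new_empty_op is bound as Tensor.new_empty: any
# unspecified dtype/device/placement/sbp is inherited from the source tensor x.
x = flow.ones(2, 3, dtype=flow.float64)
y = x.new_empty((4, 5))  # uninitialized, dtype flow.float64, same device as x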
def __init__(
    self,
    kernel_size: _size_1_t,
    stride: Optional[_size_1_t] = None,
    padding: _size_1_t = 0,
    ceil_mode: bool = False,
    count_include_pad: bool = True,
):
    super().__init__()
    self.kernel_size = _single(kernel_size)
    data_format = "NCL"  # only support "NCL" for now
    self.channel_pos = (
        "channels_first" if data_format == "NCL" else "channels_last"
    )
    self.stride = _single(stride) if stride is not None else _single(kernel_size)
    self.ceil_mode = ceil_mode
    self.count_include_pad = count_include_pad
    self.padding = _single(padding)
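# Hedged usage sketch (the _single calls imply a 1d pool, assumed to be
# flow.nn.AvgPool1d); ceil_mode controls whether a partial final window counts:
m = flow.nn.AvgPool1d(kernel_size=3, stride=2)  # floor((10 - 3) / 2) + 1 = 4
m_ceil = flow.nn.AvgPool1d(kernel_size=3, stride=2, ceil_mode=True)  # ceil -> 5
x = flow.randn(1, 4, 10)
print(m(x).shape, m_ceil(x).shape)  # (1, 4, 4) (1, 4, 5)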
def __init__(
    self,
    kernel_size: _size_1_t,
    stride: Optional[_size_1_t] = None,
    padding: _size_1_t = 0,
    dilation: _size_1_t = 1,
    return_indices: bool = False,
    ceil_mode: bool = False,
):
    super().__init__()
    self.kernel_size = _single(kernel_size)
    self.stride = _single(stride) if stride is not None else self.kernel_size
    data_format = "NCL"  # only support "NCL" for now
    self.channel_pos = "channels_first" if data_format == "NCL" else "channels_last"
    self.dilation = _single(dilation)
    self.padding = _single(padding)
    self.return_indices = return_indices
    self.ceil_mode = ceil_mode
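# Hedged usage sketch, assuming this is flow.nn.MaxPool1d over NCL input:
m = flow.nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
x = flow.randn(1, 4, 10)
y = m(x)  # shape (1, 4, 5): floor((10 + 2*1 - 3) / 2) + 1 = 5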
def _rand_op_common_process(
    size, device=None, generator=None, placement=None, sbp=None
):
    if isinstance(device, str):
        device = flow.device(device)
    size = _single(size)
    processed_sbp = sbp
    if placement is not None:
        if isinstance(processed_sbp, flow.sbp.sbp):
            processed_sbp = (processed_sbp,)
    return size, device, generator, placement, processed_sbp
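# Hedged illustration of the normalization this internal helper performs:
# device strings become flow.device objects, and a scalar sbp is wrapped in a
# tuple whenever a placement is given.
size, device, _, _, _ = _rand_op_common_process((2, 3), device="cuda")
# size == (2, 3), device == flow.device("cuda")
_, _, _, placement, sbp = _rand_op_common_process(
    4, placement=flow.placement("cpu", ranks=[0]), sbp=flow.sbp.broadcast
)
# _single(4) == (4,); sbp is wrapped into the tuple (flow.sbp.broadcast,)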
def __init__(
    self,
    size: Union[_size_any_t, flow.Size],
    value: Union[float, int],
    dtype: Optional[flow.dtype],
    device: Union[flow.device, str] = None,
    placement: flow.placement = None,
    sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
    requires_grad: bool = False,
) -> None:
    super().__init__()
    assert size is not None, "size must not be None!"
    assert isinstance(
        size, (int, tuple, list, flow.Size)
    ), "size must be an int or a tuple/list of ints!"
    self.device = device
    if isinstance(self.device, str):
        self.device = flow.device(self.device)
    self.requires_grad = requires_grad
    size = _single(size)
    if dtype is None:
        dtype = flow.float32
    if placement is None:
        if device is None:
            self.device = flow.device("cpu")
    else:
        assert device is None
    self.placement = placement
    self.sbp = sbp
    if placement is not None:
        assert isinstance(sbp, (flow.sbp.sbp, tuple, list)), "sbp: %s" % sbp
        if isinstance(self.sbp, flow.sbp.sbp):
            self.sbp = (self.sbp,)
        else:
            for elem in sbp:
                assert isinstance(elem, flow.sbp.sbp), "sbp: %s" % sbp
            assert len(self.sbp) == len(placement.hierarchy)
    else:
        assert sbp is None, "sbp: %s" % sbp
    self.shape = size
    self.value = value
    self.dtype = dtype
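# Hedged usage, assuming this __init__ backs the functional flow.full (the
# module name is not visible in this snippet):
y = flow.full((2, 3), 5.0)  # 2x3 local tensor filled with 5.0, dtype flow.float32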
def empty_op(
    *size,
    dtype: Optional[flow.dtype] = None,
    device: Union[flow.device, str] = None,
    placement: flow.placement = None,
    sbp: Union[
        flow._oneflow_internal.sbp.sbp, List[flow._oneflow_internal.sbp.sbp]
    ] = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
):
    """
    Returns a tensor filled with uninitialized data.
    The shape of the tensor is defined by the variable argument ``size``.

    Args:
        size (int... or oneflow.Size): Defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple or oneflow.Size.
        dtype (flow.dtype, optional): The desired data type of the returned tensor. Default: ``flow.float32``.
        device (oneflow.device, optional): The desired device of the returned local tensor. If None, uses the current device.
        placement (flow.placement, optional): The desired placement of the returned global tensor. If None, constructs a local tensor.
        sbp (flow.sbp or List[flow.sbp], optional): The desired sbp of the returned global tensor.
        requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory (bool, optional): If set, the returned tensor is allocated in pinned memory. Works only for CPU tensors. Default: False.

    For example:

    .. code-block:: python

        >>> import oneflow as flow
        >>> y = flow.empty(4, 5)  # construct local empty tensor
        >>> y.shape
        oneflow.Size([4, 5])
        >>> y.is_global
        False
        >>> placement = flow.placement("cpu", ranks=[0])
        >>> y = flow.empty(4, 5, placement=placement, sbp=flow.sbp.broadcast)  # construct global empty tensor
        >>> y.is_global
        True

    """
    assert size is not None, "shape must not be None"
    shape = _single(_handle_size_arg(size))
    if dtype is None:
        dtype = flow.float32
    if placement is None:
        if device is None:
            device = flow.device("cpu")
    else:
        assert (
            device is None
        ), "argument 'device' must be None when argument 'placement' exists"
    if placement is not None:
        assert (
            sbp is not None
        ), "argument 'sbp' must not be None when argument 'placement' exists"
        assert isinstance(
            sbp, (flow.sbp.sbp, tuple, list)
        ), "argument 'sbp' must be flow.sbp.sbp, not %s" % type(sbp)
        if isinstance(sbp, flow.sbp.sbp):
            sbp = (sbp,)
        else:
            for elem in sbp:
                assert isinstance(elem, flow.sbp.sbp), (
                    "Element in argument 'sbp' must be flow.sbp.sbp, not %s"
                    % type(elem)
                )
            assert len(sbp) == len(placement.ranks.shape)
    else:
        assert sbp is None, "argument 'sbp' must be None"
    if placement is not None:
        tensor = flow._C.global_empty(shape, dtype=dtype, placement=placement, sbp=sbp)
    else:
        tensor = flow._C.empty(
            shape, dtype=dtype, device=device, pin_memory=pin_memory
        )
    tensor.requires_grad_(requires_grad)
    return tensor
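# Hedged example of the pin_memory path this revision adds (CPU-only, per the
# docstring); the rest of the behavior matches the doctest above.
y = flow.empty(4, 5, pin_memory=True)  # local CPU tensor in pinned host memory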
def new_ones_op(
    x, size=None, dtype=None, device=None, placement=None, sbp=None, requires_grad=False
):
    if isinstance(device, str):
        device = flow.device(device)
    if size is not None:
        size = _single(size)
    new_size = size
    new_dtype = dtype
    new_device = device
    new_placement = placement
    new_sbp = sbp
    new_requires_grad = requires_grad
    if size is None:
        new_size = x.shape
    if dtype is None:
        new_dtype = x.dtype
    if device is None:
        new_device = x.device if x.is_local else None
    if placement is None:
        new_placement = x.placement if x.is_consistent else None
    if sbp is None:
        new_sbp = x.sbp if x.is_consistent else None
    if new_placement is not None:
        assert device is None
        assert new_sbp is not None
    assert isinstance(
        new_size, (int, tuple, flow.Size)
    ), "size parameter not correct, please check!"
    assert isinstance(
        new_dtype, flow.dtype
    ), "dtype parameter not correct, please check!"
    if new_placement is not None:
        assert isinstance(
            new_placement, flow.placement
        ), "placement parameter not correct, please check!"
        assert isinstance(
            new_sbp, flow.sbp.sbp
        ), "sbp parameter not correct, please check!"
    else:
        assert isinstance(
            new_device, (str, flow.device)
        ), "device parameter not correct, please check!"
    assert isinstance(
        new_requires_grad, bool
    ), "requires_grad parameter not correct, please check!"
    if placement is not None:
        res = flow._C.consistent_constant(
            new_size, 1.0, dtype=new_dtype, placement=placement, sbp=sbp
        )
    else:
        res = flow._C.constant(new_size, 1.0, dtype=new_dtype, device=new_device)
    res.requires_grad = new_requires_grad
    return res
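# Hedged example, assuming new_ones_op is bound as Tensor.new_ones: dtype and
# device default to those of the source tensor x.
x = flow.randn(2, 3)
y = x.new_ones((4, 4))  # filled with 1.0; dtype and device inherited from x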
def __init__(self, output_size: _size_1_t) -> None:
    super().__init__()
    assert output_size is not None, "'output_size' cannot be NoneType"
    self.output_size = _single(output_size)
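# Hedged usage sketch, assuming an AdaptiveAvgPool1d-style module: the output
# length is fixed to output_size regardless of the input length.
m = flow.nn.AdaptiveAvgPool1d(5)
y = m(flow.randn(1, 4, 32))  # shape (1, 4, 5)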
def empty_op(
    *size,
    dtype: Optional[flow.dtype] = None,
    device: Union[flow.device, str] = None,
    placement: flow.placement = None,
    sbp: Union[
        flow._oneflow_internal.sbp.sbp, List[flow._oneflow_internal.sbp.sbp]
    ] = None,
    requires_grad: bool = False,
):
    """
    Returns a tensor filled with uninitialized data.
    The shape of the tensor is defined by the variable argument ``size``.

    Args:
        size (int... or oneflow.Size): Defining the shape of the output tensor.
            Can be a variable number of arguments or a collection like a list or tuple or oneflow.Size.
        dtype (flow.dtype, optional): The desired data type of the returned tensor. Default: ``flow.float32``.
        device (oneflow.device, optional): The desired device of the returned local tensor. If None, uses the current device.
        placement (flow.placement, optional): The desired placement of the returned consistent tensor. If None, constructs a local tensor.
        sbp (flow.sbp or List[flow.sbp], optional): The desired sbp of the returned consistent tensor.
        requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.

    For example:

    .. code-block:: python

        >>> import oneflow as flow
        >>> y = flow.empty(4, 5)  # construct local empty tensor
        >>> y.shape
        oneflow.Size([4, 5])
        >>> y.is_consistent
        False
        >>> placement = flow.placement("cpu", {0: [0]})
        >>> y = flow.empty(4, 5, placement=placement, sbp=flow.sbp.broadcast)  # construct consistent empty tensor
        >>> y.is_consistent
        True

    """
    assert size is not None, "shape must not be None"
    shape = _single(_handle_size_arg(size))
    if dtype is None:
        dtype = flow.float32
    if placement is None:
        if device is None:
            device = flow.device("cpu")
    else:
        assert device is None
    if placement is not None:
        assert isinstance(sbp, (flow.sbp.sbp, tuple, list)), "sbp: %s" % sbp
        if isinstance(sbp, flow.sbp.sbp):
            sbp = (sbp,)
        else:
            for elem in sbp:
                assert isinstance(elem, flow.sbp.sbp), "sbp: %s" % sbp
            assert len(sbp) == len(placement.hierarchy)
    else:
        assert sbp is None, "sbp: %s" % sbp
    if placement is not None:
        tensor = flow._C.consistent_empty(shape, dtype=dtype, placement=placement, sbp=sbp)
    else:
        tensor = flow._C.empty(shape, dtype=dtype, device=device)
    tensor.requires_grad_(requires_grad)
    return tensor
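# Hedged example exercising the consistent-tensor path of this older revision;
# the dict-style flow.placement constructor follows the docstring above and may
# not exist in newer releases.
placement = flow.placement("cpu", {0: [0]})
y = flow.empty(2, 3, placement=placement, sbp=flow.sbp.broadcast)
# sbp is normalized to (flow.sbp.broadcast,) before flow._C.consistent_empty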
def expand_op(input, *sizes):
    sizes = _handle_size_arg(sizes)
    sizes = _single(sizes)
    return flow._C.expand(input, sizes)
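# Hedged example, assuming expand_op is exposed as flow.expand / Tensor.expand:
x = flow.ones(1, 3)
y = flow.expand(x, 4, 3)  # broadcasts the size-1 leading dim -> shape (4, 3)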