Example #1
def create_batches_rnd(batch_size, data_folder, wav_lst, N_snt, wlen, lab_dict,
                       fact_amp):

    # Initialize the minibatch buffers: batch_size amplitude-scaled signal chunks and their labels
    sig_batch = np.zeros([batch_size, wlen])
    lab_batch = np.zeros(batch_size)

    snt_id_arr = np.random.randint(N_snt, size=batch_size)

    rand_amp_arr = np.random.uniform(1.0 - fact_amp, 1 + fact_amp, batch_size)

    for i in range(batch_size):

        # read the randomly selected sentence
        [signal, fs] = sf.read(data_folder + wav_lst[snt_id_arr[i]])

        # access a random chunk of the signal
        snt_len = signal.shape[0]
        snt_beg = np.random.randint(snt_len - wlen - 1)
        snt_end = snt_beg + wlen

        channels = len(signal.shape)
        if channels == 2:
            print("WARNING: stereo to mono: " + data_folder +
                  wav_lst[snt_id_arr[i]])
            signal = signal[:, 0]

        sig_batch[i, :] = signal[snt_beg:snt_end] * rand_amp_arr[i]
        lab_batch[i] = lab_dict[wav_lst[snt_id_arr[i]].lower()]

    inp = flow.Tensor(sig_batch).to("cuda")
    lab = flow.Tensor(lab_batch).to("cuda")

    return inp, lab
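
For context, a minimal invocation might look like the sketch below. The file names, folder, and label dictionary are hypothetical, and the snippet assumes numpy, soundfile, and oneflow are imported as np, sf, and flow, as in the example above.

# Hypothetical call: 8 chunks of 3200 samples with +/-20% amplitude jitter
wav_lst = ["spk1_utt1.wav", "spk2_utt1.wav"]
lab_dict = {"spk1_utt1.wav": 0, "spk2_utt1.wav": 1}
inp, lab = create_batches_rnd(batch_size=8, data_folder="data/",
                              wav_lst=wav_lst, N_snt=len(wav_lst),
                              wlen=3200, lab_dict=lab_dict, fact_amp=0.2)
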
Example #2
    def test_module_cpu_cuda(test_case):
        class CustomModule(flow.nn.Module):
            def __init__(self, param1, param2):
                super().__init__()
                self.param1 = param1
                self.param2 = param2

        tensor0 = flow.nn.Parameter(
            flow.Tensor(2, 3, device=flow.device("cpu")))
        tensor1 = flow.nn.Parameter(
            flow.Tensor(2, 3, device=flow.device("cpu")))
        sub_module = CustomModule(tensor0, tensor1)
        m = CustomModule(tensor1, sub_module)
        m.cuda()
        state_dict = m.state_dict()
        test_case.assertEqual(state_dict["param2.param1"].device,
                              flow.device("cuda:0"))
        test_case.assertEqual(state_dict["param2.param2"].device,
                              flow.device("cuda:0"))

        m.cpu()
        state_dict = m.state_dict()
        test_case.assertEqual(state_dict["param2.param1"].device,
                              flow.device("cpu"))
        test_case.assertEqual(state_dict["param2.param2"].device,
                              flow.device("cpu"))
Example #3
def _test_conv1d_bias_true(test_case, device):
    np_arr = np.array(
        [
            [
                [0.90499806, -1.11683071, 0.71605605, -0.56754625, 0.61944169],
                [-0.31317389, -0.26271924, 0.95579433, 0.52468461, 1.48926127],
            ]
        ]
    )
    input = flow.tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    weight = np.array(
        [
            [
                [0.01997352, 0.23834395, 0.00526353],
                [-0.04861857, -0.22751901, -0.06725175],
            ],
            [
                [0.13344523, -0.35202524, 0.15168799],
                [-0.25714493, -0.17459838, 0.28768948],
            ],
            [
                [0.10671382, -0.28205597, -0.39752254],
                [0.36393702, 0.07843742, -0.33898622],
            ],
            [
                [0.20485674, 0.04222689, -0.1898618],
                [0.22519711, -0.15910202, -0.35057363],
            ],
        ]
    )
    bias = np.array([0.01012857, 0.38912651, -0.01600273, -0.3883304])
    m = nn.Conv1d(2, 4, 3, stride=1, bias=True)
    m.weight = flow.nn.Parameter(flow.Tensor(weight))
    m.bias = flow.nn.Parameter(flow.Tensor(bias))
    m = m.to(device)
    np_out = np.array(
        [
            [
                [-0.22349545, -0.08447243, -0.37358052],
                [1.4130373, -0.04644597, 0.86949122],
                [-0.34765026, -0.31004351, -0.14158708],
                [-0.74985039, -0.87430149, -0.77354753],
            ]
        ]
    )
    output = m(input)
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    output = output.sum()
    output.backward()
    np_grad = np.array(
        [
            [
                [0.4649893, 0.11147892, -0.3189539, -0.78394318, -0.43043283],
                [0.28337064, -0.19941133, -0.66853344, -0.95190406, -0.46912211],
            ]
        ]
    )
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
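
As a sanity check on the expected shape: the standard convolution formula L_out = (L_in + 2*padding - dilation*(kernel_size - 1) - 1) // stride + 1 gives (5 + 0 - 2 - 1) // 1 + 1 = 3, matching the (1, 4, 3) shape of np_out.
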
Example #4
 def __init__(
     self,
     in_channels: int,
     out_channels: int,
     kernel_size: _size_1_t,
     stride: _size_1_t = 1,
     padding: Union[str, _size_1_t] = 0,
     dilation: _size_1_t = 1,
     groups: int = 1,
     bias: bool = True,
     padding_mode: str = "zeros",
 ):
     super().__init__()
     assert padding_mode == "zeros"
     self.padding_mode = padding_mode
     self.kernel_size = _single(kernel_size)
     self.stride = _single(stride)
     self.dilation = _single(dilation)
     self.padding = (get_padding(padding, self.kernel_size,
                                 self.dilation, self.stride) if isinstance(
                                     padding, str) else _single(padding))
     self.groups = groups
     self.channel_pos = "channels_first"
     assert in_channels % groups == 0
     assert out_channels % groups == 0
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.weight = flow.nn.Parameter(
         flow.Tensor(out_channels, in_channels // groups,
                     *self.kernel_size))
     self.out_channel_groups = out_channels // groups
     self.bias = None
     if bias:
         self.bias = flow.nn.Parameter(flow.Tensor(out_channels))
     self.reset_parameters()
Example #5
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        output_padding: _size_2_t = 0,
        groups: int = 1,
        bias: bool = True,
        dilation: int = 1,
        padding_mode: str = "zeros",
    ) -> None:
        super().__init__()
        assert padding_mode == "zeros"
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.output_padding = _pair(output_padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        assert in_channels % groups == 0
        assert out_channels % groups == 0
        self.weight = flow.nn.Parameter(
            flow.Tensor(in_channels, out_channels // groups,
                        *self.kernel_size))
        self.in_channel_groups = in_channels // groups
        self.filters = out_channels
        self.bias = None
        self._bias_add_op = None
        if bias:
            self.bias = flow.nn.Parameter(flow.Tensor(out_channels))

        self.reset_parameters()
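
Note the weight layout: a transposed convolution stores its kernel as (in_channels, out_channels // groups, *kernel_size), the reverse of the (out_channels, in_channels // groups, *kernel_size) layout used by the regular convolution in Example #4.
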
Example #6
    def test_concat_with_axis_one(test_case):
        input1 = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        input2 = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)

        of_out = flow.cat([input1, input2], dim=1)
        np_out = np.concatenate((input1.numpy(), input2.numpy()), axis=1)
        test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
Example #7
def _test_fused_tril_softmax_mask_scale(test_case, seq_length, channel, p,
                                        diagonal, tril_scale_value):
    x = np.random.randn(4, seq_length, channel)
    # the fused kernel is only supported on GPU
    fused_x_tensor = flow.Tensor(x).to("cuda")
    fused_x_tensor.requires_grad = True
    fused_out = flow._C.fused_scale_tril_softmax_mask_scale(
        fused_x_tensor,
        p=p,
        diagonal=diagonal,
        tril_scale_value=tril_scale_value)[0]  # The second output is softmax_y

    origin_x_tensor = flow.Tensor(x).to("cuda")
    origin_x_tensor.requires_grad = True
    origin_out = flow.tril(origin_x_tensor, diagonal)
    origin_out = origin_out * tril_scale_value
    origin_out = flow.softmax(origin_out, dim=-1)
    origin_out = flow._C.dropout(origin_out, p=p)

    total_out = fused_out.sum() + origin_out.sum()
    total_out.backward()

    test_case.assertTrue(
        np.allclose(fused_out.numpy(),
                    origin_out.numpy(),
                    atol=1e-4,
                    rtol=1e-4))
    test_case.assertTrue(
        np.allclose(
            fused_x_tensor.grad.numpy(),
            origin_x_tensor.grad.numpy(),
            atol=1e-4,
            rtol=1e-4,
        ))
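
Note that the closing allclose checks can only pass deterministically when p == 0 (presumably how the test is parameterized): the fused kernel and the reference path each draw an independent dropout mask, so any nonzero dropout probability would make the two outputs diverge.
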
Example #8
    def test_tensor_register_post_grad_accumulation_hook(test_case):
        shape = (2, 3)
        x = flow.Tensor(*shape)
        x.requires_grad = True
        x._register_post_grad_accumulation_hook(lambda grad: grad * 2 + 1)
        y = x.sum() + (x * 2).sum()
        y.backward()
        test_case.assertTrue(
            np.allclose(x.grad.numpy(),
                        np.ones(shape) * 7,
                        atol=1e-4,
                        rtol=1e-4))

        x = flow.Tensor(*shape)
        x.requires_grad = True

        def inplace_add_and_return_none(x):
            x.add_(1)
            return None

        x._register_post_grad_accumulation_hook(inplace_add_and_return_none)
        y = x.sum() + (x * 2).sum()
        y.backward()
        test_case.assertTrue(
            np.allclose(x.grad.numpy(),
                        np.ones(shape) * 4,
                        atol=1e-4,
                        rtol=1e-4))
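
The expected values follow from y = x.sum() + (x * 2).sum(), which accumulates a gradient of 3 for every element: the first hook maps it to 3 * 2 + 1 = 7, while the in-place hook adds 1 to get 4.
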
Example #9
    def test_tensor_register_hook(test_case):
        shape = (2, 3)
        x = flow.Tensor(*shape)
        x.requires_grad = True
        x.register_hook(lambda grad: grad * 2 + 1)
        y = x.sum() + (x * 2).sum()
        y.backward()
        test_case.assertTrue(
            np.allclose(x.grad.numpy(),
                        np.ones(shape) * 7,
                        atol=1e-4,
                        rtol=1e-4))
        x = flow.Tensor(*shape)
        x.requires_grad = True
        new_grad = flow.Tensor([[1, 2, 3], [4, 5, 6]])
        x.register_hook(lambda _: new_grad)
        y = x.sum() + (x * 2).sum()
        y.backward()
        test_case.assertTrue(np.allclose(x.grad.numpy(), new_grad.numpy()))
        grad_nonlocal = None

        def assign_nonlocal_variable_and_return_none(grad):
            nonlocal grad_nonlocal
            grad_nonlocal = grad

        x = flow.Tensor(*shape)
        x.requires_grad = True
        new_grad = flow.tensor([[1, 2, 3], [4, 5, 6]], dtype=flow.float32)
        x.register_hook(assign_nonlocal_variable_and_return_none)
        y = x.sum() + (x * 2).sum()
        y.backward()
        test_case.assertTrue(
            np.allclose(grad_nonlocal.numpy(),
                        np.ones(shape) * 3))
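
The three cases cover the register_hook contract: the accumulated gradient is 3 for every element (as in the previous example), so a hook returning grad * 2 + 1 yields 7; a hook returning a tensor replaces the gradient outright; and a hook returning None observes the gradient without modifying it, leaving it at 3.
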
Example #10
    def test_module_setattr(test_case):
        class CustomModule(flow.nn.Module):
            def __init__(self, param1, param2):
                super().__init__()
                self.param1 = param1
                self.param2 = param2

        param0 = flow.nn.Parameter(flow.Tensor(2, 3))
        param1 = flow.nn.Parameter(flow.Tensor(2, 3))
        param2 = CustomModule(param0, param1)
        m = CustomModule(param1, param2)

        # m.parameters() contains param0 + param1 in submodule param2
        # and param1 in m
        params = list(m.parameters())
        test_case.assertEqual(len(params), 2)
        test_case.assertEqual(params[0], param1)
        test_case.assertEqual(params[1], param0)

        children = list(m.children())
        test_case.assertEqual(len(children), 1)
        child = children[0]
        test_case.assertEqual(child, param2)

        child_params = list(child.parameters())
        test_case.assertEqual(len(child_params), 2)
        test_case.assertEqual(child_params[0], param0)
        test_case.assertEqual(child_params[1], param1)
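
Because param1 is registered both directly on m and again inside the child module, parameters() deduplicates it: the flat list holds just two distinct parameters (param1 first, then the child's param0) even though three attribute slots reference them.
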
Example #11
 def test_tensor_autograd_related_methods(test_case):
     shape = (2, 3, 4, 5)
     x = flow.Tensor(*shape)
     y = flow.Tensor(*shape)
     y.requires_grad = True
     x.fill_(1.0)
     y.fill_(2.0)
     z = x + y
     test_case.assertFalse(x.requires_grad)
     test_case.assertTrue(x.is_leaf)
     test_case.assertTrue(y.requires_grad)
     test_case.assertTrue(y.is_leaf)
     test_case.assertTrue(z.requires_grad)
     test_case.assertFalse(z.is_leaf)
     with flow.no_grad():
         m = x + y
     test_case.assertTrue(m.is_leaf)
     test_case.assertFalse(m.requires_grad)
     m.requires_grad = True
     v = flow.Tensor(*shape)
     v.requires_grad = True
     z.retain_grad()
     w = v + z
     grad = flow.Tensor(*shape)
     grad.fill_(1.0)
     w.backward(gradient=grad, retain_graph=True)
     test_case.assertTrue(
         np.allclose(v.grad.numpy(), np.ones(shape), atol=1e-4, rtol=1e-4))
     test_case.assertTrue(
         np.allclose(y.grad.numpy(), np.ones(shape), atol=1e-4, rtol=1e-4))
     test_case.assertTrue(
         np.allclose(z.grad.numpy(), np.ones(shape), atol=1e-4, rtol=1e-4))
     test_case.assertIsNone(x.grad)
     w.backward(gradient=grad, retain_graph=True)
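
x.grad stays None because x never had requires_grad set, so no gradient accumulates for it; z, a non-leaf tensor, only keeps its gradient because retain_grad() was called before the backward pass, and retain_graph=True is what permits the second backward call at the end.
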
Example #12
 def __init__(self, features, eps=1e-6):
     super(LayerNorm, self).__init__()
     self.eps = eps
     self.weight = nn.Parameter(
         flow.Tensor(flow.ones(features, dtype=flow.float32)))
     self.bias = nn.Parameter(
         flow.Tensor(flow.zeros(features, dtype=flow.float32)))
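
Since flow.ones and flow.zeros already return tensors, the extra flow.Tensor wrapper above is redundant; a minimal equivalent sketch:

     self.weight = nn.Parameter(flow.ones(features, dtype=flow.float32))
     self.bias = nn.Parameter(flow.zeros(features, dtype=flow.float32))
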
Example #13
def _test_roi_align(test_case, device):
    input = flow.Tensor(np.random.randn(2, 3, 64, 64),
                        dtype=flow.float32,
                        device=flow.device(device))

    random_img_idx = np.random.randint(low=0, high=2, size=(200, 1))
    random_box_idx = np.random.uniform(low=0, high=64 * 64,
                                       size=(200, 2)).astype(np.float32)

    def get_h_w(idx1, idx2):
        if idx1 > idx2:
            idx1, idx2 = idx2, idx1
        h1 = idx1 // 64
        w1 = idx1 % 64
        h2 = idx2 // 64
        w2 = idx2 % 64
        return [x / 2 for x in [h1, w1, h2, w2]]

    zipped = zip(random_box_idx[:, 0], random_box_idx[:, 1])
    concated = [get_h_w(idx1, idx2) for (idx1, idx2) in zipped]
    concated = np.array(concated)
    rois = flow.Tensor(
        np.hstack((random_img_idx, concated)),
        dtype=flow.float32,
        device=flow.device(device),
    )

    roi_align_module = RoIAlign((14, 14), 2.0, 2, True)
    of_out = roi_align_module(input, rois)
    np_out = roi_align_np(input.numpy(), rois.numpy(), 14, 14, 2.0, 2, True)
    test_case.assertTrue(
        np.allclose(of_out.numpy(), np_out, rtol=1e-4, atol=1e-4))
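
Each rois row packs a batch index followed by four box coordinates. get_h_w halves the raw pixel indices because the RoIAlign module is built with a spatial_scale of 2.0, which scales the boxes back up when projecting them onto the 64x64 feature map.
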
Example #14
    def __init__(
            self,
            in_channels: int,
            out_channels: int,
            kernel_size: _size_2_t,
            stride: _size_2_t = 1,
            padding: _size_2_t = 0,
            dilation: _size_2_t = 1,
            groups: int = 1,
            bias: bool = True,
            padding_mode: str = "zeros",  # TODO: refine this type
    ):
        super().__init__()

        assert padding_mode == "zeros"
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        self.groups = groups
        self.weight = flow.nn.Parameter(
            flow.Tensor(out_channels, in_channels // groups, *kernel_size))
        self.bias = None
        self._bias_add_op = None
        if bias:
            self.bias = flow.nn.Parameter(flow.Tensor(out_channels))
            self._bias_add_op = (
                flow.builtin_op("bias_add")
                .Input("a")
                .Input("b")
                .Output("out")
                .Attr("axis", 1)
                .Build()
            )

        self._op = (
            flow.builtin_op("conv2d")
            .Input("in")
            .Input("weight")
            .Attr("filters", out_channels)
            .Attr("padding_before", padding)
            .Attr("strides", stride)
            .Attr("kernel_size", kernel_size)
            .Attr("dilation_rate", dilation)
            .Attr("groups", groups)
            .Attr("data_format", "channels_first")
            .Output("out")
            .Build()
        )
        self.reset_parameters()
Example #15
    def _test_ddp_basic(test_case, dev_type):
        class Mul(flow.nn.Module):
            def __init__(self):
                super().__init__()
                self.w = flow.nn.Parameter(flow.Tensor([1, 1]))

            def forward(self, x):
                return x * self.w

        rank = flow.env.get_rank()
        if rank == 0:
            x = flow.Tensor([1, 1])
        elif rank == 1:
            x = flow.Tensor([2, 2])
        else:
            raise ValueError()

        x = x.to(dev_type)
        m = Mul().to(dev_type)
        m = ddp(m)
        y = m(x)
        y.sum().backward()

        test_case.assertTrue(
            np_allclose_with_shape(m.w.grad.numpy(), np.array([1.5, 1.5]))
        )
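
The expected gradient is the DDP all-reduce average of the per-rank gradients: d(y.sum())/dw equals x, i.e. [1, 1] on rank 0 and [2, 2] on rank 1, which averages to [1.5, 1.5].
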
Example #16
    def test_indexing(test_case):
        class SliceExtractor:
            def __getitem__(self, key):
                return key

        se = SliceExtractor()

        def compare_getitem_with_numpy(tensor, slices):
            np_arr = tensor.numpy()
            test_case.assertTrue(
                np.allclose(np_arr[slices], tensor[slices].numpy()))

        def compare_setitem_with_numpy(tensor, slices, value):
            np_arr = tensor.numpy()
            if isinstance(value, flow.Tensor):
                np_value = value.numpy()
            else:
                np_value = value
            np_arr[slices] = np_value
            tensor[slices] = value
            test_case.assertTrue(np.allclose(np_arr, tensor.numpy()))

        x = flow.randn(5, 5)
        v = flow.Tensor([[0, 1, 2, 3, 4]])
        compare_getitem_with_numpy(x, se[-4:-1:2])
        compare_getitem_with_numpy(x, se[-1:])
        compare_setitem_with_numpy(x, se[-1:], v)
        compare_setitem_with_numpy(x, se[2::2], 2)
        x = flow.Tensor(2, 3, 4)
        v = flow.Tensor(3)
        compare_setitem_with_numpy(x, se[:, :, 2], v)
        x = flow.Tensor(2, 3, 4)
        compare_setitem_with_numpy(x, se[1, :, 2], v)
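
The SliceExtractor trick works because __getitem__ receives the already-parsed key, so an expression like se[-4:-1:2] materializes the corresponding slice object, which can then be applied identically to the numpy array and the oneflow tensor for comparison.
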
Example #17
    def _test_ddp_with_unused_param(test_case, dev_type):
        class Model(flow.nn.Module):
            def __init__(self):
                super().__init__()
                self.w = flow.nn.Parameter(flow.Tensor([1]))
                self.used_only_in_rank0 = flow.nn.Parameter(flow.Tensor([2]))
                self.unused_in_all_ranks = flow.nn.Parameter(flow.Tensor([3]))

            def forward(self, x):
                x = x * self.w
                if flow.env.get_rank() == 0:
                    x = x * self.used_only_in_rank0
                return x

        rank = flow.env.get_rank()
        if rank == 0:
            x = flow.Tensor([1])
        elif rank == 1:
            x = flow.Tensor([2])
        else:
            raise ValueError()

        x = x.to(dev_type)
        m = Model().to(dev_type)
        m = ddp(m)
        y = m(x)
        y.backward()

        test_case.assertTrue(np_allclose_with_shape(m.w.grad.numpy(), np.array([2])))
        test_case.assertTrue(
            np_allclose_with_shape(m.used_only_in_rank0.grad.numpy(), np.array([0.5]))
        )
        test_case.assertTrue(
            np_allclose_with_shape(m.unused_in_all_ranks.grad.numpy(), np.array([0]))
        )
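
The expected values again come from averaging per-rank gradients: w.grad is x * used_only_in_rank0 = 2 on rank 0 and x = 2 on rank 1, averaging to 2; used_only_in_rank0.grad is x * w = 1 on rank 0 and 0 (unused) on rank 1, averaging to 0.5; and the parameter unused on every rank still receives a zero gradient rather than None.
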
Example #18
    def __init__(
        self,
        n_heads,
        d_model,
        dropout_rate=0.0,
        skip_term_b=False,
        share_qvk_proj=False,
    ):
        super(MultiHeadedSelfAttentionWithRelPos, self).__init__(
            n_heads, d_model, dropout_rate, share_qvk_proj
        )

        self.d_model = d_model
        self.share_qvk_proj = share_qvk_proj
        self.skip_term_b = skip_term_b
        self.nheads = n_heads
        self.d_k = d_model // n_heads

        self.qvk_proj = nn.Linear(
            d_model, d_model if self.share_qvk_proj else d_model * 3
        )

        self.pos_proj = nn.Linear(d_model, d_model, bias=False)

        self.posu = nn.Parameter(flow.Tensor(1, 1, n_heads, self.d_k))
        self.posv = nn.Parameter(flow.Tensor(1, 1, n_heads, self.d_k))
Example #19
    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: float = 0.1,
        affine: bool = True,
        track_running_stats: bool = True,
    ) -> None:
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            self.weight = flow.nn.Parameter(flow.Tensor(num_features))
            self.bias = flow.nn.Parameter(flow.Tensor(num_features))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        if self.track_running_stats:
            self.register_buffer(
                "running_mean",
                flow.Tensor(num_features),
            )
            self.register_buffer(
                "running_var",
                flow.Tensor(num_features),
            )
        else:
            self.register_parameter("running_mean", None)
            self.register_parameter("running_var", None)

        self.reset_parameters()
Example #20
    def _test_ddp_multiple_buckets(test_case, dev_type):
        class Mul(flow.nn.Module):
            def __init__(self):
                super().__init__()
                for i in range(10):
                    self.register_parameter(
                        f"w{i}",
                        flow.nn.Parameter(flow.Tensor([i % 2 + 1, i % 2 + 1])))

            def forward(self, x):
                for i in range(10):
                    x = x * getattr(self, f"w{i}")
                return x

        rank = flow.env.get_rank()
        if rank == 0:
            x = flow.Tensor([1, 1])
        elif rank == 1:
            x = flow.Tensor([2, 2])
        else:
            raise ValueError()

        x = x.to(dev_type)
        m = Mul().to(dev_type)
        m = ddp(m, bucket_size=3)

        y = m(x)
        y.sum().backward()

        for i in range(10):
            test_case.assertTrue(
                np_allclose_with_shape(
                    getattr(m, f"w{i}").grad.numpy(),
                    np.array([48, 48]) if i % 2 == 0 else np.array([24, 24]),
                ))
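
The magic numbers check out: the product of all ten weights is 2^5 = 32, so dy/dw_i = x * (32 / w_i) per element. For the even-indexed weights (value 1) that is 32 * x, giving 32 on rank 0 and 64 on rank 1, which all-reduce-averages to 48; for the odd-indexed weights (value 2) it is 16 * x, averaging to 24.
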
Example #21
    def __init__(
        self,
        normalized_shape: _shape_t,
        eps: float = 1e-5,
        elementwise_affine: bool = True,
    ) -> None:
        super(LayerNorm, self).__init__()
        if isinstance(normalized_shape, int):
            # mypy error: incompatible types in assignment
            normalized_shape = (normalized_shape, )  # type: ignore[assignment]
        self.normalized_shape = tuple(
            normalized_shape)  # type: ignore[arg-type]

        self.epsilon = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = flow.nn.Parameter(
                flow.Tensor(*self.normalized_shape))
            self.bias = flow.nn.Parameter(flow.Tensor(*self.normalized_shape))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        self.reset_parameters()
        # The first axis to normalize over; defaults to 1.
        self.begin_norm_axis = 1
        # The first axis at which the affine parameters apply; defaults to 1 for "NCHW" format.
        self.begin_params_axis = 1

        self._op = (flow.builtin_op("layer_norm").Input("x").Input(
            "gamma").Input("beta").Output("y").Output("mean").Output(
                "inv_variance").Output("normalized").Build())

        self._op2 = (flow.builtin_op("layer_norm").Input("x").Output(
            "y").Output("mean").Output("inv_variance").Build())
Example #22
    def test_module_setattr(test_case):
        class CustomModule(flow.nn.Module):
            def __init__(self, param1, param2):
                super().__init__()
                self.param1 = param1
                self.param2 = param2

        param0 = flow.nn.Parameter(flow.Tensor(2, 3))
        param1 = flow.nn.Parameter(flow.Tensor(2, 3))
        param2 = CustomModule(param0, param1)
        m = CustomModule(param1, param2)
        params = list(m.parameters())
        test_case.assertEqual(len(params), 2)

        test_case.assertTrue(
            np.allclose(params[0].numpy(),
                        param1.numpy(),
                        atol=1e-4,
                        rtol=1e-4))
        test_case.assertTrue(
            np.allclose(params[1].numpy(),
                        param0.numpy(),
                        atol=1e-4,
                        rtol=1e-4))
        children = list(m.children())
        test_case.assertEqual(len(children), 1)
        child = children[0]
        test_case.assertEqual(child, param2)
        child_params = list(child.parameters())

        test_case.assertEqual(len(child_params), 2)
        test_case.assertTrue(
            np.allclose(child_params[0].numpy(), param0.numpy()))
        test_case.assertTrue(
            np.allclose(child_params[1].numpy(), param1.numpy()))
Example #23
 def __init__(
     self,
     in_channels: int,
     out_channels: int,
     kernel_size: _size_1_t,
     stride: _size_1_t = 1,
     padding: _size_1_t = 0,
     output_padding: _size_1_t = 0,
     groups: int = 1,
     bias: bool = True,
     dilation: _size_1_t = 1,
     padding_mode: str = "zeros",
 ) -> None:
     super().__init__()
     assert (padding_mode == "zeros"
             ), "Only `zeros` padding mode is supported for ConvTranspose1d"
     self.kernel_size = _single(kernel_size)
     self.stride = _single(stride)
     self.padding = _single(padding)
     self.dilation = _single(dilation)
     self.output_padding = _single(output_padding)
     self.groups = groups
     assert in_channels % groups == 0
     assert out_channels % groups == 0
     self.weight = flow.nn.Parameter(
         flow.Tensor(in_channels, out_channels // groups,
                     *self.kernel_size))
     self.filters = out_channels
     self.bias = None
     self._bias_add_op = None
     if bias:
         self.bias = flow.nn.Parameter(flow.Tensor(out_channels))
     self.reset_parameters()
Example #24
    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
        super().__init__()

        self.use_bias = bias
        self.weight = flow.nn.Parameter(flow.Tensor(out_features, in_features))
        self.bias = None

        if bias:
            self.bias = flow.nn.Parameter(flow.Tensor(out_features))

        self._matmul_op = (
            flow.builtin_op("matmul")
            .Input("a")
            .Input("b")
            .Output("out")
            .Attr("transpose_a", False)
            .Attr("transpose_b", True)
            .Attr("alpha", 1.0)
            .Build()
        )

        self._broadcast_matmul_op = (
            flow.builtin_op("broadcast_matmul")
            .Input("a")
            .Input("b")
            .Output("out")
            .Attr("transpose_a", False)
            .Attr("transpose_b", True)
            .Attr("alpha", 1.0)
            .Build()
        )

        self.reset_parameters()
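
Two ops are built because the plain matmul kernel handles 2-D inputs while broadcast_matmul covers batched, higher-rank inputs; transpose_b is True in both because the weight is stored as (out_features, in_features), the usual Linear convention.
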
Example #25
def example_process(wav, lab, wlen=3200, fact_amp=0.2):
    np.random.seed(10)

    sig_batch = np.zeros([1, wlen])
    lab_batch = np.zeros(1)
    rand_amp_arr = np.random.uniform(1.0 - fact_amp, 1 + fact_amp, 1)

    [signal, fs] = sf.read(wav)

    snt_len = signal.shape[0]
    snt_beg = np.random.randint(snt_len - wlen - 1)
    snt_end = snt_beg + wlen

    channels = len(signal.shape)
    if channels == 2:
        print("WARNING: stereo to mono")
        signal = signal[:, 0]

    sig_batch[0, :] = signal[snt_beg:snt_end] * rand_amp_arr[0]
    lab_batch[0] = int(lab)

    inp = flow.Tensor(sig_batch, dtype=flow.float32).to("cuda")
    lab = flow.Tensor(lab_batch, dtype=flow.float32).to("cuda")

    return inp, lab
Example #26
def _test_conv1d_complicated(test_case, device):
    np_arr = np.array([[
        [-1.00674784, 0.51784992, 0.39896572, 0.11018554, 0.91136694],
        [1.95886874, 0.89779067, 0.4748213, 0.33313531, -0.49350029],
        [-0.19280219, 0.04023677, 1.66438103, -0.83563608, 0.15925731],
        [1.49166429, 1.45189261, -1.86512125, 0.34329697, 0.20413807],
    ]])
    input = flow.tensor(np_arr,
                        dtype=flow.float32,
                        device=flow.device(device),
                        requires_grad=True)
    weight = np.array([
        [
            [-0.36045218, 0.37349278, 0.04565236],
            [0.0242328, -0.09459515, -0.30684742],
        ],
        [
            [-0.30345008, -0.1196513, -0.26765293],
            [0.09876197, 0.03346226, 0.2748405],
        ],
        [
            [-0.37798449, 0.00242459, -0.34125558],
            [-0.05174343, -0.10443231, 0.09526101],
        ],
        [
            [0.34196907, -0.32667893, 0.40264183],
            [0.38025281, 0.26807079, -0.09074812],
        ],
    ])
    bias = np.array([-0.03499984, -0.21616256, 0.13312563, -0.24104381])
    m = nn.Conv1d(4,
                  4,
                  3,
                  groups=2,
                  stride=2,
                  padding=2,
                  dilation=2,
                  bias=True)
    m.weight = flow.nn.Parameter(flow.Tensor(weight))
    m.bias = flow.nn.Parameter(flow.Tensor(bias))
    m = m.to(device)
    np_out = np.array([[
        [-0.72379637, 0.67248386, 0.21977007],
        [-0.00643994, -0.1286152, -0.41589433],
        [-0.76877236, 0.29273134, -0.42040929],
        [1.0612179, -0.73787093, -0.37839717],
    ]])
    output = m(input)
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    output = output.sum()
    output.backward()
    np_grad = np.array([[
        [-0.41006082, 0.0, -0.63206136, 0.0, 0.03184089],
        [0.06186188, 0.0, 0.02985496, 0.0, -0.09313981],
        [-0.36026976, 0.0, -0.2988835, 0.0, -0.26286808],
        [0.49214786, 0.0, 0.49666074, 0.0, 0.16815135],
    ]])
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06,
                                     1e-06))
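
The zero columns in np_grad are expected: with padding 2, dilation 2, and stride 2, every kernel tap lands on an even offset of the padded signal, so input positions 1 and 3 are never read by the forward pass and receive no gradient.
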
Example #27
    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        num_layers: int = 1,
        bias: bool = True,
        batch_first: bool = False,
        dropout: float = 0,
        bidirectional: bool = False,
    ):
        super().__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.bidirectional = bidirectional
        num_directions = 2 if bidirectional else 1
        gate_size = 3 * hidden_size
        self.drop = nn.Dropout(self.dropout)

        for layer in range(num_layers):
            for direction in range(num_directions):

                real_hidden_size = hidden_size
                layer_input_size = (
                    input_size if layer == 0 else real_hidden_size * num_directions
                )

                # TODO: Modify after adding the stride attribute
                # w_ih = flow.nn.Parameter(flow.Tensor(gate_size, layer_input_size))
                # w_hh = flow.nn.Parameter(flow.Tensor(gate_size, real_hidden_size))
                # b_ih = flow.nn.Parameter(flow.Tensor(gate_size))
                # b_hh = flow.nn.Parameter(flow.Tensor(gate_size))

                w_ih = flow.nn.Parameter(flow.Tensor(layer_input_size, gate_size))
                w_hh = flow.nn.Parameter(flow.Tensor(real_hidden_size, gate_size))
                b_ih = flow.nn.Parameter(flow.Tensor(gate_size))
                b_hh = flow.nn.Parameter(flow.Tensor(gate_size))

                layer_params = ()

                if bias:
                    layer_params = (w_ih, w_hh, b_ih, b_hh)
                else:
                    layer_params = (w_ih, w_hh)

                suffix = "_reverse" if direction == 1 else ""
                param_names = ["weight_ih_l{}{}", "weight_hh_l{}{}"]
                if bias:
                    param_names += ["bias_ih_l{}{}", "bias_hh_l{}{}"]
                param_names = [x.format(layer, suffix) for x in param_names]

                for name, param in zip(param_names, layer_params):
                    setattr(self, name, param)

        self.reset_parameters()
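
The gate_size of 3 * hidden_size marks this as a GRU-style layer (reset, update, and candidate gates). As the TODO notes, the weights are stored transposed, (layer_input_size, gate_size) rather than the conventional (gate_size, layer_input_size), pending support for a stride attribute.
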
Example #28
 def test_tensor_greater(test_case):
     input1 = flow.Tensor(np.array([1, 1, 4]).astype(np.float32),
                          dtype=flow.float32)
     input2 = flow.Tensor(np.array([1, 2, 3]).astype(np.float32),
                          dtype=flow.float32)
     of_out = input1.gt(input2)
     np_out = np.greater(input1.numpy(), input2.numpy())
     test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
Example #29
def _test_stack_tuple_input(test_case, device, shape):
    x = np.random.rand(*shape)
    y = np.random.rand(*shape)
    x_tensor = flow.Tensor(x, dtype=flow.float32, device=flow.device(device))
    y_tensor = flow.Tensor(y, dtype=flow.float32, device=flow.device(device))
    out_np = np.stack([x, y], axis=0)
    out_of = flow.experimental.stack((x_tensor, y_tensor), dim=0).numpy()
    test_case.assertTrue(np.allclose(out_np, out_of, 1e-5, 1e-5))
Example #30
 def test_tensor_using_tensor(test_case):
     tensor = flow.Tensor(np.random.randn(2, 3, 4, 5),
                          device="cuda",
                          dtype=flow.int)
     input = flow.Tensor(np.random.randn(2, 3))
     output = input.to(tensor)
     test_case.assertEqual(output.dtype, flow.int)
     test_case.assertEqual(output.device, flow.device("cuda"))