Example #1
    def test_supported_modes_property(self):
        augment = Compose(transforms=[
            PeakNormalization(p=1.0),
        ])
        assert augment.supported_modes == {
            "per_batch", "per_example", "per_channel"
        }

        augment = Compose(
            transforms=[
                PeakNormalization(p=1.0),
                ShuffleChannels(p=1.0),
            ],
        )
        assert augment.supported_modes == {"per_example"}

    def test_shuffle(self):
        random.seed(42)
        samples = np.array([[[1.0, 0.5, -0.25, -0.125, 0.0]]],
                           dtype=np.float32)
        sample_rate = 16000

        augment = Compose(
            transforms=[
                Gain(min_gain_in_db=-18.0, max_gain_in_db=-16.0, p=1.0),
                PeakNormalization(p=1.0),
            ],
            shuffle=True,
            output_type="dict",
        )
        num_peak_normalization_last = 0
        num_gain_last = 0
        for i in range(100):
            processed_samples = augment(
                samples=torch.from_numpy(samples),
                sample_rate=sample_rate).samples.numpy()

            # Either PeakNormalization or Gain was applied last
            if processed_samples[0, 0, 0] < 0.2:
                num_gain_last += 1
            elif processed_samples[0, 0, 0] == 1.0:
                num_peak_normalization_last += 1
            else:
                raise AssertionError("Unexpected value!")

        self.assertGreater(num_peak_normalization_last, 10)
        self.assertGreater(num_gain_last, 10)
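These test methods are excerpted from a larger test class, so the imports and the class header are missing. Below is a minimal, self-contained sketch of the assumed context; the class and method names are illustrative, and only the import paths follow torch-audiomentations' top-level API.

import unittest

import numpy as np
import torch
from torch_audiomentations import Compose, Gain, PeakNormalization


class TestCompose(unittest.TestCase):  # hypothetical wrapper class
    def test_gain_then_normalize(self):
        samples = torch.from_numpy(
            np.array([[[1.0, 0.5, -0.25, -0.125, 0.0]]], dtype=np.float32)
        )
        augment = Compose(
            transforms=[
                Gain(min_gain_in_db=-18.0, max_gain_in_db=-16.0, p=1.0),
                PeakNormalization(p=1.0),
            ],
            output_type="dict",
        )
        processed = augment(samples=samples, sample_rate=16000).samples
        # Without shuffle=True, PeakNormalization always runs last,
        # so the peak amplitude should be exactly 1.0
        self.assertAlmostEqual(processed.abs().max().item(), 1.0, places=6)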
Example #3
    def setUp(self):
        self.sample_rate = 16000
        self.audio = torch.randn(1, 1, 16000)

        self.transforms = [
            Gain(min_gain_in_db=-6.000001, max_gain_in_db=-2, p=1.0),
            PolarityInversion(p=1.0),
            PeakNormalization(p=1.0),
        ]
        Shift(p=1.0, output_type="dict"),
        # Non-differentiable transforms:
        # RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation:
        # [torch.DoubleTensor [1, 1, 5]], which is output 0 of IndexBackward, is at version 1; expected version 0 instead.
        # Hint: enable anomaly detection to find the operation that failed to compute its gradient,
        # with torch.autograd.set_detect_anomaly(True).
        pytest.param(
            HighPassFilter(p=1.0, output_type="dict"),
            marks=pytest.mark.skip("Not differentiable"),
        ),
        pytest.param(
            LowPassFilter(p=1.0, output_type="dict"),
            marks=pytest.mark.skip("Not differentiable"),
        ),
        pytest.param(
            PeakNormalization(p=1.0, output_type="dict"),
            marks=pytest.mark.skip("Not differentiable"),
        ),
    ],
)
def test_transform_is_differentiable(augment):
    sample_rate = 16000
    # Note: using float64 dtype to be compatible with AddBackgroundNoise fixtures
    samples = torch.tensor([[1.0, 0.5, -0.25, -0.125, 0.0]],
                           dtype=torch.float64).unsqueeze(1)
    samples_cpy = deepcopy(samples)

    # We are going to convert the input tensor to a nn.Parameter so that we can
    # track the gradients with respect to it. We'll "optimize" the input signal
    # to be closer to that after the augmentation to test differentiability
    # of the transform. If the signal got changed in any way, and the test
            Gain(min_gain_in_db=-15.0, max_gain_in_db=5.0, p=1.0),
            PolarityInversion(p=1.0),
        ]),
        Gain(min_gain_in_db=-6.000001, max_gain_in_db=-6, p=1.0),
        PolarityInversion(p=1.0),
        Shift(p=1.0),
        # Non-differentiable transforms:
        # RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation:
        # [torch.DoubleTensor [1, 1, 5]], which is output 0 of IndexBackward, is at version 1; expected version 0 instead.
        # Hint: enable anomaly detection to find the operation that failed to compute its gradient,
        # with torch.autograd.set_detect_anomaly(True).
        pytest.param(HighPassFilter(p=1.0),
                     marks=pytest.mark.skip("Not differentiable")),
        pytest.param(LowPassFilter(p=1.0),
                     marks=pytest.mark.skip("Not differentiable")),
        pytest.param(PeakNormalization(p=1.0),
                     marks=pytest.mark.skip("Not differentiable")),
    ],
)
def test_transform_is_differentiable(augment):
    sample_rate = 16000
    # Note: using float64 dtype to be compatible with AddBackgroundNoise fixtures
    samples = torch.tensor([[1.0, 0.5, -0.25, -0.125, 0.0]],
                           dtype=torch.float64).unsqueeze(1)
    samples_cpy = deepcopy(samples)

    # We are going to convert the input tensor to a nn.Parameter so that we can
    # track the gradients with respect to it. We'll "optimize" the input signal
    # to be closer to that after the augmentation to test differentiability
    # of the transform. If the signal got changed in any way, and the test
    # didn't crash, it means it works.
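The test body above is cut off right after the comment that states the plan, and the original assertions are not shown. The following is only a hypothetical sketch of how that plan can be carried out, assuming a torch-audiomentations version where transforms return plain tensors (with output_type="dict" one would read the .samples field instead); the helper name and the exact loss are illustrative.

import torch
from torch import nn


def check_differentiable(augment, samples, sample_rate=16000):
    # Wrap the input in an nn.Parameter to track gradients w.r.t. the signal
    signal = nn.Parameter(samples.clone())
    optimizer = torch.optim.SGD([signal], lr=1.0)

    transformed = augment(samples=signal, sample_rate=sample_rate)
    # "Optimize" the input to be closer to its augmented version; backward()
    # raises the RuntimeError quoted above if the transform broke the graph
    loss = torch.mean(torch.abs(signal - transformed))
    loss.backward()
    optimizer.step()
    return signal.grad is not None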
Example #6
    ),
    "num_runs": 5,
},
{
    "instance": ApplyImpulseResponse(
        ir_paths=TEST_FIXTURES_DIR / "ir", mode=mode, p=1.0
    ),
    "num_runs": 1,
},
{
    "instance": Compose(
        transforms=[
            Gain(
                min_gain_in_db=-18.0, max_gain_in_db=-16.0, mode=mode, p=1.0
            ),
            PeakNormalization(mode=mode, p=1.0),
        ],
        shuffle=True,
    ),
    "name": "Shuffled Compose with Gain and PeakNormalization",
    "num_runs": 5,
},
{
    "instance": Compose(
        transforms=[
            Gain(
                min_gain_in_db=-18.0, max_gain_in_db=-16.0, mode=mode, p=0.5
            ),
            PolarityInversion(mode=mode, p=0.5),
        ],
        shuffle=True,
Example #7
        sample_rates=[16000],
        devices=["cpu", "cuda"],
    )

    if not torch.cuda.is_available():
        params["devices"].remove("cuda")

    devices = {
        device_name: torch.device(device_name)
        for device_name in params["devices"]
    }

    transforms = [
        Gain(p=1.0),
        PolarityInversion(p=1.0),
        PeakNormalization(p=1.0),
        Shift(p=1.0),
    ]

    perf_objects = []

    for device_name in params["devices"]:
        device = devices[device_name]
        for batch_size in tqdm(params["batch_sizes"]):
            for num_channels in params["channels"]:
                for duration in params["durations"]:
                    for sample_rate in params["sample_rates"]:
                        for transform in transforms:
                            perf_objects += measure_execution_time(
                                transform,
                                batch_size,
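The measure_execution_time helper is not included in this excerpt, so here is a plausible sketch; its signature is guessed from the truncated call above, and everything in it is an assumption. The one detail that genuinely matters when timing GPU transforms is torch.cuda.synchronize(): kernels launch asynchronously, so without it the clock stops before the work finishes.

import time

import torch


def measure_execution_time(transform, batch_size, num_channels, num_samples,
                           sample_rate, device, num_runs=5):
    samples = torch.randn(batch_size, num_channels, num_samples, device=device)
    timings = []
    for _ in range(num_runs):
        if device.type == "cuda":
            torch.cuda.synchronize(device)
        start = time.perf_counter()
        transform(samples=samples, sample_rate=sample_rate)
        if device.type == "cuda":
            torch.cuda.synchronize(device)  # wait for queued kernels
        timings.append(time.perf_counter() - start)
    return timings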
Example #8
                               mono=False)
    samples = np.stack((samples1, samples2), axis=0)
    samples = torch.from_numpy(samples)

    modes = ["per_batch", "per_example", "per_channel"]
    for mode in modes:
        transforms = [
            {
                "instance": Compose(
                    transforms=[
                        Gain(min_gain_in_db=-18.0,
                             max_gain_in_db=-16.0,
                             mode=mode,
                             p=1.0),
                        PeakNormalization(mode=mode, p=1.0),
                    ],
                    shuffle=True,
                ),
                "name": "Shuffled Compose with Gain and PeakNormalization",
                "num_runs": 5,
            },
            {
                "instance": Compose(
                    transforms=[
                        Gain(min_gain_in_db=-18.0,
                             max_gain_in_db=-16.0,
                             mode=mode,
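The mode parameter swept in this example controls how often a transform draws fresh random parameters: per_batch draws one set for the whole batch, per_example one per audio snippet, and per_channel one per channel of each snippet. A quick way to see the difference with Gain (a sketch, assuming a version where transforms return tensors directly, as in this example):

import torch
from torch_audiomentations import Gain

batch = torch.randn(4, 2, 16000)  # 4 examples, 2 channels each

per_batch = Gain(mode="per_batch", p=1.0)
out = per_batch(samples=batch, sample_rate=16000)

# With per_batch, every example and channel is scaled by the same factor;
# with per_example or per_channel this assertion would fail
ratio = out / batch
assert torch.allclose(ratio, ratio[0, 0, 0].expand_as(ratio))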
Example #9
    ),
    "num_runs": 5,
},
{
    "get_instance": lambda: ApplyImpulseResponse(
        ir_paths=TEST_FIXTURES_DIR / "ir", mode=mode, p=1.0
    ),
    "num_runs": 1,
},
{
    "get_instance": lambda: Compose(
        transforms=[
            Gain(
                min_gain_in_db=-18.0, max_gain_in_db=-16.0, mode=mode, p=1.0
            ),
            PeakNormalization(mode=mode, p=1.0),
        ],
        shuffle=True,
    ),
    "name": "Shuffled Compose with Gain and PeakNormalization",
    "num_runs": 5,
},
{
    "get_instance": lambda: Compose(
        transforms=[
            Gain(
                min_gain_in_db=-18.0, max_gain_in_db=-16.0, mode=mode, p=0.5
            ),
            PolarityInversion(mode=mode, p=0.5),
        ],
        shuffle=True,
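Note the shift from "instance" in Example #6 to "get_instance" here: wrapping construction in a lambda defers it, so a fresh transform can be built for every run. One general Python caveat, which may or may not bite depending on when the lambdas are invoked (not shown in this excerpt): they capture mode by reference, so every lambda sees whatever value mode holds at call time. Binding it as a default argument pins down the current value; a hypothetical fix:

get_instance = lambda mode=mode: Compose(  # mode=mode freezes the loop variable
    transforms=[
        Gain(min_gain_in_db=-18.0, max_gain_in_db=-16.0, mode=mode, p=1.0),
        PeakNormalization(mode=mode, p=1.0),
    ],
    shuffle=True,
)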
Example #10
     "get_instance": lambda: BandPassFilter(mode=mode, p=1.0),
     "num_runs": 5
 },
 {
     "get_instance": lambda: BandStopFilter(mode=mode, p=1.0),
     "num_runs": 5
 },
 {
     "get_instance":
     lambda: Compose(
         transforms=[
             Gain(min_gain_in_db=-18.0,
                  max_gain_in_db=-16.0,
                  mode=mode,
                  p=1.0),
             PeakNormalization(mode=mode, p=1.0),
         ],
         shuffle=True,
     ),
     "name":
     "Shuffled Compose with Gain and PeakNormalization",
     "num_runs":
     5,
 },
 {
     "get_instance":
     lambda: Compose(
         transforms=[
             Gain(min_gain_in_db=-18.0,
                  max_gain_in_db=-16.0,
                  mode=mode,