Code Example #1
def test_scattering1d_frontend():
    scattering = Scattering1D(2, shape=(10, ))
    assert isinstance(scattering,
                      ScatteringTorch1D), 'could not be correctly imported'

    with pytest.raises(RuntimeError) as ve:
        scattering = Scattering1D(2, shape=(10, ), frontend='doesnotexist')
    assert "is not valid" in ve.value.args[0]
Code Example #2
def test_scattering_shape_input():
    # Checks that a wrong input to shape raises an error
    J, Q = 6, 8
    with pytest.raises(ValueError) as ve:
        shape = 5, 6
        s = Scattering1D(J, shape, Q)
    assert "exactly one element" in ve.value.args[0]

    with pytest.raises(ValueError) as ve:
        shape = 1.5
        s = Scattering1D(J, shape, Q)
        # should invoke the else branch
    assert "1-tuple" in ve.value.args[0]
    assert "integer" in ve.value.args[0]
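The two tests above only exercise the constructor's error paths. For orientation, here is a minimal happy-path sketch; this is an assumption, not part of the test suite, and it presumes kymatio's torch frontend is installed and importable as kymatio.torch:

# Hypothetical usage sketch, not taken from the original examples.
import torch
from kymatio.torch import Scattering1D  # assumed import path for the torch frontend

J, Q, T = 6, 8, 2**12
scattering = Scattering1D(J, shape=(T,), Q=Q)

x = torch.randn(2, T)   # a batch of two signals of length T
Sx = scattering(x)      # scattering coefficients, roughly (2, n_coeffs, T // 2**J)
print(Sx.shape)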
Code Example #3
File: models.py Project: lcrosvila/thesis
 def __init__(self, J, Q, audio_length):
     
     super(Scatter, self).__init__()
     
     self.J = J
     self.Q = Q
     self.T = audio_length
     self.meta = Scattering1D.compute_meta_scattering(self.J, self.Q)
                           
     self.order0_indices = (self.meta['order'] == 0)
     self.order1_indices = (self.meta['order'] == 1)
     self.order2_indices = (self.meta['order'] == 2)
     
     self.scattering = Scattering1D(self.J, self.T, self.Q).cuda()
     self.output_size = self.scattering.output_size()
Code Example #4
def test_scattering_GPU_CPU(backend, random_state=42):
    """
    This function tests whether the CPU computations are equivalent to
    the GPU ones
    """
    if torch.cuda.is_available() and backend.name != 'torch_skcuda':
        torch.manual_seed(random_state)

        J = 6
        Q = 8
        T = 2**12

        # build the scattering
        scattering = Scattering1D(J, T, Q, backend=backend, frontend='torch').cpu()

        x = torch.randn(2, T)
        s_cpu = scattering(x)

        scattering = scattering.cuda()
        x_gpu = x.clone().cuda()
        s_gpu = scattering(x_gpu).cpu()
        # compute the distance

        warnings.warn('Tolerance has been slightly lowered here...')
        assert torch.allclose(s_cpu, s_gpu, atol=1e-7)
Code Example #5
File: test_scattering1d.py Project: paulsinz/kymatio
def test_computation_Ux(random_state=42):
    """
    Checks the computation of the U transform (no averaging for 1st order)
    """
    rng = np.random.RandomState(random_state)
    J = 6
    Q = 8
    T = 2**12
    scattering = Scattering1D(T,
                              J,
                              Q,
                              normalize='l1',
                              average=False,
                              max_order=1,
                              vectorize=False)
    # random signal
    x = torch.from_numpy(rng.randn(1, 1, T)).float()

    if force_gpu:
        scattering.cuda()
        x = x.cuda()

    s = scattering.forward(x)

    # check that the keys in s correspond to the zeroth and first orders
    for k in range(len(scattering.psi1_f)):
        assert (k, ) in s.keys()
    for k in s.keys():
        if k != ():
            assert k[0] < len(scattering.psi1_f)
        else:
            assert True
Code Example #6
File: test_scattering1d.py Project: paulsinz/kymatio
def test_coordinates(random_state=42):
    """
    Tests that the coordinates returned by Scattering1D.meta() correspond to
    the actual values, both with and without vectorization.
    """
    torch.manual_seed(random_state)
    J = 6
    Q = 8
    T = 2**12
    scattering = Scattering1D(T, J, Q, max_order=2)
    x = torch.randn(128, 1, T)

    if force_gpu:
        scattering.cuda()
        x = x.cuda()

    scattering.vectorize = False
    s_dico = scattering.forward(x)
    s_dico = {k: s_dico[k].data for k in s_dico.keys()}
    scattering.vectorize = True
    s_vec = scattering.forward(x)

    if force_gpu:
        s_dico = {k: s_dico[k].cpu() for k in s_dico.keys()}
        s_vec = s_vec.cpu()

    meta = scattering.meta()

    assert len(s_dico) == s_vec.shape[1]

    for cc in range(s_vec.shape[1]):
        k = meta['key'][cc]
        diff = s_vec[:, cc] - torch.squeeze(s_dico[k])
        assert torch.max(torch.abs(diff)) < 1e-7
Code Example #7
def scatter_dataset(dataset_path):
    """creates scattered features for data set"""

    #define scattering function
    scattering_function = Scattering1D(6, mfcc_length, 8)

    #get track names
    all_tracks_names = listdir(dataset_path)

    #get track labels
    labels = [genre_dictionary[track_file_name.split('.')[0]] for track_file_name in all_tracks_names]

    #create raw data from tracks
    raw_data_samples = map(
        lambda track_file_name: extract_raw_features(
            dataset_path + '/' + track_file_name),
        all_tracks_names)

    #split tracks into frames
    raw_data_splitted_tracks = [split_sample(sample) for sample in raw_data_samples]

    # remove the last frame of every track, i.e. frames shorter than frame_length
    uni_sized_data_splitted_tracks = [[raw_data_splitted_tracks[i][j]
                                       for j in (range(len(raw_data_splitted_tracks[i])-1))] for i in
                                       range(len(raw_data_splitted_tracks))]

    # #turn data into tensors
    # tensor_data_tracks = [map(to_normalized_tensor, track)
    #                       for track in uni_sized_data_splitted_tracks]

    #scatter data to get final features
    scattered_data_samples = [[
        calculateScatter(scattering_function, frame) for frame in track] for track in uni_sized_data_splitted_tracks]

    return zip(scattered_data_samples, labels)
Code Example #8
def test_sample_scattering(device, backend):
    """
    Applies scattering on a stored signal to make sure its output agrees with
    a previously calculated version.
    """
    test_data_dir = os.path.dirname(__file__)

    with open(os.path.join(test_data_dir, 'test_data_1d.npz'), 'rb') as f:
        buffer = io.BytesIO(f.read())
        data = np.load(buffer)


    x = torch.from_numpy(data['x']).to(device)
    J = data['J']
    Q = data['Q']
    Sx0 = torch.from_numpy(data['Sx']).to(device)

    T = x.shape[-1]

    scattering = Scattering1D(J, T, Q, backend=backend, frontend='torch').to(device)

    if backend.name == 'torch_skcuda' and device == 'cpu':
        with pytest.raises(TypeError) as ve:
            Sx = scattering(x)
        assert "CPU" in ve.value.args[0]
        return

    Sx = scattering(x)
    assert torch.allclose(Sx, Sx0)
Code Example #9
def scatter_dataset(dataset_path):
    scattering_function = Scattering1D(6, frame_length, 8)
    all_tracks_names = listdir(dataset_path)
    labels = map(
        lambda track_file_name: genre_dictionary[track_file_name.split('.')[0]
                                                 ], all_tracks_names)
    raw_data_samples = map(
        lambda track_file_name, label:
        (extract_raw_features(dataset_path + '/' + track_file_name), label),
        all_tracks_names, labels)
    raw_data_splitted_samples = [
        item for sublist in map(split_sample, raw_data_samples)
        for item in sublist
    ]
    uni_sized_data_splitted_samples = filter(
        lambda sample: len(sample[SAMPLE_DATA]) == frame_length,
        raw_data_splitted_samples)
    tensor_data_samples = map(
        lambda sample:
        (to_normalized_tensor(sample[SAMPLE_DATA]), sample[SAMPLE_LABEL]),
        uni_sized_data_splitted_samples)
    scattered_data_samples = map(
        lambda sample: calculateScatter(scattering_function, sample),
        tensor_data_samples)

    return list(scattered_data_samples)
Code Example #10
def test_differentiability_scattering(device, backend, random_state=42):
    """
    It simply tests whether it is really differentiable or not.
    This does NOT test whether the gradients are correct.
    """

    if backend.name.endswith("_skcuda"):
        pytest.skip("The skcuda backend does not pass differentiability "
                    "tests, but that's ok (for now).")

    torch.manual_seed(random_state)

    J = 6
    Q = 8
    T = 2**12

    scattering = Scattering1D(J, T, Q, frontend='torch',
                              backend=backend).to(device)

    x = torch.randn(2, T, requires_grad=True, device=device)

    s = scattering.forward(x)
    loss = torch.sum(torch.abs(s))
    loss.backward()
    assert torch.max(torch.abs(x.grad)) > 0.
Code Example #11
File: test_scattering1d.py Project: f0k/kymatio
def test_differentiability_scattering(random_state=42):
    """
    It simply tests whether it is really differentiable or not.
    This does NOT test whether the gradients are correct.
    """

    if backend.NAME == "skcuda":
        warnings.warn(("The skcuda backend does not pass differentiability "
                       "tests, but that's ok (for now)."),
                      RuntimeWarning,
                      stacklevel=2)
        return

    torch.manual_seed(random_state)
    J = 6
    Q = 8
    T = 2**12
    scattering = Scattering1D(J, T, Q)
    x = torch.randn(128, T, requires_grad=True)

    if force_gpu:
        scattering.cuda()
        x = x.cuda()

    s = scattering.forward(x)
    loss = torch.sum(torch.abs(s))
    loss.backward()
    assert torch.max(torch.abs(x.grad)) > 0.
Code Example #12
File: test_scattering1d.py Project: paulsinz/kymatio
def test_sample_scattering():
    """
    Applies scattering on a stored signal to make sure its output agrees with
    a previously calculated version.
    """
    test_data_dir = os.path.dirname(__file__)
    test_data_filename = os.path.join(test_data_dir, 'test_data_1d.pt')
    data = torch.load(test_data_filename, map_location='cpu')

    x = data['x']
    J = data['J']
    Q = data['Q']
    Sx0 = data['Sx']

    T = x.shape[2]

    scattering = Scattering1D(T, J, Q)

    if force_gpu:
        scattering = scattering.cuda()
        x = x.cuda()

    Sx = scattering.forward(x)

    if force_gpu:
        Sx = Sx.cpu()

    assert (Sx - Sx0).abs().max() < 1e-6
Code Example #13
    def __init__(self, num_classes=2):
        super(CNNNet, self).__init__()
        self.J = 6
        self.Q = 8
        self.T = 40000
        self.num_classes = num_classes
        self.output_size = 125
        self.hidden_size_1 = 2048
        self.hidden_size_2 = 1024
        self.hidden_size_3 = 512
        self.hidden_size_4 = 256
        self.hidden_size_5 = 128
        self.hidden_size_6 = 64

        self.log_eps = 1e-6

        self.scattering = Scattering1D(self.J, self.T, self.Q)
        self.scattering.cuda()
        self.h1 = nn.Linear(self.output_size, self.hidden_size_1)
        self.h2 = nn.Linear(self.hidden_size_1, self.hidden_size_2)
        self.h3 = nn.Linear(self.hidden_size_2, self.hidden_size_3)
        self.h4 = nn.Linear(self.hidden_size_3, self.hidden_size_4)
        self.h5 = nn.Linear(self.hidden_size_4, self.hidden_size_5)
        self.h6 = nn.Linear(self.hidden_size_5, self.hidden_size_6)
        self.output = nn.Linear(self.hidden_size_6, self.num_classes)

        self.relu = nn.ReLU()
        self.do = nn.Dropout()
Code Example #14
def test_scattering_GPU_CPU(random_state=42, test_cuda=None):
    """
    This function tests whether the CPU computations are equivalent to
    the GPU ones

    Note that we can only achieve 1e-4 absolute l_infty error between GPU
    and CPU
    """
    if force_gpu:
        return

    torch.manual_seed(random_state)
    if test_cuda is None:
        test_cuda = torch.cuda.is_available()
    J = 6
    Q = 8
    T = 2**12

    if test_cuda:
        # build the scattering
        scattering = Scattering1D(J, T, Q)

        x = torch.randn(128, T)
        s_cpu = scattering.forward(x)

        scattering = scattering.cuda()
        x_gpu = x.clone().cuda()
        s_gpu = scattering.forward(x_gpu).cpu()
        # compute the distance
        assert torch.max(torch.abs(s_cpu - s_gpu)) < 1e-4
Code Example #15
File: network_loss.py Project: elias-ramzi/ismir2018
 def __init__(self,
              size_domain,
              J_mmd=6,
              Q_mmd=12,
              order2_mmd=False,
              normalize_mmd='l2',
              criterion_amplitude_mmd=1e-2,
              batch_size=128,
              eps=1e-5,
              is_cuda=True,
              order0_mmd=True,
              loss_type_mmd='static',
              **kwargs):
     """
     Loss types: "static", "dynamic" (equality in distribution with respect
         to actual scatterings, not just the mean)
     """
     max_order = 2 if order2_mmd else 1
     self.scatterer = Scattering1D(J_mmd,
                                   size_domain,
                                   Q=Q_mmd,
                                   max_order=max_order,
                                   average=average_U1,
                                   oversampling=0,
                                   vectorize=True)
     if is_cuda:
         self.scatterer = self.scatterer.cuda()
     self.batch_size = batch_size
     self.eps = eps
     self.is_cuda = is_cuda
     self.include_order0 = order0_mmd
     self.loss_type = loss_type_mmd
     self.norm_factors = None
Code Example #16
File: network_loss.py Project: elias-ramzi/ismir2018
 def __init__(self,
              loss_type='scat',
              J_loss=2,
              Q_loss=2,
              xi_max_loss=0.25,
              normalization_loss='l1',
              include_poly_moments_0=True,
              average_U1=False,
              size_domain=256,
              is_cuda=True,
              whole_dataset_cuda=True,
              batch_size=128,
              eps=1e-6,
              apply_normalization=True,
              L=6,
              xi_min_loss=0.04,
              criterion_amplitude=1e-3,
              joint_S1=False,
              joint_U1=False,
              L_joint=3,
              joint_U2=False,
              order2=False,
              backend='cufft',
              l1_regularization_order1=False,
              mu_order1=1e-1,
              p_order=None,
              perceptual=False,
              subsample_factor=1,
              oversampling=0,
              **kwargs):
     self.loss_type = loss_type
     self.is_cuda = is_cuda
     self.whole_dataset_cuda = whole_dataset_cuda
     self.norm_factors = {}  # pre_assignment
     self.batch_size = batch_size  # at runtime, not for pre-computing
     self.eps = eps  # for numerical stability
     self.apply_normalization = apply_normalization
     if (loss_type == 'pyscat') or (loss_type == 'pyscat_mmd'):
         target_type = torch.cuda.FloatTensor if is_cuda\
             else torch.FloatTensor
         max_order = 2 if order2 else 1
         self.scatterer = Scattering1D(J_loss,
                                       size_domain,
                                       Q=Q_loss,
                                       max_order=max_order,
                                       average=average_U1,
                                       vectorize=True,
                                       oversampling=oversampling)
         if is_cuda:
             self.scatterer = self.scatterer.cuda()
         self.p_order = {'scattering': 2} if p_order is None else p_order
     elif loss_type == 'mse':
         # nothing to do here
         pass
     else:
         raise ValueError('Unknown loss type ' + str(loss_type))
Code Example #17
File: scattering1d.py Project: yuweiDu/kymatio
 def setup(self, sc_params, batch_size):
     n_channels = 1
     scattering = Scattering1D(**sc_params)
     scattering.cpu()
     x = torch.randn(batch_size,
                     n_channels,
                     sc_params["shape"],
                     dtype=torch.float32)
     x.cpu()
     self.scattering = scattering
     self.x = x
Code Example #18
def test_coordinates(device, backend, random_state=42):
    """
    Tests that the coordinates returned by Scattering1D.meta() correspond to
    the actual values, both with and without vectorization.
    """

    torch.manual_seed(random_state)
    J = 6
    Q = 8
    T = 2**12

    scattering = Scattering1D(J,
                              T,
                              Q,
                              max_order=2,
                              backend=backend,
                              frontend='torch')

    x = torch.randn(2, T)

    scattering.to(device)
    x = x.to(device)

    for max_order in [1, 2]:
        scattering.max_order = max_order

        scattering.vectorize = False

        if backend.name.endswith('_skcuda') and device == 'cpu':
            with pytest.raises(TypeError) as ve:
                s_dico = scattering(x)
            assert "CPU" in ve.value.args[0]
        else:
            s_dico = scattering(x)
            s_dico = {k: s_dico[k].data for k in s_dico.keys()}
        scattering.vectorize = True

        if backend.name.endswith('_skcuda') and device == 'cpu':
            with pytest.raises(TypeError) as ve:
                s_vec = scattering(x)
            assert "CPU" in ve.value.args[0]
        else:
            s_vec = scattering(x)
            s_dico = {k: s_dico[k].cpu() for k in s_dico.keys()}
            s_vec = s_vec.cpu()

        meta = scattering.meta()

        if not backend.name.endswith('_skcuda') or device != 'cpu':
            assert len(s_dico) == s_vec.shape[1]

            for cc in range(s_vec.shape[1]):
                k = meta['key'][cc]
                assert torch.allclose(s_vec[:, cc], torch.squeeze(s_dico[k]))
Code Example #19
def normalised_frquency_vector(x,
                               J,
                               Q,
                               epsilon_order_1=1 * 10**-6,
                               epsilon_order_2=1 * 10**-6):
    x = torch.from_numpy(x).float()
    x /= x.abs().max()
    x = x.view(1, -1)
    T = x.shape[-1]
    scattering = Scattering1D(J,
                              T,
                              Q=Q,
                              average=True,
                              oversampling=0,
                              vectorize=True)
    Sx = scattering.forward(x)
    Sx_abs = scattering.forward(np.abs(x))
    meta = Scattering1D.compute_meta_scattering(J, Q)
    order0 = (meta['order'] == 0)
    order1 = (meta['order'] == 1)
    order2 = (meta['order'] == 2)

    Sx1 = normalise_order1(Sx,
                           Sx_abs,
                           order0,
                           order1,
                           order2,
                           epsilon_order_1,
                           frequency_normalisation_order_1_vector=[])
    Sx2 = normalise_order2(J,
                           Q,
                           Sx,
                           order0,
                           order1,
                           order2,
                           epsilon_order_2,
                           frequency_normalisation_order_2_vector=[])
    return np.mean(scale_value(Sx1.numpy()),
                   axis=1), np.mean(scale_value(Sx2.numpy()), axis=1)
Code Example #20
def test_computation_Ux(random_state=42):
    """
    Checks the computation of the U transform (no averaging for 1st order)
    """
    rng = np.random.RandomState(random_state)
    J = 6
    Q = 8
    T = 2**12
    scattering = Scattering1D(J,
                              T,
                              Q,
                              average=False,
                              max_order=1,
                              vectorize=False)
    # random signal
    x = torch.from_numpy(rng.randn(1, T)).float()

    if force_gpu:
        scattering.cuda()
        x = x.cuda()

    s = scattering.forward(x)

    # check that the keys in s correspond to the zeroth and first orders
    for k in range(len(scattering.psi1_f)):
        assert (k, ) in s.keys()
    for k in s.keys():
        if k != ():
            assert k[0] < len(scattering.psi1_f)
        else:
            assert True

    scattering.max_order = 2

    s = scattering.forward(x)

    count = 1
    for k1, filt1 in enumerate(scattering.psi1_f):
        assert (k1, ) in s.keys()
        count += 1
        for k2, filt2 in enumerate(scattering.psi2_f):
            if filt2['j'] > filt1['j']:
                assert (k1, k2) in s.keys()
                count += 1

    assert count == len(s)

    with pytest.raises(ValueError) as ve:
        scattering.vectorize = True
        scattering.forward(x)
    assert "mutually incompatible" in ve.value.args[0]
Code Example #21
def test_batch_shape_agnostic():
    J, Q = 3, 8
    length = 1024
    shape = (length, )

    length_ds = length / 2**J

    S = Scattering1D(J, shape, Q)

    with pytest.raises(ValueError) as ve:
        S(torch.zeros(()))
    assert "at least one axis" in ve.value.args[0]

    x = torch.zeros(shape)

    if force_gpu:
        x = x.cuda()
        S.cuda()

    Sx = S(x)

    assert Sx.dim() == 2
    assert Sx.shape[-1] == length_ds

    n_coeffs = Sx.shape[-2]

    test_shapes = ((1, ) + shape, (2, ) + shape, (2, 2) + shape,
                   (2, 2, 2) + shape)

    for test_shape in test_shapes:
        x = torch.zeros(test_shape)

        if force_gpu:
            x = x.cuda()

        S.vectorize = True
        Sx = S(x)

        assert Sx.dim() == len(test_shape) + 1
        assert Sx.shape[-1] == length_ds
        assert Sx.shape[-2] == n_coeffs
        assert Sx.shape[:-2] == test_shape[:-1]

        S.vectorize = False
        Sx = S(x)

        assert len(Sx) == n_coeffs
        for k, v in Sx.items():
            assert v.shape[-1] == length_ds
            assert v.shape[-2] == 1
            assert v.shape[:-2] == test_shape[:-1]
Code Example #22
def test_batch_shape_agnostic(device, backend):
    J, Q = 3, 8
    length = 1024
    shape = (length, )

    length_ds = length / 2**J

    S = Scattering1D(J, shape, Q, backend=backend, frontend='torch').to(device)

    with pytest.raises(ValueError) as ve:
        S(torch.zeros(()).to(device))
    assert "at least one axis" in ve.value.args[0]

    x = torch.zeros(shape).to(device)

    if backend.name.endswith('_skcuda') and device == 'cpu':
        with pytest.raises(TypeError) as ve:
            Sx = S(x)
        assert "CPU" in ve.value.args[0]
        return

    Sx = S(x)

    assert Sx.dim() == 2
    assert Sx.shape[-1] == length_ds

    n_coeffs = Sx.shape[-2]

    test_shapes = ((1, ) + shape, (2, ) + shape, (2, 2) + shape,
                   (2, 2, 2) + shape)

    for test_shape in test_shapes:
        x = torch.zeros(test_shape).to(device)

        S.vectorize = True
        Sx = S(x)

        assert Sx.dim() == len(test_shape) + 1
        assert Sx.shape[-1] == length_ds
        assert Sx.shape[-2] == n_coeffs
        assert Sx.shape[:-2] == test_shape[:-1]

        S.vectorize = False
        Sx = S(x)

        assert len(Sx) == n_coeffs
        for k, v in Sx.items():
            assert v.shape[-1] == length_ds
            assert v.shape[-2] == 1
            assert v.shape[:-2] == test_shape[:-1]
Code Example #23
def test_simple_scatterings(random_state=42):
    """
    Checks the behaviour of the scattering on simple signals
    (zero, constant, pure cosine)
    """
    rng = np.random.RandomState(random_state)
    J = 6
    Q = 8
    T = 2**12
    scattering = Scattering1D(J, T, Q)
    if force_gpu:
        scattering = scattering.cuda()
    else:
        scattering.cpu()
    # zero signal
    x0 = torch.zeros(128, T)
    if force_gpu:
        x0 = x0.cuda()
    s = scattering.forward(x0)
    if force_gpu:
        s = s.cpu()
    # check that s is zero!
    assert torch.max(torch.abs(s)) < 1e-7

    # constant signal
    x1 = rng.randn(1)[0] * torch.ones(1, T)
    if force_gpu:
        x1 = x1.cuda()
    s1 = scattering.forward(x1)
    if force_gpu:
        s1 = s1.cpu()
    # check that all orders above 1 are 0
    assert torch.max(torch.abs(s1[:, 1:])) < 1e-7

    # sinusoid scattering
    meta = scattering.meta()
    for _ in range(50):
        k = rng.randint(1, T // 2, 1)[0]
        x2 = torch.cos(2 * math.pi * float(k) *
                       torch.arange(0, T, dtype=torch.float32) / float(T))
        x2 = x2.unsqueeze(0)
        if force_gpu:
            x2 = x2.cuda()
        s2 = scattering.forward(x2)
        if force_gpu:
            s2 = s2.cpu()

        assert (s2[:, meta['order'] != 1, :].abs().max() < 1e-2)
Code Example #24
def test_simple_scatterings(device, backend, random_state=42):
    """
    Checks the behaviour of the scattering on simple signals
    (zero, constant, pure cosine)
    """

    rng = np.random.RandomState(random_state)
    J = 6
    Q = 8
    T = 2**9
    scattering = Scattering1D(J, T, Q, backend=backend,
                              frontend='torch').to(device)

    # zero signal
    x0 = torch.zeros(2, T).to(device)

    if backend.name.endswith('_skcuda') and device == 'cpu':
        with pytest.raises(TypeError) as ve:
            s = scattering(x0)
        assert "CPU" in ve.value.args[0]
        return
    s = scattering(x0)

    # check that s is zero!
    assert torch.max(torch.abs(s)) < 1e-7

    # constant signal
    x1 = rng.randn(1)[0] * torch.ones(1, T).to(device)
    if not backend.name.endswith('_skcuda') or device != 'cpu':
        s1 = scattering(x1)

        # check that all orders above 1 are 0
        assert torch.max(torch.abs(s1[:, 1:])) < 1e-7

    # sinusoid scattering
    meta = scattering.meta()
    for _ in range(3):
        k = rng.randint(1, T // 2, 1)[0]
        x2 = torch.cos(2 * math.pi * float(k) *
                       torch.arange(0, T, dtype=torch.float32) / float(T))
        x2 = x2.unsqueeze(0).to(device)
        if not backend.name.endswith('_skcuda') or device != 'cpu':
            s2 = scattering(x2)

            assert (s2[:, torch.from_numpy(meta['order']) != 1, :].abs().max()
                    < 1e-2)
Code Example #25
def test_precompute_size_scattering(device, backend, random_state=42):
    """
    Tests that precompute_size_scattering computes a size which corresponds
    to the actual scattering computed
    """
    torch.manual_seed(random_state)

    J = 6
    Q = 8
    T = 2**12

    scattering = Scattering1D(J,
                              T,
                              Q,
                              vectorize=False,
                              backend=backend,
                              frontend='torch')

    x = torch.randn(2, T)

    scattering.to(device)
    x = x.to(device)
    if not backend.name.endswith('_skcuda') or device != 'cpu':
        for max_order in [1, 2]:
            scattering.max_order = max_order
            s_dico = scattering(x)
            for detail in [True, False]:
                # get the size of scattering
                size = scattering.output_size(detail=detail)
                if detail:
                    num_orders = {0: 0, 1: 0, 2: 0}
                    for k in s_dico.keys():
                        if k == ():
                            num_orders[0] += 1
                        else:
                            if len(k) == 1:  # order1
                                num_orders[1] += 1
                            elif len(k) == 2:
                                num_orders[2] += 1
                    todo = 2 if max_order == 2 else 1
                    for i in range(todo):
                        assert num_orders[i] == size[i]
                        # check that the orders are completely equal
                else:
                    assert len(s_dico) == size
Code Example #26
    def test_Scattering1D(self, backend):
        """
        Applies scattering on a stored signal to make sure its output agrees with
        a previously calculated version.
        """
        test_data_dir = os.path.dirname(__file__)

        with open(os.path.join(test_data_dir, 'test_data_1d.npz'), 'rb') as f:
            buffer = io.BytesIO(f.read())
            data = np.load(buffer)

        x = data['x']
        J = data['J']
        Q = data['Q']
        Sx0 = data['Sx']

        T = x.shape[-1]

        scattering = Scattering1D(J, T, Q, backend=backend, frontend='tensorflow')

        Sx = scattering(x)
        assert np.allclose(Sx, Sx0, atol=1e-6, rtol=1e-7)
Code Example #27
def test_precompute_size_scattering(random_state=42):
    """
    Tests that precompute_size_scattering computes a size which corresponds
    to the actual scattering computed
    """
    torch.manual_seed(random_state)
    J = 6
    Q = 8
    T = 2**12
    scattering = Scattering1D(J, T, Q, vectorize=False)
    x = torch.randn(128, T)

    if force_gpu:
        scattering.cuda()
        x = x.cuda()

    for max_order in [1, 2]:
        scattering.max_order = max_order
        s_dico = scattering.forward(x)
        for detail in [True, False]:
            # get the size of scattering
            size = scattering.output_size(detail=detail)
            if detail:
                num_orders = {0: 0, 1: 0, 2: 0}
                for k in s_dico.keys():
                    if k == ():
                        num_orders[0] += 1
                    else:
                        if len(k) == 1:  # order1
                            num_orders[1] += 1
                        elif len(k) == 2:
                            num_orders[2] += 1
                todo = 2 if max_order == 2 else 1
                for i in range(todo):
                    assert num_orders[i] == size[i]
                    # check that the orders are completely equal
            else:
                assert len(s_dico) == size
Code Example #28
def plot_multi_order_scattering(x,
                                J,
                                Q,
                                order1_frequency_axis=[],
                                normalise_1=False,
                                normalise_2=False,
                                epsilon_order_1=1 * 10**-6,
                                epsilon_order_2=1 * 10**-6,
                                frequency_normalisation_order_1_vector=None,
                                frequency_normalisation_order_2_vector=None):

    x = torch.from_numpy(x).float()
    x /= x.abs().max()
    x = x.view(1, -1)

    T = x.shape[-1]
    scattering = Scattering1D(J,
                              T,
                              Q=Q,
                              average=True,
                              oversampling=0,
                              vectorize=True)
    Sx = scattering.forward(x)
    Sx_abs = scattering.forward(np.abs(x))
    meta = Scattering1D.compute_meta_scattering(J, Q)
    order0 = (meta['order'] == 0)
    order1 = (meta['order'] == 1)
    order2 = (meta['order'] == 2)

    fig = make_subplots(
        rows=3,
        cols=6,
        column_widths=[0.4, 0.4, 0.4, 0.4, 0.4, 0.4],
        row_heights=[0.2, 0.2, 0.2],
        specs=[[{
            "type": "Scatter"
        }, {
            "type": "Heatmap"
        }, {
            "type": "Heatmap"
        }, {
            "type": "Heatmap"
        }, {
            "type": "Heatmap"
        }, {
            "type": "Heatmap"
        }],
               [{
                   "type": "Scatter"
               }, {
                   "type": "Scatter"
               }, {
                   "type": "Scatter"
               }, {
                   "type": "Scatter"
               }, {
                   "type": "Scatter"
               }, {
                   "type": "Scatter"
               }],
               [
                   None, {
                       "type": "Scatter"
                   }, {
                       "type": "Scatter"
                   }, {
                       "type": "Scatter"
                   }, {
                       "type": "Scatter"
                   }, {
                       "type": "Scatter"
                   }
               ]],
        subplot_titles=(
            'Temporal signal', 'Scattering Order 1',
            'Scattering Order 1 Normalised', 'Scattering Order 2',
            'Scattering Order 2 Normalised', 'Order 2 Frequency',
            "Scattering Order 0", 'Scattering Order 1 mean',
            'Scattering Order 1 mean Normalised', 'Scattering Order 2 mean',
            'Scattering Order 2 mean Normalised', 'Order 2 Frequency mean',
            None, 'Scattering Order 1 max',
            'Scattering Order 1 max Normalised', 'Scattering Order 2 max',
            'Scattering Order 2 max Normalised', 'Order 2 Frequency max'))

    fig.add_trace(go.Scatter(y=x[0, :].numpy(), name="Negative"), row=1, col=1)
    fig.add_trace(go.Scatter(y=Sx[0, order0, :].numpy().ravel(),
                             name="Negative"),
                  row=2,
                  col=1)

    if normalise_1:
        Sx1 = normalise_order1(Sx,
                               Sx_abs,
                               order0,
                               order1,
                               order2,
                               epsilon_order_1,
                               frequency_normalisation_order_1_vector=
                               frequency_normalisation_order_1_vector)
    else:
        Sx1 = Sx[0, order1, :]

    if (len(order1_frequency_axis) != 0):
        fig.add_trace(go.Heatmap(z=scale_value(Sx[0, order1, :].numpy()),
                                 y=order1_frequency_axis,
                                 colorscale='Viridis',
                                 showscale=False),
                      row=1,
                      col=2)
        fig.add_trace(go.Heatmap(z=scale_value(Sx1.numpy()),
                                 y=order1_frequency_axis,
                                 colorscale='Viridis',
                                 showscale=False),
                      row=1,
                      col=3)
    else:
        fig.add_trace(go.Heatmap(z=scale_value(Sx[0, order1, :].numpy()),
                                 colorscale='Viridis',
                                 showscale=False),
                      row=1,
                      col=2)
        fig.update_yaxes(autorange="reversed", row=1, col=2)
        fig.add_trace(go.Heatmap(z=scale_value(Sx1.numpy()),
                                 colorscale='Viridis',
                                 showscale=False),
                      row=1,
                      col=3)
        fig.update_yaxes(autorange="reversed", row=1, col=3)
    fig.add_trace(go.Scatter(y=np.mean(scale_value(Sx[0, order1, :].numpy()),
                                       axis=1),
                             name="Negative"),
                  row=2,
                  col=2)
    fig.add_trace(go.Scatter(y=np.max(scale_value(Sx[0, order1, :].numpy()),
                                      axis=1),
                             name="Negative"),
                  row=3,
                  col=2)
    fig.add_trace(go.Scatter(y=np.mean(scale_value(Sx1.numpy()), axis=1),
                             name="Negative"),
                  row=2,
                  col=3)
    fig.add_trace(go.Scatter(y=np.max(scale_value(Sx1.numpy()), axis=1),
                             name="Negative"),
                  row=3,
                  col=3)

    if normalise_2:
        Sx2 = normalise_order2(J,
                               Q,
                               Sx,
                               order0,
                               order1,
                               order2,
                               epsilon_order_2,
                               frequency_normalisation_order_2_vector=
                               frequency_normalisation_order_2_vector)
    else:
        Sx2 = Sx[0, order2, :]

    fig.add_trace(go.Heatmap(z=scale_value(Sx[0, order2, :].numpy()),
                             colorscale='Viridis',
                             showscale=False),
                  row=1,
                  col=4)
    fig.update_yaxes(autorange="reversed", row=1, col=4)
    fig.add_trace(go.Heatmap(z=scale_value(Sx2.numpy()),
                             colorscale='Viridis',
                             showscale=False),
                  row=1,
                  col=5)
    fig.update_yaxes(autorange="reversed", row=1, col=5)
    fig.add_trace(go.Scatter(y=np.mean(scale_value(Sx[0, order2, :].numpy()),
                                       axis=1),
                             name="Negative"),
                  row=2,
                  col=4)
    fig.add_trace(go.Scatter(y=np.max(scale_value(Sx[0, order2, :].numpy()),
                                      axis=1),
                             name="Negative"),
                  row=3,
                  col=4)
    fig.add_trace(go.Scatter(y=np.mean(scale_value(Sx2.numpy()), axis=1),
                             name="Negative"),
                  row=2,
                  col=5)
    fig.add_trace(go.Scatter(y=np.max(scale_value(Sx2.numpy()), axis=1),
                             name="Negative"),
                  row=3,
                  col=5)
    fig.update_layout(showlegend=False)

    Sx2_Bis = select_frequency(Sx2, T, J, Q, index_frequency=None)

    fig.add_trace(go.Heatmap(z=scale_value(Sx2_Bis),
                             colorscale='Viridis',
                             showscale=False),
                  row=1,
                  col=6)
    fig.update_yaxes(autorange="reversed", row=1, col=6)
    fig.add_trace(go.Scatter(y=np.mean(scale_value(Sx2_Bis), axis=1),
                             name="Negative"),
                  row=2,
                  col=6)
    fig.add_trace(go.Scatter(y=np.max(scale_value(Sx2_Bis), axis=1),
                             name="Negative"),
                  row=3,
                  col=6)
    fig.show()
Code Example #29
    def __init__(self, J=6, T=2**14, Q=7, device='cuda'):
        self.scattering = Scattering1D(J, T, Q)
        self.log_eps = 1e-6

        if device == 'cuda':
            self.scattering = self.scattering.cuda()
Code Example #30
    # If it's too long, truncate it.
    if x.numel() > T:
        x = x[:T]

    # If it's too short, zero-pad it.
    start = (T - x.numel()) // 2

    x_all[k, start:start + x.numel()] = x
    y_all[k] = y

###############################################################################
# Log-scattering transform
# ------------------------
# We now create the `Scattering1D` object that will be used to calculate the
# scattering coefficients.
scattering = Scattering1D(J, T, Q)

###############################################################################
# If we are using CUDA, the scattering transform object must be transferred to
# the GPU by calling its `cuda()` method. The data is similarly transferred.
if use_cuda:
    scattering.cuda()
    x_all = x_all.cuda()
    y_all = y_all.cuda()

###############################################################################
# Compute the scattering transform for all signals in the dataset.
Sx_all = scattering.forward(x_all)

###############################################################################
# Since it does not carry useful information, we remove the zeroth-order