Code example #1
    def test_Scattering2D(self, backend):
        test_data_dir = os.path.dirname(__file__)
        data = None
        with open(os.path.join(test_data_dir, 'test_data_2d.npz'), 'rb') as f:
            buffer = io.BytesIO(f.read())
            data = np.load(buffer)

        x = data['x']
        S = data['Sx']
        J = data['J']
        pre_pad = data['pre_pad']

        M = x.shape[2]
        N = x.shape[3]

        scattering = Scattering2D(J,
                                  shape=(M, N),
                                  pre_pad=pre_pad,
                                  frontend='numpy',
                                  backend=backend)

        Sg = scattering(x)
        assert np.allclose(Sg, S)

        scattering = Scattering2D(J,
                                  shape=(M, N),
                                  pre_pad=pre_pad,
                                  max_order=1,
                                  frontend='numpy',
                                  backend=backend)

        S1x = scattering(x)
        assert np.allclose(S1x, S[..., :S1x.shape[-3], :, :])
Code example #2
    def test_scattering2d_errors(self, backend):
        S = Scattering2D(3, (32, 32), frontend='numpy', backend=backend)

        with pytest.raises(TypeError) as record:
            S(None)
        assert 'input should be' in record.value.args[0]

        x = np.random.randn(32)

        with pytest.raises(RuntimeError) as record:
            S(x)
        assert 'have at least two dimensions' in record.value.args[0]

        x = np.random.randn(31, 31)

        with pytest.raises(RuntimeError) as record:
            S(x)
        assert 'NumPy array must be of spatial size' in record.value.args[0]

        S = Scattering2D(3, (32, 32),
                         pre_pad=True,
                         frontend='numpy',
                         backend=backend)

        with pytest.raises(RuntimeError) as record:
            S(x)
        assert 'Padded array must be of spatial size' in record.value.args[0]
Code example #3
    def test_Scattering2D(self, backend_device):
        backend, device = backend_device

        test_data_dir = os.path.dirname(__file__)
        with open(os.path.join(test_data_dir, 'test_data_2d.npz'), 'rb') as f:
            buffer = io.BytesIO(f.read())
            data = np.load(buffer)

        x = torch.from_numpy(data['x'])
        S = torch.from_numpy(data['Sx'])
        J = data['J']
        pre_pad = data['pre_pad']

        M = x.shape[2]
        N = x.shape[3]

        scattering = Scattering2D(J, shape=(M, N), pre_pad=pre_pad,
                                  backend=backend, frontend='torch')
        x = x.to(device)
        scattering.to(device)
        S = S.to(device)
        Sg = scattering(x)
        assert torch.allclose(Sg, S)

        scattering = Scattering2D(J, shape=(M, N), pre_pad=pre_pad,
                                  max_order=1, frontend='torch',
                                  backend=backend)
        scattering.to(device)

        S1x = scattering(x)
        assert torch.allclose(S1x, S[..., :S1x.shape[-3], :, :])
Code example #4
def main():

    transforms_to_apply = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            (0.5, 0.5, 0.5),
            (0.5, 0.5, 0.5))  # Pixel values should be in [-1,1]
    ])

    mnist_dir = get_dataset_dir("MNIST", create=True)
    dataset = datasets.MNIST(mnist_dir,
                             train=False,
                             download=False,
                             transform=transforms_to_apply)
    dataloader = DataLoader(dataset,
                            batch_size=20,
                            shuffle=True,
                            pin_memory=True)

    fixed_dataloader = DataLoader(dataset, batch_size=2, shuffle=True)
    fixed_batch = next(iter(fixed_dataloader))
    fixed_batch = fixed_batch[0].float().cuda()

    scattering = Scattering(J=2, shape=(28, 28))
    scattering.cuda()

    for _, current_batch in enumerate(dataloader):
        batch_images = current_batch[0].float().cuda()  # the Variable wrapper is no longer needed in modern PyTorch
        batch_scattering = scattering(batch_images).squeeze(1)

        print(batch_scattering.shape)
        exit()
Code example #5
def test_scattering2d_frontend():
    scattering = Scattering2D(2, shape=(10, 10))
    assert isinstance(
        scattering,
        ScatteringTorch2D), 'Torch frontend is not selected by default'

    with pytest.raises(RuntimeError) as ve:
        scattering = Scattering2D(2, shape=(10, 10), frontend='doesnotexist')
    assert "is not valid" in ve.value.args[0]
Code example #6
    def test_inputs(self):
        fake_backend = namedtuple('backend', ['name',])
        fake_backend.name = 'fake'

        with pytest.raises(ImportError) as ve:
            scattering = Scattering2D(2, shape=(10, 10), frontend='tensorflow', backend=fake_backend)
        assert 'not supported' in ve.value.args[0]

        with pytest.raises(RuntimeError) as ve:
            scattering = Scattering2D(10, shape=(10, 10), frontend='tensorflow')
        assert 'smallest dimension' in ve.value.args[0]
Code example #7
    def test_input_size_agnostic(self, backend_device):
        backend, device = backend_device

        for N in [31, 32, 33]:
            for J in [1, 2, 4]:
                scattering = Scattering2D(J,
                                          shape=(N, N),
                                          backend=backend,
                                          frontend='torch')
                x = torch.zeros(3, 3, N, N)

                x = x.to(device)
                scattering.to(device)

                S = scattering(x)
                scattering = Scattering2D(J,
                                          shape=(N, N),
                                          pre_pad=True,
                                          backend=backend,
                                          frontend='torch')
                x = torch.zeros(3, 3, scattering.M_padded, scattering.N_padded)

                x = x.to(device)
                scattering.to(device)

        N = 32
        J = 5
        scattering = Scattering2D(J,
                                  shape=(N, N),
                                  backend=backend,
                                  frontend='torch')
        x = torch.zeros(3, 3, N, N)

        x = x.to(device)
        scattering.to(device)

        S = scattering(x)
        assert S.shape[-2:] == (1, 1)

        N = 32
        J = 5
        scattering = Scattering2D(J,
                                  shape=(N + 5, N),
                                  backend=backend,
                                  frontend='torch')
        x = torch.zeros(3, 3, N + 5, N)

        x = x.to(device)
        scattering.to(device)

        S = scattering(x)
        assert S.shape[-2:] == (1, 1)
Code example #8
    def __init__(self,
                 block=BasicBlock,
                 num_classes=10,
                 input_shape=(96, 96),
                 J=2,
                 L=8):
        super(ScattResNet34, self).__init__()

        self.scattering = Scattering2D(J=J, shape=input_shape)
        if torch.cuda.is_available():
            print("Move scattering to GPU")
            self.scattering = self.scattering.cuda()
        self.K = 1 + J * L + L**2 * (J - 1) * J // 2
        self.K *= 3  # rgb images
        self.scatt_output_shape = tuple([x // 2**J for x in input_shape])
        self.bn = nn.BatchNorm2d(self.K)

        self.inplanes = self.K
        self.layer1 = self._make_layer(block, 256, 3)
        self.layer2 = self._make_layer(block, 256, 3, stride=2)
        self.layer3 = self._make_layer(block, 256, 3, stride=2)
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(256 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Code example #9
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    shape = (64,) * 2
    img_1 = np.zeros(shape, dtype=np.float32)
    # img_1[10:20, 15:20] = 1.
    img_1[15:20, 10:20] = 1.

    plt.figure(1, figsize=(12, 2))
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.6)

    for i, J in enumerate(range(2, 6)):
        scattering = Scattering2D(J=J, shape=shape, max_order=1).cuda()
        img_1_scatt = scattering(torch.from_numpy(img_1).to(device)).cpu().numpy()

        means = list()
        for shift in range(shape[0] - 5):
            img = np.zeros(shape, dtype=np.float32)
            img[10:20, shift:shift + 5] = 1.
            img_scatt = scattering(torch.from_numpy(img).to(device)).cpu().numpy()
            means.append(np.mean((img_1_scatt - img_scatt) ** 2))

        num = 152 + i
        plt.subplot(num)
        plt.plot(means)
        plt.ylim([0, 0.0012])
        plt.title("J = {}".format(J))

    plt.subplot(151)
    plt.imshow(img_1, cmap='gray')
    plt.title("Image référence")
    plt.show()
Code example #10
def get_scattering(patches, pd, J=4, L=8, rotinvariant=True):
    """
    you need to average all paths of the form
    (theta1, theta2=theta1+delta)
    for varying theta1 and fixed delta.

    so for example with L=4, average (0, 0) with (1, 1), (2, 2) etc.
    then average (0, 1) with (1, 2), (2, 3), etc.
    then average (0, 2) with (1, 3), (2, 0), etc.
    then average (0, 3) with (1, 0), (2, 1), etc.

    that invariance will bring the number of orientations from L*L to L
    """
    import torch
    from kymatio import Scattering2D
    #dim = max(pd[0], pd[1])
    #dim = int(2**np.ceil(np.log(dim)/np.log(2)))
    dim = pd[0]

    print("Initializing scattering transform...")
    scattering = Scattering2D(shape=(dim, dim), J=J, L=L)
    print("Computing scattering coefficients...")
    WTemp = torch.zeros((1, patches.shape[0], pd[0], pd[1]))
    WTemp[0, :, :, :] = torch.from_numpy(
        np.reshape(patches, (patches.shape[0], pd[0], pd[1])))
    coeffs = scattering(WTemp).numpy()
    # 1 zeroth-order, J first-order, and C(J, 2) * L second-order averaged paths
    #res = np.zeros((patches.shape[0], 2+L))
    return np.reshape(coeffs,
                      (patches.shape[0],
                       coeffs.shape[2] * coeffs.shape[3] * coeffs.shape[4]))
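
The docstring above only describes the rotation-averaging scheme; the function itself returns every flattened coefficient and never uses the rotinvariant flag. The following is a minimal sketch of that averaging step, assuming the second-order coefficients for one fixed scale pair have already been gathered into an array indexed by (theta1, theta2); the helper name average_over_rotations is illustrative and not part of kymatio.

import numpy as np

def average_over_rotations(coeffs_by_angle):
    # coeffs_by_angle: array of shape (L, L, ...) indexed by (theta1, theta2).
    # Returns an array of shape (L, ...) indexed by delta = (theta2 - theta1) mod L,
    # i.e. each entry averages the L paths that share the same angular offset.
    L = coeffs_by_angle.shape[0]
    out = []
    for delta in range(L):
        paths = [coeffs_by_angle[t1, (t1 + delta) % L] for t1 in range(L)]
        out.append(np.mean(paths, axis=0))
    return np.stack(out)

# With L = 4, delta = 0 averages the paths (0, 0), (1, 1), (2, 2), (3, 3);
# delta = 1 averages (0, 1), (1, 2), (2, 3), (3, 0); and so on, reducing the
# L * L orientation pairs to L offsets, exactly as the docstring describes.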
Code example #11
    def __init__(self,
                 block=BasicBlock,
                 num_classes=10,
                 input_shape=(32, 32),
                 J=2,
                 L=8):
        super(ScattResNet18, self).__init__()

        self.scattering = Scattering2D(J=J, shape=input_shape)
        if torch.cuda.is_available():
            print("Move scattering to GPU")
            self.scattering = self.scattering.cuda()
        self.K = 1 + J * L + L**2 * (J - 1) * J // 2
        self.scatt_output_shape = tuple([x // 2**J for x in input_shape])
        self.bn = nn.BatchNorm2d(self.K * 3)

        self.in_planes = self.K * 3
        self.conv1 = nn.Conv2d(self.K * 3,
                               self.K * 3,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(self.K * 3)
        self.layer1 = self._make_layer(block, self.K * 3, 3, stride=1)
        # self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=1)
        # self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=1)
        self.layer4 = self._make_layer(block, 512, 2, stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)
Code example #12
File: extract_feature.py  Project: jainszhang/LearnDM
def scat_data(data_dir, outdata_dir, M, N, J):
    from kymatio import Scattering2D
    filename_list = os.listdir(data_dir)  # read the file names in the directory
    filename_list.sort()
    #np.save('./test_scatname_list.npy',np.array(filename_list))
    count = len(filename_list)
    scat = Scattering2D(J=J, shape=(M, N)).cuda()  # scattering transform
    for i in range(0, count):
        imgDir = os.path.join(data_dir, os.path.basename(filename_list[i]))
        if (os.path.splitext(imgDir)[1] != '.jpg'): continue

        # img = np.float16((np.array(Image.open(imgDir)) / 127.5) - 1.0)  # keep consistent with x, normalized to [-1, 1]
        img = np.float16(np.array(Image.open(imgDir)))  # intended to match x, normalized to [-1, 1] (normalization commented out above)

        img = img.reshape(1, M, N, 3)  #1*128*128*3
        img_data = img.transpose(0, 3, 1, 2).astype(np.float32)  # 1*3*128*128
        img_data = torch.from_numpy(img_data).cuda()
        out_data = scat(img_data).cpu()  #1*3*417*8*8

        str1 = filename_list[i].split('.')
        # str2 = imgName.split('.')

        np.save(outdata_dir + '/' + str1[0] + '.npy', out_data)
        if i % 100 == 0:
            print("step is %d" % i)
    return 0
Code example #13
    def __init__(self,
                 datapath="../features_covers80",
                 chroma_type='crema',
                 shortname='Covers80',
                 wins_per_block=20,
                 K=10,
                 niters=10,
                 norm_per_path=True):
        """
        Attributes
        """

        self.wins_per_block = wins_per_block
        self.chroma_type = chroma_type
        self.K = K
        self.niters = niters
        self.norm_per_path = norm_per_path
        self.shingles = {}
        CoverAlgorithm.__init__(self,
                                "StructureHash",
                                datapath=datapath,
                                shortname=shortname)
        print("Initializing scattering transform...")
        J = 6
        L = 8
        NPaths = L * L * J * (J - 1) / 2 + J * L + 1
        tic = time.time()
        self.scattering = Scattering2D(shape=(FINAL_SIZE, FINAL_SIZE),
                                       J=J,
                                       L=L)  #.cuda()
        print("Elapsed Time: %.3g" % (time.time() - tic))
        self.ITemp = torch.zeros((1, 1, FINAL_SIZE, FINAL_SIZE))
Code example #14
    def test_batch_shape_agnostic(self, backend):
        J = 3
        L = 8
        shape = (32, 32)

        shape_ds = tuple(n // (2**J) for n in shape)

        S = Scattering2D(J, shape, L, backend=backend, frontend='numpy')

        x = np.zeros(shape)

        Sx = S(x)

        assert len(Sx.shape) == 3
        assert Sx.shape[-2:] == shape_ds

        n_coeffs = Sx.shape[-3]

        test_shapes = ((1, ) + shape, (2, ) + shape, (2, 2) + shape,
                       (2, 2, 2) + shape)

        for test_shape in test_shapes:
            x = np.zeros(test_shape)

            Sx = S(x)

            assert len(Sx.shape) == len(test_shape) + 1
            assert Sx.shape[-2:] == shape_ds
            assert Sx.shape[-3] == n_coeffs
            assert Sx.shape[:-3] == test_shape[:-2]
Code example #15
    def __init__(self, params, num_classes, is_training):
        super(ScatteringPoolingCirculantModel, self).__init__()
        self.params = params
        self.model_params = params.model_params
        self.use_batch_norm = self.model_params.get('use_batch_norm', False)
        n_layers = self.model_params['n_layers']
        pooling = self.model_params.get('pooling', None)
        assert pooling is not None, "Pooling not defined in config"

        shape = 3 * 81 * 8 * 8
        self.scattering = Scattering2D(J=2, shape=(32, 32))
        self.scattering = self.scattering.cuda()
        if pooling == 'avg':
            self.pooling = nn.AvgPool2d(2)
        elif pooling == 'max':
            self.pooling = nn.MaxPool2d(2)

        shape = 3 * 81 * 4 * 4
        if self.use_batch_norm:
            self.batch_norm = nn.BatchNorm1d(shape)

        self.layers = nn.ModuleList([])
        for _ in range(self.params.model_params['n_layers']):
            self.layers.append(
                DiagonalCirculantLayer(shape, shape,
                                       **self.params.model_params))
        self.last = DiagonalCirculantLayer(shape, 10,
                                           **self.params.model_params)
Code example #16
    def __init__(self, params, num_classes, is_training):
        super(ScatteringByChannelCirculantModel, self).__init__()
        self.params = params
        self.model_params = params.model_params
        self.use_batch_norm = self.model_params.get('use_batch_norm', False)
        n_layers = self.model_params['n_layers']
        pooling = self.model_params.get('pooling', None)
        assert pooling is not None, "Pooling not defined in config"

        shape = 81 * 8 * 8
        self.scattering = Scattering2D(J=2, shape=(32, 32))
        self.scattering = self.scattering.cuda()
        if self.use_batch_norm:
            self.batch_norm1 = nn.BatchNorm1d(shape)
            self.batch_norm2 = nn.BatchNorm1d(shape)
            self.batch_norm3 = nn.BatchNorm1d(shape)

        self.circ_channel1 = DiagonalCirculantLayer(shape, shape,
                                                    **self.params.model_params)
        self.circ_channel2 = DiagonalCirculantLayer(shape, shape,
                                                    **self.params.model_params)
        self.circ_channel3 = DiagonalCirculantLayer(shape, shape,
                                                    **self.params.model_params)

        self.layers = nn.ModuleList([])
        for _ in range(self.params.model_params['n_layers']):
            self.layers.append(
                DiagonalCirculantLayer(shape, shape,
                                       **self.params.model_params))
        self.last = DiagonalCirculantLayer(shape, 10,
                                           **self.params.model_params)
Code example #17
def scattering_transform_mnist(save_to_disk=True, train=True):
    # here we want untransformed mnist data
    transform = transforms.Compose([transforms.ToTensor()])
    mnist_train = datasets.MNIST(os.getcwd() + "/mnist",
                                 train=True,
                                 transform=transform,
                                 download=True)
    mnist_test = datasets.MNIST(os.getcwd() + "/mnist",
                                train=False,
                                transform=transform,
                                download=True)

    # construct the scattering object
    scattering = Scattering2D(J=2, shape=(28, 28))
    batch_size = 1000 if torch.cuda.is_available() else 100
    dataloader = DataLoader(mnist_train if train else mnist_test,
                            batch_size=batch_size)

    print("Running scattering transform")
    extractor = FeatureExtractor(scattering)
    out_features, out_labels = extractor.features(
        dataloader,
        save_to_disk=save_to_disk,
        train=train,
        flatten_config={"start_dim": 2})
Code example #18
 def __getitem__(self, index):
     '''
     Return a tuple containing the image tensor and corresponding class for the given index.
     Parameter:
     index: the index created by __init__; it is the key of the dict built in __init__.
            Note that a single patient can have several indices associated with it.
     '''
     img = imageio.imread(self.jpg_list[index][0])  # rgb
     tag = self.jpg_list[index][1]  # pixel and label

     # isolating green channel:
     if self.green:
         img = img[:, :, 1]

     img = transforms.ToPILImage()(img)
     img = transforms.functional.resize(img, (60, 60))

     scattering = Scattering2D(J=2, shape=(60, 60))
     # K = 81*3

     if self.transform:
         img = self.transform(img)
     if torch.cuda.is_available():
         # move both the input and the scattering filters to the GPU
         scattering = scattering.cuda()
         img = img.cuda()
     Simg = scattering(img)
     return (Simg, tag)
Code example #19
File: extract_feature.py  Project: jainszhang/LearnDM
def scat_data1(data_dir, outdata_dir, M, N, J):
    from kymatio import Scattering2D
    filename_list = os.listdir(data_dir)  # read the file names in the directory
    filename_list.sort()
    count = len(filename_list)

    scat = Scattering2D(J=J, shape=(M, N)).cuda()  # scattering transform
    batch_size = 256
    batch_image = []
    for count_idx in range(0, count):

        imgDir = os.path.join(data_dir,
                              os.path.basename(filename_list[count_idx]))
        img = np.float32(
            (np.array(Image.open(imgDir)) / 127.5)).transpose(2, 0, 1)
        batch_image.append(img)
        if ((count_idx + 1) % batch_size == 0):
            batch_image = torch.from_numpy(np.array(batch_image)).cuda()
            batch_scat = scat.forward(batch_image)
            batch_scat = batch_scat.cpu()

            for c in range(batch_size):
                img_scat = batch_scat[c]
                str1 = filename_list[c + (int(count_idx / batch_size)) *
                                     batch_size].split('.')

                np.save(outdata_dir + '/' + str1[0] + '.npy', img_scat)

            batch_image = []
            print(count_idx)

    print("over")
    return 0
Code example #20
    def __init__(self, params, num_classes, is_training):
        super(LDRModel, self).__init__()
        self.params = params
        self.model_params = params.model_params
        rank = self.model_params['rank']
        class_type = self.model_params['class_type']

        shape = 3 * 81 * 8 * 8
        self.scattering = Scattering2D(J=2, shape=(32, 32))
        self.scattering = self.scattering.cuda()
        self.batch_norm = nn.BatchNorm1d(shape)

        if class_type == 'ldr-sd':
            self.ldr = layer.LDRSubdiagonal(layer_size=shape,
                                            corner=False,
                                            r=rank)
        elif class_type == 'ldr-td':
            self.ldr = layer.LDRTridiagonal(layer_size=shape,
                                            corner=False,
                                            r=rank)
        elif class_type == 'toeplitz':
            self.ldr = layer.ToeplitzLike(layer_size=shape,
                                          corner=False,
                                          r=rank)
        self.ldr = self.ldr.cuda()
Code example #21
    def __init__(self,
                 order,
                 input_shape=(96, 96),
                 J=2,
                 L=8,
                 block=BasicBlock,
                 layers=(3, 4, 6, 3),
                 num_classes=10):
        super(ResNet34, self).__init__()

        # Scattering
        if order == 1:
            self.K = 1 + J * L
        elif order == 2:
            self.K = 1 + J * L + L**2 * (J - 1) * J // 2
        else:
            raise ValueError("Wrong order param: ", order)
        self.order = order
        self.scattering = Scattering2D(J=J,
                                       shape=input_shape,
                                       L=L,
                                       max_order=order)
        if torch.cuda.is_available():
            print("Move scattering to GPU")
            self.scattering = self.scattering.cuda()
        self.K *= 3  # RGB images
        self.scatt_output_shape = tuple([x // 2**J for x in input_shape])
        self.bn = nn.BatchNorm2d(self.K)

        self.inplanes = self.K
        self.conv1 = nn.Conv2d(self.K,
                               self.K,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(self.K)
        self.relu = nn.ReLU(inplace=True)
        # self.layer1 = self._make_layer(block, 64, layers[0])
        # self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            if isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)
Code example #22
def main():

    # load data
    training_x = np.load('../weak_lensing_data/sparse_grid_final_512pix_x_' +
                         train_name + '.npy')[:, :, :, 0]
    print(training_x.shape)

    # add noise
    training_x = add_shape_noise(training_x, ng)

    # smooth the image
    for i in range(training_x.shape[0]):
        training_x[i, :, :] = smooth(training_x[i, :, :])
        if i % 1e3 == 0:
            print(i)

    #----------------------------------------------------------------------------------------------------------
    # define scattering
    scattering = Scattering2D(J=5,
                              shape=(training_x[0, :, :].shape),
                              L=2,
                              max_order=3)
    scattering.cuda()

    # initiate results array
    Sx = []

    #----------------------------------------------------------------------------------------------------------
    # loop over batches of 100 objects
    for i in range(training_x.shape[0] // 100 + 1):
        print(i)

        # record time
        start_time = time.time()

        # transform to torch tensors
        tensor_training_x = torch.from_numpy(
            training_x[100 * i:100 * (i + 1), :, :]).type(
                torch.cuda.FloatTensor)

        # perform scattering
        Sx.append(
            scattering(tensor_training_x).mean(dim=(2,
                                                    3)).cpu().detach().numpy())
        print(time.time() - start_time)


    #----------------------------------------------------------------------------------------------------------
    # save results
    for i in range(len(Sx)):
        try:
            Sx_array = np.vstack([Sx_array, Sx[i]])
        except NameError:  # first batch: Sx_array does not exist yet
            Sx_array = Sx[i]
    print(Sx_array.shape)
    np.save("Sx_" + train_name + "ing_expected_" + str(ng) + ".npy", Sx_array)
Code example #23
    def test_gpu_only(self, backend):
        if backend.name == 'torch_skcuda':
            scattering = Scattering2D(3, shape=(32, 32), backend=backend,
                                      frontend='torch')

            x = torch.rand(32, 32)

            with pytest.raises(TypeError) as ve:
                Sg = scattering(x)
            assert 'CUDA' in ve.value.args[0]
Code example #24
    def test_gradients(self, backend_device):
        backend, device = backend_device

        if backend.name == 'torch_skcuda':
            pytest.skip('The gradients are currently not implemented with '
                        'the skcuda backend.')
        else:
            scattering = Scattering2D(2, shape=(8, 8), backend=backend,
                                      frontend='torch').double().to(device)
            x = torch.rand(2, 1, 8, 8).double().to(device).requires_grad_()
            gradcheck(scattering, x, nondet_tol=1e-5)
Code example #25
    def test_scattering2d_errors(self, backend_device):
        backend, device = backend_device

        S = Scattering2D(3, (32, 32), backend=backend, frontend='torch')

        S.to(device)

        with pytest.raises(TypeError) as record:
            S(None)
        assert 'input should be' in record.value.args[0]

        x = torch.randn(4, 4)
        y = x[::2, ::2]

        with pytest.raises(RuntimeError) as record:
            S(y)
        assert 'must be contiguous' in record.value.args[0]

        x = torch.randn(31, 31)

        with pytest.raises(RuntimeError) as record:
            S(x)
        assert 'Tensor must be of spatial size' in record.value.args[0]

        S = Scattering2D(3, (32, 32),
                         pre_pad=True,
                         backend=backend,
                         frontend='torch')

        with pytest.raises(RuntimeError) as record:
            S(x)
        assert 'Padded tensor must be of spatial size' in record.value.args[0]

        x = torch.randn(8, 8)
        S = Scattering2D(2, (8, 8), backend=backend, frontend='torch')

        x = x.to(device)
        S = S.to(device)
        if not (device == 'cpu' and backend.name.endswith('_skcuda')):
            y = S(x)
            assert x.device == y.device
Code example #26
 def setup(self, sc_params, batch_size):
     n_channels = 1
     scattering = Scattering2D(**sc_params)
     scattering.cpu()
     x = torch.randn(
         batch_size,
         n_channels,
         sc_params["shape"][0], sc_params["shape"][1],
         dtype=torch.float32)
     x.cpu()
     self.scattering = scattering
     self.x = x
Code example #27
File: StrucScattering.py  Project: ctralie/acoss
 def __init__(self, wins_per_block, K, hop_length, sr, n_iters, datapath, shortname, pad=2000, binary_kappa=0, win_fac=0, ssm_cachedir='/var/acoss/cache', norm_per_path=True):
     super(StrucScattering, self).__init__(wins_per_block, K, hop_length, sr, n_iters, datapath, shortname, pad=pad, binary_kappa=binary_kappa, win_fac=win_fac, ssm_cachedir=ssm_cachedir)
     self.ftm_feats = {}
     self.norm_per_path = norm_per_path
     self.shingles = {}
     J = 6
     L = 8
     NPaths = L*L*J*(J-1)/2 + J*L + 1
     tic = time.time()
     self.scattering = Scattering2D(shape=(pad, pad), J=J, L=L)#.cuda()
     print("Elapsed Time: %.3g"%(time.time()-tic))
     self.ITemp = torch.zeros((1, 1, pad, pad))
Code example #28
    def __init__(self,
                 order=1,
                 input_shape=(32, 32),
                 J=2,
                 L=8,
                 block=BasicBlock,
                 layers=[2, 2, 2, 2],
                 num_classes=10):
        super(ScattResNet, self).__init__()
        # Scattering
        if order == 1:
            self.K = 1 + J * L
        elif order == 2:
            self.K = 1 + J * L + L**2 * (J - 1) * J // 2
        else:
            raise ValueError("Wrong order param: ", order)
        self.order = order
        self.scattering = Scattering2D(J=J,
                                       L=L,
                                       shape=input_shape,
                                       max_order=order)
        if torch.cuda.is_available():
            print("Move scattering to GPU")
            self.scattering = self.scattering.cuda()
        self.K *= 3  # RGB images
        self.scatt_output_shape = tuple([x // 2**J for x in input_shape])
        self.bn = nn.BatchNorm2d(self.K)

        self.inplanes = self.K
        # self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=1)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(256 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        for m in self.modules():
            if isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)
Code example #29
def main():

    # load data
    f = h5py.File('../Ens_saved_YST.mat', 'r')
    training_x = np.array(f['Vels'])
    print(training_x.shape)

    # define scattering
    scattering = Scattering2D(J=5,
                              shape=(training_x[0, :, :].shape),
                              L=4,
                              max_order=2)
    scattering.cuda()

    # initiate results array
    Sx = []

    #----------------------------------------------------------------------------------------------------------
    # loop over batches of 100 objects
    for i in range(training_x.shape[0] // 100 + 1):
        print(i)

        # record time
        start_time = time.time()

        # transform to torch tensors
        tensor_training_x = torch.from_numpy(
            training_x[100 * i:100 * (i + 1), :, :]).type(
                torch.cuda.FloatTensor)

        # perform scattering
        Sx.append(
            scattering(tensor_training_x).mean(dim=(2,
                                                    3)).cpu().detach().numpy())
        print(time.time() - start_time)


    #----------------------------------------------------------------------------------------------------------
    # save results
    for i in range(len(Sx)):
        try:
            Sx_array = np.vstack([Sx_array, Sx[i]])
        except NameError:  # first batch: Sx_array does not exist yet
            Sx_array = Sx[i]
    print(Sx_array.shape)

    np.save("Sx_2D.npy", Sx_array)
Code example #30
    def __init__(self, input_shape=(28, 28), J=2, L=8):
        super(ScattDense, self).__init__()

        self.scattering = Scattering2D(J=J, shape=input_shape)
        if torch.cuda.is_available():
            print("Move scattering to GPU")
            self.scattering = self.scattering.cuda()
        self.K = 1 + J * L + L**2 * (J - 1) * J // 2
        self.scatt_output_shape = tuple([x // 2**J for x in input_shape])
        self.bn = nn.BatchNorm2d(self.K)

        self.in_features = self.K * self.scatt_output_shape[
            0] * self.scatt_output_shape[1]
        self.fc1 = nn.Linear(in_features=self.in_features,
                             out_features=self.in_features // 2)
        self.fc2 = nn.Linear(in_features=self.in_features // 2,
                             out_features=10)