Example #1
    def forward(self, x):
        """Forward pass.

        Parameters
        ----------
        x: torch.Tensor
            Batch of EEG windows of shape (batch_size, n_channels, n_times).
        """
        deep_out = self.reduced_deep_model(x)
        shallow_out = self.reduced_shallow_model(x)

        n_diff_deep_shallow = deep_out.size()[2] - shallow_out.size()[2]

        if n_diff_deep_shallow < 0:
            deep_out = ConstantPad2d((0, 0, -n_diff_deep_shallow, 0),
                                     0)(deep_out)
        elif n_diff_deep_shallow > 0:
            shallow_out = ConstantPad2d((0, 0, n_diff_deep_shallow, 0),
                                        0)(shallow_out)

        merged_out = torch.cat((deep_out, shallow_out), dim=1)
        linear_out = self.final_conv(merged_out)
        softmaxed = nn.LogSoftmax(dim=1)(linear_out)
        squeezed = softmaxed.squeeze(3)
        return squeezed
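Since this snippet hinges on ConstantPad2d's argument order, here is a minimal, self-contained sketch (with made-up tensor shapes, not the actual model outputs) of how the 4-tuple (left, right, top, bottom) pads the last two dimensions so that the time axes of the two branches line up before concatenation:

import torch
from torch.nn import ConstantPad2d

deep_out = torch.randn(2, 4, 10, 1)       # hypothetical (batch, channels, time, 1)
shallow_out = torch.randn(2, 4, 13, 1)
n_diff = deep_out.size(2) - shallow_out.size(2)   # -3: the deep branch is shorter
# (left, right, top, bottom): pad 3 zero rows at the top of the time axis (dim 2)
deep_out = ConstantPad2d((0, 0, -n_diff, 0), 0)(deep_out)
assert deep_out.size(2) == shallow_out.size(2)
merged = torch.cat((deep_out, shallow_out), dim=1)   # shape (2, 8, 13, 1)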
Example #2
    def collate_fn(self, batch):
        lengths_sequences = []
        # collect the sequence length (number of nodes) of every sample
        for A, _, _, _ in batch:
            lengths_sequences.append(A.shape[0])

        # the maximum length is needed to pad every sample in the batch to a single size
        seq_max_len = np.max(lengths_sequences)

        # new batch variables
        adjacency_batch = []
        x_batch = []
        embeddings_batch = []
        labels_batch = []
        for A, x, e, l in batch:
            # pad the adjacency matrix, feature vectors and embeddings up to seq_max_len
            adjacency_pad = ConstantPad2d(
                (0, seq_max_len - A.shape[0], 0, seq_max_len - A.shape[0]), 0)
            adjacency_batch.append(adjacency_pad(A).tolist())
            vec_pad = ConstantPad2d((0, 0, 0, seq_max_len - A.shape[0]), 0)
            x_batch.append(vec_pad(x).tolist())
            embeddings_batch.append(
                vec_pad(e).tolist() if self._is_external_data
                and self._external_data.is_embed else e)
            labels_batch.append(l)

        return Tensor(adjacency_batch), Tensor(x_batch), Tensor(
            embeddings_batch).long(), Tensor(labels_batch).long()
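To illustrate the square padding used above, a minimal sketch with a hypothetical 3-node adjacency matrix padded up to an assumed largest graph size in the batch:

import torch
from torch.nn import ConstantPad2d

A = torch.ones(3, 3)          # hypothetical 3-node adjacency matrix
seq_max_len = 5               # assumed largest graph in the batch
pad = ConstantPad2d((0, seq_max_len - A.shape[0], 0, seq_max_len - A.shape[0]), 0)
print(pad(A).shape)           # torch.Size([5, 5]): zeros appended on the right and bottom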
Example #3
def forward_diff(grid):
    nd = len(grid.data.shape)
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd

    # Forward difference along x direction
    axis = 3
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    # gridx = torch.nn.ConstantPad2d((0,1,0,0),0)(grid).clone()
    diffx = grid[slice1] - grid[slice2]
    diffx = ConstantPad2d((0, 1, 0, 0), 0)(diffx)
    # Forward difference along y direction
    axis = 2
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)

    diffy = grid[slice1] - grid[slice2]
    diffy = ConstantPad2d((0, 0, 1, 0), 0)(diffy)

    return torch.cat((diffx, diffy), 1)
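A short usage sketch (the input grid is hypothetical, and it assumes forward_diff and its imports above are in scope): the trailing zero column/row restores the size lost by each forward difference, so both components can be stacked on the channel dimension:

import torch

grid = torch.arange(16.).reshape(1, 1, 4, 4)   # hypothetical (batch, channel, y, x) grid
out = forward_diff(grid)
print(out.shape)   # torch.Size([1, 2, 4, 4]): x- and y-differences concatenated along dim 1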
Example #4
 def __init__(self, input_nc, output_nc, norm_layer=None, use_dropout=False):
     """Construct the generator of this network.

     Parameters:
         input_nc (int) -- the number of input channels of the network.
         output_nc (int) -- the number of output channels of the network.
         norm_layer (Norm2d) -- the normalization layer.
         use_dropout (bool) -- whether to use a dropout layer.
     """
     super(P2netGenerator, self).__init__()
     self.rdp1oneconv=nn.Conv2d(64,32,1)
     self.rdp1conv=nn.Conv2d(32,96,3)
     self.rdp2oneconv=nn.Conv2d(128,96,1)
     self.rdp2conv=nn.Conv2d(96,160,3)
     self.ddp1oneconv=nn.Conv2d(224,160,1)
     self.ddp1conv=nn.Conv2d(160,96,3)
     self.ddp2oneconv=nn.Conv2d(96,32,1)
     self.ddp2conv=nn.Conv2d(32,output_nc,3)
     
     lkReLU=ReLU(True)
     # lkReLU=torch.nn.functional.ge
     sigmoid=Sigmoid()
     rpad=ConstantPad2d(1,0)
     
     self.prel=nn.Sequential(rpad,nn.Conv2d(input_nc,16,3),lkReLU,nn.Conv2d(16,8,1),lkReLU,rpad,nn.Conv2d(8,32,3),lkReLU)
     self.prer=nn.Sequential(rpad,nn.Conv2d(input_nc,16,3),lkReLU,nn.Conv2d(16,8,1),lkReLU,rpad,nn.Conv2d(8,32,3),lkReLU)
     
     self.rdp1=nn.Sequential(self.rdp1oneconv,lkReLU,rpad,self.rdp1conv,lkReLU)
     self.rdp2=nn.Sequential(self.rdp2oneconv,lkReLU,rpad,self.rdp2conv,lkReLU)
     
     self.dd=nn.Sequential(self.ddp1oneconv,lkReLU,rpad,self.ddp1conv,lkReLU,self.ddp2oneconv,lkReLU,rpad,self.ddp2conv,Tanh())
Example #5
 def __unpool_main(self, mesh_index, unroll_target):
     mesh = self.__meshes[mesh_index]
     queue = self.__build_queue(self.__fe[mesh_index, :, :mesh.faces_count],
                                mesh.faces_count, mesh)
     face_groups = MeshUnion(mesh.faces_count, self.__fe.device)
     vs_groups = MeshUnion(self.v_count[mesh_index], self.__fe.device)
     not_split = np.ones(mesh.faces_count, dtype=bool)
     while mesh.faces_count + 6 < unroll_target:
         # print("face count " + str(mesh.faces_count))
         value, face_id = heappop(queue)
         face_id = int(face_id)
         if self.check_valid(mesh, face_id) and not_split[face_id]:
             not_split = self.__unpool_face(mesh, mesh_index, face_id,
                                            face_groups, vs_groups,
                                            not_split)
     mesh.pool_count -= 1
     mask = np.ones(mesh.faces_count, dtype=bool)
     # mesh.export(name='unpool')
     fe = face_groups.rebuild_features(self.__fe[mesh_index], mask,
                                       self.unroll_target)
     padding_b = self.vs_target - vs_groups.groups.shape[1]
     if padding_b > 0:
         padding_b = ConstantPad2d((0, padding_b), 0)
         vs_groups.groups = padding_b(vs_groups.groups)
     vs = vs_groups.rebuild_vs_average(self.__vs[mesh_index],
                                       self.vs_target)
     self.__updated_fe[mesh_index] = fe
     self.__updated_vs[mesh_index] = vs
Example #6
 def prepare_groups(self, features, mask):
     tensor_mask = torch.from_numpy(mask)
     self.groups = torch.clamp(self.groups[tensor_mask, :], 0, 1).transpose_(1, 0)
     padding_a = features.shape[1] - self.groups.shape[0]
     if padding_a > 0:
         padding_a = ConstantPad2d((0, 0, 0, padding_a), 0)
         self.groups = padding_a(self.groups)
Example #7
def transform_patch(adv_patch, angle, scale):
    # convert the angle to radians
    print(scale)
    angle = math.pi / 180 * angle
    # create a mask that can be transformed along with the patch (so the padding can be removed afterwards)
    mask = torch.ones(adv_patch.size())
    # pad adv_patch so that the rotated patch does not fall outside the image boundaries
    p_height = adv_patch.size(2)
    p_width = adv_patch.size(3)
    padding = (math.sqrt((p_height / 2)**2 +
                         (p_width / 2)**2) - max(p_height, p_width) / 2) * abs(
                             np.sin(2 * angle)) + (scale - 1) / 4 * p_height
    print('padding', padding)
    mypad = ConstantPad2d(math.ceil(padding * scale), 0)
    padded_patch = mypad(adv_patch)
    padded_mask = mypad(mask)
    # construct an affine_grid that will carry out the transformation
    theta = torch.zeros(1, 2, 3)
    theta[:, :, :2] = torch.FloatTensor([[np.cos(angle),
                                          np.sin(angle)],
                                         [-np.sin(angle),
                                          np.cos(angle)]])
    theta[:, :, 2] = 0
    theta = theta / scale
    grid = F.affine_grid(theta, padded_patch.size())
    # apply the rotation with grid_sample
    rot_patch = F.grid_sample(padded_patch, grid, padding_mode='zeros')
    rot_mask = F.grid_sample(padded_mask, grid, padding_mode='zeros')
    print(rot_patch.shape)
    # set the padded region to the value 2
    rot_patch[rot_mask == 0] = 2
    return rot_patch
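A hypothetical call, just to show the expected layout; the patch shape, angle and scale are made up, and the imports used by transform_patch (torch, numpy, math, torch.nn.functional as F, ConstantPad2d) are assumed to be in scope. The patch is 4-D (batch, channels, height, width), the angle is given in degrees, and the regions introduced by the padding come back with the sentinel value 2:

patch = torch.rand(1, 3, 50, 50)   # hypothetical adversarial patch
rotated = transform_patch(patch, angle=20, scale=1.0)
print(rotated.shape)               # larger than 50x50 because of the padding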
Example #8
    def forward(self, x):
        deep_out = self.reduced_deep_model(x)
        shallow_out = self.reduced_shallow_model(x)

        n_diff_deep_shallow = deep_out.size()[2] - shallow_out.size()[2]

        if n_diff_deep_shallow < 0:
            deep_out = ConstantPad2d((0, 0, -n_diff_deep_shallow, 0),
                                     0)(deep_out)
        elif n_diff_deep_shallow > 0:
            shallow_out = ConstantPad2d((0, 0, n_diff_deep_shallow, 0),
                                        0)(shallow_out)

        merged_out = th.cat((deep_out, shallow_out), dim=1)
        linear_out = self.final_conv(merged_out)
        softmaxed = nn.LogSoftmax(dim=1)(linear_out)
        squeezed = softmaxed.squeeze(3)
        return squeezed
Example #9
 def rebuild_vs_average(self, vs, target_vs):
     v = torch.matmul(self.groups, vs)
     occurrences = torch.sum(self.groups, 1)
     v = v / occurrences.unsqueeze(1)
     padding_b = target_vs - v.shape[0]
     if padding_b > 0:
         padding_b = ConstantPad2d((0, 0, 0, padding_b), 0)
         v = padding_b(v)
     return v
Example #10
 def rebuild_features_average(self, features, mask, target_edges):
     self.prepare_groups(features, mask)
     fe = torch.matmul(features.squeeze(-1), self.groups)
     occurrences = torch.sum(self.groups, 0).expand(fe.shape)
     fe = fe / occurrences
     padding_b = target_edges - fe.shape[1]
     if padding_b > 0:
         padding_b = ConstantPad2d((0, padding_b, 0, 0), 0)
         fe = padding_b(fe)
     return fe
Example #11
    def __init__(self,
                 in_channels,
                 out_channels,
                 edge_network,
                 *,
                 L=4,
                 make_bidirectional=False,
                 neighbor_nl=False):

        # add, because we do our own normalization
        super().__init__(aggr='add')

        self.L = L
        self.neighbor_nl = neighbor_nl
        self.edge_network = edge_network
        self.make_bidirectional = make_bidirectional

        self.in_channels = in_channels

        self.self_loop_weight = Parameter(torch.ones(L))
        self.lin = Linear(L * in_channels, out_channels)

        # two linear layers instead of one in case of extra per-message
        #   nonlinearity
        if self.neighbor_nl:
            self.lin = Linear(L * in_channels, 2 * out_channels)
            self.lin2 = Linear(2 * out_channels, out_channels)

        # expansion of in-channels in case of bidirectionality
        #   introduced inside the architecture itself
        if self.make_bidirectional:
            self.lin = Linear(2 * L * in_channels, out_channels)
            if self.neighbor_nl:
                # only need to redefine first layer,
                #   second remains the same
                self.lin = Linear(2 * L * in_channels, 2 * out_channels)

            # padding functions for the latent representations
            #   zeros appended to original, zeros prepended to
            #   carbon copy in other direction.
            self.padding_func1 = ConstantPad2d((0, L, 0, 0), 0)
            self.padding_func2 = ConstantPad2d((L, 0, 0, 0), 0)
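To make the comment above concrete, a minimal sketch (the value of L and the latent width are made up) of what the two padding functions do to a latent representation:

import torch
from torch.nn import ConstantPad2d

L = 4
h = torch.ones(2, 3)                              # hypothetical latents of width 3
print(ConstantPad2d((0, L, 0, 0), 0)(h).shape)    # torch.Size([2, 7]): zeros appended
print(ConstantPad2d((L, 0, 0, 0), 0)(h).shape)    # torch.Size([2, 7]): zeros prepended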
Example #12
    def __init__(self):
        super(Net, self).__init__()

        self.cnnLayers = Sequential(
            # pad with one layer of the constant 1, and use a 2x2 convolution kernel
            ConstantPad2d(1, 1),
            Conv2d(1, 1, kernel_size=2, stride=2, bias=True)
        )
        self.linearLayers = Sequential(
            Linear(9, 2)
        )
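A hypothetical forward pass, assuming torch and the torch.nn imports used above are available; the 5x5 input size is an assumption, chosen because it makes the Linear(9, 2) layer line up: the one-pixel constant border turns a 5x5 image into 7x7, and the 2x2 stride-2 convolution then yields 3x3 = 9 features:

net = Net()
x = torch.ones(1, 1, 5, 5)                        # hypothetical single-channel input
features = net.cnnLayers(x)                       # shape (1, 1, 3, 3)
logits = net.linearLayers(features.view(1, -1))   # shape (1, 2)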
Example #13
 def __init__(self, input_nc, output_nc, norm_layer=None, use_dropout=False):
     """Initialize this network.

     Parameters:
         input_nc (int) -- the number of input channels of the network.
         output_nc (int) -- the number of output channels of the network.
         norm_layer (object) -- the normalization layer.
         use_dropout (bool) -- whether to use a dropout layer.
     """
     super(P2netGeneratorV3,self).__init__()
     self.input_nc=input_nc
     self.output_nc=output_nc
     self.activate=ReLU(True)
     self.pad=ConstantPad2d(1,0)
     self.fea_in=nn.Sequential(ConstantPad2d(4,0),nn.Conv2d(self.input_nc,64,9),self.activate)
     self.rb1=nn.Sequential(self.pad,nn.Conv2d(64,64,3),self.activate,self.pad,nn.Conv2d(64,64,3),self.activate)
     self.rb2=nn.Sequential(self.pad,nn.Conv2d(64,64,3),self.activate,self.pad,nn.Conv2d(64,64,3),self.activate)
     self.cat=nn.Sequential(self.pad,nn.Conv2d(128,64,3),self.activate,self.pad,nn.Conv2d(64,64,3),self.activate)
     self.rb3=nn.Sequential(self.pad,nn.Conv2d(64,64,3),self.activate,self.pad,nn.Conv2d(64,64,3),self.activate)
     self.rb4=nn.Sequential(self.pad,nn.Conv2d(64,64,3),self.activate,self.pad,nn.Conv2d(64,64,3),self.activate)
     self.after=nn.Sequential(self.pad,nn.Conv2d(64,64,3),self.activate,self.pad,nn.Conv2d(64,64,3),self.activate,self.pad,nn.Conv2d(64,64,3),self.activate,self.pad,nn.Conv2d(64,1,3),Tanh())
Example #14
 def __init__(self,input_nc,output_nc,norm_layer=None,use_dropout=False):
     super(P2netGeneratorV2, self).__init__()
           
     lkReLU=Tanh()
     rpad=ConstantPad2d(1,0)
     
     self.prel=nn.Sequential(rpad,nn.Conv2d(input_nc,16,3),lkReLU,nn.Conv2d(16,8,1),lkReLU,rpad,nn.Conv2d(8,32,3),lkReLU,nn.Conv2d(32,16,1),lkReLU,rpad,nn.Conv2d(16,64,3),lkReLU)
     self.prer=nn.Sequential(rpad,nn.Conv2d(input_nc,16,3),lkReLU,nn.Conv2d(16,8,1),lkReLU,rpad,nn.Conv2d(8,32,3),lkReLU,nn.Conv2d(32,16,1),lkReLU,rpad,nn.Conv2d(16,64,3),lkReLU)
     
     self.rdp1=nn.Sequential(nn.Conv2d(128,64,1),lkReLU,nn.Conv2d(64,64,1),lkReLU,rpad,nn.Conv2d(64,192,3),lkReLU)
     self.rdp2=nn.Sequential(nn.Conv2d(224,128,1),lkReLU,rpad,nn.Conv2d(128,256,3),lkReLU)
     self.rdp3=nn.Sequential(nn.Conv2d(320,224,1),lkReLU,rpad,nn.Conv2d(224,384,3),lkReLU)
     
     self.dd=nn.Sequential(nn.Conv2d(512,128,1),lkReLU,nn.Conv2d(128,128,1),lkReLU,rpad,nn.Conv2d(128,64,3),lkReLU,rpad,nn.Conv2d(64,32,3),lkReLU,rpad,nn.Conv2d(32,1,3),lkReLU)
Example #15
    def rebuild_features_average(self, features, mask, target_edges):
        self.prepare_groups(features, mask)

        self.groups = self.groups.to(self.device)
        fe = torch.matmul(self.groups.transpose(0, 1),
                          features.squeeze(-1).transpose(1,
                                                         0)).transpose(0, 1)
        occurrences = torch.sparse.sum(self.groups, 0).to_dense()
        fe = fe / occurrences
        padding_b = target_edges - fe.shape[1]
        if padding_b > 0:
            padding_b = ConstantPad2d((0, padding_b, 0, 0), 0)
            fe = padding_b(fe)
        return fe
Example #16
def create_MNIST_model():
    feature_model = nn.Sequential(
        ConstantPad2d((2, 2, 2, 2), 0),
        SubsampleSplitter(stride=2, checkerboard=True),
        rev_block(4, 25),
        rev_block(4, 25),
        SubsampleSplitter(stride=2, checkerboard=True),
        rev_block(16, 50),
        rev_block(16, 50),
        SubsampleSplitter(stride=2, checkerboard=True),
        rev_block(64, 100),
        rev_block(64, 100),
        SubsampleSplitter(stride=2, checkerboard=True),
        rev_block(256, 200),
        rev_block(256, 200),
        ViewAs((-1, 256, 2, 2), (-1, 256 * 2 * 2)), )
    return feature_model
Example #17
def pad2D(x, max_seq_count):
    '''
    Pad or truncate the first dimension (sequence count) of a document to max_seq_count.
    '''

    # current sentence count
    seq_count = x.shape[0]

    # append zeros, if sequence count too low
    if seq_count < max_seq_count:
        padding_back = max_seq_count - seq_count
        pad = ConstantPad2d((0, 0, 0, padding_back), 0)
        x = pad(x)

    # truncate document
    elif seq_count > max_seq_count:
        x = x[:max_seq_count]

    return x
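Hypothetical usage (the sentence-vector width is made up, and ConstantPad2d is assumed to be imported as above): a document of 3 sentence vectors is padded up to 5, and one of 8 is truncated down to 5:

import torch

doc_short = torch.randn(3, 128)
doc_long = torch.randn(8, 128)
print(pad2D(doc_short, max_seq_count=5).shape)   # torch.Size([5, 128])
print(pad2D(doc_long, max_seq_count=5).shape)    # torch.Size([5, 128])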
Example #18
    def rebuild_features_average(self, features, mask, target_edges):
        self.prepare_groups(features, mask)

        if not self.sparse_groups.is_coalesced():
            self.sparse_groups = self.sparse_groups.coalesce()
        idxs, values = transpose(self.sparse_groups._indices(),
                                 self.sparse_groups._values(),
                                 self.sparse_groups.shape[0],
                                 self.sparse_groups.shape[1])
        fe = spmm(idxs, values, self.sparse_groups.shape[1],
                  self.sparse_groups.shape[0],
                  features.squeeze(-1).T).T

        # fe = torch.matmul(features.squeeze(-1), self.groups)
        #occurrences = torch.sum(self.groups, 0).expand(fe.shape)
        occurrences = torch.sparse.sum(self.sparse_groups,
                                       0).to_dense().expand(fe.shape)
        fe = fe / occurrences
        padding_b = target_edges - fe.shape[1]
        if padding_b > 0:
            padding_b = ConstantPad2d((0, padding_b, 0, 0), 0)
            fe = padding_b(fe)
        return fe
Example #19
    def test_2d(self):
        pad = ConstantPad2d((5, 0, 0, 0), 0)
        x = Variable(torch.ones((2, 3, 4, 5)))

        res = pad(x)
        print(res.size())
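        # If ConstantPad2d behaves as documented, the (5, 0, 0, 0) tuple adds five
        # zeros on the left of the last dimension only, so the expected result is
        # torch.Size([2, 3, 4, 10]).
        assert res.size() == (2, 3, 4, 10)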