Example #1
    def __init__(self, device=torch.device('cuda:0')):
        super().__init__(device)

        self.conv1 = Conv(1, 6, kernel_size=5, noise_std=1e-0, act='TanH', device=self.device)
        self.act1 = Activation('TanH')
        self.pool1 = Pool(2, device=self.device)

        self.fc1 = Linear(6*12*12, 100, noise_std=1e-0, act='TanH', device=self.device)
        self.act2 = Activation('TanH')
        self.fc2 = Linear(100, 10, noise_std=1e-0, act='TanH', device=self.device)
        self.softmax = Activation('Softmax')

        self.layers = [self.conv1, self.fc1, self.fc2]
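
The 6*12*12 input size of fc1 follows from the shapes: a 5x5 valid convolution takes a 28x28 input to 24x24, and the 2x2 pool halves that to 12x12 over 6 channels. A comment-only shape check (assuming MNIST-sized inputs, which this entry does not state explicitly):

import torch

x = torch.randn(8, 1, 28, 28)   # batch of 8 single-channel 28x28 images
# conv1 (5x5, no padding): 28 - 5 + 1 = 24   -> (8, 6, 24, 24)
# pool1 (2x2):             24 / 2     = 12   -> (8, 6, 12, 12)
# flatten:                 6 * 12 * 12 = 864 -> matches fc1's input size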
Example #2
    def __init__(self, dataset, config, downsample_matrices, upsample_matrices,
                 adjacency_matrices, num_nodes):
        super(Coma, self).__init__()
        self.n_layers = config['n_layers']
        self.filters = config['num_conv_filters']
        self.filters.insert(
            0, dataset.num_features)  # To get initial features per node
        self.K = config['polygon_order']
        self.z = config['z']
        self.downsample_matrices = downsample_matrices
        self.upsample_matrices = upsample_matrices
        self.adjacency_matrices = adjacency_matrices
        self.A_edge_index, self.A_norm = zip(*[
            ChebConv_Coma.norm(self.adjacency_matrices[i]._indices(),
                               num_nodes[i]) for i in range(len(num_nodes))
        ])
        self.cheb = torch.nn.ModuleList([
            ChebConv_Coma(self.filters[i], self.filters[i + 1], self.K[i])
            for i in range(len(self.filters) - 2)
        ])
        self.cheb_dec = torch.nn.ModuleList([
            ChebConv_Coma(self.filters[-i - 1], self.filters[-i - 2],
                          self.K[i]) for i in range(len(self.filters) - 1)
        ])
        self.cheb_dec[-1].bias = None  # No bias for last convolution layer
        self.pool = Pool()
        self.enc_lin_mu = torch.nn.Linear(
            self.downsample_matrices[-1].shape[0] * self.filters[-1], self.z)
        self.enc_lin_logvar = torch.nn.Linear(
            self.downsample_matrices[-1].shape[0] * self.filters[-1], self.z)
        self.dec_lin = torch.nn.Linear(
            self.z, self.filters[-1] * self.upsample_matrices[-1].shape[1])
        self.reset_parameters()
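
The paired enc_lin_mu and enc_lin_logvar heads are the usual signature of a variational autoencoder over the latent code z. A minimal sketch of the reparameterization step that typically consumes them (an assumption about usage, not code from this entry):

import torch

def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    std = torch.exp(0.5 * logvar)   # log-variance -> standard deviation
    eps = torch.randn_like(std)     # unit Gaussian noise
    return mu + eps * std           # differentiable sample of z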
Example #3
    def __init__(self, nstack=1, inp_dim=256, oup_dim=6, bn=False, increase=0):
        super(DiscConfNet, self).__init__()

        self.nstack = nstack
        self.pre = nn.Sequential(
            Conv(6, 64, 7, 2, bn=True,
                 relu=True),  # modified input channels (the original stem took 3)
            Residual(64, 128),
            Pool(2, 2),
            Residual(128, 128),
            Residual(128, inp_dim))

        self.hgs = nn.ModuleList([
            nn.Sequential(Hourglass(4, inp_dim, bn, increase), )
            for i in range(nstack)
        ])

        self.features = nn.ModuleList([
            nn.Sequential(Residual(inp_dim, inp_dim),
                          Conv(inp_dim, inp_dim, 1, bn=True, relu=True))
            for i in range(nstack)
        ])

        self.outs = nn.ModuleList([
            Conv(inp_dim, oup_dim, 1, relu=False, bn=False)
            for i in range(nstack)
        ])
        self.merge_features = nn.ModuleList(
            [Merge(inp_dim, inp_dim) for i in range(nstack - 1)])
        self.merge_preds = nn.ModuleList(
            [Merge(oup_dim, inp_dim) for i in range(nstack - 1)])
        self.nstack = nstack
        self.fc1 = nn.Linear(64 * 64 * 6, 6)
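
fc1 expects a flattened 6-channel 64x64 map, which matches the 4x downsampling of the stem (stride-2 conv plus 2x2 pool) on a 256x256 input. A hypothetical instantiation sketch (Conv, Residual, Pool, Hourglass and Merge are this repo's modules; the input resolution is my assumption):

net = DiscConfNet(nstack=1, inp_dim=256, oup_dim=6)
# a 256x256 input reaches the hourglass at 64x64, so the oup_dim=6 prediction
# map flattens to 64 * 64 * 6 features -- exactly fc1's input size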
Example #4
    def __init__(self, dataset, config, downsample_matrices, upsample_matrices, adjacency_matrices, num_nodes, V_ref):
        super(ComaAtt, self).__init__()
        self.n_layers = config['n_layers']
        self.filters_enc = config['filter_enc']
        self.filters_dec = config['filter_dec']
        self.K = config['polygon_order']
        self.z = config['z']
        self.downsample_att = config['downsample_att']
        self.upsample_att = config['upsample_att']
        self.downsample_matrices = downsample_matrices
        self.upsample_matrices = upsample_matrices
        self.adjacency_matrices = adjacency_matrices
        self.A_edge_index, self.A_norm = zip(*[ChebConv_Coma.norm(self.adjacency_matrices[i]._indices(),
                                                                  num_nodes[i]) for i in range(len(num_nodes))])
        
        self.cheb_enc = torch.nn.ModuleList([ChebConv_Coma(self.filters_enc[i], self.filters_enc[i+1], self.K[i])
                                             for i in range(len(self.filters_enc)-1)])
        self.cheb_dec = torch.nn.ModuleList([ChebConv_Coma(self.filters_dec[i], self.filters_dec[i+1], self.K[i])
                                             for i in range(len(self.filters_dec)-1)])
        self.cheb_dec[-1].bias = None  # No bias for last convolution layer
        self.pool = Pool()
        self.enc_lin = torch.nn.Linear(self.downsample_matrices[-1].shape[0]*self.filters_enc[-1], self.z)
        self.dec_lin = torch.nn.Linear(self.z, self.filters_dec[0]*self.upsample_matrices[-1].shape[1])

        if self.upsample_att is True or self.downsample_att is True:
            self.attpoolenc, self.attpooldec = self.init_attpool(config, V_ref)
            self.attpoolenc = torch.nn.ModuleList(self.attpoolenc)
            self.attpooldec = torch.nn.ModuleList(self.attpooldec)
        self.reset_parameters()
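
Reading the encoder half together: each Chebyshev convolution is followed by a pooling step driven by the precomputed downsample matrices, and the flattened result feeds enc_lin. A sketch of that pass under assumed signatures (ChebConv_Coma taking x, edge_index, norm, and Pool taking a transform matrix; neither is shown in this entry):

    def encode(self, x):
        for i in range(self.n_layers):
            x = self.cheb_enc[i](x, self.A_edge_index[i], self.A_norm[i])
            x = self.pool(x, self.downsample_matrices[i])
        x = x.reshape(x.shape[0], -1)   # flatten node features per sample
        return self.enc_lin(x)          # latent code of dimension self.z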
Example #5
class ShallowConvNet(Net):

    def __init__(self, device=torch.device('cuda:0')):
        super().__init__(device)

        self.conv1 = Conv(1, 6, kernel_size=5, noise_std=1e-0, act='TanH', device=self.device)
        self.act1 = Activation('TanH')
        self.pool1 = Pool(2, device=self.device)

        self.fc1 = Linear(6*12*12, 100, noise_std=1e-0, act='TanH', device=self.device)
        self.act2 = Activation('TanH')
        self.fc2 = Linear(100, 10, noise_std=1e-0, act='TanH', device=self.device)
        self.softmax = Activation('Softmax')

        self.layers = [self.conv1, self.fc1, self.fc2]

    def forward(self, input):
        conv_out_1 = self.conv1.forward(input)
        act_out_1 = self.act1.forward(conv_out_1)
        pool_out_1 = self.pool1.forward(act_out_1)

        pool_out_1 = pool_out_1.reshape(len(pool_out_1), -1)

        fc_out_1 = self.fc1.forward(pool_out_1)
        act_out_2 = self.act2.forward(fc_out_1)
        fc_out_2 = self.fc2.forward(act_out_2)
        output = self.softmax.forward(fc_out_2)

        return output
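
A hypothetical smoke test for the class above (the CPU device and input shape are assumptions; the custom layers accept a device argument, so this should also run without a GPU):

net = ShallowConvNet(device=torch.device('cpu'))
probs = net.forward(torch.randn(4, 1, 28, 28))
pred = probs.argmax(dim=1)      # predicted class per sample
assert probs.shape == (4, 10)   # softmax over 10 classes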
Example #6
    def __init__(self, nstack, inp_dim, oup_dim, bn=False, increase=0):
        super(PoseNet, self).__init__()

        self.nstack = nstack
        self.pre = nn.Sequential(
            Conv(3, 64, 7, 2, bn=True, relu=True),
            Residual(64, 128),
            Pool(2, 2),
            Residual(128, 128),
            Residual(128, inp_dim)
        )

        self.hgs = nn.ModuleList([
            nn.Sequential(
                Hourglass(4, inp_dim, bn, increase),
            ) for i in range(nstack)])

        self.features = nn.ModuleList([
            nn.Sequential(
                Residual(inp_dim, inp_dim),
                Conv(inp_dim, inp_dim, 1, bn=True, relu=True)
            ) for i in range(nstack)])

        self.outs = nn.ModuleList([Conv(inp_dim, oup_dim, 1, relu=False, bn=False) for i in range(nstack)])
        self.merge_features = nn.ModuleList([Merge(inp_dim, inp_dim) for i in range(nstack - 1)])
        self.merge_preds = nn.ModuleList([Merge(oup_dim, inp_dim) for i in range(nstack - 1)])
        self.nstack = nstack
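
These modules line up with the standard stacked-hourglass forward pass, in which each stack's prediction and features are merged back into the running input. A sketch of that loop (the forward commonly paired with this constructor; assumed here, not part of the entry):

    def forward(self, x):
        x = self.pre(x)                     # stem: downsample to inp_dim channels
        preds = []
        for i in range(self.nstack):
            hg = self.hgs[i](x)             # one hourglass module
            feature = self.features[i](hg)
            preds.append(self.outs[i](feature))
            if i < self.nstack - 1:         # feed residuals into the next stack
                x = x + self.merge_preds[i](preds[-1]) + self.merge_features[i](feature)
        return torch.stack(preds, 1)        # (batch, nstack, oup_dim, H, W)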
Example #7
    def __init__(self, device=torch.device('cuda:0')):
        super().__init__(device)

        self.conv1 = Conv(1, 6, kernel_size=5, noise_std=1e-0, act='ReLU', device=self.device)
        self.act1 = Activation('ReLU')
        self.pool1 = Pool(2, device=self.device)

        self.conv2 = Conv(6, 16, kernel_size=5, noise_std=1e-0, act='ReLU', device=self.device)
        self.act2 = Activation('ReLU')
        self.pool2 = Pool(2, device=self.device)

        self.fc1 = Linear(256, 120, noise_std=1e-0, act='ReLU', device=self.device)
        self.act3 = Activation('ReLU')
        self.fc2 = Linear(120, 84, noise_std=1e-0, act='ReLU', device=self.device)
        self.act4 = Activation('ReLU')
        self.fc3 = Linear(84, 10, noise_std=1e-0, act='ReLU', device=self.device)
        self.softmax = Activation('Softmax')

        self.layers = [self.conv1, self.conv2, self.fc1, self.fc2, self.fc3]
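
The jump to Linear(256, 120) is again shape arithmetic, assuming 28x28 inputs (not stated in this entry). A comment-only check:

# (N, 1, 28, 28) -conv1-> (N, 6, 24, 24) -pool1-> (N, 6, 12, 12)
#                -conv2-> (N, 16, 8, 8)  -pool2-> (N, 16, 4, 4)
# flatten: 16 * 4 * 4 = 256 -> fc1 -> 120 -> fc2 -> 84 -> fc3 -> 10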
Example #8
class LeNet5(Net):

    def __init__(self, device=torch.device('cuda:0')):
        super().__init__(device)

        self.conv1 = Conv(1, 6, kernel_size=5, noise_std=1e-0, act='ReLU', device=self.device)
        self.act1 = Activation('ReLU')
        self.pool1 = Pool(2, device=self.device)

        self.conv2 = Conv(6, 16, kernel_size=5, noise_std=1e-0, act='ReLU', device=self.device)
        self.act2 = Activation('ReLU')
        self.pool2 = Pool(2, device=self.device)

        self.fc1 = Linear(256, 120, noise_std=1e-0, act='ReLU', device=self.device)
        self.act3 = Activation('ReLU')
        self.fc2 = Linear(120, 84, noise_std=1e-0, act='ReLU', device=self.device)
        self.act4 = Activation('ReLU')
        self.fc3 = Linear(84, 10, noise_std=1e-0, act='ReLU', device=self.device)
        self.softmax = Activation('Softmax')

        self.layers = [self.conv1, self.conv2, self.fc1, self.fc2, self.fc3]

    def forward(self, input):
        conv_out_1 = self.conv1.forward(input)
        act_out_1 = self.act1.forward(conv_out_1)
        pool_out_1 = self.pool1.forward(act_out_1)

        conv_out_2 = self.conv2.forward(pool_out_1)
        act_out_2 = self.act2.forward(conv_out_2)
        pool_out_2 = self.pool2.forward(act_out_2)

        pool_out_2 = pool_out_2.reshape(len(pool_out_2), -1)

        fc_out_1 = self.fc1.forward(pool_out_2)
        act_out_3 = self.act3.forward(fc_out_1)
        fc_out_2 = self.fc2.forward(act_out_3)
        act_out_4 = self.act4.forward(fc_out_2)
        fc_out_3 = self.fc3.forward(act_out_4)
        output = self.softmax.forward(fc_out_3)

        return output
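
A hypothetical end-to-end check of the full class (input shape assumed as in the comment after Example #7; Net's training API is not shown in this entry):

net = LeNet5(device=torch.device('cpu'))
out = net.forward(torch.randn(2, 1, 28, 28))
assert out.shape == (2, 10)   # one softmax distribution per sample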
Example #9
NN = FF([LSTMFullCon(Tanh, Sigmoid, Softmax, (4, 50, 4))], RNNCrossEntropy)
NN.SGD(train, eta=1e-3)
#%% RNN: WORKS
NN = FF([RecurrentFullCon(Tanh, Softmax, (4, 50, 4))], RNNCrossEntropy)
NN.SGD(train, eta=1e-4)
#%% Networks: WORKS
NNS = [
    FF([FullCon(Sigmoid, (28 * 28, 30)),
        FullCon(Sigmoid, (30, 10))], CrossEntropy) for i in range(3)
]
NN = FF([Networks(Sigmoid, NNS)], CrossEntropy)
NN.SGD(part_train)
#%% Conv+Pool+Dropout: WORKS
NN = FF([
    Conv(Identity, (25, 3)),
    Pool('mean', (3, -1), (2, 2)),
    Dropout('binomial', 0.9),
    FullCon(Sigmoid, (3 * 13 * 13, 10))
], CrossEntropy)
NN.SGD(part_train)
#%% FullCon Swish: WORKS
b = 10.
NN = FF([FullCon(Swish(b), (28 * 28, 30)),
         FullCon(Swish(b), (30, 10))], CrossEntropy)
NN.SGD(part_train)
#%% FullCon: WORKS
NN = FF([FullCon(Sigmoid, (28 * 28, 30)),
         FullCon(Sigmoid, (30, 10))], CrossEntropy)
NN.SGD(part_train)
#%% TO DO
# Expand dataset
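
Read as a scratchpad, every cell above follows the same assumed API: FF takes a list of layer objects plus a cost class, and SGD(train, eta=...) runs stochastic gradient descent with learning rate eta (defaulting when omitted). A hypothetical extra cell in the same style, reusing only identifiers that already appear above:

#%% FullCon Tanh (hypothetical variant of the Sigmoid cell above)
NN = FF([FullCon(Tanh, (28 * 28, 30)),
         FullCon(Tanh, (30, 10))], CrossEntropy)
NN.SGD(part_train)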