Example #1
    def _build_nn(self):

        layers = []

        if self.num_hidden_layers == 0:
            layers.append(torch.nn.Linear(self.num_inputs, self.num_outputs))

        else:
            layers.append(
                torch.nn.Linear(self.num_inputs,
                                self.neurons_per_hidden_layer))
            # Hidden layers use ReLU activations
            layers.append(torch.nn.ReLU())

            for i in range(self.num_hidden_layers - 1):
                layers.append(
                    torch.nn.Linear(self.neurons_per_hidden_layer,
                                    self.neurons_per_hidden_layer))
                layers.append(torch.nn.ReLU())

            layers.append(
                torch.nn.Linear(self.neurons_per_hidden_layer,
                                self.num_outputs))

        # Final layer goes through a softmax
        layers.append(torch.nn.Softmax(dim=0))

        self.nn = torch.nn.Sequential(*layers).double()
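
A design note: torch.nn.Softmax(dim=0) normalizes along dimension 0, so this network yields a probability vector only for single, unbatched 1-D inputs; on a batched 2-D input it would normalize across the batch instead. A minimal sketch of that behavior (the enclosing class is not shown, so the layer sizes below are illustrative assumptions):

    import torch

    # Illustrative sizes standing in for the class's num_inputs,
    # neurons_per_hidden_layer and num_outputs attributes.
    net = torch.nn.Sequential(
        torch.nn.Linear(4, 8),
        torch.nn.ReLU(),
        torch.nn.Linear(8, 2),
        torch.nn.Softmax(dim=0),
    ).double()

    x = torch.rand(4, dtype=torch.double)  # one unbatched observation
    probs = net(x)
    print(probs.sum())  # sums to 1: a valid probability vector
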
Example #2
 def __init__(self, num_classes=10):
     super(AlexNet, self).__init__()
     self.features = torch.nn.Sequential(torch.nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2),
                                         torch.nn.ReLU(inplace=True),
                                         torch.nn.MaxPool2d(kernel_size=3, stride=1),
                                         torch.nn.Conv2d(64, 192, kernel_size=3, padding=2),
                                         torch.nn.ReLU(inplace=True),
                                         torch.nn.MaxPool2d(kernel_size=3, stride=2),
                                         torch.nn.Conv2d(192, 384, kernel_size=3, padding=1),
                                         torch.nn.ReLU(inplace=True),
                                         torch.nn.Conv2d(384, 256, kernel_size=3, padding=1),
                                         torch.nn.ReLU(inplace=True),
                                         torch.nn.Conv2d(256, 256, kernel_size=3, padding=1),
                                         torch.nn.ReLU(inplace=True),
                                         torch.nn.MaxPool2d(kernel_size=3, stride=2))
     self.classifier = torch.nn.Sequential(torch.nn.Dropout(),
                                           torch.nn.Linear(256 * 6 * 6, 4096),
                                           torch.nn.ReLU(inplace=True),
                                           torch.nn.Dropout(),
                                           torch.nn.Linear(4096, 4096),
                                            torch.nn.ReLU(inplace=True),
                                           torch.nn.Linear(4096, num_classes))
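
Only __init__ is shown here; assuming it sits inside a torch.nn.Module subclass named AlexNet (as the super() call suggests) with the usual flatten between the two stages, the 256 * 6 * 6 classifier input matches 28x28 single-channel images such as MNIST. A hedged usage sketch:

    import torch

    model = AlexNet(num_classes=10)
    x = torch.randn(8, 1, 28, 28)         # batch of 8 MNIST-sized images
    feats = model.features(x)             # -> (8, 256, 6, 6) for 28x28 inputs
    logits = model.classifier(feats.flatten(1))
    print(logits.shape)                   # torch.Size([8, 10])
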
Example #3
    def __init__(self,
                 in_dim,
                 out_dim,
                 k=16,
                 n_layers=5,
                 activation=nn.ReLU(),
                 bias=True):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        module = nn.ModuleList()

        module.append(DoubleLayer(in_dim, k, bias=bias))
        module.append(activation)

        for ll in range(n_layers - 1):
            module.append(DoubleLayer(k, k, bias=bias))
            module.append(activation)

        module.append(DoubleLayer(k, 2, bias=bias))
        module.append(nn.ReLU())
        module.append(nn.Linear(2, 1, bias=False))
        self.sequential = nn.Sequential(*module)
        self.set_weights()
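
DoubleLayer is project-specific and not defined in this snippet, so its exact behavior is unknown. As a clearly labeled stand-in, nn.Linear below illustrates the same build-a-list-then-nn.Sequential stacking pattern; note that reusing one activation module instance at several positions, as the original does, is safe because ReLU is stateless and parameter-free.

    import torch
    import torch.nn as nn

    # Stand-in sketch: nn.Linear replaces the project's DoubleLayer (an assumption).
    in_dim, k, n_layers = 3, 16, 5
    modules = [nn.Linear(in_dim, k), nn.ReLU()]
    for _ in range(n_layers - 1):
        modules += [nn.Linear(k, k), nn.ReLU()]
    modules += [nn.Linear(k, 2), nn.ReLU(), nn.Linear(2, 1, bias=False)]
    net = nn.Sequential(*modules)
    print(net(torch.randn(4, in_dim)).shape)  # torch.Size([4, 1])
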
Example #4
 def forward(self, input):
     out = self.layer1(input)
     out = nn.functional.relu(out)  # apply ReLU functionally between the layers
     out = self.layer2(out)
     return out
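
For context, a minimal self-contained module around the corrected method; the layer1/layer2 definitions are illustrative assumptions, since the enclosing __init__ is not shown:

    import torch
    import torch.nn as nn

    class TwoLayerNet(nn.Module):
        def __init__(self, in_dim=4, hidden=8, out_dim=2):
            super().__init__()
            self.layer1 = nn.Linear(in_dim, hidden)   # assumed layer shapes
            self.layer2 = nn.Linear(hidden, out_dim)

        def forward(self, input):
            out = self.layer1(input)
            out = nn.functional.relu(out)  # apply ReLU functionally
            out = self.layer2(out)
            return out

    print(TwoLayerNet()(torch.randn(5, 4)).shape)  # torch.Size([5, 2])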