Example #1
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        # Layer sizes are hard-coded for flattened 3x32x32 inputs (e.g. CIFAR-10)
        # rather than derived from the D_in, H and D_out arguments.
        self.linear1 = torch.nn.Linear(3 * 32 * 32, 400)
        self.linear1.weight.data.normal_(0, 0.001)
        self.linear1.bias.data.fill_(0)
        self.batch_norm = CustomBatchNormAutograd(400)
        self.linear2 = torch.nn.Linear(400, 200)
        self.linear2.weight.data.normal_(0, 0.001)
        self.linear2.bias.data.fill_(0)
        self.batch_norm1 = CustomBatchNormAutograd(200)
        # self.linear3 = torch.nn.Linear(200, 100)
        self.linear4 = torch.nn.Linear(200, 10)
        self.linear4.weight.data.normal_(0, 0.0001)
        self.linear4.bias.data.fill_(0)
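
The snippet above shows only the constructor; the matching forward pass is not part of the source. A possible forward method consistent with these layers (an assumption: the input is flattened and ReLU activations follow each batch-norm layer) might look like this:

    def forward(self, x):
        # flatten (N, 3, 32, 32) images into (N, 3072) vectors
        out = x.view(x.size(0), -1)
        out = torch.relu(self.batch_norm(self.linear1(out)))
        out = torch.relu(self.batch_norm1(self.linear2(out)))
        return self.linear4(out)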
Example #2
    def test_autograd(self):
        np.random.seed(42)
        torch.manual_seed(42)
        for test_num in range(10):
            # random batch size and feature count for every trial
            n_batch = int(np.random.choice(range(32, 128)))
            n_neurons = int(np.random.choice(range(1, 10)))
            # inputs drawn from N(10, 4) so normalization has a visible effect
            x = 2 * torch.randn(n_batch, n_neurons, requires_grad=True) + 10
            bn_auto = CustomBatchNormAutograd(n_neurons)
            y_auto = bn_auto(x)
            # after batch norm, each feature should have mean ~0 and variance ~1
            self.assertLess(np.max(np.abs(y_auto.mean(dim=0).data.numpy())), 1e-5)
            self.assertLess(np.max(np.abs(y_auto.var(dim=0).data.numpy() - 1)), 1e-1)
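
All of the examples use a CustomBatchNormAutograd module whose definition is not shown on this page. The sketch below is only an assumption of what such a module could look like: standard batch normalization with learnable scale and shift, relying on autograd for the backward pass; the implementation in the original projects may differ.

import torch


class CustomBatchNormAutograd(torch.nn.Module):
    def __init__(self, n_neurons, eps=1e-5):
        super(CustomBatchNormAutograd, self).__init__()
        self.eps = eps
        # learnable per-feature scale (gamma) and shift (beta)
        self.gamma = torch.nn.Parameter(torch.ones(n_neurons))
        self.beta = torch.nn.Parameter(torch.zeros(n_neurons))

    def forward(self, x):
        # normalize each feature over the batch dimension
        mean = x.mean(dim=0)
        var = x.var(dim=0, unbiased=False)
        x_hat = (x - mean) / torch.sqrt(var + self.eps)
        return self.gamma * x_hat + self.beta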
Example #3
File: mlp_pytorch.py  Project: frank/dl
    def __init__(self,
                 n_inputs,
                 n_hidden,
                 n_classes,
                 dropout=0.,
                 batchnorm=False):
        """
        Initializes MLP object.

        Args:
          n_inputs: number of inputs.
          n_hidden: list of ints, specifies the number of units
                    in each linear layer. If the list is empty, the MLP
                    will not have any hidden layers, and the model
                    will simply perform a multinomial logistic regression.
          n_classes: number of classes of the classification problem.
                     This number is required in order to specify the
                     output dimensions of the MLP.
          dropout: dropout probability applied after each hidden layer;
                   no dropout is used when set to 0.
          batchnorm: if True, a CustomBatchNormAutograd layer is added
                     after each hidden layer's ReLU.
        """
        super(MLP, self).__init__()
        # list with all of the sizes between layers
        self.layer_sizes = [n_inputs] + n_hidden + [n_classes]

        # list to append all layers to
        layers = []
        for layer_n in range(len(self.layer_sizes) - 1):
            if layer_n < (len(self.layer_sizes) - 2):
                # every hidden layer also gets a ReLU nonlinearity
                layers.append(
                    nn.Linear(self.layer_sizes[layer_n],
                              self.layer_sizes[layer_n + 1]))
                layers.append(nn.ReLU())
                # optional batch norm and dropout, controlled by the arguments
                if batchnorm:
                    layers.append(
                        CustomBatchNormAutograd(self.layer_sizes[layer_n + 1]))
                if dropout > 0.:
                    layers.append(nn.Dropout(dropout))
            else:
                # the output layer gets no ReLU
                layers.append(
                    nn.Linear(self.layer_sizes[layer_n],
                              self.layer_sizes[layer_n + 1]))
        self.model = nn.Sequential(*layers)
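
For reference, a hypothetical instantiation of this MLP for flattened 32x32 RGB images; the sizes below are illustrative rather than taken from the project, and the call assumes the class's forward simply applies self.model:

    mlp = MLP(n_inputs=3 * 32 * 32,
              n_hidden=[400, 200],
              n_classes=10,
              dropout=0.2,
              batchnorm=True)
    logits = mlp(torch.randn(64, 3 * 32 * 32))  # expected shape: (64, 10)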