Example #1
 def __init__(self):
     """
     hidden_layer_num: 隐藏层数目
     hidden_size: 隐藏层大小
     classes_num: 分类数目
     """
     super(MLP, self).__init__()
     self.LossFunc = FocalLoss()
     # 256 -> 256 -> 128 -> 128 -> 4 fully connected stack; the loss module is appended as the final "layer"
     self.layers = [
         Linear(256, 256),
         Batchnorm(256),
         ReLU(),
         Dropout(0.5),
         Linear(256, 128),
         Batchnorm(128),
         ReLU(),
         Dropout(0.5),
         Linear(128, 128),
         Batchnorm(128),
         ReLU(),
         Dropout(0.5),
         Linear(128, 4),
         Sigmoid(), self.LossFunc
     ]
     # Collect the parameters of every layer that exposes them, keyed by layer id.
     self.parameters = {}
     for l in self.layers:
         if hasattr(l, 'parameters'):
             self.parameters[id(l)] = l.parameters
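The classes used above (Linear, Batchnorm, FocalLoss, ...) belong to the example's own framework, whose imports are not shown. Purely as a point of reference, a minimal sketch of the same 256-256-128-128-4 stack written against PyTorch (an assumed substitute, not the original API) could look like this:

    import torch.nn as nn

    # Hedged PyTorch sketch of the Example #1 architecture; FocalLoss has no
    # stock torch equivalent, so the loss is kept outside the layer stack.
    mlp = nn.Sequential(
        nn.Linear(256, 256), nn.BatchNorm1d(256), nn.ReLU(), nn.Dropout(0.5),
        nn.Linear(256, 128), nn.BatchNorm1d(128), nn.ReLU(), nn.Dropout(0.5),
        nn.Linear(128, 128), nn.BatchNorm1d(128), nn.ReLU(), nn.Dropout(0.5),
        nn.Linear(128, 4),   nn.Sigmoid(),
    )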
Example #2
 def __init__(self):
     super().__init__(CrossEntropy, learning_rate=0.05)
     self.a1 = ReLu(self)
     self.a2 = SoftMax(self)
     self.l1 = Linear(self, 784, 256)
     self.l2 = Linear(self, 256, 120)
     self.l3 = Linear(self, 120, 64)
     self.l4 = Linear(self, 64, 10)
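The forward order is not shown in this snippet, but the fields suggest a 784-256-120-64-10 classifier with ReLU activations and a softmax output, trained with cross-entropy at learning rate 0.05. A hedged PyTorch sketch of that reading (torch is an assumption, not the example's framework; nn.CrossEntropyLoss applies log-softmax itself, so no explicit SoftMax layer is needed):

    import torch.nn as nn
    import torch.optim as optim

    net = nn.Sequential(
        nn.Linear(784, 256), nn.ReLU(),
        nn.Linear(256, 120), nn.ReLU(),
        nn.Linear(120, 64),  nn.ReLU(),
        nn.Linear(64, 10),   # raw logits; the loss handles the softmax
    )
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.05)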
Example #3
 def layer(name, dim1, dim2, ind):
     # Factory: map a layer-type name onto a constructed layer object.
     if "input" == name:
         assert False, "input layer must be add-hot pluged to neural net at forward pass per new input!"
     if "lin" == name:
         return Linear(dim1, dim2, ind)
     if "sig" == name:
         return Sigmoid(dim1, dim2, ind)
     if "relu" == name:
         return ReLU(dim1, dim2, ind)
     if "ass" == name:
         return AntiSaturationSig(dim1, ind)
     assert False, "space-layer : <{}> not implemented!".format(name)
     return None
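The if-chain above can also be expressed as a dictionary dispatch, which keeps the error path in one place and makes adding a new layer type a one-line change. A sketch under the assumption that the constructors take exactly the arguments used in the original function:

    # Hypothetical dict-based variant of the factory above.
    LAYER_REGISTRY = {
        "lin":  lambda d1, d2, i: Linear(d1, d2, i),
        "sig":  lambda d1, d2, i: Sigmoid(d1, d2, i),
        "relu": lambda d1, d2, i: ReLU(d1, d2, i),
        "ass":  lambda d1, d2, i: AntiSaturationSig(d1, i),  # dim2 unused for this layer
    }

    def layer(name, dim1, dim2, ind):
        if name == "input":
            assert False, "input layer must be hot-plugged into the neural net at forward pass for each new input!"
        assert name in LAYER_REGISTRY, "space-layer <{}> not implemented!".format(name)
        return LAYER_REGISTRY[name](dim1, dim2, ind)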
Example #4
 def __init__(self):
     super().__init__(MSE, learning_rate=0.02)
     self.a1 = Passive(self)
     self.l1 = Linear(self, 1, 5)
     self.l2 = Linear(self, 5, 1)
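Example #4 describes a minimal 1-5-1 regression network trained with MSE at learning rate 0.02; Passive is presumably a no-op (identity) activation. A hedged PyTorch sketch of the same setup, including one toy gradient step (all torch names are assumptions, not the example's framework):

    import torch
    import torch.nn as nn

    net = nn.Sequential(
        nn.Linear(1, 5),  # hidden layer of width 5
        nn.Identity(),    # stand-in for the "Passive" (no-op) activation
        nn.Linear(5, 1),  # scalar regression output
    )
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.02)

    x = torch.tensor([[0.5]])   # toy input
    y = torch.tensor([[1.0]])   # toy target
    loss = criterion(net(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()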