Example no. 1
 def _lr_callback_init_policies(self, classifier_module, policy, instance,
                                **kwargs):
     X, y = make_classification(1000, 20, n_informative=10, random_state=0)
     X = X.astype(np.float32)
     lr_policy = LRScheduler(policy, **kwargs)
     net = NeuralNetClassifier(classifier_module,
                               max_epochs=2,
                               callbacks=[lr_policy])
     net.fit(X, y)
     assert any(
         list(
             map(
                 lambda x: isinstance(getattr(x[1], '_lr_scheduler', None),
                                      instance), net.callbacks_)))
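These excerpts omit the pytest fixtures that supply classifier_module, policy, instance, and kwargs. A minimal sketch of how the parametrization might look (hypothetical values; the real fixtures in skorch's test suite may differ):

import pytest
import torch
from skorch.callbacks import LRScheduler

# Hypothetical parametrization standing in for the omitted fixtures.
@pytest.mark.parametrize('policy, instance, kwargs', [
    ('StepLR', torch.optim.lr_scheduler.StepLR, {'step_size': 30}),
    (torch.optim.lr_scheduler.CosineAnnealingLR,
     torch.optim.lr_scheduler.CosineAnnealingLR, {'T_max': 5}),
])
def test_policy_accepted_as_string_or_class(policy, instance, kwargs):
    # LRScheduler accepts the policy either by name or as the scheduler class.
    lr_policy = LRScheduler(policy, **kwargs)
    assert lr_policy.policy == policy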
Example no. 2
    def test_lr_callback_batch_steps_correctly(
            self,
            classifier_module,
            classifier_data,
            policy,
            kwargs,
    ):
        num_examples = 1000
        batch_size = 100
        max_epochs = 2

        X, y = classifier_data
        lr_policy = LRScheduler(policy, **kwargs)
        net = NeuralNetClassifier(classifier_module(), max_epochs=max_epochs,
                                  batch_size=batch_size, callbacks=[lr_policy])
        net.fit(X, y)
        expected = (num_examples // batch_size) * max_epochs
        # pylint: disable=protected-access
        assert lr_policy.lr_scheduler_.last_batch_idx == expected
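Note that the expected count assumes the scheduler steps once for every training batch drawn from all num_examples samples; if the net held out an internal validation split, the training loader would yield fewer batches per epoch, so the fixture presumably disables or accounts for that split.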
Example no. 3
 def test_lr_callback_steps_correctly(
         self,
         classifier_module,
         classifier_data,
         policy,
         kwargs,
 ):
     max_epochs = 2
     X, y = classifier_data
     lr_policy = LRScheduler(policy, **kwargs)
     net = NeuralNetClassifier(
         classifier_module(),
         max_epochs=max_epochs,
         batch_size=16,
         callbacks=[lr_policy],
     )
     net.fit(X, y)
     # pylint: disable=protected-access
     assert lr_policy.lr_scheduler_.last_epoch == max_epochs - 1
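The max_epochs - 1 in the assertion reflects the scheduler bookkeeping these excerpts appear to assume: last_epoch starts at -1 and is incremented by one on each epoch-level step(), so after two epochs it reads 1.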
Example no. 4
 def test_lr_callback_init_policies(
         self,
         classifier_module,
         classifier_data,
         policy,
         instance,
         kwargs,
 ):
     X, y = classifier_data
     lr_policy = LRScheduler(policy, **kwargs)
     net = NeuralNetClassifier(
         classifier_module, max_epochs=2, callbacks=[lr_policy]
     )
     net.fit(X, y)
     assert any(list(map(
         lambda x: isinstance(
             getattr(x[1], 'lr_scheduler_', None), instance),
         net.callbacks_
     )))
Example no. 5
 def test_lr_callback_steps_correctly(
     self,
     classifier_module,
     classifier_data,
     policy,
     kwargs,
 ):
     max_epochs = 2
     X, y = classifier_data
     lr_policy = LRScheduler(policy, **kwargs)
     net = NeuralNetClassifier(
         classifier_module(),
         max_epochs=max_epochs,
         batch_size=16,
         callbacks=[lr_policy],
     )
     net.fit(X, y)
     # pylint: disable=protected-access
     assert lr_policy.lr_scheduler_.last_epoch == max_epochs - 1
Example no. 6
 def test_lr_callback_init_policies(
     self,
     classifier_module,
     classifier_data,
     policy,
     instance,
     kwargs,
 ):
     X, y = classifier_data
     lr_policy = LRScheduler(policy, **kwargs)
     net = NeuralNetClassifier(classifier_module,
                               max_epochs=2,
                               callbacks=[lr_policy])
     net.fit(X, y)
     assert any(
         list(
             map(
                 lambda x: isinstance(getattr(x[1], 'lr_scheduler_', None),
                                      instance), net.callbacks_)))
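Note that Example no. 1 probes the private _lr_scheduler attribute while this and the other excerpts use lr_scheduler_; the trailing-underscore form is the public name current skorch exposes, so the first excerpt presumably predates the rename.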
Example no. 7
    def test_lr_callback_batch_steps_correctly(
        self,
        classifier_module,
        classifier_data,
        policy,
        kwargs,
    ):
        num_examples = 1000
        batch_size = 100
        max_epochs = 2

        X, y = classifier_data
        lr_policy = LRScheduler(policy, **kwargs)
        net = NeuralNetClassifier(classifier_module(),
                                  max_epochs=max_epochs,
                                  batch_size=batch_size,
                                  callbacks=[lr_policy])
        net.fit(X, y)
        expected = (num_examples // batch_size) * max_epochs
        # pylint: disable=protected-access
        assert lr_policy.lr_scheduler_.last_batch_idx == expected
Example no. 8
    def __init__(self):
        super().__init__()
        self.layer1 = torch.nn.Linear(21, 17)
        self.layer2 = torch.nn.Linear(17, 13)
        self.layer3 = torch.nn.Linear(13, 9)
        self.layer4 = torch.nn.Linear(9, 5)
        self.dropout = torch.nn.Dropout(0.5)

    def forward(self, x):
        out1 = F.relu(self.layer1(x))
        out1 = self.dropout(out1)
        out2 = F.relu(self.layer2(out1))
        out3 = F.relu(self.layer3(out2))
        y_pred = F.softmax(self.layer4(out3), dim=-1)

        return y_pred.float()


net = NeuralNetClassifier(
    Model,
    max_epochs=1000,
    lr=0.01,
    # device='cuda',  # uncomment this to train with CUDA
)

pipe = Pipeline([
    ('scale', StandardScaler()),
    ('net', net),
])

pipe.fit(X, y)
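One caveat with this pipeline: StandardScaler emits float64 arrays while the module's weights are float32, so X should be cast with X.astype(np.float32) before fitting (as the later excerpts do), otherwise the forward pass raises a dtype mismatch.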
Example no. 9
        x = x.view(in_size, -1)  # flatten the tensor

        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)

        x = F.softmax(x, dim=-1)

        return x


nnet = NeuralNetClassifier(
    module=Net,
    max_epochs=10,
    lr=0.06,
    criterion=nn.NLLLoss,
    optimizer=torch.optim.SGD,
    optimizer__momentum=0.9,
)
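skorch routes constructor arguments to the optimizer through double-underscore names: optimizer__momentum=0.9 is handed to torch.optim.SGD as momentum=0.9, while a single-underscore spelling is rejected as an unknown parameter.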
# nnet=Net()
# print(X)
# print(Ytrain)

X = X.astype(np.float32)
Ytrain = Ytrain.astype(np.int64)

print(type(X))
print(type(Ytrain))

print(type(X[0]))
print(type(X[0][0]))
Example no. 10
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, x.size(1) * x.size(2) * x.size(3)) # flatten over channel, height and width = 1600
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        x = F.softmax(x, dim=-1)
        return x
    
cnn = NeuralNetClassifier(
    Cnn,
    max_epochs=8,
    lr=1,
    optimizer=torch.optim.Adadelta,
    # device='cuda',  # uncomment this to train with CUDA
)
# train the model
cnn.fit(XCnn_train, y_train)
# use the held-out validation set to check accuracy
cnn_pred = cnn.predict(XCnn_test)
print(np.mean(cnn_pred == y_test))
# predict on the test set
cnn_pred_test = cnn.predict(test)


# write to .csv file
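The excerpt ends before the CSV step. A minimal sketch, assuming pandas and a Kaggle-style submission with hypothetical ImageId/Label columns:

import numpy as np
import pandas as pd

# Hypothetical submission layout; adjust the column names to the actual task.
submission = pd.DataFrame({
    'ImageId': np.arange(1, len(cnn_pred_test) + 1),
    'Label': cnn_pred_test,
})
submission.to_csv('submission.csv', index=False)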
Example no. 11
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(28 * 28, num)
        self.fc2 = nn.Linear(num, 20)

    def forward(self, X, **kwargs):
        X = torch.sigmoid(self.fc1(X))
        X = F.softmax(self.fc2(X), dim=-1)
        return X


nnet = NeuralNetClassifier(
    Net,
    max_epochs=20,
    lr=0.0165,
    criterion=nn.NLLLoss,
    optimizer=torch.optim.SGD,
)

print(X)
print(Ytrain)

X = X.astype(np.float32)
Ytrain = Ytrain.astype(np.int64)


print(type(X))
print(type(Ytrain))

print(type(X[0]))
Example no. 12
module1 = MyModule1()
module2 = MyModule2()

# First get the code below in order, then worry about combining it with the code above.
net = NeuralNetClassifier(
    module=module1,
    lr=0.1,
    # turns out the value here is "cuda", not "GPU"
    # the error under the earlier PyTorch version was apparently CUDA-related
    device="cuda",
    #device="cpu",
    max_epochs=10,
    #criterion=torch.nn.NLLLoss,
    optimizer=torch.optim.Adam,
    criterion=torch.nn.CrossEntropyLoss,
    # the callbacks section of http://skorch.readthedocs.io/en/latest/user/neuralnet.html
    # explains how to pass in your own callback; the _default_callbacks function in net.py
    # shows how the callbacks themselves are implemented
    # http://skorch.readthedocs.io/en/latest/callbacks.html documents
    # skorch.callbacks.EarlyStopping for interrupting training; it only exists in recent
    # skorch releases, so upgrade skorch if the import fails -- with it there is no longer
    # any need to enumerate max_epochs by hand
    callbacks=[skorch.callbacks.EarlyStopping(patience=5)])

# verify the pipeline actually behaves as intended, not merely that it runs
# choose suitable parameters, augment the dataset, and submit the model's predictions
# (this may involve saving the model)
# later, wrap the feature engineering into its own framework so that incoming data is
# automatically processed, augmented, and submitted
params = {
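The excerpt cuts off at the params dict. A minimal sketch of the grid search such a dict typically feeds (hypothetical values, assuming sklearn's GridSearchCV and training data X_train/y_train):

from sklearn.model_selection import GridSearchCV

# Hypothetical search space; the original dict is truncated above.
params = {
    'lr': [0.05, 0.1],
    'max_epochs': [10, 20],
}
gs = GridSearchCV(net, params, scoring='accuracy', cv=3)
gs.fit(X_train, y_train)
print(gs.best_score_, gs.best_params_)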
Example no. 13
epochs = 200
batch_size = 16

class Mydnn(nn.Module):
    def __init__(self, time_window=2, stride=2, n_elec=64):
        super(Mydnn, self).__init__()
        self.wavelet = Wavelet_cnn(time_window, stride)
        self.conv1 = nn.Conv2d(n_elec, 128, (25, 2))
        self.conv2 = nn.Conv2d(128, 256, (25, 2))
        self.conv3 = nn.Conv2d(256, 512, (25, 2))
        self.pool = nn.AvgPool2d((2, 1))
        self.conv_class = nn.Conv2d(512, 2, (1, 1))
        self.pool_class = nn.AdaptiveAvgPool2d(output_size=(1, 1))

    def forward(self, x):
        scalegram, _ = self.wavelet(x)
        scalegram = scalegram ** 2  # power
        scalegram = scalegram.transpose(1, 3)  # (batch, elec, seq, level)
        h = self.pool(F.relu(self.conv1(scalegram)))
        h = self.pool(F.relu(self.conv2(h)))
        h = self.pool(F.relu(self.conv3(h)))
        # flatten(1) keeps the batch dimension even for a single-sample batch,
        # unlike a bare squeeze(); softmax needs an explicit dim
        h = F.softmax(self.pool_class(self.conv_class(h)).flatten(1), dim=-1)
        return h

model = NeuralNetClassifier(module=Mydnn,
                            max_epochs=epochs,
                            batch_size=batch_size,
                            lr=1e-4)

model.fit(X_train, y_train)
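After fitting, the estimator exposes the usual sklearn-style API; for instance (sketch, assuming a held-out X_test):

y_proba = model.predict_proba(X_test)  # the probabilities returned by forward()
y_pred = model.predict(X_test)         # argmax over the two classes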