Example #1
def _lr_callback_init_policies(self, classifier_module, policy, instance,
                               **kwargs):
    X, y = make_classification(1000, 20, n_informative=10, random_state=0)
    X = X.astype(np.float32)
    lr_policy = LRScheduler(policy, **kwargs)
    net = NeuralNetClassifier(classifier_module,
                              max_epochs=2,
                              callbacks=[lr_policy])
    net.fit(X, y)
    # net.callbacks_ holds (name, callback) tuples; this example targets an
    # older skorch where the scheduler lives in the private attribute
    # '_lr_scheduler' (newer versions expose 'lr_scheduler_', see below).
    assert any(
        list(
            map(
                lambda x: isinstance(getattr(x[1], '_lr_scheduler', None),
                                     instance), net.callbacks_)))
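The test above exercises skorch's LRScheduler callback end to end, as do the variants in the examples below. A minimal, self-contained sketch of the same pattern, assuming a current skorch and PyTorch; the MyModule class and the StepLR policy here are illustrative, not taken from the original tests:

import numpy as np
import torch
from torch import nn
from torch.optim.lr_scheduler import StepLR
from sklearn.datasets import make_classification
from skorch import NeuralNetClassifier
from skorch.callbacks import LRScheduler

class MyModule(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(20, 10)
        self.out = nn.Linear(10, 2)

    def forward(self, X):
        X = torch.relu(self.dense(X))
        return torch.softmax(self.out(X), dim=-1)

X, y = make_classification(1000, 20, n_informative=10, random_state=0)
X, y = X.astype(np.float32), y.astype(np.int64)

# halve the learning rate after every epoch
lr_policy = LRScheduler(StepLR, step_size=1, gamma=0.5)
net = NeuralNetClassifier(MyModule, max_epochs=2, callbacks=[lr_policy])
net.fit(X, y)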
Example #2

def test_lr_callback_batch_steps_correctly(
        self,
        classifier_module,
        classifier_data,
        policy,
        kwargs,
):
    num_examples = 1000
    batch_size = 100
    max_epochs = 2

    X, y = classifier_data
    lr_policy = LRScheduler(policy, **kwargs)
    net = NeuralNetClassifier(classifier_module(), max_epochs=max_epochs,
                              batch_size=batch_size, callbacks=[lr_policy])
    net.fit(X, y)
    # one scheduler step per batch: batches per epoch times number of epochs
    expected = (num_examples // batch_size) * max_epochs
    # pylint: disable=protected-access
    assert lr_policy.lr_scheduler_.last_batch_idx == expected
Example #3

def test_lr_callback_steps_correctly(
        self,
        classifier_module,
        classifier_data,
        policy,
        kwargs,
):
    max_epochs = 2
    X, y = classifier_data
    lr_policy = LRScheduler(policy, **kwargs)
    net = NeuralNetClassifier(
        classifier_module(),
        max_epochs=max_epochs,
        batch_size=16,
        callbacks=[lr_policy],
    )
    net.fit(X, y)
    # pylint: disable=protected-access
    assert lr_policy.lr_scheduler_.last_epoch == max_epochs - 1
Example #4

def test_lr_callback_init_policies(
        self,
        classifier_module,
        classifier_data,
        policy,
        instance,
        kwargs,
):
    X, y = classifier_data
    lr_policy = LRScheduler(policy, **kwargs)
    net = NeuralNetClassifier(
        classifier_module, max_epochs=2, callbacks=[lr_policy]
    )
    net.fit(X, y)
    assert any(list(map(
        lambda x: isinstance(
            getattr(x[1], 'lr_scheduler_', None), instance),
        net.callbacks_
    )))
Example #5
def test_lr_callback_steps_correctly(
    self,
    classifier_module,
    classifier_data,
    policy,
    kwargs,
):
    max_epochs = 2
    X, y = classifier_data
    lr_policy = LRScheduler(policy, **kwargs)
    net = NeuralNetClassifier(
        classifier_module(),
        max_epochs=max_epochs,
        batch_size=16,
        callbacks=[lr_policy],
    )
    net.fit(X, y)
    # pylint: disable=protected-access
    assert lr_policy.lr_scheduler_.last_epoch == max_epochs - 1
Example #6
def test_lr_callback_init_policies(
    self,
    classifier_module,
    classifier_data,
    policy,
    instance,
    kwargs,
):
    X, y = classifier_data
    lr_policy = LRScheduler(policy, **kwargs)
    net = NeuralNetClassifier(classifier_module,
                              max_epochs=2,
                              callbacks=[lr_policy])
    net.fit(X, y)
    assert any(
        list(
            map(
                lambda x: isinstance(getattr(x[1], 'lr_scheduler_', None),
                                     instance), net.callbacks_)))
Example #7
def test_lr_callback_batch_steps_correctly(
    self,
    classifier_module,
    classifier_data,
    policy,
    kwargs,
):
    num_examples = 1000
    batch_size = 100
    max_epochs = 2

    X, y = classifier_data
    lr_policy = LRScheduler(policy, **kwargs)
    net = NeuralNetClassifier(classifier_module(),
                              max_epochs=max_epochs,
                              batch_size=batch_size,
                              callbacks=[lr_policy])
    net.fit(X, y)
    expected = (num_examples // batch_size) * max_epochs
    # pylint: disable=protected-access
    assert lr_policy.lr_scheduler_.last_batch_idx == expected
Example #8

# nnet=Net()
# print(X)
# print(Ytrain)

X = X.astype(np.float32)
Ytrain = Ytrain.astype(np.int64)

print(type(X))
print(type(Ytrain))

print(type(X[0]))
print(type(X[0][0]))
print(type(Ytrain[0]))

assert len(X) == len(Ytrain)
nnet.fit(X, Ytrain)

from sklearn.model_selection import cross_val_predict

# ytrain_pred = cross_val_predict(nnet, X, Ytrain,cv=5)

print("Training done")

Ytrain_pred = nnet.predict(X)
correct = 0
for i in range(len(X)):
    if Ytrain_pred[i] == Ytrain[i]:
        correct += 1

print(Ytrain_pred)
print("Training Accuracy={}%".format(correct / len(X) * 100))
Example #9
        x = x.view(-1, x.size(1) * x.size(2) * x.size(3)) # flatten over channel, height and width = 1600
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        x = F.softmax(x, dim=-1)
        return x
    
cnn = NeuralNetClassifier(
    Cnn,
    max_epochs=8,
    lr=1,
    optimizer=torch.optim.Adadelta,
    # device='cuda',  # uncomment this to train with CUDA
)
# train the model
cnn.fit(XCnn_train, y_train)
# use the validation set to check accuracy
cnn_pred = cnn.predict(XCnn_test)
print(np.mean(cnn_pred == y_test))
# predict on the test set
cnn_pred_test = cnn.predict(test)



# write predictions to a .csv file
ID = np.arange(1, 20001)
ID = ID.tolist()
data = zip(ID, cnn_pred_test)
with open('CNN_v6.csv', 'w', newline='') as outfile:
    mywriter = csv.writer(outfile)
    mywriter.writerows(data)  # one (ID, prediction) row per line
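The same file can be written with pandas, assuming it is available; the 'ID' and 'Label' column names are illustrative, and unlike the csv.writer version this also emits a header row:

import pandas as pd
pd.DataFrame({'ID': ID, 'Label': cnn_pred_test}).to_csv('CNN_v6.csv', index=False)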
Example #10
# I thought the good results came from cv=3, but cv=10 turns out to give good
# results too; the neural network is far stronger than the traditional models.
# Now that I understand how (randomized) hyperparameter search works, and since
# more data per fit usually means better results, n_folds is changed to 20.
# Note: StratifiedKFold(y, n_folds=...) is the pre-0.18 sklearn signature.
skf = StratifiedKFold(Y_split_train,
                      n_folds=20,
                      shuffle=True,
                      random_state=None)

# refit defaults to True, so there is no need to set refit=True on the
# RandomizedSearchCV. scoring='accuracy', however, is worth setting explicitly,
# because scoring defaults to None, in which case the model's own default
# scorer (metric) is used.
# Since the skorch model has no default scoring, it must be set manually for
# the hyperparameter search, otherwise the search cannot run.
# That raises another question: what criterion does net.score actually use?
# Could that be a reason to set refit=False?
# Probably not: if net.score had no criterion, or none could be set, the
# hyperparameter search could not have one set either.
# For other models n_iter seems to control the number of iterations, but for
# NeuralNetClassifier the search apparently runs cv * n_iter fits.
# Verified: device='cuda' really is much faster than the CPU, and for
# NeuralNetClassifier the search really does run cv * n_iter fits.
# Finally confirmed by testing: the randomized search runs the cv fits with
# the same net and then picks the best one; sklearn's hyperparameter search
# probably just works that way.
"""
#我还以为是net的fit函数存在一定的问题,这个实验说明问题不在这里吧
net.fit(X_split_train.values.astype(np.float32), Y_split_train.values.astype(np.longlong))
Y_pred = net.predict(X_split_train.values.astype(np.float32))
counts = (Y_pred==Y_split_train).sum()
print("准确率为:",counts/len(Y_split_train)) 
"""

# Apart from the number of layers and the number of hidden units per layer,
# there is not much to tune. For the other parameters, papers from the last
# couple of years mostly use the same settings: train for tens to hundreds of
# epochs; SGD with a mini-batch size anywhere from tens to hundreds; learning
# rate 0.1, shrunk manually; weight decay 0.005; momentum 0.9; dropout plus
# ReLU; weights initialized from a Gaussian, biases initialized to 0. Finally,
# remember to normalize both the input features and the prediction targets.
# With all of that done, the network should produce reasonably solid results;
# if not, take a hard look at your luck.
search = RandomizedSearchCV(net, params, cv=skf, n_iter=1, scoring='accuracy')
search.fit(X_split_train.values.astype(np.float32),
           Y_split_train.values.astype(np.longlong))
score1 = search.best_score_
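The StratifiedKFold call above uses the pre-0.18 sklearn API, where the labels are passed to the constructor. On a current sklearn the labels move to fit and n_folds becomes n_splits; a minimal sketch of the same search, reusing the snippet's net, params, and data names:

from sklearn.model_selection import StratifiedKFold, RandomizedSearchCV

skf = StratifiedKFold(n_splits=20, shuffle=True, random_state=None)
search = RandomizedSearchCV(net, params, cv=skf, n_iter=1, scoring='accuracy')
search.fit(X_split_train.values.astype(np.float32),
           Y_split_train.values.astype(np.longlong))
score1 = search.best_score_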
Example #11
epochs = 200
batch_size = 16

class Mydnn(nn.Module):
    def __init__(self, time_window=2, stride=2, n_elec=64):
        super(Mydnn, self).__init__()
        self.wavelet = Wavelet_cnn(time_window, stride)
        self.conv1 = nn.Conv2d(n_elec, 128, (25, 2))
        self.conv2 = nn.Conv2d(128, 256, (25, 2))
        self.conv3 = nn.Conv2d(256, 512, (25, 2))
        self.pool = nn.AvgPool2d((2, 1))
        self.conv_class = nn.Conv2d(512, 2, (1, 1))
        self.pool_class = nn.AdaptiveAvgPool2d(output_size=(1, 1))

    def forward(self, x):
        scalegram, _ = self.wavelet(x)
        scalegram = scalegram ** 2  # power
        scalegram = scalegram.transpose(1, 3)  # (batch, elec, seq, level)
        h = self.pool(F.relu(self.conv1(scalegram)))
        h = self.pool(F.relu(self.conv2(h)))
        h = self.pool(F.relu(self.conv3(h)))
        # dim made explicit: calling F.softmax without dim is deprecated
        h = F.softmax(self.pool_class(self.conv_class(h)).squeeze(), dim=-1)
        return h

model = NeuralNetClassifier(module=Mydnn,
                            max_epochs=100,
                            lr=1e-4)

model.fit(X_train, y_train)
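A hypothetical follow-up to sanity-check the fit, assuming held-out arrays X_test and y_test exist with the same preprocessing as the training data:

y_pred = model.predict(X_test)
print("test accuracy:", (y_pred == y_test).mean())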