def cv_train(model_class, criterion_class, optimizer_class, X, y,
             epoch=100, num_of_cv=10, batch_size=16, lr=1e-4, shuffle=True):
    # K-fold cross-validation: train a fresh model on each fold and collect
    # the best validation accuracy per fold.
    kf = KFold(n_splits=num_of_cv, shuffle=shuffle)
    accuracy = []
    for train_idx, val_idx in kf.split(X):  # plain KFold ignores y
        train_x, val_x = X[train_idx], X[val_idx]
        train_y, val_y = y[train_idx], y[val_idx]
        train_loader = data_loader(train_x, train_y, batch_size=batch_size,
                                   shuffle=shuffle, gpu=False)
        val_loader = data_loader(val_x, val_y, batch_size=batch_size)
        model = model_class().cuda()
        criterion = criterion_class()
        optimizer = optimizer_class(model.parameters(), lr=lr)
        trainer = Trainer(model, criterion, optimizer,
                          train_loader, val_loader,
                          val_num=1, gpu=True)
        trainer.run(epochs=epoch)
        accuracy.append(trainer.val_best_acc)
    return accuracy
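
A minimal usage sketch (hypothetical: `Conv_lstm` is the class defined in Example #2 below, and the model, criterion, and optimizer classes are passed uninstantiated so every fold trains from scratch):

import torch.nn as nn
import torch.optim as optim

# Mean best validation accuracy over 10 folds; X and y are assumed to be
# NumPy arrays already shaped for the model.
fold_acc = cv_train(Conv_lstm, nn.CrossEntropyLoss, optim.Adam, X, y,
                    epoch=100, num_of_cv=10, batch_size=16, lr=1e-4)
print(sum(fold_acc) / len(fold_acc))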
Example #2
# test_X, test_y = get_data_multi(sub_id_range=[50, 55], event_code=[6,10,14], filter=None, t=[0.5, 4.0])
# test_X = test_X.reshape(test_X.shape[0],
#                         1, test_X.shape[1],
#                         test_X.shape[2]).transpose(0,1,3,2)

f_dim = train_X.shape[3]
seq_len = train_X.shape[2]
'''
Model
'''
epochs = 200
batch_size = 512
lr = 1e-6
train_loader = data_loader(train_X,
                           train_y,
                           batch_size=batch_size,
                           shuffle=True,
                           gpu=False)
# test_loader = data_loader(test_, test_t, batch_size=batch_size)
val_loader = data_loader(test_X, test_y, batch_size=batch_size)


class Conv_lstm(nn.Module):
    def __init__(self):
        super(Conv_lstm, self).__init__()
        self.conv_time = nn.Conv2d(1, 40, (24, 1))               # convolution along the time axis
        self.conv_spat = nn.Conv2d(40, 40, (1, 64), bias=False)  # spatial filter across the 64 electrodes
        self.batchnorm = nn.BatchNorm2d(40)
        self.pool = nn.AvgPool2d(kernel_size=(80, 1), stride=(20, 1))
        self.dropout = nn.Dropout2d(p=0.5)
        self.lstm = LSTM(40, 10, batch_size, gpu=True, return_seq=False)  # custom wrapper from mymodule
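
    # A hypothetical forward pass (not part of the original snippet),
    # assuming input of shape (batch, 1, time, 64) and that the custom LSTM
    # wrapper consumes (batch, time, features); F is torch.nn.functional.
    def forward(self, x):
        h = self.conv_time(x)
        h = self.conv_spat(h)
        h = F.elu(self.batchnorm(h))
        h = self.pool(h)
        h = self.dropout(h)
        h = h.squeeze(3).permute(0, 2, 1)  # -> (batch, time, 40)
        return self.lstm(h)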
Example #3
print(val_.shape)
print(train_.shape)
print(test_.shape)

f_dim = train_.shape[3]
seq_len = train_.shape[2]
'''
Model
'''
epochs = 60
batch_size = 32
lr = 1e-6
train_loader = data_loader(train_,
                           train_t,
                           batch_size=batch_size,
                           shuffle=True,
                           gpu=False)
test_loader = data_loader(test_, test_t, batch_size=batch_size)
val_loader = data_loader(val_, val_t, batch_size=batch_size)


class Conv_lstm(nn.Module):
    def __init__(self):
        super(Conv_lstm, self).__init__()
        self.conv_time = nn.Conv2d(1, 40, (25, 1))
        self.conv_spat = nn.Conv2d(40, 40, (1, 5), bias=False)
        self.batchnorm = nn.BatchNorm2d(40)
        self.pool = nn.AvgPool2d(kernel_size=(25, 1), stride=(5, 1))
        self.dropout = nn.Dropout2d(p=0.5)
        self.lstm = LSTM(40, 10, batch_size, gpu=True, return_seq=False)
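
The snippet ends with the layer definitions; a training setup in the style of Example #4 below would follow (hedged sketch, assuming the same Trainer interface shown there):

model = Conv_lstm().cuda()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
trainer = Trainer(model, criterion, optimizer,
                  train_loader, val_loader,
                  val_num=1, gpu=True)
trainer.run(epochs=epochs)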
Example #4
      #   break

  def model_saver(self):
    # Save the trained weights; `self.model` is assumed here, since the
    # bare name `model` was undefined in this scope.
    torch.save(self.model.state_dict(), 'weight_convLSTM.pth')



 
X, y = make_class(subject_id=[1,50], crop=False, problem='hf', all_subject=False)
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
X, y = shuffle(X, y)
X = elec_map2d(X)
print(X.shape)
X = X.transpose(0,2,1,3,4)
print(X.shape)
# Split dataset
# Train : Test = 8 : 2
X_train, X_test, y_train, y_test =\
    train_test_split(X, y, test_size=0.2)

train_loader = data_loader(X_train, y_train, batch_size=128,
                           shuffle=True, gpu=False)
val_loader = data_loader(X_test, y_test, batch_size=128)
model = Conv3d_convLSTM().cuda()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
trainer = Trainer(model, criterion, optimizer,
                  train_loader, val_loader,
                  val_num=1, gpu=True)
trainer.run(epochs=400)
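
To reuse the checkpoint written by model_saver above, a minimal reload sketch (standard PyTorch API):

model.load_state_dict(torch.load('weight_convLSTM.pth'))
model.eval()  # switch to inference mode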
Example #5
  valtarget = T[cut:]
  return eeg, target, valeeg, valtarget

eeg1, target1, valeeg1, valtarget1 = load_eeg(number='a', split=0.8)
eeg2, target2, valeeg2, valtarget2 = load_eeg(number='b', split=0.8)
eeg3, target3, valeeg3, valtarget3 = load_eeg(number='c', split=0.8)
eeg4, target4, valeeg4, valtarget4 = load_eeg(number='d', split=0.8)
eeg5, target5, valeeg5, valtarget5 = load_eeg(number='e', split=0.8)

# Pool subjects 'b'..'e'; subject 'a' (eeg1/target1) is loaded above but
# excluded here, presumably held out for cross-subject evaluation.
eeg = np.r_[eeg2, eeg3, eeg4, eeg5]
valeeg = np.r_[valeeg2, valeeg3, valeeg4, valeeg5]
target = np.r_[target2, target3, target4, target5]
valtarget = np.r_[valtarget2, valtarget3, valtarget4, valtarget5]


train_loader = data_loader(eeg, target, batch_size=2048,
                           shuffle=True, gpu=False)
val_loader = data_loader(valeeg, valtarget, batch_size=2048)



class Res_dense(nn.Module):
  def __init__(self, in_units, hidden_units, out_units, dropout):
    super(Res_dense, self).__init__()
    self.bn1 = nn.BatchNorm1d(in_units)
    self.l1 = nn.Linear(in_units, hidden_units)
    self.bn2 = nn.BatchNorm1d(hidden_units)
    self.l2 = nn.Linear(hidden_units, hidden_units)
    self.bn3 = nn.BatchNorm1d(hidden_units)
    self.l3 = nn.Linear(hidden_units, out_units)
    self.dropout = nn.Dropout(p=dropout)
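
  # A hypothetical forward pass (not shown in the original snippet); the
  # skip connection implied by the class name is assumed to sit on the
  # second hidden layer, and F is torch.nn.functional.
  def forward(self, x):
    h = F.relu(self.l1(self.bn1(x)))
    h = self.dropout(h)
    h = h + F.relu(self.l2(self.bn2(h)))  # residual connection
    h = self.dropout(h)
    return self.l3(self.bn3(h))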
Example #6
import os
import sys

sys.path.append(os.pardir)
from mymodule.trainer import Trainer
from mymodule.utils import data_loader, evaluator
from tensorboardX import SummaryWriter

writer = SummaryWriter()
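
The writer is created here but never used in the visible part of the snippet; with tensorboardX, per-epoch metrics are typically logged like this (hypothetical tag and variable names):

# writer.add_scalar('val/accuracy', val_acc, epoch)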

batch_size = 1024
epochs = 100
'''
Data generation
'''
train, test, label = get_data(idx=1)
train_loader = data_loader(train,
                           label,
                           batch_size=batch_size,
                           shuffle=True,
                           gpu=False)
test_loader = data_loader(test,
                          label,
                          batch_size=batch_size,
                          shuffle=False,
                          gpu=False)


class MLP(nn.Module):
    def __init__(self, training=True):
        super(MLP, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=64,
                               kernel_size=3,  # assumed; the original snippet is truncated here
                               padding=1)
Example #7
                           filter=[5, 25],
                           t=[0, 4],
                           time_window=1.0)

print(X_crop.shape)
print(y_crop.shape)
'''
Model
'''
epochs = 200
seq_len = X_crop.shape[-1]
batch_size = 1024
lr = 1e-5
train_loader = data_loader(X_crop[:40000],
                           y_crop[:40000],
                           batch_size=batch_size,
                           shuffle=True,
                           gpu=False)
# test_loader = data_loader(test_, test_t, batch_size=batch_size)
val_loader = data_loader(X_crop[40000:], y_crop[40000:], batch_size=batch_size)

### resnet
res_ch = [64, 128]
pooling = [int(seq_len / 2), int(seq_len / 4)]
res_dropout = 0.4

### lstm
lstm_units = [res_ch[-1], 16]
lstm_dropout = 0.4
bi = True
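
The snippet ends at these hyperparameter blocks; a hedged sketch of the LSTM stage they describe, using torch.nn.LSTM as a stand-in for the custom mymodule wrapper (whose bidirectional option is not shown in these snippets):

import torch.nn as nn

lstm = nn.LSTM(input_size=lstm_units[0],   # resnet output channels (128)
               hidden_size=lstm_units[1],  # 16
               batch_first=True,
               bidirectional=bi)
lstm_drop = nn.Dropout(p=lstm_dropout)  # applied to the LSTM outputs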