Example #1
optimizer = torch.optim.SGD([
        # ... earlier per-layer parameter groups elided in this snippet ...
        {'params': net_parameters_id['score_final.weight'], 'lr': lr*0.001, 'weight_decay': weight_decay},
        {'params': net_parameters_id['score_final.bias'], 'lr': lr*0.002, 'weight_decay': 0.},
        {'params': net_parameters_id['score_final_h.weight'], 'lr': lr*0.001, 'weight_decay': weight_decay},
        {'params': net_parameters_id['score_final_h.bias'], 'lr': lr*0.002, 'weight_decay': 0.},
    ], lr=lr, momentum=momentum, weight_decay=weight_decay)
scheduler = lr_scheduler.StepLR(optimizer, step_size=stepsize, gamma=gamma)
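# A minimal, self-contained sketch of what StepLR does: every `step_size`
# epochs it multiplies each parameter group's lr by `gamma`. The tiny
# nn.Linear model and the numbers below are illustrative assumptions, not
# part of the training script above.
import torch.nn as _nn
import torch.optim as _optim
from torch.optim import lr_scheduler as _lrs

_dummy = _nn.Linear(4, 2)
_opt = _optim.SGD(_dummy.parameters(), lr=0.1, momentum=0.9)
_sched = _lrs.StepLR(_opt, step_size=3, gamma=0.1)
for _ep in range(6):
    _opt.step()    # optimizer step first (PyTorch >= 1.1 ordering) ...
    _sched.step()  # ... then the scheduler step at the end of the epoch
    print(_ep, _opt.param_groups[0]['lr'])  # 0.1, 0.1, 0.01, 0.01, 0.01, 0.001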




#___________________   Train the model

train_loss = []
train_loss_detail = []
for epoch in range(0, maxepoch):
    tr_avg_loss, tr_detail_loss = train(train_loader, model, optimizer,
                                        epoch, itersize, maxepoch, print_freq)

    # save_file = os.path.join(TMP_DIR, 'checkpoint_epoch{}.pth'.format(epoch))
    # save_checkpoint({
    #     'epoch': epoch,
    #     'state_dict': model.state_dict(),
    #     'optimizer': optimizer.state_dict()
    # }, filename=save_file)
    scheduler.step()  # decay the learning rate on the StepLR schedule
    # record the training loss every epoch in case of an early stop
    train_loss.append(tr_avg_loss)
    train_loss_detail += tr_detail_loss



torch.save(model, 'EdgeNet1')
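# Hedged sketch: the commented-out block in the loop above calls a
# save_checkpoint() helper that is not shown in this snippet. A common
# minimal implementation (an assumption, not the original helper) simply
# delegates to torch.save:
def save_checkpoint(state, filename='checkpoint.pth'):
    """Persist a dict of training state (epoch, model/optimizer state_dicts)."""
    torch.save(state, filename)
# Saving state_dicts this way keeps checkpoints loadable after code refactors,
# unlike pickling the whole model object as done with torch.save(model, ...).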
Example #2
visualize_model(layers, model_desc, vol_size=(256, 256, 1))
train_dir = os.path.join('..', 'data', 'train', 'aug')
train_generator = data_generator(train_dir)
print('We have ' + str(len(train_generator)) + ' images available')

learning_rate = 0.001  # Step size applied to the weights during each backpropagation update, typically in the range 1e-2 to 1e-5
number_of_epochs = 100  # Number of epochs to train; one epoch normally means a full pass over the dataset,
# though since steps_per_epoch is set explicitly below, this may not hold exactly
steps_per_epoch = len(train_generator)
loss_function = 'categorical_crossentropy'
batch_normalization = True

model, callbacks = create_model(layers,
                                model_desc,
                                batch_norm=batch_normalization,
                                data_generator=train_generator)
new_call_back = TensorBoardImage(
    "", os.path.join('..', 'Tensorboard_models', model_desc), train_generator)
tensorboard_output = os.path.join('..', 'Tensorboard_models', model_desc)
if not os.path.exists(tensorboard_output):
    os.makedirs(tensorboard_output)
# new_call_back = new_tensorboard(log_dir=tensorboard_output, batch_size=2, write_graph=True, write_grads=False,
#                           write_images=True, update_freq='epoch', histogram_freq=0)
# new_call_back.set_training_model(train_generator)

# callbacks = callbacks + [new_call_back]
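# Hedged sketch: TensorBoardImage / new_tensorboard above are custom classes
# from this codebase. With stock Keras, an equivalent scalar/graph logger
# would be (an assumption, not the project's own callback):
# from tensorflow.keras.callbacks import TensorBoard
# tb_callback = TensorBoard(log_dir=tensorboard_output, write_graph=True,
#                           update_freq='epoch')
# callbacks = callbacks + [tb_callback]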
train(model, train_generator, callbacks, learning_rate, number_of_epochs,
      steps_per_epoch, loss_function)
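# Hedged sketch: train() is defined elsewhere in this codebase. Assuming it
# follows the usual Keras pattern, its core would look roughly like:
# from tensorflow.keras.optimizers import Adam
# model.compile(optimizer=Adam(learning_rate=learning_rate),
#               loss=loss_function, metrics=['accuracy'])
# model.fit(train_generator, epochs=number_of_epochs,
#           steps_per_epoch=steps_per_epoch, callbacks=callbacks)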

Example #3
trainDataSet = Data.TensorDataset(trainInput, trainTarget)
trainDataLoader = Data.DataLoader(trainDataSet, batch_size, shuffle=True)  # shuffle the training data

# LSTM
model = LSTM(vocab_size,
             emb_size,
             hidden_size,
             num_classes,
             num_layers=num_layers,
             dropout=dropout,
             bidirectional=bidirectional).to(device)
criterion = nn.CrossEntropyLoss().to(device)  # loss function
optimizer = optim.Adam(model.parameters(), lr=lr)  # optimizer
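# Hedged sketch: the LSTM class is defined elsewhere in this example. A
# typical implementation matching the constructor call above (an assumption,
# not the original class) would be:
#
# class LSTM(nn.Module):
#     def __init__(self, vocab_size, emb_size, hidden_size, num_classes,
#                  num_layers=1, dropout=0.0, bidirectional=False):
#         super().__init__()
#         self.embedding = nn.Embedding(vocab_size, emb_size)
#         self.lstm = nn.LSTM(emb_size, hidden_size, num_layers=num_layers,
#                             dropout=dropout, bidirectional=bidirectional,
#                             batch_first=True)
#         out_dim = hidden_size * (2 if bidirectional else 1)
#         self.fc = nn.Linear(out_dim, num_classes)
#
#     def forward(self, x):                 # x: (batch, seq_len) token ids
#         emb = self.embedding(x)           # (batch, seq_len, emb_size)
#         output, _ = self.lstm(emb)        # (batch, seq_len, out_dim)
#         return self.fc(output[:, -1, :])  # logits from the last time step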

# Training
train(model, epoch, trainDataLoader, criterion, optimizer)

# Build and preprocess the test set
test_sentences = ["i hate me", "you love me"]
test_labels = [0, 1]
testInput, testTarget = make_data(test_sentences, word2idx, test_labels)
testInput = torch.LongTensor(testInput).to(device)
testTarget = torch.LongTensor(testTarget).to(device)
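# Hedged sketch: make_data() comes from earlier in this example. A typical
# version for these short, fixed-length sentences (an assumption) just maps
# each token to its vocabulary index:
#
# def make_data(sentences, word2idx, labels):
#     inputs = [[word2idx[word] for word in sen.split()] for sen in sentences]
#     return inputs, labels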

# Wrap into a TensorDataset and DataLoader
testDataSet = Data.TensorDataset(testInput, testTarget)
testDataLoader = Data.DataLoader(testDataSet, 2, shuffle=False)  # keep order: no shuffling at test time

# Predict
model.eval()  # switch to evaluation mode (disables dropout)
for testInput, _ in testDataLoader:
    # the source snippet is truncated here; a typical completion (assumed):
    pred = model(testInput).argmax(dim=1)  # predicted class per sentence
    print(pred.tolist())
Example #4
# (snippet begins mid-definition: the tail of the `layers` dict passed to create_model below)
        'Encoding': [64]
    }
}
model_desc = 'Shallow_net2'  # Name of your model
# The numbers inside are the number of filter banks; you can have multiple filter banks per layer

train_generator = data_generator(atlas_vol, data_dir)
for i in range(len(train_generator)):
    x, _ = train_generator[i]  # equivalent to train_generator.__getitem__(i)
    plot_scroll_Image(x[1][0, x[0].shape[1] // 2, ...])
print('We have ' + str(len(train_generator)) + ' registrations available')
Moving_names = glob.glob(
    r'K:\Morfeus\AAPM_SummerSchool\voxelmorph_all_data\*Moving_Data.npy')
x, y = train_generator[0]
learning_rate = 0.001  # Step size applied to the weights during each backpropagation update, typically in the range 1e-2 to 1e-5
number_of_epochs = 10  # Number of epochs to train; one epoch normally means a full pass over the dataset,
# though since steps_per_epoch is set explicitly below, this may not hold exactly
regularization_parameter = 0.01  # lambda weighting the regularization term in the loss
steps_per_epoch = 10
loss_function = 'mse'
batch_normalization = True
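# Hedged sketch: in registration networks of this kind the total loss is
# typically the image-similarity term plus lambda times a smoothness penalty
# on the predicted deformation field. The project's train() wires this up
# internally; an assumed formulation with a numpy-style gradient penalty:
#
# import numpy as np
# def gradient_l2(flow):
#     """Mean squared spatial gradient of the flow field (smoothness term)."""
#     grads = np.gradient(flow, axis=(1, 2, 3))  # spatial axes of (N, X, Y, Z, 3)
#     return np.mean([np.mean(g ** 2) for g in grads])
#
# total_loss = mse(fixed, warped_moving) + regularization_parameter * gradient_l2(flow)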

model, callbacks = create_model(layers,
                                atlas_vol.shape[1:-1],
                                model_desc,
                                batch_norm=batch_normalization,
                                data_generator=train_generator)

train(model, train_generator, callbacks, learning_rate, number_of_epochs,
      regularization_parameter, steps_per_epoch, loss_function)