コード例 #1
0
ファイル: train.py プロジェクト: wendy-xiaozong/pytorch_Unet
def get_model_and_optimizer(device):
    """Build a 3-D U-Net and a default AdamW optimizer over its parameters.

    Args:
        device: torch device (or device string) the model is moved to.

    Returns:
        Tuple of ``(model, optimizer)``.
    """
    # Architecture hyper-parameters kept together so they read as one config.
    unet_config = dict(
        in_channels=1,
        out_classes=1,
        dimensions=3,
        normalization='Group',
        num_encoding_blocks=3,
        out_channels_first_layer=8,
        upsampling_type='conv',
        padding=2,
        activation='PReLU',
        dropout=0,
    )
    model = UNet(**unet_config).to(device)
    # AdamW with library defaults (lr=1e-3, default weight decay).
    optimizer = torch.optim.AdamW(model.parameters())
    return model, optimizer
コード例 #2
0
                             color_jitter_params=None)

# --- checkpoint folder -------------------------------------------------
model_name = args.model_name
file_dir = os.path.split(os.path.realpath(__file__))[0]
results_folder = os.path.join(file_dir, '..', 'checkpoints', model_name)
# exist_ok=True replaces the check-then-create pattern: no TOCTOU race and
# no crash when the folder already exists from a previous run.
os.makedirs(results_folder, exist_ok=True)

# --- model -------------------------------------------------------------
# 3 input channels, 3 output classes.
unet = UNet(3, 3)
if args.trained_model_path is not None:
    # NOTE(review): torch.load without map_location assumes the checkpoint's
    # original device is available here — consider map_location='cpu' when
    # resuming GPU checkpoints on a CPU-only machine.
    unet.load_state_dict(torch.load(args.trained_model_path))

# Per-class weights: class 0 weighted 1, classes 1 and 2 weighted 5
# (presumably to counter class imbalance — verify against the dataset).
loss = SoftDiceLoss(weight=torch.Tensor([1, 5, 5]))
optimizer = optim.Adam(unet.parameters(), lr=args.initial_lr)
# Drop the learning rate after the monitored metric plateaus for 50 epochs.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                 patience=50,
                                                 verbose=True)

cuda_device = torch.device(args.device)
model = ModelWrapper(unet,
                     loss=loss,
                     optimizer=optimizer,
                     scheduler=scheduler,
                     results_folder=results_folder,
                     cuda_device=cuda_device)

model.train_model(train_dataset,
                  validation_dataset=validate_dataset,
                  n_batch=args.batch_size,