# Esempio n. 1  (example-listing header from the original source page)
# 0
# Load the model: make the experiment directory importable, then instantiate.
sys.path.append(os.path.join(".", experiment_path, experiment_name))
if arch != "Custom":
    # Only the "Custom" architecture is wired up here.
    raise NotImplementedError
from custom import Custom
model = Custom(cfg=cfg.model)
# Replicate across every visible GPU, then move to the target device.
model = torch.nn.DataParallel(
    model,
    device_ids=list(range(torch.cuda.device_count())),
).to(device)

# Create the TensorBoard summary writer for this run (tensorboardX backend).
from tensorboardX import SummaryWriter

writer = SummaryWriter(os.path.join(".", board_path, experiment_name))

# Adam optimizer with a step-decay schedule: the LR is halved at
# epochs 10, 20, 30 and 40.
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer,
    milestones=[10, 20, 30, 40],
    gamma=0.5,
)

# Build the loss functions.
# NOTE(review): the names look swapped — `loss_L1` is bound to nn.MSELoss
# (the squared-error / L2 criterion) while `loss_L2` is bound to nn.L1Loss
# (mean absolute error). Downstream code (not visible here) may depend on
# the current bindings, so this is only flagged, not changed — confirm the
# intended pairing before renaming or swapping.
loss_L1 = nn.MSELoss().to(device)
loss_L2 = nn.L1Loss().to(device)
# Binary cross-entropy (original comment said "crossEntropy"; nn.BCELoss
# is the binary variant).
loss_BCE = nn.BCELoss().to(device)
# Structural-similarity loss; `SSIM` is a project-local class not visible here.
loss_SSIM = SSIM(window_size=11, size_average=True)
# Combined L1 + L2 (mean) criterion — project-local helper, definition not
# visible in this file.
loss_L1_L2 = L1_L2_Loss().to(device)

# Training section: rebuild the model from the experiment directory.
sys.path.append(os.path.join(".", experiment_path, experiment_name))
if arch != "Custom":
    # Only the "Custom" architecture is supported.
    raise NotImplementedError
from custom import Custom
# nn.Module.to() returns the module itself, so construction and the device
# move can be chained before the DataParallel wrap.
model = Custom(cfg=cfg.model).to(device)
model = torch.nn.DataParallel(
    model,
    device_ids=list(range(torch.cuda.device_count())),
).to(device)

# Create the TensorBoard summary writer for this run (tensorboardX backend).
from tensorboardX import SummaryWriter

writer = SummaryWriter(os.path.join(".", board_path, experiment_name))

# Adam optimizer (betas written out explicitly; (0.9, 0.999) matches the
# documented defaults) with a step-decay schedule: LR halved at 10/20/30/40.
optimizer = torch.optim.Adam(
    model.parameters(), lr=lr, betas=(0.9, 0.999))
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer,
    milestones=[10, 20, 30, 40],
    gamma=0.5,
)

# Build the loss functions.
# NOTE(review): the names look swapped — `loss_L1` is bound to nn.MSELoss
# (the squared-error / L2 criterion) while `loss_L2` is bound to nn.L1Loss
# (mean absolute error). Downstream code (not visible here) may depend on
# the current bindings, so this is only flagged, not changed — confirm the
# intended pairing before renaming or swapping.
loss_L1 = nn.MSELoss().to(device)
loss_L2 = nn.L1Loss().to(device)
# Binary cross-entropy (original comment said "crossEntropy"; nn.BCELoss
# is the binary variant).
loss_BCE = nn.BCELoss().to(device)
# Structural-similarity loss; `SSIM` is a project-local class not visible here.
loss_SSIM = SSIM(window_size=11, size_average=True)

# Combined L1 + L2 criterion — project-local helper, definition not visible here.
loss_L1_L2 = L1_L2_Loss().to(device)

# Training section follows.