Example #1
0
    # NOTE(review): fragment starts mid-function — target_data_path, device, lr,
    # steps, decay_rate, start_epoch, epoch_num and train_loader come from the
    # enclosing (out-of-view) scope. Loads only the validation split here.
    val_loader, _ = loading_data(target_data_path, mode='val')

    net = CSRNet().to(device)
    # Optimize only parameters with requires_grad=True (frozen layers excluded).
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                          lr=lr,
                          momentum=0.9)
    # NOTE(review): size_average=False is deprecated in modern PyTorch;
    # the equivalent is reduction='sum' — update when the torch version allows.
    criterion_dens = nn.MSELoss(size_average=False)    # density-map regression loss
    criterion_count = nn.L1Loss(size_average=False)    # people-count loss

    # Start training and validation.
    best_mae = sys.maxsize    # best (lowest) validation MAE seen so far
    for epoch in range(start_epoch, epoch_num + 1):
        print('Epoch {}/{}'.format(epoch, epoch_num))
        # Training phase: step the LR schedule, then iterate over the train set.
        optimizer = update_lr(optimizer, epoch, steps, decay_rate)
        net.train()

        running_loss = 0.0
        running_mse = 0.0
        running_mae = 0.0
        totalnum = 0
        for idx, (image, densityMap) in enumerate(train_loader):
            image = image.to(device)
            densityMap = densityMap.to(device)

            optimizer.zero_grad()
            duration = time.time()    # timestamp for per-iteration timing
            predDensityMap = net(image)
            # Squeeze singleton dims so prediction and target shapes match
            # for the element-wise MSE below.
            predDensityMap = torch.squeeze(predDensityMap)
            densityMap = torch.squeeze(densityMap)
            loss = criterion_dens(predDensityMap, densityMap)
            # (fragment truncated here — backward/step continue out of view)
Example #2
0
if __name__=="__main__":
    # argument parsing.
    args = parser.parse_args()
    cfg = Config(args)                                                          # configuration
    model = CSRNet().to(cfg.device)                                         # model
    # NOTE(review): size_average=False is deprecated; reduction='sum' is the
    # modern equivalent — update when the torch version allows.
    criterion = nn.MSELoss(size_average=False)                              # objective
    optimizer = torch.optim.Adam(model.parameters(),lr=cfg.lr)              # optimizer

    train_dataloader = create_train_dataloader(cfg.train_dataset_root, use_flip=True, batch_size=cfg.batch_size)
    test_dataloader  = create_test_dataloader(cfg.test_dataset_root)             # dataloader

    min_mae = sys.maxsize       # best (lowest) test MAE seen so far
    min_mae_epoch = -1          # epoch at which min_mae was achieved
    # NOTE(review): range(1, cfg.epochs) runs cfg.epochs - 1 epochs; confirm
    # whether cfg.epochs is meant to be inclusive.
    for epoch in range(1, cfg.epochs):                          # start training
        model.train()
        epoch_loss = 0.0
        for i, data in enumerate(tqdm(train_dataloader)):
            image = data['image'].to(cfg.device)
            # TODO: scale by 16 because of the 1/4-rescale effect (density map is
            # 1/4 resolution in each dimension, 16x fewer pixels) — verify.
            gt_densitymap = data['densitymap'].to(cfg.device) * 16
            et_densitymap = model(image)                        # forward propagation
            loss = criterion(et_densitymap,gt_densitymap)       # calculate loss
            epoch_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()                                     # back propagation
            optimizer.step()                                    # update network parameters
        cfg.writer.add_scalar('Train_Loss', epoch_loss/len(train_dataloader), epoch)
        # Evaluation phase: no gradients needed.
        model.eval()
        with torch.no_grad():
            epoch_mae = 0.0
            for i, data in enumerate(tqdm(test_dataloader)):
                # (fragment truncated here — eval-loop body continues out of view)
Example #3
0
    # NOTE(review): fragment starts mid-function — target_loader, trainloader_iter,
    # net (generator), net_D (discriminator), both optimizers, lr/lr_D, steps,
    # decay_rate, power, start_epoch, epoch_num and device come from the enclosing
    # (out-of-view) scope. Looks like adversarial domain-adaptation training.
    targetloader_iter = enumerate(target_loader)
    best_mae = sys.maxsize      # best (lowest) MAE observed so far
    loss_dens_value = 0.0       # running density (regression) loss
    loss_adv_value = 0.0        # running adversarial loss for the generator
    loss_D_value = 0.0          # running discriminator loss

    running_mse = 0.0
    running_mae = 0.0
    totalnum = 0
    iter_count = 0
    # NOTE(review): despite the variable name `epoch`, the print below labels
    # each step an "Iteration" — each pass consumes one batch, not a full epoch.
    for epoch in range(start_epoch, epoch_num + 1):
        iter_count = iter_count + 1
        print('Iteration {}/{}'.format(epoch, epoch_num))

        # Step both learning-rate schedules, then put both nets in train mode.
        optimizer = update_lr(optimizer, epoch, steps, decay_rate)
        net.train(True)
        net_D.train(True)
        optimizer_D = adjust_learning_rate_D(optimizer_D, epoch, lr_D,
                                             epoch_num, power)
        optimizer.zero_grad()
        optimizer_D.zero_grad()

        # train G: freeze the discriminator so only the generator gets gradients.
        for param in net_D.parameters():
            param.requires_grad = False

        # train with source: pull the next (image, density-map) batch.
        _, (image, Dmap) = next(trainloader_iter)
        image = image.to(device)
        Dmap = Dmap.to(device)
        # (fragment truncated here — generator forward/backward continue out of view)