# Example 1
def cal_mae(img_root,gt_dmap_root,model_param_path):
    '''
    Compute the mean absolute error (MAE) of a trained CANNet on the test set.

    img_root: root directory of the test images.
    gt_dmap_root: root directory of the ground-truth density maps.
    model_param_path: path to the saved CANNet state dict.
    '''
    device = torch.device("cpu")
    # Restore the trained network onto the CPU.
    model = CANNet()
    model.load_state_dict(torch.load(model_param_path))
    model.to(device)

    test_set = CrowdDataset(img_root, gt_dmap_root, 8, phase='test')
    loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)

    model.eval()
    total_abs_err = 0
    with torch.no_grad():
        for img, gt_dmap in tqdm(loader):
            img = img.to(device)
            gt_dmap = gt_dmap.to(device)
            # Forward pass; crowd counts are the sums of the density maps.
            est_dmap = model(img)
            total_abs_err += abs(est_dmap.data.sum() - gt_dmap.data.sum()).item()
            del img, gt_dmap, est_dmap

    print("model_param_path:"+model_param_path+" mae:"+str(total_abs_err/len(loader)))
# Example 2
def estimate_density_map(img_root,gt_dmap_root,model_param_path,index):
    '''
    Display the estimated density map for one test image.

    img_root: root directory of the test images.
    gt_dmap_root: root directory of the ground-truth density maps.
    model_param_path: path to the saved CANNet state dict.
    index: position of the desired image within the test dataset.
    '''
    device = torch.device("cuda")
    model = CANNet().to(device)
    model.load_state_dict(torch.load(model_param_path))

    test_set = CrowdDataset(img_root, gt_dmap_root, 8, phase='test')
    loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)

    model.eval()
    for pos, (img, gt_dmap) in enumerate(loader):
        if pos != index:
            continue
        img = img.to(device)
        gt_dmap = gt_dmap.to(device)
        # Forward pass with gradients discarded, then drop batch/channel dims.
        estimate = model(img).detach()
        estimate = estimate.squeeze(0).squeeze(0).cpu().numpy()
        print(estimate.shape)
        plt.imshow(estimate, cmap=CM.jet)
        plt.show()
        break
# Example 3
def estimate_density_map(img_root, gt_dmap_root, model_param_path, index):
    '''
    Render the CSRNet density-map prediction for one image of the dataset.

    img_root: root directory of the test images.
    gt_dmap_root: root directory of the ground-truth density maps.
    model_param_path: path to the saved CSRNet state dict.
    index: position of the desired image within the dataset.
    '''
    device = torch.device("cuda")
    net = CSRNet().to(device)
    net.load_state_dict(torch.load(model_param_path))
    loader = torch.utils.data.DataLoader(
        CrowdDataset(img_root, gt_dmap_root, 8),
        batch_size=1,
        shuffle=False)
    net.eval()
    for i, (img, gt_dmap) in enumerate(loader):
        if i == index:
            img = img.to(device)
            gt_dmap = gt_dmap.to(device)
            # Forward pass; strip the batch and channel dimensions for display.
            prediction = net(img).detach()
            prediction = prediction.squeeze(0).squeeze(0).cpu().numpy()
            print(prediction.shape)
            plt.imshow(prediction, cmap=CM.jet)
            break
# Example 4
def estimate_density_map(img_root,gt_dmap_root,model_param_path):
    '''
    Show and save the estimated density map of the first test image.

    img_root: root directory of the test images.
    gt_dmap_root: root directory of the ground-truth density maps.
    model_param_path: path to the saved CANNet state dict.

    The rendered map is written to result/<img_name>_out.png.
    '''
    model = CANNet().cuda()
    model.load_state_dict(torch.load(model_param_path))
    model.eval()

    test_set = CrowdDataset(img_root, gt_dmap_root, 8, phase='test')
    loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)
    print('dataloader = ', loader)

    for img, gt_dmap, img_name in loader:
        print('img_shape = ', img.shape)
        start = time.time()
        img = img.cuda()
        gt_dmap = gt_dmap.cuda()
        # Forward pass; drop the batch and channel dimensions for plotting.
        density = model(img).detach()
        density = density.squeeze(0).squeeze(0).cpu().numpy()

        frame = plt.gca()
        plt.imshow(density, cmap=CM.jet)
        plt.show()
        # Hide axes and borders so only the density map itself is saved.
        frame.axes.get_yaxis().set_visible(False)
        frame.axes.get_xaxis().set_visible(False)
        for side in ('top', 'bottom', 'left', 'right'):
            frame.spines[side].set_visible(False)
        plt.savefig( 'result/'+str(img_name)+'_out' + '.png',bbox_inches='tight', pad_inches=0, dpi=200)
        plt.close()
        print('tt = ', time.time()-start)
        break
def main(args):
    """Train a CANNet crowd-counting model and log metrics and sample images to wandb.

    args: argparse-like object with (at least) seed, device, lr, decay,
        batch_size, gradient_accumulation_steps, epochs, train_image_root,
        train_dmap_root, val_image_root, val_dmap_root. It is replaced by
        wandb.config after wandb.init so sweeps can override values.
    """
    wandb.init(project="crowd", config=args)
    args = wandb.config

    torch.cuda.manual_seed(args.seed)
    model = CANNet().to(args.device)
    # Sum-reduced MSE: the loss scales with the total density-map mass.
    criterion = nn.MSELoss(reduction='sum').to(args.device)
    optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.decay)

    train_dataset = CrowdDataset(args.train_image_root, args.train_dmap_root, gt_downsample=8, phase='train')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    val_dataset = CrowdDataset(args.val_image_root, args.val_dmap_root, gt_downsample=8, phase='test')
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)

    if not os.path.exists('./checkpoints'):
        os.mkdir('./checkpoints')

    min_mae = 10000
    min_epoch = 0
    for epoch in tqdm(range(0, args.epochs)):
        # ---- training phase ----
        model.train()
        model.zero_grad()
        train_loss = 0
        train_mae = 0
        train_bar = tqdm(train_loader)
        for i, (img, gt_dmap) in enumerate(train_bar):
            img = img.to(args.device)
            gt_dmap = gt_dmap.to(args.device)

            # Forward propagation and loss.
            et_dmap = model(img)
            loss = criterion(et_dmap, gt_dmap)
            train_loss += loss.item()
            train_mae += abs(et_dmap.data.sum() - gt_dmap.data.sum()).item()
            # Scale so an accumulated step matches a single averaged step.
            loss = loss / args.gradient_accumulation_steps
            loss.backward()
            if (i + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                model.zero_grad()
            train_bar.set_postfix(loss=train_loss / (i + 1), mae=train_mae / (i + 1))
        # Flush gradients left over from a partial accumulation window.
        optimizer.step()
        model.zero_grad()
        torch.save(model.state_dict(), './checkpoints/epoch_' + str(epoch) + ".pth")

        # ---- validation phase ----
        model.eval()
        val_loss = 0
        val_mae = 0
        # BUGFIX: evaluation previously ran without no_grad(), building
        # autograd graphs and wasting memory on every validation batch.
        with torch.no_grad():
            for i, (img, gt_dmap) in enumerate(val_loader):
                img = img.to(args.device)
                gt_dmap = gt_dmap.to(args.device)

                et_dmap = model(img)
                loss = criterion(et_dmap, gt_dmap)
                val_loss += loss.item()
                val_mae += abs(et_dmap.data.sum() - gt_dmap.data.sum()).item()
                del img, gt_dmap, et_dmap

        if val_mae / len(val_loader) < min_mae:
            min_mae = val_mae / len(val_loader)
            min_epoch = epoch
        wandb.log({"loss/train": train_loss / len(train_loader),
                   "mae/train": train_mae / len(train_loader),
                   "loss/val": val_loss / len(val_loader),
                   "mae/val": val_mae / len(val_loader),
        }, commit=False)

        # ---- log one random validation sample as images ----
        # BUGFIX: the index must be bounded by the dataset length, not the
        # loader length — they differ whenever batch_size > 1.
        index = random.randint(0, len(val_dataset) - 1)
        img, gt_dmap = val_dataset[index]
        gt_dmap = gt_dmap.squeeze(0).detach().cpu().numpy()
        wandb.log({"image/img": [wandb.Image(img)]}, commit=False)
        wandb.log({"image/gt_dmap": [wandb.Image(gt_dmap/(gt_dmap.max())*255, caption=str(gt_dmap.sum()))]}, commit=False)

        img = img.unsqueeze(0).to(args.device)
        with torch.no_grad():
            et_dmap = model(img)
        et_dmap = et_dmap.squeeze(0).detach().cpu().numpy()
        wandb.log({"image/et_dmap": [wandb.Image(et_dmap/(et_dmap.max())*255, caption=str(et_dmap.sum()))]})

    import time
    print(time.strftime('%Y.%m.%d %H:%M:%S', time.localtime(time.time())))
# Example 6
    # NOTE(review): fragment of a training-setup routine; the enclosing `def`
    # (which would provide gpu_or_cpu, lr, momentum, the dataset roots, etc.)
    # is not visible in this excerpt.
    workers = 4
    # NOTE(review): time.time() is a float; torch's manual_seed expects an
    # int — verify this does not raise on the torch version in use.
    seed = time.time()
    print_freq = 30

    # Visdom server handle for live training visualization.
    vis = visdom.Visdom()
    device = torch.device(gpu_or_cpu)
    torch.cuda.manual_seed(seed)
    model = CANNet().to(device)
    # size_average=False is the deprecated spelling of reduction='sum'.
    criterion = nn.MSELoss(size_average=False).to(device)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr,
                                momentum=momentum,
                                weight_decay=0)
    #    optimizer=torch.optim.Adam(model.parameters(),lr)
    # Ground-truth density maps are downsampled 8x to match the network output.
    train_dataset = CrowdDataset(train_image_root,
                                 train_dmap_root,
                                 gt_downsample=8)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=1,
                                               shuffle=True)
    test_dataset = CrowdDataset(test_image_root,
                                test_dmap_root,
                                gt_downsample=8)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False)

    # Checkpoints are written here once per epoch (presumably — the training
    # loop itself is outside this excerpt).
    if not os.path.exists('./checkpoints'):
        os.mkdir('./checkpoints')
    # Best-MAE tracking across epochs.
    min_mae = 10000
    min_epoch = 0
# Example 7
def cal_mae(img_root, gt_dmap_root):
    '''
    Evaluate CSRNet's MAE on adversarially patched, then randomly ablated,
    test images, saving all intermediate images and density maps.

    img_root: root directory of the test images.
    gt_dmap_root: root directory of the ground-truth density maps.

    Relies on module-level configuration (device, image_size, patch_size,
    patch_type, data_shape, keep) and helpers (init_patch_circle,
    circle_transform, random_mask_batch_one_sample, load_net, Variable).
    '''
    model_path = './saved_models/CSR_Shanghai_A_900.h5'
    model_name = os.path.basename(model_path).split('.')[0]

    # Create the output directories for all saved artifacts.
    for out_dir in ('./defense_CSR',
                    './defense_CSR/density_map_adv',
                    './defense_CSR/images_adv_patch',
                    './defense_CSR/images_adv_ablated'):
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)

    model = CSRNet()
    load_net(os.path.join(model_path), model)
    model.to(device)

    dataset = CrowdDataset(img_root, gt_dmap_root, 8)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=False)
    model.eval()
    mae = 0
    with torch.no_grad():
        for i, (img, gt_dmap) in enumerate(tqdm(dataloader)):
            # Name shared by all artifacts of this sample.
            # BUGFIX: `full_imgname` was previously undefined (NameError on
            # first iteration); derive it from the sample index.
            full_imgname = str(i)

            patch, mask, patch_shape = init_patch_circle(
                image_size, patch_size)
            patch_init = patch.copy()
            patch_shape_orig = patch_shape

            img = img.to(device)
            gt_dmap = gt_dmap.to(device)
            im_data_var = Variable(img)

            # Place the adversarial patch on the image.
            # NOTE(review): for any patch_type other than 'circle',
            # patch_full/mask_full stay undefined below — confirm intended.
            if patch_type == 'circle':
                patch_full, mask_full, _, rx, ry, _ = circle_transform(
                    patch, mask, patch_init, data_shape, patch_shape)

            patch_full = torch.FloatTensor(patch_full).to(device)
            mask_full = torch.FloatTensor(mask_full).to(device)
            patch_var, mask_var = Variable(patch_full), Variable(mask_full)

            # Composite: original pixels outside the mask, patch pixels inside.
            adv_tgt_img_var = torch.mul(
                (1 - mask_var), im_data_var) + torch.mul(mask_var, patch_var)
            adv_img = adv_tgt_img_var.data.cpu().numpy()

            # Save the patched (attacked) image.
            adv_tgt_img = Image.fromarray(adv_img[0][0]).convert('RGB')
            adv_tgt_img.save(
                './defense_CSR/images_adv_patch/{}.png'.format(full_imgname))

            # Randomized-ablation defense, then forward pass through CSRNet.
            img_final = random_mask_batch_one_sample(adv_img,
                                                     keep,
                                                     reuse_noise=True)
            img_final_var = Variable(img_final)
            et_dmap = model(img_final_var)

            # Save the ablated adversarial image.
            im_fi = img_final_var.data.detach().cpu().numpy()
            plt.imsave(
                './defense_CSR/images_adv_ablated/{}'.format(full_imgname),
                im_fi[0][0],
                format='png',
                cmap='gray')

            # Save the predicted density map.
            # BUGFIX: previously read the undefined name `density_map` before
            # assignment; the network output is `et_dmap`.
            density_map = et_dmap.data.detach().cpu().numpy()
            # NOTE(review): `cm.plt.jet` looks wrong (likely CM.jet or
            # plt.cm.jet) — confirm against the module's imports.
            plt.imsave('./defense_CSR/density_map_adv/{}'.format(full_imgname),
                       density_map[0][0],
                       format='png',
                       cmap=cm.plt.jet)

            mae += abs(et_dmap.data.sum() - gt_dmap.data.sum()).item()

            del img, gt_dmap, et_dmap

    # BUGFIX: `model_param_path` does not exist in this scope; report the
    # local `model_path` instead.
    print("model_param_path:" + model_path + " mae:" +
          str(mae / len(dataloader)))
    # NOTE(review): fragment of a second training-setup routine glued here by
    # the scrape; its enclosing `def` (providing gpu_or_cpu, lr, momentum,
    # epochs, dataset roots) is not visible, and the epoch loop is cut off
    # at the end of this excerpt.
    # Learning-rate schedule milestones and per-milestone scale factors.
    steps             = [-1,1,100,150]
    scales            = [1,1,1,1]
    workers           = 4
    # NOTE(review): time.time() is a float; torch's manual_seed expects an
    # int — verify this does not raise on the torch version in use.
    seed              = time.time()
    print_freq        = 30 
    
    # Visdom server handle for live training visualization.
    vis=visdom.Visdom()
    device=torch.device(gpu_or_cpu)
    torch.cuda.manual_seed(seed)
    model=CANNet().to(device)
    # size_average=False is the deprecated spelling of reduction='sum'.
    criterion=nn.MSELoss(size_average=False).to(device)
    optimizer=torch.optim.SGD(model.parameters(),lr,
                              momentum=momentum,
                              weight_decay=0)
#    optimizer=torch.optim.Adam(model.parameters(),lr)
    # Ground-truth density maps are downsampled 8x to match the network output.
    train_dataset=CrowdDataset(train_image_root,train_dmap_root,gt_downsample=8,phase='train')
    train_loader=torch.utils.data.DataLoader(train_dataset,batch_size=1,shuffle=True)
    test_dataset=CrowdDataset(test_image_root,test_dmap_root,gt_downsample=8,phase='test')
    test_loader=torch.utils.data.DataLoader(test_dataset,batch_size=1,shuffle=False)
    
    if not os.path.exists('./checkpoints'):
        os.mkdir('./checkpoints')
    # Best-MAE tracking and per-epoch history buffers.
    min_mae=10000
    min_epoch=0
    train_loss_list=[]
    epoch_list=[]
    test_error_list=[]
    for epoch in range(0,epochs):
        # training phase
        model.train()
        epoch_loss=0