Code Example #1
    def pwc_net(self, im1_fn, im2_fn):
        pwc_model_fn = '/app/PWC-Net/Multi_Frame_Flow/pwc_net.pth.tar'

        im_all = [imread(img) for img in [im1_fn, im2_fn]]
        im_all = [im[:, :, :3] for im in im_all]

        # rescale the image size to be multiples of 64
        divisor = 64.
        H = im_all[0].shape[0]
        W = im_all[0].shape[1]

        H_ = int(ceil(H / divisor) * divisor)
        W_ = int(ceil(W / divisor) * divisor)
        for i in range(len(im_all)):
            im_all[i] = cv2.resize(im_all[i], (W_, H_))

        for _i, _inputs in enumerate(im_all):
            im_all[_i] = im_all[_i][:, :, ::-1]
            im_all[_i] = 1.0 * im_all[_i] / 255.0

            im_all[_i] = np.transpose(im_all[_i], (2, 0, 1))
            im_all[_i] = torch.from_numpy(im_all[_i])
            im_all[_i] = im_all[_i].expand(1, im_all[_i].size()[0],
                                           im_all[_i].size()[1],
                                           im_all[_i].size()[2])
            im_all[_i] = im_all[_i].float()

        im_all = torch.autograd.Variable(torch.cat(im_all, 1).cuda(),
                                         volatile=True)

        net = models.pwc_dc_net(pwc_model_fn)
        net = net.cuda()
        net.eval()

        flo = net(im_all)
        flo = flo[0] * 20.0
        flo = flo.cpu().data.numpy()

        # scale the flow back to the input size
        flo = np.swapaxes(np.swapaxes(flo, 0, 1), 1, 2)  # (2, H, W) -> (H, W, 2)
        u_ = cv2.resize(flo[:, :, 0], (W, H))
        v_ = cv2.resize(flo[:, :, 1], (W, H))
        u_ *= W / float(W_)
        v_ *= H / float(H_)
        flo = np.dstack((u_, v_))
        return flo
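A note on the API used here: torch.autograd.Variable(..., volatile=True) is pre-0.4 PyTorch; on current releases the Variable wrapper is gone and torch.no_grad() provides the same inference-only behavior. A minimal sketch of the equivalent forward pass, assuming the net and preprocessed im_all list from the example above:

import torch

# (1, 6, H_, W_): both preprocessed frames stacked along the channel axis
inputs = torch.cat(im_all, 1).cuda()

with torch.no_grad():  # replaces volatile=True, which was removed in PyTorch 0.4
    flo = net(inputs)[0] * 20.0  # undo PWC-Net's div_flow=20 output scaling
flo = flo.cpu().numpy()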
Code Example #2
def pwc(prev,nextf):
	pwc_model_fn = './pwc_net.pth.tar'
	im_all=[prev,nextf]
	divisor = 64.
	H = im_all[0].shape[0]
	W = im_all[0].shape[1]

	H_ = int(ceil(H/divisor) * divisor)
	W_ = int(ceil(W/divisor) * divisor)
	for i in range(len(im_all)):
		im_all[i] = cv2.resize(im_all[i], (W_, H_))

	for _i, _inputs in enumerate(im_all):
		im_all[_i] = 1.0 * im_all[_i]/255.0
		im_all[_i] = np.transpose(im_all[_i], (2, 0, 1))
		im_all[_i] = torch.from_numpy(im_all[_i])
		im_all[_i] = im_all[_i].expand(1, im_all[_i].size()[0], im_all[_i].size()[1], im_all[_i].size()[2])	
		im_all[_i] = im_all[_i].float()
		
	im_all = torch.autograd.Variable(torch.cat(im_all,1).cuda(), volatile=True)

	net = models.pwc_dc_net(pwc_model_fn)
	net = net.cuda()
	net.eval()

	flo = net(im_all)
	flo = flo[0] * 20.0
	flo = flo.cpu().data.numpy()

	# scale the flow back to the input size
	flo = np.swapaxes(np.swapaxes(flo, 0, 1), 1, 2)  # (2, H, W) -> (H, W, 2)
	u_ = cv2.resize(flo[:, :, 0], (W, H))
	v_ = cv2.resize(flo[:, :, 1], (W, H))
	u_ *= W / float(W_)
	v_ *= H / float(H_)
	flo = np.dstack((u_, v_))
	return flo
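Unlike Example #1, this variant skips the BGR-to-RGB channel flip, so it expects prev and nextf to be RGB already. Note also that resizing to multiples of 64 rescales the motion itself, which is why u_ and v_ are corrected by W/W_ and H/H_ at the end; an alternative (a sketch, not part of the original code) is to pad instead of resize, after which no correction is needed:

import cv2

def pad_to_multiple(img, divisor=64):
    # zero-pad an H x W x 3 image on the bottom/right to multiples of divisor
    H, W = img.shape[:2]
    H_ = ((H + divisor - 1) // divisor) * divisor
    W_ = ((W + divisor - 1) // divisor) * divisor
    return cv2.copyMakeBorder(img, 0, H_ - H, 0, W_ - W,
                              cv2.BORDER_CONSTANT, value=0)

# flow computed on padded frames is simply cropped back with flo[:H, :W]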
Code Example #3
File: main.py Project: zhuoliny/flowattack
def main():
    global args, best_error, n_iter
    args = parser.parse_args()
    save_path = Path(args.name)
    args.save_path = 'checkpoints' / save_path  #/timestamp
    print('=> will save everything to {}'.format(args.save_path))
    args.save_path.makedirs_p()
    torch.manual_seed(args.seed)

    training_writer = SummaryWriter(args.save_path)
    output_writer = SummaryWriter(args.save_path / 'valid')

    # Data loading code
    flow_loader_h, flow_loader_w = 384, 1280

    train_transform = custom_transforms.Compose([
        custom_transforms.RandomHorizontalFlip(),
        custom_transforms.RandomScaleCrop(h=256, w=256),
        custom_transforms.ArrayToTensor(),
    ])

    valid_transform = custom_transforms.Compose([
        custom_transforms.Scale(h=flow_loader_h, w=flow_loader_w),
        custom_transforms.ArrayToTensor()
    ])

    print("=> fetching scenes in '{}'".format(args.data))
    train_set = SequenceFolder(args.data,
                               transform=train_transform,
                               seed=args.seed,
                               train=True,
                               sequence_length=3)

    if args.valset == "kitti2015":
        from datasets.validation_flow import ValidationFlowKitti2015
        val_set = ValidationFlowKitti2015(root=args.kitti_data,
                                          transform=valid_transform)
    elif args.valset == "kitti2012":
        from datasets.validation_flow import ValidationFlowKitti2012
        val_set = ValidationFlowKitti2012(root=args.kitti_data,
                                          transform=valid_transform)

    if args.DEBUG:
        train_set.__len__ = 32
        train_set.samples = train_set.samples[:32]

    print('{} samples found in {} train scenes'.format(len(train_set),
                                                       len(train_set.scenes)))
    print('{} samples found in valid scenes'.format(len(val_set)))
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=1,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    val_loader = torch.utils.data.DataLoader(
        val_set,
        batch_size=1,  # batch size is 1 since images in kitti have different sizes
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=True)

    if args.epoch_size == 0:
        args.epoch_size = len(train_loader)

    # create model
    print("=> creating model")

    if args.flownet == 'SpyNet':
        flow_net = getattr(models, args.flownet)(nlevels=6, pretrained=True)
    elif args.flownet == 'Back2Future':
        flow_net = getattr(
            models, args.flownet)(pretrained='pretrained/b2f_rm_hard.pth.tar')
    elif args.flownet == 'PWCNet':
        flow_net = models.pwc_dc_net(
            'pretrained/pwc_net_chairs.pth.tar')  # pwc_net.pth.tar')
    else:
        flow_net = getattr(models, args.flownet)()

    if args.flownet in ['SpyNet', 'Back2Future', 'PWCNet']:
        print("=> using pre-trained weights for " + args.flownet)
    elif args.flownet in ['FlowNetC']:
        print("=> using pre-trained weights for FlowNetC")
        weights = torch.load('pretrained/FlowNet2-C_checkpoint.pth.tar')
        flow_net.load_state_dict(weights['state_dict'])
    elif args.flownet in ['FlowNetS']:
        print("=> using pre-trained weights for FlowNetS")
        weights = torch.load('pretrained/flownets.pth.tar')
        flow_net.load_state_dict(weights['state_dict'])
    elif args.flownet in ['FlowNet2']:
        print("=> using pre-trained weights for FlowNet2")
        weights = torch.load('pretrained/FlowNet2_checkpoint.pth.tar')
        flow_net.load_state_dict(weights['state_dict'])
    else:
        flow_net.init_weights()

    pytorch_total_params = sum(p.numel() for p in flow_net.parameters())
    print("Number of model paramters: " + str(pytorch_total_params))

    flow_net = flow_net.cuda()

    cudnn.benchmark = True
    if args.patch_type == 'circle':
        patch, mask, patch_shape = init_patch_circle(args.image_size,
                                                     args.patch_size)
        patch_init = patch.copy()
    elif args.patch_type == 'square':
        patch, patch_shape = init_patch_square(args.image_size,
                                               args.patch_size)
        patch_init = patch.copy()
        mask = np.ones(patch_shape)
    else:
        sys.exit("Please choose a square or circle patch")

    if args.patch_path:
        patch, mask, patch_shape = init_patch_from_image(
            args.patch_path, args.mask_path, args.image_size, args.patch_size)
        patch_init = patch.copy()

    if args.log_terminal:
        logger = TermLogger(n_epochs=args.epochs,
                            train_size=min(len(train_loader), args.epoch_size),
                            valid_size=len(val_loader),
                            attack_size=args.max_count)
        logger.epoch_bar.start()
    else:
        logger = None

    for epoch in range(args.epochs):

        if args.log_terminal:
            logger.epoch_bar.update(epoch)
            logger.reset_train_bar()

        # train for one epoch
        patch, mask, patch_init, patch_shape = train(patch, mask, patch_init,
                                                     patch_shape, train_loader,
                                                     flow_net, epoch, logger,
                                                     training_writer)

        # Validate
        errors, error_names = validate_flow_with_gt(patch, mask, patch_shape,
                                                    val_loader, flow_net,
                                                    epoch, logger,
                                                    output_writer)

        error_string = ', '.join('{} : {:.3f}'.format(name, error)
                                 for name, error in zip(error_names, errors))
        if args.log_terminal:
            logger.valid_writer.write(' * Avg {}'.format(error_string))
        else:
            print('Epoch {} completed'.format(epoch))

        for error, name in zip(errors, error_names):
            training_writer.add_scalar(name, error, epoch)

        torch.save(patch, args.save_path / 'patch_epoch_{}'.format(str(epoch)))

    if args.log_terminal:
        logger.epoch_bar.finish()
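The loop above writes the raw patch with torch.save once per epoch, so it can be reloaded later, e.g. by the evaluation script in Example #7. A sketch, with a hypothetical run name and epoch number:

import torch

# 'checkpoints/<name>/patch_epoch_<k>' as written by the training loop;
# 'my_run' and epoch 9 are placeholders
patch = torch.load('checkpoints/my_run/patch_epoch_9')
print(type(patch), getattr(patch, 'shape', None))  # the patch is a NumPy array in this script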
Code Example #4
    # (fragment) tail of the per-frame preprocessing loop; Example #6 shows the full function
    im_all[_i] = torch.from_numpy(im_all[_i])
    im_all[_i] = im_all[_i].expand(1, im_all[_i].size()[0],
                                   im_all[_i].size()[1], im_all[_i].size()[2])
    im_all[_i] = im_all[_i].float()

# compute two frame flows
input_01 = [im_all[0].cuda(), im_all[1].cuda()]
input_01_var = torch.autograd.Variable(torch.cat(input_01, 1), volatile=True)

input_12 = [im_all[1].cuda(), im_all[2].cuda()]
input_12_var = torch.autograd.Variable(torch.cat(input_12, 1), volatile=True)

input_10 = [im_all[1].cuda(), im_all[0].cuda()]
input_10_var = torch.autograd.Variable(torch.cat(input_10, 1), volatile=True)

net = models.pwc_dc_net(pwc_model_fn)
net = net.cuda()
net.eval()
for p in net.parameters():
    p.requires_grad = False

cur_flow = net(input_12_var) * 20.0
prev_flow = net(input_01_var) * 20.0
prev_flow_back = net(input_10_var) * 20.0

# perform flow fusion
net_fusion = models.netfusion_custom(path="./fusion_net.pth.tar",
                                     div_flow=20.0,
                                     batchNorm=False)
net_fusion = net_fusion.cuda()
net_fusion.eval()
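This snippet stops right after loading the fusion network; Example #6 below shows the complete multi-frame pipeline. On current PyTorch the eval-plus-freeze boilerplate can be factored into a small helper (a sketch with the same effect):

import torch

def freeze(module: torch.nn.Module) -> torch.nn.Module:
    # put a network into inference mode: eval() plus gradient-free parameters
    module.eval()
    for p in module.parameters():
        p.requires_grad_(False)  # in-place form of p.requires_grad = False
    return module

# net = freeze(models.pwc_dc_net(pwc_model_fn))
# net_fusion = freeze(models.netfusion_custom(path="./fusion_net.pth.tar", div_flow=20.0, batchNorm=False))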
Code Example #5
File: test_humanflow.py Project: wkailiu/humanflow2
def main():
    global args
    args = parser.parse_args()
    test_list = make_dataset(args.data)
    # test_list = make_real_dataset(args.data)

    if args.arch == 'pwc':
        model = models.pwc_dc_net('models/pwc_net_ft.pth.tar').cuda()
    elif args.arch == 'spynet':
        model = models.spynet(nlevels=5, strmodel='F').cuda()
    elif args.arch == 'flownet2':
        model = models.FlowNet2().cuda()
        print("=> using pre-trained weights for FlowNet2")
        weights = torch.load('models/FlowNet2_checkpoint.pth.tar')
        model.load_state_dict(weights['state_dict'])

    if args.pretrained is not None:
        network_data = torch.load(args.pretrained)
        args.arch = network_data['arch']
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](data=network_data).cuda()
        if 'div_flow' in network_data.keys():
            args.div_flow = network_data['div_flow']

    model.eval()
    flow_epe = AverageMeter()
    avg_mot_err = AverageMeter()

    avg_parts_epe = {}
    for bk in BODY_MAP.keys():
        avg_parts_epe[bk] = AverageMeter()

    if args.no_norm:
        input_transform = transforms.Compose([
            flow_transforms.ArrayToTensor(),
            transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255])
        ])
    else:
        input_transform = transforms.Compose([
            flow_transforms.ArrayToTensor(),
            transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
            transforms.Normalize(mean=[0.411, 0.432, 0.45], std=[1, 1, 1])
        ])

    target_transform = transforms.Compose([
        flow_transforms.ArrayToTensor(),
        transforms.Normalize(mean=[0, 0], std=[args.div_flow, args.div_flow])
    ])

    for i, (img_paths, flow_path, seg_path) in enumerate(tqdm(test_list)):
        raw_im1 = flow_transforms.ArrayToTensor()(imread(img_paths[0],
                                                         mode='RGB')[:, :, :3])
        raw_im2 = flow_transforms.ArrayToTensor()(imread(img_paths[1],
                                                         mode='RGB')[:, :, :3])

        img1 = input_transform(imread(img_paths[0], mode='RGB')[:, :, :3])
        img2 = input_transform(imread(img_paths[1], mode='RGB')[:, :, :3])

        if flow_path is None:
            _, h, w = img1.size()
            new_h = int(np.floor(h / 256) * 256)
            new_w = int(np.floor(w / 448) * 448)

            # if i>744:
            #     import ipdb; ipdb.set_trace()
            img1 = F.upsample(img1.unsqueeze(0), (new_h, new_w),
                              mode='bilinear').squeeze()
            img2 = F.upsample(img2.unsqueeze(0), (new_h, new_w),
                              mode='bilinear').squeeze()

        if flow_path is not None:
            gtflow = target_transform(load_flo(flow_path))
            segmask = flow_transforms.ArrayToTensor()(cv2.imread(seg_path))

        input_var = torch.cat([img1, img2]).unsqueeze(0)

        if flow_path is not None:
            gtflow_var = gtflow.unsqueeze(0)
            segmask_var = segmask.unsqueeze(0)

        input_var = input_var.to(device)

        if flow_path is not None:
            gtflow_var = gtflow_var.to(device)
            segmask_var = segmask_var.to(device)

        # compute output
        output = model(input_var)

        if flow_path is not None:
            epe = args.div_flow * realEPE(
                output,
                gtflow_var,
                sparse=True if 'KITTI' in args.dataset else False)
            epe_parts = partsEPE(output, gtflow_var, segmask_var)
            epe_parts.update(
                (x, args.div_flow * y) for x, y in epe_parts.items())

            # record EPE
            flow_epe.update(epe.item(), gtflow_var.size(0))
            for bk in avg_parts_epe:
                if epe_parts[bk].item() > 0:
                    avg_parts_epe[bk].update(epe_parts[bk].item(),
                                             gtflow_var.size(0))

        # record motion warping error
        raw_im1 = raw_im1.cuda().unsqueeze(0)
        raw_im2 = raw_im2.cuda().unsqueeze(0)
        mot_err = motion_warping_error(raw_im1, raw_im2,
                                       args.div_flow * output)
        avg_mot_err.update(mot_err.item(), raw_im1.size(0))

        if args.output_dir is not None:
            if flow_path is not None:
                _, h, w = gtflow.size()
                output_path = flow_path.replace(args.data, args.output_dir)
                output_path = output_path.replace('/test/', '/')
                os.system('mkdir -p ' + output_path[:-15])
            else:
                output_path = img_paths[0].replace(args.data, args.output_dir)
                os.system('mkdir -p ' + output_path[:-10])
                output_path = output_path.replace('.png', '.flo')
            output_path = output_path.replace('/flow/', '/')
            upsampled_output = F.interpolate(output, (h, w),
                                             mode='bilinear',
                                             align_corners=False)
            flow_write(output_path,
                       upsampled_output.cpu()[0].data.numpy()[0],
                       upsampled_output.cpu()[0].data.numpy()[1])

    if args.save_name is not None:
        epe_dict = {}
        for bk in BODY_MAP.keys():
            epe_dict[bk] = avg_parts_epe[bk].avg
        epe_dict['full_epe'] = flow_epe.avg
        np.save(os.path.join('results', args.save_name), epe_dict)

    print("Averge EPE", flow_epe.avg)
    print("Motion warping error", avg_mot_err.avg)
Code Example #6
    def pwc_fusion(self, im0_fn, im1_fn, im2_fn):
        pwc_model_fn = './pwc_net.pth.tar'

        im_all = [imread(img) for img in [im0_fn, im1_fn, im2_fn]]
        im_all = [im[:, :, :3] for im in im_all]

        # rescale the image size to be multiples of 64
        divisor = 64.
        H = im_all[0].shape[0]
        W = im_all[0].shape[1]

        H_ = int(ceil(H / divisor) * divisor)
        W_ = int(ceil(W / divisor) * divisor)
        for i in range(len(im_all)):
            im_all[i] = cv2.resize(im_all[i], (W_, H_))

        for _i, _inputs in enumerate(im_all):
            im_all[_i] = im_all[_i][:, :, ::-1]
            im_all[_i] = 1.0 * im_all[_i] / 255.0

            im_all[_i] = np.transpose(im_all[_i], (2, 0, 1))
            im_all[_i] = torch.from_numpy(im_all[_i])
            im_all[_i] = im_all[_i].expand(1, im_all[_i].size()[0],
                                           im_all[_i].size()[1],
                                           im_all[_i].size()[2])
            im_all[_i] = im_all[_i].float()

        # compute two frame flows
        input_01 = [im_all[0].cuda(), im_all[1].cuda()]
        input_01_var = torch.autograd.Variable(torch.cat(input_01, 1),
                                               volatile=True)

        input_12 = [im_all[1].cuda(), im_all[2].cuda()]
        input_12_var = torch.autograd.Variable(torch.cat(input_12, 1),
                                               volatile=True)

        input_10 = [im_all[1].cuda(), im_all[0].cuda()]
        input_10_var = torch.autograd.Variable(torch.cat(input_10, 1),
                                               volatile=True)

        net = models.pwc_dc_net(pwc_model_fn)
        net = net.cuda()
        net.eval()
        for p in net.parameters():
            p.requires_grad = False

        cur_flow = net(input_12_var) * 20.0
        prev_flow = net(input_01_var) * 20.0
        prev_flow_back = net(input_10_var) * 20.0

        # perform flow fusion
        net_fusion = models.netfusion_custom(
            path="/app/PWC-Net/Multi_Frame_Flow/fusion_net.pth.tar",
            div_flow=20.0,
            batchNorm=False)
        net_fusion = net_fusion.cuda()
        net_fusion.eval()
        for p in net_fusion.parameters():
            p.requires_grad = False

        upsample_layer = torch.nn.Upsample(scale_factor=4, mode='bilinear')

        cur_flow = upsample_layer(cur_flow)
        prev_flow = upsample_layer(prev_flow)
        prev_flow_back = upsample_layer(prev_flow_back)
        input_var_cat = torch.cat(
            (input_12_var, cur_flow, prev_flow, prev_flow_back), dim=1)
        flo = net_fusion(input_var_cat)

        flo = flo[0] * 20.0
        flo = flo.cpu().data.numpy()

        # scale the flow back to the input size
        flo = np.swapaxes(np.swapaxes(flo, 0, 1), 1, 2)  # (2, H, W) -> (H, W, 2)
        u_ = cv2.resize(flo[:, :, 0], (W, H))
        v_ = cv2.resize(flo[:, :, 1], (W, H))
        u_ *= W / float(W_)
        v_ *= H / float(H_)
        flo = np.dstack((u_, v_))
        return flo
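A hedged usage sketch: pwc_fusion takes three consecutive frame filenames and returns the fused forward flow for the middle frame pair at the original resolution (the scale_factor=4 Upsample compensates for PWC-Net predicting flow at quarter resolution). The instance name and frame files below are placeholders:

# estimator is an instance of the class defining pwc_fusion
flo = estimator.pwc_fusion('frame_00.png', 'frame_01.png', 'frame_02.png')
print(flo.shape)  # (H, W, 2): u and v stacked by np.dstack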
Code Example #7
def main():
    global args
    args = parser.parse_args()
    save_path = Path(args.name)
    args.save_path = 'results' / save_path  #/timestamp
    print('=> will save everything to {}'.format(args.save_path))
    args.save_path.makedirs_p()
    output_vis_dir = args.save_path / 'images'
    output_vis_dir.makedirs_p()

    args.batch_size = 1

    output_writer = SummaryWriter(args.save_path / 'valid')

    # Data loading code
    flow_loader_h, flow_loader_w = 384, 1280

    normalize = custom_transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                            std=[0.5, 0.5, 0.5])

    # valid_transform = custom_transforms.Compose([custom_transforms.Scale(h=flow_loader_h, w=flow_loader_w),
    #                         custom_transforms.ArrayToTensor(), normalize])
    valid_transform = custom_transforms.Compose([
        custom_transforms.Scale(h=flow_loader_h, w=flow_loader_w),
        custom_transforms.ArrayToTensor()
    ])

    if args.valset == "kitti2015":
        # from datasets.validation_flow import ValidationFlowKitti2015MV
        # val_set = ValidationFlowKitti2015MV(root='/ps/project/datasets/AllFlowData/kitti/kitti2015', transform=valid_transform, compression=args.compression, raw_root='/is/rg/avg/jjanai/data/Kitti_2012_2015/Raw', example=args.example, true_motion=args.true_motion)
        from datasets.validation_flow import ValidationFlowKitti2015
        # # val_set = ValidationFlowKitti2015(root='/is/ps2/aranjan/AllFlowData/kitti/kitti2015', transform=valid_transform, compression=args.compression)
        val_set = ValidationFlowKitti2015(
            root='/ps/project/datasets/AllFlowData/kitti/kitti2015',
            transform=valid_transform,
            compression=args.compression,
            raw_root='/is/rg/avg/jjanai/data/Kitti_2012_2015/Raw',
            example=args.example,
            true_motion=args.true_motion)
    elif args.valset == "kitti2012":
        from datasets.validation_flow import ValidationFlowKitti2012
        # val_set = ValidationFlowKitti2012(root='/is/ps2/aranjan/AllFlowData/kitti/kitti2012', transform=valid_transform, compression=args.compression)
        val_set = ValidationFlowKitti2012(
            root='/ps/project/datasets/AllFlowData/kitti/kitti2012',
            transform=valid_transform,
            compression=args.compression,
            raw_root='/is/rg/avg/jjanai/data/Kitti_2012_2015/Raw')

    print('{} samples found in valid scenes'.format(len(val_set)))

    val_loader = torch.utils.data.DataLoader(
        val_set,
        batch_size=1,  # batch size is 1 since images in kitti have different sizes
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=True)

    result_file = open(os.path.join(args.save_path, 'results.csv'), 'a')
    result_scene_file = open(os.path.join(args.save_path, 'result_scenes.csv'),
                             'a')

    # create model
    print("=> fetching model")

    if args.flownet == 'SpyNet':
        flow_net = getattr(models, args.flownet)(nlevels=6, pretrained=True)
    elif args.flownet == 'Back2Future':
        flow_net = getattr(
            models, args.flownet)(pretrained='pretrained/b2f_rm_hard.pth.tar')
    elif args.flownet == 'PWCNet':
        flow_net = models.pwc_dc_net(
            'pretrained/pwc_net_chairs.pth.tar')  # pwc_net.pth.tar')
    else:
        flow_net = getattr(models, args.flownet)()

    if args.flownet in ['SpyNet', 'Back2Future', 'PWCNet']:
        print("=> using pre-trained weights for " + args.flownet)
    elif args.flownet in ['FlowNetC']:
        print("=> using pre-trained weights for FlowNetC")
        weights = torch.load('pretrained/FlowNet2-C_checkpoint.pth.tar')
        flow_net.load_state_dict(weights['state_dict'])
    elif args.flownet in ['FlowNetS']:
        print("=> using pre-trained weights for FlowNetS")
        weights = torch.load('pretrained/flownets.pth.tar')
        flow_net.load_state_dict(weights['state_dict'])
    elif args.flownet in ['FlowNet2']:
        print("=> using pre-trained weights for FlowNet2")
        weights = torch.load('pretrained/FlowNet2_checkpoint.pth.tar')
        flow_net.load_state_dict(weights['state_dict'])
    else:
        flow_net.init_weights()

    flow_net = flow_net.cuda()

    cudnn.benchmark = True

    if args.whole_img == 0 and args.compression == 0:
        print("Loading patch from ", args.patch_path)
        patch = torch.load(args.patch_path)
        patch_shape = patch.shape
        if args.mask_path:
            mask_image = load_as_float(args.mask_path)
            mask_image = imresize(mask_image,
                                  (patch_shape[-1], patch_shape[-2])) / 256.
            mask = np.array([mask_image.transpose(2, 0, 1)])
        else:
            if args.patch_type == 'circle':
                mask = createCircularMask(patch_shape[-2],
                                          patch_shape[-1]).astype('float32')
                mask = np.array([[mask, mask, mask]])
            elif args.patch_type == 'square':
                mask = np.ones(patch_shape)
    else:
        # add gaussian noise
        mean = 0
        var = 1
        sigma = var**0.5
        patch = np.random.normal(mean, sigma,
                                 (flow_loader_h, flow_loader_w, 3))
        patch = patch.reshape(3, flow_loader_h, flow_loader_w)
        mask = np.ones(patch.shape) * args.whole_img

    #import ipdb; ipdb.set_trace()
    error_names = ['epe', 'adv_epe', 'cos_sim', 'adv_cos_sim']
    errors = AverageMeter(i=len(error_names))

    # header
    result_file.write("{:>10}, {:>10}, {:>10}, {:>10}\n".format(*error_names))
    result_scene_file.write("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}\n".format(
        *(['scene'] + error_names)))

    flow_net.eval()

    # set seed for reproducibility
    np.random.seed(1337)

    for i, (ref_img_past, tgt_img, ref_img, flow_gt, disp_gt, calib,
            poses) in enumerate(tqdm(val_loader)):
        tgt_img_var = Variable(tgt_img.cuda(), volatile=True)
        ref_past_img_var = Variable(ref_img_past.cuda(), volatile=True)
        ref_img_var = Variable(ref_img.cuda(), volatile=True)
        flow_gt_var = Variable(flow_gt.cuda(), volatile=True)

        if type(flow_net).__name__ == 'Back2Future':
            flow_fwd = flow_net(ref_past_img_var, tgt_img_var, ref_img_var)
        else:
            flow_fwd = flow_net(tgt_img_var, ref_img_var)

        data_shape = tgt_img.cpu().numpy().shape

        margin = 0
        if len(calib) > 0:
            margin = int(disp_gt.max())

        random_x = args.fixed_loc_x
        random_y = args.fixed_loc_y
        if args.whole_img == 0:
            if args.patch_type == 'circle':
                patch_full, mask_full, _, random_x, random_y, _ = circle_transform(
                    patch,
                    mask,
                    patch.copy(),
                    data_shape,
                    patch_shape,
                    margin,
                    norotate=args.norotate,
                    fixed_loc=(random_x, random_y))
            elif args.patch_type == 'square':
                patch_full, mask_full, _, _, _ = square_transform(
                    patch,
                    mask,
                    patch.copy(),
                    data_shape,
                    patch_shape,
                    norotate=args.norotate)
            patch_full, mask_full = torch.FloatTensor(
                patch_full), torch.FloatTensor(mask_full)
        else:
            patch_full, mask_full = torch.FloatTensor(
                patch), torch.FloatTensor(mask)

        patch_full, mask_full = patch_full.cuda(), mask_full.cuda()
        patch_var, mask_var = Variable(patch_full), Variable(mask_full)

        patch_var_future = patch_var_past = patch_var
        mask_var_future = mask_var_past = mask_var

        # adversarial flow
        bt, _, h_gt, w_gt = flow_gt_var.shape
        forward_patch_flow = Variable(torch.cat((torch.zeros(
            (bt, 2, h_gt, w_gt)), torch.ones((bt, 1, h_gt, w_gt))), 1).cuda(),
                                      volatile=True)

        # project patch into 3D scene
        if len(calib) > 0:
            # #################################### ONLY WORKS WITH BATCH SIZE 1 ####################################
            imu2vel = calib['imu2vel']["RT"][0].numpy()
            imu2cam = calib['P_imu_cam'][0].numpy()
            imu2img = calib['P_imu_img'][0].numpy()

            pose_past = poses[0][0].numpy()
            pose_ref = poses[1][0].numpy()
            inv_pose_ref = inv(pose_ref)
            pose_fut = poses[2][0].numpy()

            # get point in IMU
            patch_disp = disp_gt[0, random_y:random_y + patch_shape[-2],
                                 random_x:random_x + patch_shape[-1]]
            valid = (patch_disp > 0)
            # set to object or free space disparity
            if False and args.fixed_loc_x > 0 and args.fixed_loc_y > 0:
                # disparity = patch_disp[valid].mean() - 3  # small correction for gps errors
                disparity = patch_disp[valid].mean()
            else:
                subset = patch_disp[valid]
                min_disp = 0
                if len(subset) > 0:
                    min_disp = subset.min()
                max_disp = disp_gt.max()

                disparity = np.random.uniform(min_disp, max_disp)  # disparity

            # print('Disp from ', min_disp, ' to ', max_disp)
            depth = (calib['cam']['focal_length_x'] *
                     calib['cam']['baseline'] / disparity)
            p_cam0 = np.array([[0], [0], [0], [1]])
            p_cam0[0] = depth * (
                random_x - calib['cam']['cx']) / calib['cam']['focal_length_x']
            p_cam0[1] = depth * (
                random_y - calib['cam']['cy']) / calib['cam']['focal_length_y']
            p_cam0[2] = depth

            # transform
            T_p_cam0 = np.eye(4)
            T_p_cam0[0:4, 3:4] = p_cam0

            # transformation to generate patch points
            patch_size = -0.25
            pts = np.array([[0, 0, 0, 1], [0, patch_size, 0, 1],
                            [patch_size, 0, 0, 1],
                            [patch_size, patch_size, 0, 1]]).T
            pts = inv(imu2cam).dot(T_p_cam0.dot(pts))

            # get points in reference image
            pts_src = pose_ref.dot(pts)
            pts_src = imu2img.dot(pts_src)
            pts_src = pts_src[:3, :] / pts_src[2:3, :].repeat(3, 0)

            # get points in past image
            pts_past = pose_past.dot(pts)
            pts_past = imu2img.dot(pts_past)
            pts_past = pts_past[:3, :] / pts_past[2:3, :].repeat(3, 0)

            # get points in future image
            pts_fut = pose_fut.dot(pts)
            pts_fut = imu2img.dot(pts_fut)
            pts_fut = pts_fut[:3, :] / pts_fut[2:3, :].repeat(3, 0)

            # find homography between points
            H_past, _ = cv2.findHomography(pts_src.T, pts_past.T, cv2.RANSAC)
            H_fut, _ = cv2.findHomography(pts_src.T, pts_fut.T, cv2.RANSAC)

            # import pdb; pdb.set_trace()
            refMtrx = torch.from_numpy(H_fut).float().cuda()
            refMtrx = refMtrx.repeat(args.batch_size, 1, 1)
            # get pixel origins
            X, Y = np.meshgrid(np.arange(flow_loader_w),
                               np.arange(flow_loader_h))
            X, Y = X.flatten(), Y.flatten()
            XYhom = np.stack([X, Y, np.ones_like(X)], axis=1).T
            XYhom = np.tile(XYhom, [args.batch_size, 1, 1]).astype(np.float32)
            XYhom = torch.from_numpy(XYhom).cuda()
            XHom, YHom, ZHom = torch.unbind(XYhom, dim=1)
            XHom = XHom.resize_(
                (args.batch_size, flow_loader_h, flow_loader_w))
            YHom = YHom.resize_(
                (args.batch_size, flow_loader_h, flow_loader_w))
            # warp the canonical coordinates
            XYwarpHom = refMtrx.matmul(XYhom)
            XwarpHom, YwarpHom, ZwarpHom = torch.unbind(XYwarpHom, dim=1)
            Xwarp = (XwarpHom / (ZwarpHom + 1e-8)).resize_(
                (args.batch_size, flow_loader_h, flow_loader_w))
            Ywarp = (YwarpHom / (ZwarpHom + 1e-8)).resize_(
                (args.batch_size, flow_loader_h, flow_loader_w))
            # get forward flow
            u = (XHom - Xwarp).unsqueeze(1)
            v = (YHom - Ywarp).unsqueeze(1)
            flow = torch.cat((u, v), 1)
            flow = nn.functional.upsample(flow,
                                          size=(h_gt, w_gt),
                                          mode='bilinear')
            flow[:, 0, :, :] = flow[:, 0, :, :] * (w_gt / flow_loader_w)
            flow[:, 1, :, :] = flow[:, 1, :, :] * (h_gt / flow_loader_h)
            forward_patch_flow[:, :2, :, :] = flow
            # get grid for resampling
            Xwarp = 2 * ((Xwarp / (flow_loader_w - 1)) - 0.5)
            Ywarp = 2 * ((Ywarp / (flow_loader_h - 1)) - 0.5)
            grid = torch.stack([Xwarp, Ywarp], dim=-1)
            # sampling with bilinear interpolation
            patch_var_future = torch.nn.functional.grid_sample(patch_var,
                                                               grid,
                                                               mode="bilinear")
            mask_var_future = torch.nn.functional.grid_sample(mask_var,
                                                              grid,
                                                              mode="bilinear")

            # use past homography
            refMtrxP = torch.from_numpy(H_past).float().cuda()
            refMtrxP = refMtrxP.repeat(args.batch_size, 1, 1)
            # warp the canonical coordinates
            XYwarpHomP = refMtrxP.matmul(XYhom)
            XwarpHomP, YwarpHomP, ZwarpHomP = torch.unbind(XYwarpHomP, dim=1)
            XwarpP = (XwarpHomP / (ZwarpHomP + 1e-8)).resize_(
                (args.batch_size, flow_loader_h, flow_loader_w))
            YwarpP = (YwarpHomP / (ZwarpHomP + 1e-8)).resize_(
                (args.batch_size, flow_loader_h, flow_loader_w))
            # get grid for resampling
            XwarpP = 2 * ((XwarpP / (flow_loader_w - 1)) - 0.5)
            YwarpP = 2 * ((YwarpP / (flow_loader_h - 1)) - 0.5)
            gridP = torch.stack([XwarpP, YwarpP], dim=-1)
            # sampling with bilinear interpolation
            patch_var_past = torch.nn.functional.grid_sample(patch_var,
                                                             gridP,
                                                             mode="bilinear")
            mask_var_past = torch.nn.functional.grid_sample(mask_var,
                                                            gridP,
                                                            mode="bilinear")

        adv_tgt_img_var = torch.mul(
            (1 - mask_var), tgt_img_var) + torch.mul(mask_var, patch_var)
        adv_ref_past_img_var = torch.mul(
            (1 - mask_var_past), ref_past_img_var) + torch.mul(
                mask_var_past, patch_var_past)
        adv_ref_img_var = torch.mul(
            (1 - mask_var_future), ref_img_var) + torch.mul(
                mask_var_future, patch_var_future)

        adv_tgt_img_var = torch.clamp(adv_tgt_img_var, -1, 1)
        adv_ref_past_img_var = torch.clamp(adv_ref_past_img_var, -1, 1)
        adv_ref_img_var = torch.clamp(adv_ref_img_var, -1, 1)

        if type(flow_net).__name__ == 'Back2Future':
            adv_flow_fwd = flow_net(adv_ref_past_img_var, adv_tgt_img_var,
                                    adv_ref_img_var)
        else:
            adv_flow_fwd = flow_net(adv_tgt_img_var, adv_ref_img_var)

        # set patch to zero flow!
        mask_var_res = nn.functional.upsample(mask_var,
                                              size=(h_gt, w_gt),
                                              mode='bilinear')

        # Ignore patch motion if set!
        if args.ignore_mask_flow:
            forward_patch_flow = Variable(torch.cat((torch.zeros(
                (bt, 2, h_gt, w_gt)), torch.zeros((bt, 1, h_gt, w_gt))),
                                                    1).cuda(),
                                          volatile=True)

        flow_gt_var_adv = torch.mul(
            (1 - mask_var_res), flow_gt_var) + torch.mul(
                mask_var_res, forward_patch_flow)

        # import pdb; pdb.set_trace()
        epe = compute_epe(gt=flow_gt_var, pred=flow_fwd)
        adv_epe = compute_epe(gt=flow_gt_var_adv, pred=adv_flow_fwd)
        cos_sim = compute_cossim(flow_gt_var, flow_fwd)
        adv_cos_sim = compute_cossim(flow_gt_var_adv, adv_flow_fwd)

        errors.update([epe, adv_epe, cos_sim, adv_cos_sim])

        if i % 1 == 0:
            index = i  #int(i//10)
            imgs = normalize([tgt_img] + [ref_img_past] + [ref_img])
            norm_tgt_img = imgs[0]
            norm_ref_img_past = imgs[1]
            norm_ref_img = imgs[2]

            patch_cpu = patch_var.data[0].cpu()
            mask_cpu = mask_var.data[0].cpu()

            adv_norm_tgt_img = normalize(
                adv_tgt_img_var.data.cpu()
            )  #torch.mul((1-mask_cpu), norm_tgt_img) + torch.mul(mask_cpu, patch_cpu)
            adv_norm_ref_img_past = normalize(
                adv_ref_past_img_var.data.cpu()
            )  # torch.mul((1-mask_cpu), norm_ref_img_past) + torch.mul(mask_cpu, patch_cpu)
            adv_norm_ref_img = normalize(
                adv_ref_img_var.data.cpu()
            )  #torch.mul((1-mask_cpu), norm_ref_img) + torch.mul(mask_cpu, patch_cpu)

            output_writer.add_image(
                'val flow Input',
                transpose_image(tensor2array(norm_tgt_img[0])), 0)
            flow_to_show = flow_gt[0][:2, :, :].cpu()
            output_writer.add_image(
                'val target Flow',
                transpose_image(flow_to_image(tensor2array(flow_to_show))), 0)

            # set flow to zero
            # zero_flow = Variable(torch.zeros(flow_fwd.shape).cuda(), volatile=True)
            # flow_fwd_masked = torch.mul((1-mask_var[:,:2,:,:]), flow_fwd) + torch.mul(mask_var[:,:2,:,:], zero_flow)
            flow_fwd_masked = flow_fwd

            # get ground truth flow
            val_GT_adv = flow_gt_var_adv.data[0].cpu().numpy().transpose(
                1, 2, 0)
            # val_GT_adv = interp_gt_flow(val_GT_adv[:,:,:2], val_GT_adv[:,:,2])
            val_GT_adv = cv2.resize(val_GT_adv, (flow_loader_w, flow_loader_h),
                                    interpolation=cv2.INTER_NEAREST)
            val_GT_adv[:, :, 0] = val_GT_adv[:, :, 0] * (flow_loader_w / w_gt)
            val_GT_adv[:, :, 1] = val_GT_adv[:, :, 1] * (flow_loader_h / h_gt)

            # gt normalization for visualization
            u = val_GT_adv[:, :, 0]
            v = val_GT_adv[:, :, 1]
            idxUnknow = (abs(u) > 1e7) | (abs(v) > 1e7)
            u[idxUnknow] = 0
            v[idxUnknow] = 0
            rad = np.sqrt(u**2 + v**2)
            maxrad = np.max(rad)

            val_GT_adv_Output = flow_to_image(val_GT_adv, maxrad)
            val_GT_adv_Output = cv2.erode(val_GT_adv_Output,
                                          np.ones((3, 3), np.uint8),
                                          iterations=1)  # make points thicker
            val_GT_adv_Output = transpose_image(val_GT_adv_Output) / 255.
            val_Flow_Output = transpose_image(
                flow_to_image(tensor2array(flow_fwd.data[0].cpu()),
                              maxrad)) / 255.
            val_adv_Flow_Output = transpose_image(
                flow_to_image(tensor2array(adv_flow_fwd.data[0].cpu()),
                              maxrad)) / 255.
            val_Diff_Flow_Output = transpose_image(
                flow_to_image(
                    tensor2array(
                        (adv_flow_fwd - flow_fwd_masked).data[0].cpu()),
                    maxrad)) / 255.

            val_tgt_image = transpose_image(tensor2array(norm_tgt_img[0]))
            val_ref_image = transpose_image(tensor2array(norm_ref_img[0]))
            val_adv_tgt_image = transpose_image(
                tensor2array(adv_norm_tgt_img[0]))
            val_adv_ref_image_past = transpose_image(
                tensor2array(adv_norm_ref_img_past[0]))
            val_adv_ref_image = transpose_image(
                tensor2array(adv_norm_ref_img[0]))
            val_patch = transpose_image(tensor2array(patch_var.data.cpu()[0]))
            # print(adv_norm_tgt_img.shape)
            # print(flow_fwd.data[0].cpu().shape)

            # if type(flow_net).__name__ == 'Back2Future':
            #     val_output_viz = np.concatenate((val_adv_ref_image_past, val_adv_tgt_image, val_adv_ref_image, val_Flow_Output, val_adv_Flow_Output, val_Diff_Flow_Output), 2)
            # else:
            # val_output_viz = np.concatenate((val_adv_tgt_image, val_adv_ref_image, val_Flow_Output, val_adv_Flow_Output, val_Diff_Flow_Output, val_GT_adv_Output), 2)
            val_output_viz = np.concatenate(
                (val_ref_image, val_adv_ref_image, val_Flow_Output,
                 val_adv_Flow_Output, val_Diff_Flow_Output, val_GT_adv_Output),
                2)
            val_output_viz_im = Image.fromarray(
                (255 * val_output_viz.transpose(1, 2, 0)).astype('uint8'))
            val_output_viz_im.save(args.save_path / args.name + 'viz' +
                                   str(i).zfill(3) + '.jpg')
            output_writer.add_image('val Output viz {}'.format(index),
                                    val_output_viz, 0)

            #val_output_viz = np.vstack((val_Flow_Output, val_adv_Flow_Output, val_Diff_Flow_Output, val_adv_tgt_image, val_adv_ref_image))
            #scipy.misc.imsave('outfile.jpg', os.path.join(output_vis_dir, 'vis_{}.png'.format(index)))

            result_scene_file.write(
                "{:10d}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}\n".format(
                    i, epe, adv_epe, cos_sim, adv_cos_sim))

    print("{:>10}, {:>10}, {:>10}, {:>10}".format(*error_names))
    print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(*errors.avg))
    result_file.write(
        "{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}\n".format(*errors.avg))
    result_scene_file.write(
        "{:>10}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}\n".format(
            *(["avg"] + errors.avg)))

    result_file.close()
    result_scene_file.close()
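compute_epe and compute_cossim are flowattack utilities; end-point error measures the average length of the flow error vectors, and cosine similarity measures how well the attacked prediction preserves the direction of the (patch-adjusted) ground truth. A minimal illustration of the two metrics for same-resolution (B, 2, H, W) flow fields (the project's versions additionally resample and mask invalid pixels):

import torch
import torch.nn.functional as F

def epe(gt: torch.Tensor, pred: torch.Tensor) -> float:
    # mean end-point error: average L2 distance between flow vectors
    return torch.norm(pred - gt, p=2, dim=1).mean().item()

def cossim(gt: torch.Tensor, pred: torch.Tensor) -> float:
    # mean per-pixel cosine similarity between flow vectors
    return F.cosine_similarity(gt, pred, dim=1).mean().item()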
Code Example #8
# (fragment) assumes im_all plus W, H, W_, H_ are already defined;
# a sketch of that setup follows this example
for i in range(len(im_all)):
    im_all[i] = cv2.resize(im_all[i], (W_, H_))

for _i, _inputs in enumerate(im_all):
    im_all[_i] = im_all[_i][:, :, ::-1]
    im_all[_i] = 1.0 * im_all[_i] / 255.0

    im_all[_i] = np.transpose(im_all[_i], (2, 0, 1))
    im_all[_i] = torch.from_numpy(im_all[_i])
    im_all[_i] = im_all[_i].expand(1, im_all[_i].size()[0],
                                   im_all[_i].size()[1], im_all[_i].size()[2])
    im_all[_i] = im_all[_i].float()

im_all = torch.autograd.Variable(torch.cat(im_all, 1).cuda(), volatile=True)

net = models.pwc_dc_net()  # pass a checkpoint path (as in the other examples) to load pretrained weights
net = net.cuda()
net.eval()

flo = net(im_all)
flo = flo[0] * 20.0
flo = flo.cpu().data.numpy()

# scale the flow back to the input size
flo = np.swapaxes(np.swapaxes(flo, 0, 1), 1, 2)  # (2, H, W) -> (H, W, 2)
u_ = cv2.resize(flo[:, :, 0], (W, H))
v_ = cv2.resize(flo[:, :, 1], (W, H))
u_ *= W / float(W_)
v_ *= H / float(H_)
flo = np.dstack((u_, v_))
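For reference, a sketch of the setup this fragment assumes, matching Examples #1 and #6; the frame filenames are placeholders, and imread may need to come from imageio on stacks where scipy.misc.imread is no longer available:

from math import ceil
import cv2
import numpy as np
import torch
from scipy.misc import imread  # or: from imageio import imread

im_all = [imread(fn)[:, :, :3] for fn in ['frame1.png', 'frame2.png']]
divisor = 64.
H, W = im_all[0].shape[:2]
H_ = int(ceil(H / divisor) * divisor)
W_ = int(ceil(W / divisor) * divisor)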