Example #1
def main():
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if not os.path.exists(opt.output_folder):
        os.makedirs(opt.output_folder)
    num_points = 1000  # number of points in the input point cloud
    num_objects = 21
    estimator = PoseNetGlobal(num_points=num_points, num_obj=num_objects)
    estimator.cuda()
    estimator.load_state_dict(torch.load(opt.weights))
    output_format = [
        otypes.OBJECT_LABEL, otypes.QUATERNION, otypes.IMAGE_CROPPED,
        otypes.DEPTH_POINTS_MASKED_AND_INDEXES
    ]
    estimator.eval()
    pbar = trange(2, num_objects + 1)
    for cls in pbar:
        dataset = YCBDataset(
            opt.dataset_root,
            mode='grid',
            object_list=[cls],
            output_data=output_format,
            resample_on_error=True,
            preprocessors=[
                YCBOcclusionAugmentor(opt.dataset_root),
                ColorJitter(),
            ],
            postprocessors=[ImageNormalizer(),
                            PointShifter()],
            image_size=[640, 480],
            num_points=1000)

        classes = dataset.classes
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=opt.workers)
        pbar.set_description('Featurizing {}'.format(classes[cls]))

        for aug_idx in trange(opt.num_augmentations):
            for i, data in tqdm(enumerate(dataloader), total=len(dataloader)):
                if (len(data) == 0 or len(data[0]) == 0):
                    continue
                idx, quat, img, points, choose = data
                data_path = dataset.image_list[i]
                idx = idx - 1
                img = Variable(img).cuda()
                points = Variable(points).cuda()
                choose = Variable(choose).cuda()
                idx = Variable(idx).cuda()
                assert cls == data_path[1]
                assert cls - 1 == int(idx[0])
                feat, _ = estimator.globalFeature(img, points, choose, idx)
                output_filename = '{0}/{1}_{2}_{3}_feat.npz'.format(
                    opt.output_folder, data_path[0], classes[cls], aug_idx)
                os.makedirs(os.path.dirname(output_filename), exist_ok=True)
                np.savez(output_filename,
                         quat=to_np(quat)[0],
                         feat=to_np(feat)[0])
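
Every example on this page leans on a to_np helper whose definition is not
shown. A minimal sketch of what such a helper usually looks like (an
assumption for readability, not the project's verified implementation):

import numpy as np
import torch

def to_np(x):
    # Detach from the autograd graph, move to host memory, and return
    # a plain numpy array; pass non-tensor inputs through np.asarray.
    if torch.is_tensor(x):
        return x.detach().cpu().numpy()
    return np.asarray(x)

The later usage sketches on this page assume this definition.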
Example #2
def __getitem__(self, index):
    data = self.dataset[index]
    # the wrapped dataset yields a CHW BGR tensor; convert it to an
    # HWC RGB uint8 image for downstream consumers
    img = cv2.cvtColor(
        to_np(data[0]).transpose((1, 2, 0)).astype(np.uint8),
        cv2.COLOR_BGR2RGB)
    depth = to_np(data[1])
    path = '{}/data/{}-meta.mat'.format(self.dataset.dataset_root,
                                        self.dataset.getPath(index))
    meta_data = scio.loadmat(path)
    return img, depth, meta_data
Example #3
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        with torch.no_grad():
            Z = to_np(input)
            # Evaluate the gradient on sorted Z, then undo the sort.
            # Not sure if this always prevents the NaNs? Need to check.
            dF = bingham_dF(np.sort(Z))
            dF = dF[np.argsort(np.argsort(Z))]

            if np.any(np.isnan(dF)):
                print('BinghamConst: Gradient NaN')
                dF = np.zeros_like(dF)
        grad_input = grad_output.clone() * torch.as_tensor(dF, dtype=grad_output.dtype)
        if torch.cuda.is_available():
            grad_input = grad_input.cuda()
        return grad_input
Example #4
def forward(ctx, input):
    ctx.save_for_backward(input)
    with torch.no_grad():
        F = bingham_F(to_np(input).astype(np.double))

    return torch.as_tensor(F, dtype=input.dtype)
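
Examples #3 and #4 are the backward and forward halves of the same custom
torch.autograd.Function. A minimal sketch of how they fit together; the class
name BinghamConst is taken from the print message in Example #3, and the
bingham_F / bingham_dF stand-ins below are placeholders for the real Bingham
normalization constant and its gradient, not the project's implementations:

import numpy as np
import torch

def to_np(t):
    return t.detach().cpu().numpy()

def bingham_F(Z):
    # placeholder for the real normalization constant
    return np.exp(Z).sum()

def bingham_dF(Z):
    # placeholder for its elementwise gradient
    return np.exp(Z)

class BinghamConst(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        with torch.no_grad():
            F = bingham_F(to_np(input).astype(np.double))
        return torch.as_tensor(F, dtype=input.dtype)

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        with torch.no_grad():
            dF = bingham_dF(to_np(input))
        return grad_output.clone() * torch.as_tensor(dF, dtype=grad_output.dtype)

z = torch.tensor([-1.0, -2.0, -3.0], requires_grad=True)
BinghamConst.apply(z).backward()
print(z.grad)  # gradients flow through the numpy computation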
Example #5
def evaluateYCBEvery(estimator, data_prefix, sigma, return_exponent=False):
    img = Image.open('{}-color.png'.format(data_prefix))
    depth = np.array(Image.open('{}-depth.png'.format(data_prefix)))
    pose_meta = scio.loadmat('{}-meta.mat'.format(data_prefix))
    posecnn_meta = scio.loadmat('{}-posecnn.mat'.format(data_prefix))
    object_classes = set(pose_meta['cls_indexes'].flat) & \
                     set(posecnn_meta['rois'][:, 1:2].flatten().astype(int))

    likelihood = {}
    for cls_idx in object_classes:
        obj_idx = np.nonzero(
            posecnn_meta['rois'][:, 1].astype(int) == cls_idx)[0][0]
        mask, bbox, object_label = preprocessPoseCNNMetaData(
            posecnn_meta, obj_idx)
        pred_r, _, pred_c = estimator(img,
                                      depth,
                                      mask,
                                      bbox,
                                      object_label,
                                      return_all=True)[3:6]
        # reorder quaternion components and normalize each hypothesis
        pred_r = pred_r[0, :, [1, 2, 3, 0]]
        pred_r /= torch.norm(pred_r, dim=1).view(-1, 1)
        bingham_interp = BinghamInterpolation(vertices=to_np(pred_r.detach()),
                                              values=pred_c.detach(),
                                              sigma=sigma)

        q_gt, _ = getYCBGroundtruth(pose_meta, posecnn_meta, obj_idx)
        q_gt = torch.Tensor(q_gt).unsqueeze(0)
        if (torch.cuda.is_available()):
            q_gt = q_gt.cuda()
        likelihood[cls_idx] = (bingham_interp(q_gt, return_exponent),
                               bingham_interp.values)

    return likelihood
Example #6
def evaluateYCBMax(estimator, data_prefix, sigma, return_exponent=False):
    img = Image.open('{}-color.png'.format(data_prefix))
    depth = np.array(Image.open('{}-depth.png'.format(data_prefix)))
    pose_meta = scio.loadmat('{}-meta.mat'.format(data_prefix))
    posecnn_meta = scio.loadmat('{}-posecnn.mat'.format(data_prefix))
    object_classes = set(pose_meta['cls_indexes'].flat) & \
                     set(posecnn_meta['rois'][:, 1:2].flatten().astype(int))

    likelihood = {}
    for cls_idx in object_classes:
        obj_idx = np.nonzero(
            posecnn_meta['rois'][:, 1].astype(int) == cls_idx)[0][0]
        mask, bbox, object_label = preprocessPoseCNNMetaData(
            posecnn_meta, obj_idx)
        q_est, t_est = estimator(img, depth, mask, bbox, object_label)
        q_est = q_est[[1, 2, 3, 0]]
        q_est /= q_est.norm()
        bingham_interp = BinghamInterpolation(vertices=[to_np(q_est)],
                                              values=torch.Tensor([1]),
                                              sigma=sigma)

        q_gt, _ = getYCBGroundtruth(pose_meta, posecnn_meta, obj_idx)
        q_gt = torch.Tensor(q_gt).unsqueeze(0)
        if (torch.cuda.is_available()):
            q_gt = q_gt.cuda()
        likelihood[cls_idx] = (bingham_interp(q_gt, return_exponent),
                               bingham_interp.values)

    return likelihood
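
A hedged usage sketch for the two evaluators above (Example #5 scores every
pose hypothesis, Example #6 only the highest-confidence one). The
'{prefix}-color.png' / '-depth.png' / '-meta.mat' / '-posecnn.mat' file layout
comes from the code itself; the estimator and the concrete prefix below are
placeholders:

# 'estimator' is a trained pose estimator; the prefix names one
# YCB-Video frame on disk (placeholder path)
likelihood = evaluateYCBMax(estimator, 'data/0001/000001', sigma=10.0)
for cls_idx, (ll, weights) in likelihood.items():
    print('class {}: likelihood {}'.format(cls_idx, ll))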
Example #7
def main():
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if not os.path.exists(opt.output_folder):
        os.makedirs(opt.output_folder)
    num_points = 1000  # number of points in the input point cloud
    num_objects = 21
    estimator = PoseNetGlobal(num_points=num_points, num_obj=num_objects)
    estimator.cuda()
    estimator.load_state_dict(torch.load(opt.weights))
    output_format = [
        otypes.QUATERNION, otypes.IMAGE_CROPPED,
        otypes.DEPTH_POINTS_MASKED_AND_INDEXES
    ]
    estimator.eval()
    for cls in trange(1, num_objects + 1):
        dataset = YCBDataset(opt.dataset_root,
                             mode=opt.mode,
                             object_list=[cls],
                             output_data=output_format,
                             postprocessors=[ImageNormalizer()],
                             image_size=[640, 480],
                             num_points=1000)
        classes = dataset.classes
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=opt.workers)

        for i, data in tqdm(enumerate(dataloader), total=len(dataloader)):
            if (len(data) == 0 or len(data[0]) == 0):
                continue
            quat, img, points, choose = data
            data_path = dataset.image_list[i]
            img = Variable(img).cuda()
            points = Variable(points).cuda()
            choose = Variable(choose).cuda()
            idx = Variable(torch.LongTensor([cls - 1])).cuda()
            assert cls == data_path[1]
            feat, _ = estimator.globalFeature(img, points, choose, idx)
            output_filename = '{0}/{1}_{2}_feat.npz'.format(
                opt.output_folder, data_path[0], classes[cls])
            os.makedirs(os.path.dirname(output_filename), exist_ok=True)
            np.savez(output_filename, quat=to_np(quat)[0], feat=to_np(feat)[0])
Example #8
def torch2Img(img, normalized=False):
    # Convert a (B)CHW torch image tensor to an HWC numpy array,
    # undoing ImageNet normalization when requested
    disp_img = to_np(img)
    if len(disp_img.shape) == 4:
        disp_img = disp_img[0]
    disp_img = disp_img.transpose((1, 2, 0))
    if normalized:
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])
        disp_img = disp_img * std + mean
    return disp_img
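
A quick usage sketch for torch2Img: recovering a displayable image from a
normalized network input. It runs against the helpers above; the random batch
merely stands in for a real ImageNormalizer output:

import matplotlib.pyplot as plt
import torch

batch = torch.randn(1, 3, 480, 640)  # stand-in for a normalized input batch
plt.imshow(torch2Img(batch, normalized=True).clip(0, 1))
plt.show()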
Example #9
def unique_tol(quats, tol=0.001, **kwargs):
    # Deduplicate quaternions whose angular distance is below tol; any
    # extra kwargs are forwarded to np.unique (e.g. return_counts=True).
    n = quats.shape[0]
    d = tensorAngularAllDiffs(quats, quats) < tol
    idx = torch.arange(n)
    if torch.cuda.is_available():
        idx = idx.cuda()
    # For each quaternion, pick the lowest index among its near-duplicates
    idx_all = torch.einsum("ab,b->ab", d.float(), (idx + 1).flip(0).float())
    indices = torch.argmax(idx_all, 1, keepdim=True).flatten()
    u_idxs = indices == idx
    quats_unique = quats[u_idxs, :]

    if len(kwargs):
        edge_idxs = [1]
        while len(edge_idxs):
            ret = np.unique(to_np(indices), **kwargs)
            edge_idxs = set(ret[0]) - set(
                to_np(torch.nonzero(u_idxs.flatten()).flatten()))
            for j in edge_idxs:
                indices[indices == j] = indices[j]

        return (quats_unique, *ret[1:])
    else:
        return quats_unique
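
A usage sketch for unique_tol. tensorAngularAllDiffs is not defined on this
page; the stand-in below only assumes the signature the code implies
(pairwise angular distances between two sets of unit quaternions):

import torch

def tensorAngularAllDiffs(q1, q2):
    # stand-in: angle between unit quaternions, 2*acos(|<q_i, q_j>|)
    dots = torch.clamp((q1 @ q2.t()).abs(), max=1.0)
    return 2 * torch.acos(dots)

quats = torch.tensor([[1.0, 0.0, 0.0, 0.0],
                      [1.0, 1e-4, 0.0, 0.0],  # near-duplicate of row 0
                      [0.0, 1.0, 0.0, 0.0]])
quats = quats / quats.norm(dim=1, keepdim=True)
print(unique_tol(quats, tol=0.01).shape)  # expect torch.Size([2, 4])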
Example #10
def subRandomSigmaSearchEvery(estimator,
                              dataset_root,
                              file_list,
                              sigma_lims=[0, 20],
                              num_samples=100):
    max_likelihood = {obj: -np.inf for obj in range(1, 22)}
    max_sigma = {obj: None for obj in range(1, 22)}

    sigmas = subrandom(num_samples) * (sigma_lims[1] -
                                       sigma_lims[0]) + sigma_lims[0]
    mean_likelihoods = []
    for j, sigma in enumerate(sigmas):
        eval_func = partial(evaluateYCBEvery,
                            estimator,
                            sigma=sigma,
                            return_exponent=True)
        likelihoods = evaluateYCBDataset(eval_func, dataset_root, file_list, 7)
        mean_likelihood = {obj: 0 for obj in range(1, 22)}
        for k, v in likelihoods.items():
            l_exp, w = zip(*v)
            l_exp = torch.cat(l_exp)
            w = torch.stack(w).squeeze()
            mean_likelihood[k] = to_np(
                torch.mean(
                    logSumExp(
                        l_exp, w,
                        bingham_const(-torch.tensor(sigma).repeat(3)).float() /
                        2)))
        print("{}: Mean Log Likelihood of Sigma {}: {}".format(
            j, sigma, mean_likelihood))
        mean_likelihoods.append(mean_likelihood)
        for k, v in mean_likelihood.items():
            if v > max_likelihood[k]:
                max_likelihood[k] = v
                max_sigma[k] = sigma
                print(
                    "Max Sigma for object {} after {} samples: {} ({})".format(
                        k, j + 1, sigma, max_likelihood[k]))
                np.savez('bingham_every_obj_{}.npz'.format(k),
                         likelihoods=likelihoods[k],
                         sigma=sigma)

    np.savez('bingham_every_sigmas_indv.npz',
             mean_likelihoods=mean_likelihoods,
             sigmas=sigmas)
Example #11
def unprocessImages(imgs,
                    norm_mean=np.array([0.485, 0.456, 0.406]),
                    norm_std=np.array([0.229, 0.224, 0.225])):
    imgs = np.transpose(to_np(imgs), (0, 2, 3, 1))
    imgs = np.minimum(np.maximum(imgs * norm_std + norm_mean, 0.0), 1.0) * 255
    return imgs
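
A round-trip sketch for unprocessImages; the normalization constants are the
function's own defaults, and the random batch stands in for normalized
network inputs:

import torch

imgs = torch.rand(2, 3, 480, 640)  # stand-in for a normalized batch
out = unprocessImages(imgs)
print(out.shape)  # (2, 480, 640, 3), values clamped to [0, 255]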
Example #12
def main():
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if not os.path.exists(opt.output_folder):
        os.makedirs(opt.output_folder)
    num_points = 1000  # number of points in the input point cloud
    num_objects = 21
    if (opt.object_indices is None):
        opt.object_indices = list(range(1, num_objects + 1))
    estimator = PoseNet(num_points=num_points, num_obj=num_objects)
    estimator.cuda()
    estimator.load_state_dict(torch.load(opt.weights))
    output_format = [
        otypes.OBJECT_LABEL, otypes.QUATERNION, otypes.IMAGE_CROPPED,
        otypes.DEPTH_POINTS_MASKED_AND_INDEXES
    ]
    estimator.eval()
    with torch.no_grad():
        with std_out_err_redirect_tqdm() as orig_stdout:
            pbar = tqdm(opt.object_indices,
                        file=orig_stdout,
                        dynamic_ncols=True)
            for cls in pbar:
                preprocessors = [InplaneRotator(theta=np.pi / 2)]
                postprocessors = [ImageNormalizer()]
                if (opt.num_augmentations > 0):
                    preprocessors.extend([
                        YCBOcclusionAugmentor(opt.dataset_root),
                        ColorJitter(),
                    ])
                    postprocessors.append(PointShifter())

                dataset = YCBDataset(opt.dataset_root,
                                     mode=opt.dataset_mode,
                                     object_list=[cls],
                                     output_data=output_format,
                                     resample_on_error=False,
                                     add_syn_background=opt.add_syn_background,
                                     add_syn_noise=opt.add_syn_background,
                                     preprocessors=preprocessors,
                                     postprocessors=postprocessors,
                                     image_size=[640, 480],
                                     num_points=1000)

                classes = dataset.classes
                dataloader = torch.utils.data.DataLoader(
                    dataset,
                    batch_size=1,
                    shuffle=False,
                    num_workers=opt.workers)
                pbar.set_description('Featurizing {}'.format(classes[cls]))
                if (opt.num_augmentations > 0):
                    pbar_aug = trange(opt.start_index,
                                      opt.num_augmentations,
                                      file=orig_stdout,
                                      dynamic_ncols=True)
                else:
                    pbar_aug = [None]
                for aug_idx in pbar_aug:
                    pbar_save = tqdm(enumerate(dataloader),
                                     total=len(dataloader),
                                     file=orig_stdout,
                                     dynamic_ncols=True)
                    for i, data in pbar_save:
                        if (len(data) == 0 or len(data[0]) == 0):
                            continue
                        idx, quat, img, points, choose = data
                        data_path = dataset.image_list[i]
                        idx = idx - 1
                        img = Variable(img).cuda()
                        points = Variable(points).cuda()
                        choose = Variable(choose).cuda()
                        idx = Variable(idx).cuda()
                        assert cls == data_path[1]
                        assert cls - 1 == int(idx[0])
                        pred_r, pred_t, pred_c, emb, feat, feat_global = estimator.allFeatures(
                            img, points, choose, idx)
                        if (opt.num_augmentations > 0):
                            output_filename = '{0}/data/{1}_{2}_{3}_feat.npz'.format(
                                opt.output_folder, data_path[0], classes[cls],
                                aug_idx)
                        else:
                            output_filename = '{0}/data/{1}_{2}_feat.npz'.format(
                                opt.output_folder, data_path[0], classes[cls])
                        os.makedirs(os.path.dirname(output_filename),
                                    exist_ok=True)
                        how_max, which_max = torch.max(pred_c, 1)
                        max_feat = feat[0, :, which_max[0]].view(-1)
                        pred_t = pred_t[0, :]
                        pred_q = pred_r[0, :, [1, 2, 3, 0]]
                        pred_q /= torch.norm(pred_q, dim=1).view(-1, 1)
                        np.savez(
                            output_filename,
                            max_c=to_np(how_max),
                            max_q=to_np(pred_q[which_max.item()]),
                            max_t=to_np(pred_t[which_max.item()]),
                            pred_c=to_np(pred_c),
                            pred_q=to_np(pred_q),
                            pred_t=to_np(pred_t),
                            quat=to_np(quat)[0],
                            feat=to_np(max_feat),
                            feat_global=to_np(feat_global)[0])
Example #13
def toPoseCNNImage(img):
    return cv2.cvtColor(
        to_np(img).transpose((1, 2, 0)).astype(np.uint8), cv2.COLOR_BGR2RGB)
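
A one-line usage sketch, assuming the input is a CHW BGR uint8-range tensor
as the datasets above produce:

import torch

chw_bgr = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
rgb = toPoseCNNImage(chw_bgr)  # HWC uint8 RGB image
print(rgb.shape)  # (480, 640, 3)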
Example #14
def main():
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if not os.path.exists(opt.output_folder):
        os.makedirs(opt.output_folder)
    num_points = 1000  # number of points in the input point cloud
    num_objects = 21
    if (opt.object_indices is None):
        opt.object_indices = list(range(1, num_objects + 1))
    estimator = PoseCNNFeaturizer()

    output_format = [otypes.IMAGE, otypes.DEPTH_IMAGE]

    with std_out_err_redirect_tqdm() as orig_stdout:
        preprocessors = []
        postprocessors = []
        if (opt.num_augmentations > 0):
            preprocessors.extend([
                YCBOcclusionAugmentor(opt.dataset_root),
                ColorJitter(),
            ])
            postprocessors.append(PointShifter())

        dataset = YCBDataset(opt.dataset_root,
                             mode=opt.dataset_mode,
                             object_list=opt.object_indices,
                             output_data=output_format,
                             resample_on_error=False,
                             preprocessors=preprocessors,
                             postprocessors=postprocessors,
                             image_size=[640, 480],
                             num_points=1000)
        _, u_idxs = np.unique(list(zip(*dataset.image_list))[0],
                              return_index=True)
        dataset.image_list = np.array(dataset.image_list)[u_idxs].tolist()
        dataset.list_obj = np.array(dataset.list_obj)[u_idxs].tolist()

        classes = dataset.classes
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=opt.workers)

        if (opt.num_augmentations > 0):
            pbar_aug = trange(opt.start_index,
                              opt.num_augmentations,
                              file=orig_stdout,
                              dynamic_ncols=True)
        else:
            pbar_aug = [None]
        for aug_idx in pbar_aug:

            pbar_save = tqdm(enumerate(dataloader),
                             total=len(dataloader),
                             file=orig_stdout,
                             dynamic_ncols=True)
            for i, data in pbar_save:
                if (len(data) == 0 or len(data[0]) == 0):
                    continue
                img, depth = data
                img = toPoseCNNImage(img[0])
                depth = to_np(depth[0])
                data_path = dataset.image_list[i]

                path = '{}/data/{}-meta.mat'.format(dataset.dataset_root,
                                                    dataset.getPath(i))
                meta_data = scio.loadmat(path)
                try:
                    seg = estimator(img, depth, meta_data)
                except Exception as e:
                    print(e)
                    continue
                for pose_idx, cls in enumerate(seg['rois'][:, 1]):
                    cls = int(cls)
                    quat = getObjectGTQuaternion(meta_data, cls)
                    feat = seg['feats'][pose_idx]
                    fc6 = seg['fc6'][pose_idx]
                    if (opt.num_augmentations > 0):
                        output_filename = '{0}/data/{1}_{2}_{3}_feat.npz'.format(
                            opt.output_folder, data_path[0], classes[cls],
                            aug_idx)
                    else:
                        output_filename = '{0}/data/{1}_{2}_feat.npz'.format(
                            opt.output_folder, data_path[0], classes[cls])

                    os.makedirs(os.path.dirname(output_filename),
                                exist_ok=True)
                    np.savez(output_filename, quat=quat, feat=feat, fc6=fc6)