Example No. 1
  q = Quaternion()
  q.setToRandom()
  R_gt = q.toRot().R

  print "q True: ", q.q, np.sqrt((q.q**2).sum())
  print R_gt

  path = ["../data/middle_cRmf.csv", "../data/left_cRmf.csv"]
  if path is None:
    vMFs_A = [vMF(np.array([1.,0.,0.]), 100.), 
        vMF(np.array([0.,1.,0.]), 1000.),
        vMF(np.array([0.,0.,1.]), 10000.)]
    vMFs_B = [vMF(R_gt.dot(np.array([1.,0.,0.])), 100.),
        vMF(R_gt.dot(np.array([0.,1.,0.])), 1000.), 
        vMF(R_gt.dot(np.array([0.,0.,1.])), 10000.)]
    vMFMM_A = vMFMM(np.array([0.3, 0.3, 0.4]), vMFs_A)
    vMFMM_B = vMFMM(np.array([0.3, 0.3, 0.4]), vMFs_B)
  else:
    vMFMM_A = LoadvMFMM(path[0])
    vMFMM_B = LoadvMFMM(path[1])

  gd = GradientDescent(vMFMM_A, vMFMM_B)

  fig = plt.figure()
  plt.ylabel("Sum over angular deviation between closest vMF means.")
  for i in range(1):
    q.setToRandom()
    R0 = q.toRot().R
#    R0 = np.copy(R_gt)
#    R0 = np.eye(3)
    figm = mlab.figure(bgcolor=(1,1,1))
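
For reference, the 3D von Mises-Fisher density these mixtures are built from is f(x; mu, kappa) = kappa / (4*pi*sinh(kappa)) * exp(kappa * mu^T x). Below is a minimal self-contained sketch of evaluating it; the vmf_pdf helper is hypothetical and not part of the vMF class used above:

import numpy as np

def vmf_pdf(x, mu, kappa):
    # normalization constant of the vMF density on the unit sphere S^2
    C = kappa / (4. * np.pi * np.sinh(kappa))
    return C * np.exp(kappa * mu.dot(x))

mu = np.array([1., 0., 0.])
print(vmf_pdf(mu, mu, 100.))  # density at the mode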
Example No. 2
      "../data/boardUp_rgb"]


  path = ["../data/middleStraightOn_cRmf.csv",
      "../data/rightStraightOn_cRmf.csv"]
  pathRGBD = ["../data/middleStraightOn_rgb",
      "../data/rightStraightOn_rgb"]

  path = ["../data/middleL50_cRmf.csv", "../data/leftL50_cRmf.csv"]
  pathRGBD = ["../data/middle_rgb", "../data/left_rgb"]

  if path is None:
    vMFs_A = [vMF(np.array([1.,0.,0.]), 1.), vMF(np.array([0.,1.,0.]), 10.)]
    vMFs_B = [vMF(R_gt.dot(np.array([1.,0.,0.])), 1.),
        vMF(R_gt.dot(np.array([0.,1.,0.])), 10.)]
    vMFMM_A = vMFMM(np.array([0.5, 0.5]), vMFs_A)
    vMFMM_B = vMFMM(np.array([0.5, 0.5]), vMFs_B)
  else:
    vMFMM_A = LoadvMFMM(path[0])
    vMFMM_B = LoadvMFMM(path[1])

  rgbdA = RgbdFrame(540.) 
  rgbdA.load(pathRGBD[0])
  rgbdB = RgbdFrame(540.) 
  rgbdB.load(pathRGBD[1])

  tetras = s3.GetTetras(0)
  tetrahedra = s3.GetTetrahedra(0)

  maxIter = 200
  fig = plt.figure()
Example No. 3
def update_clutter_model(net, device_ids, compnet_type='vmf'):
    idir = 'background_images/'
    updated_models = torch.zeros((0, vc_num))
    if device_ids:
        updated_models = updated_models.cuda(device_ids[0])

    if compnet_type == 'vmf':
        occ_types = occ_types_vmf
    elif compnet_type == 'bernoulli':
        occ_types = occ_types_bern
    else:
        raise ValueError('unknown compnet_type: {}'.format(compnet_type))

    for j in range(len(occ_types)):
        occ_type = occ_types[j]
        with torch.no_grad():
            files = glob.glob(idir + '*' + occ_type + '.JPEG')
            clutter_feats = torch.zeros((0, vc_num))
            if device_ids:
                clutter_feats = clutter_feats.cuda(device_ids[0])
            for i in range(len(files)):
                file = files[i]
                img, _ = imgLoader(file, [[]],
                                   bool_resize_images=False,
                                   bool_square_images=False)
                if device_ids:
                    img = img.cuda(device_ids[0])

                feats = net.activation_layer(
                    net.conv1o1(
                        net.backbone(
                            img.reshape(1, img.shape[0], img.shape[1],
                                        img.shape[2]))))[0].transpose(1, 2)
                feats_reshape = torch.reshape(feats,
                                              [vc_num, -1]).transpose(0, 1)
                clutter_feats = torch.cat((clutter_feats, feats_reshape))

            # Total activation per feature vector, repeated across the vc_num
            # columns so each row of clutter_feats can be L1-normalized below.
            mean_activation = torch.reshape(
                torch.sum(clutter_feats, dim=1), (-1, 1)).repeat([1, vc_num])
            if compnet_type == 'bernoulli':
                boo = torch.sum(mean_activation, dim=1) != 0
                mean_vec = torch.mean(clutter_feats[boo] /
                                      mean_activation[boo],
                                      dim=0)
                updated_models = torch.cat(
                    (updated_models, mean_vec.reshape(1, -1)))
            else:
                if occ_type == '_white' or occ_type == '_noise':
                    # uniform backgrounds: a single mean over the normalized features
                    mean_vec = torch.mean(clutter_feats / mean_activation,
                                          dim=0)
                    updated_models = torch.cat(
                        (updated_models, mean_vec.reshape(1, -1)))
                else:
                    nc = 5
                    model = vMFMM(nc, 'k++')
                    model.fit(clutter_feats.cpu().numpy(), 30.0, max_it=150)
                    mean_vec = torch.zeros(nc, clutter_feats.shape[1]).cuda(
                        device_ids[0])
                    clust_cnt = torch.zeros(nc)
                    for v in range(model.p.shape[0]):
                        assign = np.argmax(model.p[v])
                        mean_vec[assign] += clutter_feats[v]
                        clust_cnt[assign] += 1
                    mean_vec = (mean_vec.t() /
                                clust_cnt.cuda(device_ids[0])).t()
                    updated_models = torch.cat((updated_models, mean_vec))

    return updated_models
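
A minimal usage sketch, assuming net is a CompositionalNet-style model exposing backbone, conv1o1, and activation_layer, and that vc_num and the occ_types_* lists are defined at module level as the snippet requires:

clutter_models = update_clutter_model(net, device_ids=[0], compnet_type='vmf')
print(clutter_models.shape)  # one row per clutter model, each of length vc_num

Note the design split in the loop above: uniform '_white'/'_noise' backgrounds contribute a single mean model, while every other background type is clustered into nc = 5 vMF components, so several clutter prototypes are retained per type.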
Example No. 4
    def get_clutter_model(self, compnet_type, vMF_kappa):
        idir = 'background_images_for_learning/'
        vc_num = self.conv1o1.weight.shape[0]

        updated_models = torch.zeros((0, vc_num))
        boo_gpu = (self.conv1o1.weight.device.type == 'cuda')
        gpu_id = self.conv1o1.weight.device.index
        if boo_gpu:
            updated_models = updated_models.cuda(gpu_id)

        if compnet_type == 'vmf':
            occ_types = occ_types_vmf
        elif compnet_type == 'bernoulli':
            occ_types = occ_types_bern
        else:
            raise ValueError('unknown compnet_type: {}'.format(compnet_type))

        for j in range(len(occ_types)):
            occ_type = occ_types[j]
            with torch.no_grad():
                files = glob.glob(idir + '*' + occ_type + '.JPEG')
                clutter_feats = torch.zeros((0, vc_num))
                if boo_gpu:
                    clutter_feats = clutter_feats.cuda(gpu_id)
                for i in range(len(files)):
                    file = files[i]
                    img, _ = imgLoader(file, [[]],
                                       bool_resize_images=False,
                                       bool_square_images=False)
                    if boo_gpu:
                        img = img.cuda(gpu_id)

                    feats = self.activation_layer(
                        self.conv1o1(
                            self.backbone(
                                img.reshape(1, img.shape[0], img.shape[1],
                                            img.shape[2]))))[0].transpose(
                                                1, 2)
                    feats_reshape = torch.reshape(feats,
                                                  [vc_num, -1]).transpose(
                                                      0, 1)
                    clutter_feats = torch.cat((clutter_feats, feats_reshape))

                mean_activation = torch.reshape(
                    torch.sum(clutter_feats, dim=1),
                    (-1, 1)).repeat([1, vc_num])
                if compnet_type == 'bernoulli':
                    boo = torch.sum(mean_activation, dim=1) != 0
                    mean_vec = torch.mean(clutter_feats[boo] /
                                          mean_activation[boo],
                                          dim=0)
                    updated_models = torch.cat(
                        (updated_models, mean_vec.reshape(1, -1)))
                else:
                    if (occ_type == '_white' or occ_type == '_noise'):
                        mean_vec = torch.mean(clutter_feats / mean_activation,
                                              dim=0)
                        updated_models = torch.cat(
                            (updated_models, mean_vec.reshape(1, -1)))
                    else:
                        nc = 5
                        model = vMFMM(nc, 'k++')
                        model.fit(clutter_feats.cpu().numpy(),
                                  vMF_kappa,
                                  max_it=150,
                                  tol=1e-10)
                        mean_vec = torch.zeros(
                            nc, clutter_feats.shape[1]).cuda(gpu_id)
                        mean_act = torch.zeros(
                            nc, clutter_feats.shape[1]).cuda(gpu_id)
                        clust_cnt = torch.zeros(nc)
                        for v in range(model.p.shape[0]):
                            assign = np.argmax(model.p[v])
                            mean_vec[assign] += clutter_feats[v]
                            clust_cnt[assign] += 1

                        # keep only non-empty clusters
                        n_nonempty = int((clust_cnt > 0).sum())
                        mean_vec_final = torch.zeros(
                            n_nonempty, clutter_feats.shape[1]).cuda(gpu_id)
                        cnt = 0
                        for v in range(mean_vec.shape[0]):
                            if clust_cnt[v] > 0:
                                mean_vec_final[cnt] = (
                                    mean_vec[v] / clust_cnt[v].cuda(gpu_id))
                                cnt += 1
                        updated_models = torch.cat(
                            (updated_models, mean_vec_final))

                        if torch.isnan(updated_models.min()):
                            print('ISNAN IN CLUTTER MODEL')

        return updated_models
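
A minimal usage sketch, assuming the method is defined on a CompNet-style model whose conv1o1 weights determine vc_num and the device, as in the snippet:

clutter_models = model.get_clutter_model('vmf', vMF_kappa=30.0)
print(clutter_models.shape)

Unlike update_clutter_model in Example No. 3, this variant drops empty clusters before concatenating, so the number of returned rows can vary with the data.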
Example No. 5
        plt.show()
#        fUfLoU2L2 = (1. + U - np.exp(2.*U) + U * np.exp(2.*U))/(2.*U**3*np.exp(U))
#        L2fUU2fLoU2L2 = (3+U-3*np.exp(2.*U) + U*np.exp(2.*U))/(2.*U*np.exp(U))

    s3 = S3Grid(0)
    print(s3.tetra_levels)
    R = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
    vMFs_A = [
        vMF(np.array([1., 0., 0.]), 10000.),
        vMF(np.array([0., 1., 0.]), 1000.)
    ]
    vMFs_B = [
        vMF(R.dot(np.array([1., 0., 0.])), 10000.),
        vMF(R.dot(np.array([0., 1., 0.])), 1000.)
    ]
    vMFMM_A = vMFMM(np.array([0.3, 0.7]), vMFs_A)
    vMFMM_B = vMFMM(np.array([0.3, 0.7]), vMFs_B)
    print(R)

    # switch ordering to make compatible with Eigen
    #  vertices = np.copy(s3.vertices)
    #  s3.vertices[:,3] = vertices[:,0]
    #  s3.vertices[:,0] = vertices[:,3]
    tetras = s3.GetTetras(0)
    #  print tetras.shape

    i = 0
    print "-- tetrahedron: "
    print s3.vertices[tetras[i, 0], :]
    print s3.vertices[tetras[i, 1], :]
    print s3.vertices[tetras[i, 2], :]
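
A minimal follow-up sketch, assuming the rows of s3.vertices are unit quaternions on S^3 (the Quaternion/S3Grid usage above suggests the grid tessellates rotation space):

import numpy as np
for k in range(3):
    v = s3.vertices[tetras[i, k], :]
    print(np.linalg.norm(v))  # expected to be ~1.0 for points on the unit sphere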
Example No. 6
feat_set = np.zeros((featDim, 0))
# loc_set = np.zeros((5, 0), dtype='int')
for ii in range(file_num):
    print('loading file {0}'.format(ii))
    fname = Dict['cache_path'] + '{}.pickle'.format(ii)
    with open(fname, 'rb') as fh:
        res, _, _ = pickle.load(fh)
        feat_set = np.column_stack((feat_set, res))
        # loc_set = np.column_stack((loc_set, iloc.astype('int')))

print('all feat_set')
feat_set = feat_set.T
print(feat_set.shape)

model = vMFMM(cluster_num,
              'k++',
              tmp_dir='/export/home/qliu24/tmp/vMFMM/PASCAL/')
model.fit(feat_set, 30, max_it=150)  # 30 is the vMF concentration kappa (cf. vMF_kappa in Example No. 4)

with open(Dict['Dictionary'], 'wb') as fh:
    pickle.dump([model.p, model.mu, model.pi], fh)
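
A minimal sketch of reading the dictionary back, matching the pickle layout written just above:

with open(Dict['Dictionary'], 'rb') as fh:
    p, mu, pi = pickle.load(fh)
print(mu.shape)  # one mean direction per cluster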

############## save examples ###################
# with open(Dict['file_list'], 'r') as fh:
#     image_path = [ff.strip() for ff in fh.readlines()]

# num = 50
# print('save top {0} images for each cluster'.format(num))
# example = [None for vc_i in range(cluster_num)]
# for vc_i in range(cluster_num):
#     patch_set = np.zeros(((Arf**2)*3, num)).astype('uint8')
Example No. 7
feat_set = np.zeros((featDim, 0))
# loc_set = np.zeros((5, 0), dtype='int')
for ii in range(file_num):
    print('loading file {0}'.format(ii))
    fname = Dict['cache_path'] + '{}.pickle'.format(ii)
    with open(fname, 'rb') as fh:
        res, _, _ = pickle.load(fh)
        feat_set = np.column_stack((feat_set, res))
        # loc_set = np.column_stack((loc_set, iloc.astype('int')))

print('all feat_set')
feat_set = feat_set.T
print(feat_set.shape)

model = vMFMM(cluster_num, 'k++', tmp_dir='/home/qing/tmp/vMFMM/PASCAL3D+')
model.fit(feat_set, 30, max_it=150)

with open(Dict['Dictionary'], 'wb') as fh:
    pickle.dump(model.mu, fh)

bins = 4
per_bin = cluster_num // bins + 1
for bb in range(bins):
    with open(Dict['Dictionary'].replace('.pickle', '_p{}.pickle'.format(bb)),
              'wb') as fh:
        pickle.dump(model.p[:, bb * per_bin:(bb + 1) * per_bin], fh)
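
A minimal sketch of reassembling the full posterior matrix from the binned files written above, using the same names and paths as the loop:

parts = []
for bb in range(bins):
    fname = Dict['Dictionary'].replace('.pickle', '_p{}.pickle'.format(bb))
    with open(fname, 'rb') as fh:
        parts.append(pickle.load(fh))
p_full = np.column_stack(parts)  # recovers model.p's original column order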

############## save examples ###################
# with open(Dict['file_list'], 'r') as fh:
#     image_path = [ff.strip() for ff in fh.readlines()]
Example No. 8
feat_set = np.zeros((featDim, 0))
# loc_set = np.zeros((5, 0), dtype='int')
for ii in range(file_num):
    print('loading file {0}'.format(ii))
    fname = Dict['cache_path_sub'] + '{}_set{}.pickle'.format(ii, subset_idx)
    # fname = Dict['cache_path']+'{}.pickle'.format(ii)
    with open(fname, 'rb') as fh:
        res, _ = pickle.load(fh)
        feat_set = np.column_stack((feat_set, res))
        # loc_set = np.column_stack((loc_set, iloc.astype('int')))

print('all feat_set')
feat_set = feat_set.T
print(feat_set.shape)

model = vMFMM(cluster_num, 'k++',
              tmp_dir='/export/home/qliu24/tmp/vMFMM/set{}/'.format(subset_idx))
model.fit(feat_set, 30, max_it=150)

with open(Dict['Dictionary_sub'].format(cluster_num, subset_idx), 'wb') as fh:
    pickle.dump([model.p, model.mu, model.pi], fh)

############## save examples ###################
# with open(Dict['file_list'], 'r') as fh:
#     image_path = [ff.strip() for ff in fh.readlines()]

# num = 50
# print('save top {0} images for each cluster'.format(num))
# example = [None for vc_i in range(cluster_num)]
# for vc_i in range(cluster_num):
#     patch_set = np.zeros(((Arf**2)*3, num)).astype('uint8')
#     sort_idx = np.argsort(-model.p[:,vc_i])[0:num]
Example No. 9
import pickle
import numpy as np

from config_PASCAL_VC import *
from vMFMM import *

cluster_num = 200
fname = '/export/home/bdeng4/features_untrained.npy'
feat_set = np.load(fname)

print('all feat_set')
print(feat_set.shape)

model = vMFMM(cluster_num, 'k++')
model.fit(feat_set, 300, max_it=1000)  # 300 is the vMF concentration kappa

savefile = '/export/home/qliu24/tmp/vMFMM/Dictionary_PASCAL3D+_pool4_all_5shots.pickle'
with open(savefile, 'wb') as fh:
    # protocol=2 keeps the pickle readable from Python 2
    pickle.dump([model.p, model.mu, model.pi], fh, protocol=2)

############## save examples ###################
# with open(Dict['file_list'], 'r') as fh:
#     image_path = [ff.strip() for ff in fh.readlines()]

# num = 50
# print('save top {0} images for each cluster'.format(num))
# example = [None for vc_i in range(cluster_num)]
# for vc_i in range(cluster_num):
#     patch_set = np.zeros(((Arf**2)*3, num)).astype('uint8')
#     sort_idx = np.argsort(-model.p[:,vc_i])[0:num]
#     for idx in range(num):
#         iloc = loc_set[:,sort_idx[idx]]
#         img = cv2.imread(os.path.join(Dict['file_dir'], image_path[iloc[0]]))
#         img = myresize(img, scale_size, 'short')