Example #1
    def to_ply(self, filepath):
        """
        Write mesh to PLY file.

        :param filepath: path to PLY file
        :type filepath: str
        """

        utils.write_ply(filepath, self.vertices.tolist(), self.faces.tolist())
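
For context, utils.write_ply is called here with a vertex list and a face list. A minimal sketch of an ASCII PLY writer with that shape of interface (a generic illustration, not the actual utils implementation):

import_sketch = None  # no extra imports needed

def write_ply_sketch(filepath, vertices, faces):
    # Minimal ASCII PLY: float x/y/z vertices, integer-index faces.
    with open(filepath, "w") as f:
        f.write("ply\nformat ascii 1.0\n")
        f.write("element vertex %d\n" % len(vertices))
        f.write("property float x\nproperty float y\nproperty float z\n")
        f.write("element face %d\n" % len(faces))
        f.write("property list uchar int vertex_indices\n")
        f.write("end_header\n")
        for v in vertices:
            f.write("%f %f %f\n" % (v[0], v[1], v[2]))
        for face in faces:
            f.write("%d %s\n" % (len(face), " ".join(str(i) for i in face)))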
Example #2
## Loop over the input images
count = 0
listImgs = glob("tmp_ims/*.png")
for image_path in listImgs:
	start_each_image = time.time()
	count = count + 1
	fig_name = ntpath.basename(image_path)
	outFile = data_out + "/" + fig_name[:-4]
	print('> Processing image: {} {} {}/{}'.format(image_path, fig_name, count, len(listImgs)))
	net.blobs['data'].reshape(1,3,trg_size,trg_size)
	im = caffe.io.load_image(image_path)
	## Transforming the image into the right format
	net.blobs['data'].data[...] = transformer.preprocess('data', im)
	## Forward pass into the CNN
	net_output = net.forward()
	## Getting the output
	features = np.hstack( [net.blobs[layer_name].data[0].flatten()] )
	## Writing the regressed 3DMM parameters
	np.savetxt(outFile + '.ply.alpha', features[0:99])
	np.savetxt(outFile + '.ply.beta', features[99:198])
	#################################
	## Mapping back the regressed 3DMM into the original
	## Basel Face Model (Shape)
	##################################
	S,T = utils.projectBackBFM(model,features)
	print('> Writing 3D file in: {}.ply, cost {:.2f}s.'.format(outFile, time.time() - start_each_image))
	utils.write_ply(outFile + '.ply', S, T, faces)

end_time = time.time()
print("Total cost: {0:.2f}s".format(end_time - start_time))
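
The .ply.alpha / .ply.beta files written above are plain-text vectors (99 shape and 99 texture coefficients in this snippet); reading them back, reusing the snippet's outFile path, is a one-liner each:

import numpy as np

alpha = np.loadtxt(outFile + '.ply.alpha')  # 99 shape coefficients
beta = np.loadtxt(outFile + '.ply.beta')    # 99 texture coefficients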
Example #3
import glob
import os

import numpy as np
import tqdm

import utils

PLY_DIR = "dataset/ply_files"

if __name__ == "__main__":
    os.makedirs(PLY_DIR, exist_ok=True)

    # Get all npy file paths
    files = glob.glob("{}/**/*.npy".format(utils.NPY_DIR), recursive=True)

    # For each file, generate its ply file
    for f in tqdm.tqdm(files):
        data = np.load(f)

        # File name without extension
        file_name = os.path.splitext(os.path.basename(f))[0]

        utils.write_ply(
            "{}/{}.ply".format(PLY_DIR, file_name),
            [data[:, 0], data[:, 1], data[:, 2],
                data[:, 3].astype(np.uint8), data[:, 4].astype(np.uint8),
                data[:, 5].astype(np.uint8), data[:, 6].astype(np.int32)],
            ["x", "y", "z", "red", "green", "blue", "label"],
        )
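
The call above suggests that utils.write_ply takes a list of per-field 1-D arrays plus a matching list of field names. Under that assumption, a toy invocation with synthetic data would look like:

import numpy as np

n = 5  # hypothetical toy cloud: 5 points with colour and an integer label
xyz = np.random.rand(n, 3).astype(np.float32)
rgb = np.random.randint(0, 256, (n, 3)).astype(np.uint8)
label = np.zeros(n, dtype=np.int32)

utils.write_ply(
    "toy.ply",
    [xyz[:, 0], xyz[:, 1], xyz[:, 2],
     rgb[:, 0], rgb[:, 1], rgb[:, 2], label],
    ["x", "y", "z", "red", "green", "blue", "label"],
)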
Example #4
    for voxel_center_index in progress_bar:
        neighbors_filter = neighbors_indexes[voxel_center_index]
        points_in_voxel = point_cloud[neighbors_filter]

        if points_in_voxel.shape[0] > 0:

            # Occupancy window centred on this voxel; its length must equal
            # args.N ** 3 for the reshape below to succeed.
            start = voxel_center_index - nb_voxel_in_voxel_grid // 2
            end = voxel_center_index + nb_voxel_in_voxel_grid // 2
            matrix = voxel_values[start:end]

            matrix = np.reshape(matrix, (1, 1, args.N, args.N, args.N))

            out = model(torch.from_numpy(matrix).float())
            label = torch.argmax(out).item()  # scalar class index

            predicted_labels[neighbors_filter] = label

    # Create ply
    # ---------------
    utils.write_ply(
        "{}/{}.ply".format(RESULTS_DIR,
                           args.data.split("/")[-1].split(".")[-2]),
        [
            point_cloud[:, 0], point_cloud[:, 1], point_cloud[:, 2],
            predicted_labels.astype(np.int32), point_cloud[:, 6].astype(
                np.int32)
        ],
        ["x", "y", "z", "predicted_labels", "true_labels"],
    )
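
The reshape above only works if the sliced window holds exactly args.N ** 3 occupancy values, so nb_voxel_in_voxel_grid is presumably the cell count of one N x N x N window. A sanity check making that inferred invariant explicit:

# Inferred from the (1, 1, N, N, N) reshape above; this is an assumption,
# not something the snippet states explicitly.
assert nb_voxel_in_voxel_grid == args.N ** 3, \
    "window length must match the (1, 1, N, N, N) reshape"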
Example #5
def run():

	###################################################
	##### Prepare images ##############################
	countIms = 0
	with open(fileList, "r") as ins, open(data_out + "/imList.txt","w") as outs:
		for image_path in ins:
			image_path = image_path.strip()  # strip the trailing newline, or cv2.imread gets a bad path
			if not image_path:
				continue
			print("> Prepare image "+image_path + ":")
			imname = ntpath.basename(image_path)
			# strip the file extension from the basename
			imname = imname.split(imname.split('.')[-1])[0][0:-1]
			img = cv2.imread(image_path)
			## If we have input landmarks
			if len(landmarkDir) > 0:
				lms = np.loadtxt(landmarkDir + '/' + imname + '.pts')
				img2 = cv2.copyMakeBorder(img,0,0,0,0,cv2.BORDER_REPLICATE)
				nLM = lms.shape[0]
				for i in range(0, nLM):
					cv2.circle(img2, (int(lms[i, 0]), int(lms[i, 1])), 5, (255, 0, 0))  # cv2 needs int pixel coords
				img, lms = utils.cropByInputLM(img, lms, img2)
			else:
				dlib_img = io.imread(image_path)
				img2 = cv2.copyMakeBorder(img,0,0,0,0,cv2.BORDER_REPLICATE)
				dets = detector(img, 1)
				print(">     Number of faces detected: {}".format(len(dets)))
				if len(dets) == 0:
					print('> Could not detect the face, skipping the image...' + image_path)
					continue
				if len(dets) > 1:
					print("> Process only the first detected face!")
				detected_face = dets[0]
				## If we are using landmarks to crop
				shape = predictor(dlib_img, detected_face)
				nLM = shape.num_parts
				for i in range(0,nLM):
					cv2.circle(img2, (shape.part(i).x, shape.part(i).y), 5, (255,0,0))
				img, lms = utils.cropByLM(img, shape, img2)
			cv2.imwrite(data_out + "/imgs/"+imname+"_detect.png",img2)

			lms = lms * 500.0/img.shape[0]
			with open(data_out + "/imgs/" + imname + ".pts", "w") as fileout:
				for i in range(0, lms.shape[0]):
					fileout.write("%f %f\n" % (lms[i, 0], lms[i, 1]))
			img = cv2.resize(img,(500, 500))
			cv2.imwrite(data_out + "/imgs/"+imname+ ".png",img)
			outs.write("%s\n" % (data_out + "/imgs/"+imname+ ".png"))
			countIms = countIms + 1

	###################################################
	##### Shape fitting ############################## 
	# load net
	MainModel = imp.load_source('MainModel', "../CNN/shape_model.py")
	net = torch.load(model_path)
	net.eval()

	mean0 = np.load(mean_path, encoding='latin1')
	mean = mean0['arr_0']
	net.cuda()

	print('> CNN Model loaded to regress 3D Shape and Texture!')
	model = scipy.io.loadmat(BFM_path,squeeze_me=True,struct_as_record=False)
	model = model["BFM"]
	faces = model.faces-1
	print('> Loaded the Basel Face Model to write the 3D output!')
	## Loop over the input images
	count = 0
	with open(data_out + "/imList.txt", "r") as ins:
		for image_path in ins:
			if len(image_path) < 3:
				continue
			image_path = image_path.strip()
			count = count + 1
			fig_name = ntpath.basename(image_path)
			outFile = data_out + "/shape/" + fig_name[:-4]
			print('> Processing image: ' + image_path)
			im = cv2.imread(image_path)
			im = cv2.resize(im, (224, 224)).astype(float).transpose((2,0,1))
			im = im - mean
			#im = im/255
			im = Variable(torch.from_numpy(im).unsqueeze(0).float().cuda())
			features = net(im).data.cpu().numpy()
			## Writing the regressed 3DMM parameters
			np.savetxt(outFile + '.ply.alpha', features[0,0:99])
			S,T = utils.projectBackBFM(model,features[0,:])
			print('> Writing 3D file in: ', outFile + '.ply')
			utils.write_ply(outFile + '.ply', S, T, faces)

	##################################################
	##### Bump map regression ########################
	print("Regress bump maps")
	bumpMapRegressor.estimateBump(bumpModel_path, data_out + "/imList.txt", data_out + "/bump/")
	##################################################
	##### Recover the 3D models ##################
	print("Recover the 3D models")
	cmd = ("./TestBump -batch {0}/imList.txt {0}/3D/ {0}/shape {0}/bump {0}/bump "
	       "../3DMM_model/BaselFaceModel_mod.h5 ../dlib_model/shape_predictor_68_face_landmarks.dat "
	       "{0}/imgs {0}/imgs/ 1").format(data_out)
	print(cmd)
	os.system(cmd)
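
As an aside, os.system with one hand-concatenated string breaks if data_out contains spaces. A sketch of the same invocation with subprocess.run and an argument list (assuming the TestBump binary accepts the arguments in this order, as the string above suggests):

import subprocess

subprocess.run([
    "./TestBump", "-batch",
    data_out + "/imList.txt", data_out + "/3D/",
    data_out + "/shape", data_out + "/bump", data_out + "/bump",
    "../3DMM_model/BaselFaceModel_mod.h5",
    "../dlib_model/shape_predictor_68_face_landmarks.dat",
    data_out + "/imgs", data_out + "/imgs/", "1",
], check=True)  # raises if TestBump exits non-zero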
Example #6
    def process(self):
        for path in self.processed_paths:
            os.makedirs(path, exist_ok=True)  # tolerate re-runs; processed clouds are skipped below
        for pc_path in glob.glob(os.path.join(self.raw_paths[0], '*.txt')):
            print('Processing {} ...'.format(pc_path))
            cloud_name = pc_path.split('/')[-1][:-4]
            if os.path.exists(os.path.join(self.processed_paths[1], cloud_name + '_KDTree.pkl')):
                continue
            pc = self._load_cloud(pc_path)
            label_path = pc_path[:-4] + '.labels'
            if os.path.exists(label_path):
                labels = self._load_label(label_path)
                org_ply_path = os.path.join(self.processed_paths[0], cloud_name + '.ply')
                # Subsample the training set cloud to the same resolution 0.01 as the test set
                xyz, rgb, labels = self._grid_sub_sampling(pc[:, :3].astype(np.float32),
                                                           pc[:, 4:7].astype(np.uint8),
                                                           labels, grid_size=0.01)
                labels = np.squeeze(labels)
                # save sub-sampled original cloud
                write_ply(org_ply_path, [xyz, rgb, labels], ['x', 'y', 'z', 'r', 'g', 'b', 'class'])

                # save sub_cloud and KDTree file
                sub_xyz, sub_rgb, sub_labels = self._grid_sub_sampling(xyz, rgb, labels, grid_size=self.grid_size)
                sub_rgb = sub_rgb / 255.
                sub_labels = np.squeeze(sub_labels)
                sub_ply_file = os.path.join(self.processed_paths[1], cloud_name + '.ply')
                write_ply(sub_ply_file, [sub_xyz, sub_rgb, sub_labels], ['x', 'y', 'z', 'r', 'g', 'b', 'class'])

                search_tree = KDTree(sub_xyz, leaf_size=50)
                kd_tree_file = os.path.join(self.processed_paths[1], cloud_name + '_KDTree.pkl')
                with open(kd_tree_file, 'wb') as f:
                    pickle.dump(search_tree, f)

                proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
                proj_idx = proj_idx.astype(np.int32)
                proj_file = os.path.join(self.processed_paths[1], cloud_name + '_proj.pkl')
                with open(proj_file, 'wb') as f:
                    pickle.dump([proj_idx, labels], f)

            else:
                org_ply_path = os.path.join(self.processed_paths[0], cloud_name + '.ply')
                write_ply(org_ply_path, [pc[:, :3].astype(np.float32), pc[:, 4:7].astype(np.uint8)],
                          ['x', 'y', 'z', 'r', 'g', 'b'])

                sub_xyz, sub_rgb = self._grid_sub_sampling(pc[:, :3].astype(np.float32),
                                                           pc[:, 4:7].astype(np.uint8),
                                                           grid_size=self.grid_size)

                sub_rgb = sub_rgb / 255.
                sub_ply_file = os.path.join(self.processed_paths[1], cloud_name + '.ply')
                write_ply(sub_ply_file, [sub_xyz, sub_rgb], ['x', 'y', 'z', 'r', 'g', 'b'])

                search_tree = KDTree(sub_xyz, leaf_size=50)
                kd_tree_file = os.path.join(self.processed_paths[1], cloud_name + '_KDTree.pkl')
                with open(kd_tree_file, 'wb') as f:
                    pickle.dump(search_tree, f)

                labels = np.zeros(pc.shape[0], dtype=np.uint8)
                proj_idx = np.squeeze(search_tree.query(pc[:, :3].astype(np.float32), return_distance=False))
                proj_idx = proj_idx.astype(np.int32)
                proj_file = os.path.join(self.processed_paths[1], cloud_name + '_proj.pkl')
                with open(proj_file, 'wb') as f:
                    pickle.dump([proj_idx, labels], f)
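
The _proj.pkl files pair every original point with its nearest sub-sampled point; that is how predictions on the sub-cloud are lifted back to full resolution at test time (Example #9 does exactly this). A minimal read-back sketch, with sub_preds as a hypothetical per-sub-point prediction array:

import pickle
import numpy as np

with open(proj_file, 'rb') as f:   # proj_file as written above
    proj_idx, labels = pickle.load(f)

# sub_preds is hypothetical: one predicted label per sub-sampled point.
sub_preds = np.zeros(proj_idx.max() + 1, dtype=np.int32)
full_preds = sub_preds[proj_idx]   # lift predictions to the full cloud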
Example #7
def cnn_3dmm( img, outputPath ):
    """
    Regress 3DMM shape and texture parameters from an image with the
    3DMM-CNN and write the reconstruction to a PLY file.
    """
    # load net
    try:
        caffe.set_mode_gpu()
        caffe.set_device(GPU_ID)
    except Exception as ex:
        print('> Could not setup Caffe in GPU ' + str(GPU_ID) + ' - Error: ' + str(ex))
        print('> Reverting into CPU mode')
        caffe.set_mode_cpu()

    ## Opening mean average image
    with open(FLAGS.mean_path, "rb") as f:
        proto_data = f.read()
    a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
    mean  = caffe.io.blobproto_to_array(a)[0]
    ## Loading the CNN
    net = caffe.Classifier(FLAGS.deploy_path, FLAGS.model_path)

    ## Setting up the right transformer for an input image
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2,0,1))
    transformer.set_channel_swap('data', (2,1,0))
    transformer.set_raw_scale('data', 255.0)
    transformer.set_mean('data',mean)
    print('> CNN Model loaded to regress 3D Shape and Texture!')
    ## Loading the Basel Face Model to write the 3D output
    model = scipy.io.loadmat(FLAGS.BFM_path,squeeze_me=True,struct_as_record=False)
    model = model["BFM"]
    faces = model.faces-1
    print('> Loaded the Basel Face Model to write the 3D output!')

    net.blobs['data'].reshape(1,3,trg_size,trg_size)
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # im = img / 255
    imname = ntpath.basename(FLAGS.imagePath)
    imname = imname.split(imname.split('.')[-1])[0][0:-1]  # image basename without extension

    im = caffe.io.load_image(os.path.join(FLAGS.tmp_ims, imname + '.png'))

    ## Transforming the image into the right format
    net.blobs['data'].data[...] = transformer.preprocess('data', im)
    ## Forward pass into the CNN
    net_output = net.forward()
    ## Getting the output
    features = np.hstack( [net.blobs[layer_name].data[0].flatten()] )

    ## Writing the regressed 3DMM parameters
    np.savetxt(os.path.join( outputPath, imname+ '.ply.alpha'), features[0:99])
    np.savetxt(os.path.join( outputPath, imname + '.ply.beta'), features[99:198])

    #################################
    ## Mapping back the regressed 3DMM into the original
    ## Basel Face Model (Shape)
    ##################################
    S,T = utils.projectBackBFM(model, features)
    print('> Writing 3D file in: ' + os.path.join(outputPath, imname + '.ply'))
    utils.write_ply(os.path.join( outputPath, imname+ '.ply'), S, T, faces)
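
For reference, caffe.io.Transformer applies its configured steps in a fixed order (transpose, channel swap, raw scale, mean subtraction). Ignoring the resize to the net input size, the preprocessing above is roughly equivalent to this numpy sketch, where im is the HxWx3 RGB float image in [0, 1] returned by caffe.io.load_image:

import numpy as np

def preprocess_sketch(im, mean):
    # Rough numpy equivalent of transformer.preprocess('data', im):
    x = im.transpose(2, 0, 1)     # HWC -> CHW           (set_transpose)
    x = x[[2, 1, 0], :, :]        # RGB -> BGR           (set_channel_swap)
    x = x * 255.0                 # [0, 1] -> [0, 255]   (set_raw_scale)
    x = x - mean                  # subtract mean image  (set_mean)
    return x.astype(np.float32)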
Example #8
    verts = pc_np[mask_np.reshape(-1), :]
    normal = normal.cpu().squeeze(0).view(
        3, -1).t().numpy()[mask_np.reshape(-1), :]

    props = [
        verts[:, 0], verts[:, 1], verts[:, 2], normal[:, 0], normal[:, 1],
        normal[:, 2]
    ]
    prop_names = ['x', 'y', 'z', 'nx', 'ny', 'nz']
    prop_types = ['float32'] * 6

    # has albedo map
    if albedo_path is not None:
        albedo = albedo_np.reshape(-1, 3)[mask_np.reshape(-1), :]
        props += [albedo[:, 0], albedo[:, 1], albedo[:, 2]]
        prop_names += ['albedo_r', 'albedo_g', 'albedo_b']
        prop_types += ['float32'] * 3

    # has roughness map
    if rough_path is not None:
        rough = rough_np.reshape(-1)[mask_np.reshape(-1)]
        props += [rough]
        prop_names += ['rough_r']
        prop_types += ['float32']

    write_ply(output_path,
              props,
              prop_names=prop_names,
              prop_types=prop_types,
              faces=faces)
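
Keeping props, prop_names and prop_types as parallel lists lets optional channels be appended consistently. Assuming that write_ply signature, and assuming the faces argument may be omitted for a pure point cloud, a toy call with geometry and normals only:

import numpy as np

# Hypothetical 4-point cloud with unit normals along +z.
verts = np.random.rand(4, 3).astype(np.float32)
normal = np.tile(np.array([0.0, 0.0, 1.0], dtype=np.float32), (4, 1))

write_ply('toy_normals.ply',
          [verts[:, 0], verts[:, 1], verts[:, 2],
           normal[:, 0], normal[:, 1], normal[:, 2]],
          prop_names=['x', 'y', 'z', 'nx', 'ny', 'nz'],
          prop_types=['float32'] * 6)  # faces omitted: assumed optional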
Example #9
    def test(self, num_votes=100):
        logging.info('Test {} on {} ...'.format(self.cfg.model_name,
                                                self.cfg.dataset))
        test_smooth = 0.98
        saving_path = 'results/Semantic3D/predictions'
        os.makedirs(saving_path, exist_ok=True)

        # load model checkpoints
        self.model.load(
            'checkpoints/PointConvBig_on_Semantic3D_bs_8_epochs_100_big_crf.ckpt'
        )
        self.model.to(self.device)
        self.model.eval()

        epoch = 0
        last_min = -0.5
        while last_min < num_votes:
            # test one epoch
            with Ctq(self.dataset.val_loader) as tq_loader:
                for i, data in enumerate(tq_loader):
                    tq_loader.set_description('Evaluation')
                    # model inference
                    data = data.to(self.device)
                    with torch.no_grad():
                        probs = F.softmax(self.model(data),
                                          dim=-1)  # get pred probs

                    # running means for each epoch on Test set
                    point_idx = data.point_idx.cpu().numpy()  # the point idx
                    cloud_idx = data.cloud_idx.cpu().numpy()  # the cloud idx
                    probs = probs.reshape(
                        self.cfg.batch_size, -1,
                        self.cfg.num_classes).cpu().numpy()  # [B, N, C]
                    for b in range(
                            self.cfg.batch_size):  # for each sample in batch
                        prob = probs[b, :, :]  # [N, C]
                        p_idx = point_idx[b, :]  # [N]
                        c_idx = cloud_idx[b][0]  # int
                        self.test_probs[c_idx][p_idx] = test_smooth * self.test_probs[c_idx][p_idx] \
                                                        + (1 - test_smooth) * prob  # running means

            # after each epoch
            new_min = np.min(self.dataset.val_set.min_possibility)
            print('Epoch {:3d} end, current min possibility = {:.2f}'.format(
                epoch, new_min))
            if last_min + 4 < new_min:
                print('Test procedure done, saving predicted clouds ...')
                last_min = new_min
                # projection prediction to original point cloud
                t1 = time.time()
                for i, file in enumerate(self.dataset.val_set.val_files):
                    proj_idx = self.dataset.val_set.test_proj[i]  # projection indices for this cloud
                    probs = self.test_probs[i][proj_idx, :]  # same shape as proj_idx
                    # shift [0..7] to [1..8]: label 0 is reserved for unlabeled points
                    preds = np.argmax(probs, axis=1).astype(np.uint8) + 1
                    # saving prediction results
                    cloud_name = file.split('/')[-1]
                    # ascii_name = os.path.join(saving_path, self.dataset.test_set.ascii_files[cloud_name])
                    # np.savetxt(ascii_name, preds, fmt='%d')
                    # print('Save {:s} succeed !'.format(ascii_name))
                    filename = os.path.join(saving_path, cloud_name)
                    write_ply(filename, [preds], ['pred'])
                    print('Saved {:s} successfully!'.format(filename))
                t2 = time.time()
                print('Done in {:.2f} s.'.format(t2 - t1))
                return
            epoch += 1
        return
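
The test_smooth update is an exponential moving average over repeated votes on the same points: each new prediction only nudges the stored probabilities by a factor of 1 - test_smooth. A tiny numeric sketch of the convergence:

import numpy as np

test_smooth = 0.98
running = np.zeros(3)                    # smoothed class probabilities for one point
new_prob = np.array([0.1, 0.7, 0.2])

# With the same prediction voted repeatedly, `running` approaches new_prob.
for _ in range(300):
    running = test_smooth * running + (1 - test_smooth) * new_prob
print(running)  # close to [0.1, 0.7, 0.2]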
Example #10
    def process(self):
        # Note: there is an extra character in line 180389 of Area_5/hallway_6/Annotations/ceiling_1.txt
        for path in self.processed_paths:
            os.makedirs(path)
        for i, path in enumerate(self.raw_paths):
            print("Processing Area_{}...".format(i + 1))
            with open(path) as f:
                anno_paths = [line.rstrip() for line in f]
            anno_paths = [
                os.path.join(self.raw_dir, self.data_dir, p)
                for p in anno_paths
            ]
            for anno_path in anno_paths:
                print('Processing {}...'.format(anno_path))
                elements = anno_path.split('/')
                filename = elements[-3] + '_' + elements[-2]
                data_list = []
                for f in glob.glob(os.path.join(anno_path, '*.txt')):
                    print('Collecting {}...'.format(f))
                    label = os.path.basename(f).split('_')[0]
                    if label not in CLASS_NAMES:
                        label = 'clutter'
                    # cls_points = np.loadtxt(f)
                    cls_points = pd.read_csv(
                        f, header=None, delim_whitespace=True
                    ).values  # pandas for faster reading
                    cls_labels = np.full((cls_points.shape[0], 1),
                                         CLASS_NAMES[label],
                                         dtype=np.int32)
                    data_list.append(
                        np.concatenate([cls_points, cls_labels],
                                       axis=1))  # Nx7

                points_labels = np.concatenate(data_list, axis=0)

                xyz_min = np.amin(points_labels, axis=0)[0:3]
                points_labels[:, 0:3] -= xyz_min  # shift so the cloud starts at the origin
                xyz = points_labels[:, 0:3].astype(np.float32)
                rgb = points_labels[:, 3:6].astype(np.uint8)
                labels = points_labels[:, 6].astype(np.uint8)

                org_ply_file = os.path.join(self.processed_paths[0],
                                            filename + '.ply')
                write_ply(org_ply_file, [xyz, rgb, labels],
                          ['x', 'y', 'z', 'r', 'g', 'b', 'class'])

                # save sub_cloud and KDTree files
                sub_xyz, sub_rgb, sub_labels = self._grid_sub_sampling(
                    xyz, rgb, labels, self.grid_size)
                sub_rgb = sub_rgb / 255.

                sub_ply_file = os.path.join(self.processed_paths[1],
                                            filename + '.ply')
                write_ply(sub_ply_file, [sub_xyz, sub_rgb, sub_labels],
                          ['x', 'y', 'z', 'r', 'g', 'b', 'class'])

                search_tree = KDTree(sub_xyz)
                kd_tree_file = os.path.join(self.processed_paths[1],
                                            filename + '_KDTree.pkl')
                with open(kd_tree_file, 'wb') as f:
                    pickle.dump(search_tree, f)

                proj_idx = np.squeeze(
                    search_tree.query(xyz, return_distance=False))
                proj_idx = proj_idx.astype(np.int32)
                proj_file = os.path.join(self.processed_paths[1],
                                         filename + '_proj.pkl')
                with open(proj_file, 'wb') as f:
                    pickle.dump([proj_idx, labels], f)
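
The snippet assumes a CLASS_NAMES mapping from annotation-file prefixes to integer labels; for S3DIS the usual 13-class set looks like this (an assumption here, and the ordering may differ between codebases, so check the repo's own definition):

# Typical S3DIS label mapping (assumed; not shown in the snippet above).
CLASS_NAMES = {name: i for i, name in enumerate([
    'ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',
    'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter',
])}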